Example #1
def test_logreg():
    # xs: (C, N), ys: (N,)
    xs = np.array([[0.52, 0.88, 0.52, 0.74], [1.12, -1.08, 0.06, -2.49],
                   [0.77, 0.15, -1.3, 1.39]])
    ys = np.array([1, 1, 0, 1])
    max_iter = 10
    alpha = 0.1
    thetas = np.array([0., 0., 0.])

    np_logreg = NumpyLogreg(xs, ys, thetas)

    C, N = ng.make_axis(length=3), ng.make_axis(length=4)

    # input tensors
    xs_v = ng.placeholder((C, N))
    ys_v = ng.placeholder([N])
    alpha_v = ng.placeholder(())
    thetas_var = ng.variable([C], initial_value=thetas)

    # define ops
    ys_pred = ng.sigmoid(ng.dot(thetas_var, xs_v))
    log_likelihoods = ng.log(ys_pred) * ys_v + ng.log(1 - ys_pred) * (1 - ys_v)
    loss = -ng.sum(log_likelihoods, reduction_axes=[N])
    grad_comp = ng.deriv(loss, thetas_var)
    weight_update = ng.sequential(
        [ng.assign(thetas_var, thetas_var - alpha_v * grad_comp), thetas_var])

    # transformer
    with ExecutorFactory() as ex:
        train_eval_func = ex.executor([grad_comp, loss, weight_update], xs_v,
                                      ys_v, alpha_v)

        # evaluate
        for i in range(max_iter):
            grad_np, loss_np, thetas_np = np_logreg.optimize(alpha)
            grad_ng, loss_ng, thetas_ng = train_eval_func(xs, ys, alpha)
            ng.testing.assert_allclose(loss_np, loss_ng, rtol=1e-05, atol=1e-05,
                                       transformer_overwrite=False)
            ng.testing.assert_allclose(grad_np, grad_ng, rtol=1e-05, atol=1e-05,
                                       transformer_overwrite=False)
            ng.testing.assert_allclose(thetas_np, thetas_ng, rtol=1e-05, atol=1e-05,
                                       transformer_overwrite=False)
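For reference, the NumpyLogreg baseline is not shown in this example. Under standard logistic regression math its optimize step would look roughly like the following sketch (class and method names are taken from the test above; the body is an assumption):

import numpy as np

class NumpyLogreg(object):
    # Plausible numpy baseline for test_logreg (a sketch, not the original).
    def __init__(self, xs, ys, thetas):
        self.xs, self.ys, self.thetas = xs, ys, thetas

    def optimize(self, alpha):
        # forward pass: p = sigmoid(thetas . xs), one probability per sample
        pred = 1.0 / (1.0 + np.exp(-np.dot(self.thetas, self.xs)))
        # negative log-likelihood, summed over samples
        loss = -np.sum(self.ys * np.log(pred) +
                       (1 - self.ys) * np.log(1 - pred))
        # gradient w.r.t. thetas: xs @ (pred - ys), shape (C,)
        grad = np.dot(self.xs, pred - self.ys)
        self.thetas = self.thetas - alpha * grad
        return grad, loss, self.thetas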
def test_linear_axes_nout():
    feature_axis = ng.make_axis(1, name='A')
    batch_axis = ng.make_axis(2, name='N')

    x = ng.placeholder([feature_axis, batch_axis])
    linear = Linear(nout=3, init=UniformInit(1.0, 1.0))(x)

    assert feature_axis not in linear.axes
    assert batch_axis in linear.axes
    assert linear.axes.batch_axis().length == 2
    assert linear.axes.sample_axes().lengths == (3,)

    def baseline_derivative(self, x):
        X = ng.placeholder([ng.make_axis(), ng.make_axis(name='N')])
        X.axes.set_shape(x.shape)
        with ExecutorFactory() as ex:
            activation_derivative = ex.derivative(self.neon_activation(X), X)

            # hack to get derivatives: ex.derivative returns the full Jacobian,
            # which for an elementwise activation is diagonal; a stride of
            # x.size + 1 over the raveled array extracts that diagonal
            result = activation_derivative(x)
            result = result.ravel()[0:result.size:(x.size + 1)]
            result = result.reshape(x.shape)

            return result
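The striding trick above relies on row-major layout: in a flattened n-by-n matrix, consecutive diagonal entries are n + 1 positions apart. A toy numpy check of that fact, independent of ngraph:

import numpy as np

J = np.diag([1.0, 2.0, 3.0])        # stand-in for a diagonal Jacobian, n = 3
diag = J.ravel()[0:J.size:(3 + 1)]  # stride of n + 1 walks the diagonal
assert np.array_equal(diag, [1.0, 2.0, 3.0])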
def test_linear_W_axes_nout():
    feature_axis = ng.make_axis(1, name='A')
    batch_axis = ng.make_axis(2, name='N')

    x = ng.placeholder([feature_axis, batch_axis])
    linear = Linear(nout=3, init=UniformInit(1.0, 1.0))
    linear(x)

    assert linear.W.axes.batch_axis() is None
    assert feature_axis in linear.W.axes
    assert len(linear.W.axes - feature_axis) == 1
    assert (linear.W.axes - feature_axis)[0].length == 3
Example #5
def test_dropout_bprop_single_comp(nin, batch_size, keep):
    # set inputs
    N = ng.make_axis(batch_size, name='N')
    F = ng.make_axis(nin, name='F')

    mul_factor = ng.placeholder(())
    inp = ng.placeholder([F, N])
    layer = Dropout(keep=keep)
    fprop = layer(inp * mul_factor)
    out_graph = ng.sum(fprop, out_axes=())
    bprop = ng.deriv(out_graph, mul_factor)

    # create data
    x = np.random.uniform(size=(nin, batch_size))

    # evaluate
    with ExecutorFactory() as ex:
        comp = ex.executor([fprop, bprop, layer.mask], inp, mul_factor)
        fout, bout, mask = comp(x, 2)
        # Calculate derivative by hand and compare
        ng.testing.assert_allclose(bout, (x * mask[:, None]).sum(), rtol=1e-6)

def test_dilated_conv(dilation):
    """Test that the dilated convolution layer output matches expected. This test compares
    the maximum output value to an expected max output value. The expected value is computed
    based on the dilation parameter. The test also checks that the output size matches the
    expected size based on the dilaton parameter value."""
    image_size = 3
    batch_size = 1
    init_val = 0.1
    conv_size = 3
    pad = 3
    N_filters = 1
    image_channels = 3
    model = Sequential([
        Convolution((conv_size, conv_size, N_filters),
                    filter_init=ConstantInit(val=init_val),
                    padding=pad,
                    dilation=dilation)
    ])
    # Create a dummy all-ones input image
    X = np.ones(shape=(batch_size, 3, image_size, image_size))
    data = {'image': X, 'iteration': 1}
    data_size = OrderedDict([('N', batch_size), ('C', 3), ('H', image_size),
                             ('W', image_size)])
    ax = [
        ng.make_axis(length=data_size[k], name=k)
        for k in list(data_size.keys())
    ]
    p_axes = ng.make_axes(ax)
    named_inputs = {'image': ng.placeholder(p_axes)}
    outputs = model(named_inputs['image'])
    named_outputs = {outputs.name: outputs}
    with closing(ngt.make_transformer()) as transformer:
        m = make_bound_computation(transformer, named_outputs, named_inputs)
    output = list(m(data).values())[0]
    filter_size = dilation * (conv_size - 1) + 1  # expected dilated filter size
    # Compute the expected output size based on convolution parameters
    out_size = (image_size + 2 * pad - filter_size) + 1
    filt_tmp = np.zeros(filter_size)
    filt_tmp[0::dilation] = 1
    # max overlap between dilated filter and image (in 1-d)
    max_overlap = int(np.min([filter_size, image_size]))
    exp_max_output = init_val * image_channels * (np.sum(
        filt_tmp[0:max_overlap]))**2
    # Expected max output changes for different dilation parameter values
    assert int(10 * np.max(output)) == int(10 * exp_max_output), \
        ("Dilated conv max outputs do not match expected: "
         "{} != {}").format(np.max(output), exp_max_output)
    assert np.shape(output) == (batch_size, N_filters, out_size, out_size), \
        ("Dilated conv output is not expected size: "
         "{} != {}").format(np.shape(output), (batch_size, N_filters, out_size, out_size))
Example #7
def test_learning_policy_fixed_with_input():
    base_learning_rate = 0.1

    iteration = ng.placeholder((), dtype=np.dtype(np.uint32))
    lro = LearningRateOptimizer(learning_rate=base_learning_rate, iteration=iteration)

    with ExecutorFactory() as ex:
        fixed_learning_rate = ex.transformer.computation(lro.lrate, iteration)

        for iter_input in [10, 50, 90, 6, 15]:
            baseline_value = fixed_learning_rate(iter_input)

            ng.testing.assert_allclose(baseline_value, base_learning_rate, rtol=1e-6)

def test_tensor_slice():
    """
    Slicing a tensor should work like numpy.
    """
    input_axes = ng.make_axes(
        [ng.make_axis(10), ng.make_axis(20),
         ng.make_axis(5)])

    x = ng.placeholder(axes=input_axes)

    assert x[:5].axes.full_lengths == (5, 20, 5)
    assert x[:, 2:7].axes.full_lengths == (10, 5, 5)
    assert x[:5, :, :-1].axes.full_lengths == (5, 20, 4)

def test_tensor_size():
    n, m = 3, 4

    N = ng.make_axis(length=n)
    M = ng.make_axis(length=m)

    aaxes = ng.make_axes([N, M])
    x = ng.placeholder(aaxes)

    size_fun = ng.tensor_size(x)
    nptensor = np.arange(n * m).reshape(n, m)

    with executor(size_fun, x) as ex:
        assert ex(nptensor) == n * m
Example #10
    def make_placeholders(self):
        batch_axis = ng.make_axis(length=self.batch_size, name="N")
        time_axis = ng.make_axis(length=self.time_steps, name="REC")
        feature_axis = ng.make_axis(length=self.nfeatures, name="feature_axis")

        placeholders = {}
        for k in self.data_arrays.keys():
            if k in ('inp_txt', 'teacher_tgt'):
                p_axes = ng.make_axes([batch_axis, time_axis, feature_axis])
            else:
                p_axes = ng.make_axes([batch_axis, time_axis])
            placeholders[k] = ng.placeholder(p_axes)

        return placeholders
Example #11
def test_add_with_scalar():

    H = ng.make_axis(length=1, name='height')
    W = ng.make_axis(length=4, name='width')
    a = ng.placeholder(axes=[H, W])

    d = ng.add(a, -5)
    with executor(d, a) as _add:
        d_val = _add([10, 20, 30, 40])

        # compute reference through numpy
        d_val_ref = np.add(np.array([10, 20, 30, 40], dtype=np.float32).reshape(1, 4),
                           np.array([-5], dtype=np.float32))

    assert np.allclose(d_val[0], d_val_ref)
Example #12
def test_flatten_deriv_simplified():
    """
    Test derivative with dot and flatten
    """
    ax_N = ng.make_axis(length=3)
    ax_Y = ng.make_axis(length=2)

    x = ng.placeholder(ng.make_axes([ax_N]))
    w = ng.constant([5, 2], axes=ng.make_axes([ax_Y]))
    logits = ng.dot(x, w)
    cost = ng.sum(logits, reduction_axes=logits.axes)

    delta = 0.001
    u = rng.uniform(.1, 5.0, x.axes)
    check_derivative(cost, x, delta, u, atol=1e-2, rtol=1e-2)
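check_derivative is a test helper not shown here; conceptually it compares the symbolic gradient from ng.deriv against a central finite difference of the cost. A generic numpy sketch of that comparison (the helper's real implementation may differ):

import numpy as np

def check_derivative_fd(f, grad_analytic, x, delta=1e-3, atol=1e-2, rtol=1e-2):
    # Compare an analytic gradient against central finite differences.
    grad_fd = np.zeros_like(x)
    for i in np.ndindex(x.shape):
        x_plus, x_minus = x.copy(), x.copy()
        x_plus[i] += delta
        x_minus[i] -= delta
        grad_fd[i] = (f(x_plus) - f(x_minus)) / (2 * delta)
    np.testing.assert_allclose(grad_analytic, grad_fd, atol=atol, rtol=rtol)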
Example #13
def test_sum():

    H = ng.make_axis(length=2)
    W = ng.make_axis(length=2)
    H1 = ng.make_axis(length=1)
    W1 = ng.make_axis(length=4)

    input1 = ng.placeholder(axes=[H, W])
    input2 = ng.placeholder(axes=[H1, W1])

    # reduction sum along the first axis, H
    sum_op_1 = ng.sum(input1, reduction_axes=H)

    # sum elements across all axes
    sum_op_2 = ng.sum(input2)

    with ExecutorFactory() as ex:
        _sum = ex.executor(sum_op_1, input1)
        _sum_val = _sum([[1, 2], [3, 4]])
        assert np.array_equal(_sum_val, [4, 6])

        _sum = ex.executor(sum_op_2, input2)
        _sum_val = _sum([1, 2, 3, 4])
        assert np.array_equal(_sum_val, 10)
Example #14
def test_multiple_computation():
    H = ng.make_axis(length=1, name='height')
    W = ng.make_axis(length=1, name='width')
    a = ng.placeholder(axes=[H, W])
    b = ng.placeholder(axes=[H, W])

    _mul = ng.multiply(a, b)
    _add = ng.add(a, b)

    with ExecutorFactory() as ex:
        # Define computations
        _mul_computation = ex.executor(_mul, a, b)
        _mul_val = _mul_computation([10], [20])
        _add_computation = ex.executor(_add, a, b)
        _add_val = _add_computation([10], [20])

        # compute reference value
        _mul_ref = np.multiply(np.full([1, 1], 10, dtype=np.float32),
                               np.full([1, 1], 20, dtype=np.float32))
        _add_ref = np.add(np.full([1, 1], 10, dtype=np.float32),
                          np.full([1, 1], 20, dtype=np.float32))

        assert np.allclose(_add_val, _add_ref)
        assert np.allclose(_mul_val, _mul_ref)

def test_deconv():
    """
    basic test of deconv fprop.
    ngraph/tests/test_conv.py tests ng.deconvolution bprop
    """

    # filter params
    R, S = 5, 5
    fshape = (R, S, 1)
    strides = 2
    filter_val_nz = np.arange(1, R * S + 1).reshape(R, S)
    filter_val = np.zeros(fshape)
    filter_val[:, :, 0] = filter_val_nz

    deconv = Deconvolution(fshape,
                           filter_init=ConstantInit(filter_val),
                           strides=strides,
                           padding=0,
                           dilation=1)

    N = ng.make_axis(name='N', length=1)  # batch
    image_shape = (1, 8, 8)  # CHW
    image_axes = ng.make_axes(
        [ng.make_axis(name=nm, length=l) for nm, l in zip('CHW', image_shape)])
    image_axes |= N
    image = ng.placeholder(axes=image_axes)

    output = deconv(image)

    with closing(ngt.make_transformer()) as transformer:
        comp = transformer.add_computation(ng.computation(output, image))
        input_val = np.zeros(image_shape + (N.length, ), dtype=float)
        input_val[0, 0, 0] = 1
        input_val[0, 5, 5] = 1
        input_val[0, 7, 7] = 1
        result = comp(input_val)
        feature_map = np.squeeze(result)

        assert (feature_map[:5, :5] == filter_val_nz).all()

        # The spikes at (5, 5) and (7, 7) stamp two copies of the filter whose
        # corners overlap in one output cell, where values 25 and 1 sum to 26
        result2 = filter_val_nz.copy()
        result2[-1, -1] = 26
        assert (feature_map[10:15, 10:15] == result2).all()

        result3 = filter_val_nz.copy()
        result3[0, 0] = 26
        assert (feature_map[-5:, -5:] == result3).all()
Example #16
def test_slice_deriv():
    C = ng.make_axis(length=2)
    D = ng.make_axis(length=3)

    x_np = np.array([[10, 20, 30], [1, 2, 3]], dtype='float32')
    x = ng.placeholder([C, D]).named('x')

    x_slice = x[0, :] + x[1, :]

    with ExecutorFactory() as ex:
        sym_deriv_fun = ex.derivative(x_slice, x)
        val_ng = sym_deriv_fun(x_np)
        val_np = np.zeros((D.length, C.length, D.length))
        for i in range(D.length):
            for j in range(C.length):
                val_np[i, j, i] = 1
        ng.testing.assert_allclose(val_ng, val_np)

def make_placeholder(input_size, sequence_length, batch_size, extra_axes=0):

    input_axis = ng.make_axis(name='features')
    recurrent_axis = ng.make_axis(name='REC')
    batch_axis = ng.make_axis(name='N')

    input_axes = ng.make_axes([input_axis, recurrent_axis, batch_axis])
    input_axes.set_shape((input_size, sequence_length, batch_size))
    input_axes = ng.make_axes([
        ng.make_axis(length=1, name='features_' + str(i))
        for i in range(extra_axes)
    ]) + input_axes

    input_placeholder = ng.placeholder(input_axes)
    input_value = rng.uniform(-0.01, 0.01, input_axes)

    return input_placeholder, input_value
Example #18
def test_cast_axis():
    """
    Test AxesCastOp
    """
    H = ng.make_axis(length=1, name='height')
    W = ng.make_axis(length=4, name='width')
    axes_input = [H, W]
    a = ng.placeholder(axes=axes_input)
    axes_output = ng.make_axes([ng.make_axis(name=ax.name + 'p', length=ax.length)
                                for ax in axes_input])

    b = ng.cast_axes(a, axes_output)

    with executor(b, a) as _cast_axis:
        a_val = np.array([10, 20, 30, 40], dtype=np.float32).reshape(1, 4)
        b_val = _cast_axis(a_val)

        b_val_ref = a_val
        assert np.allclose(b_val, b_val_ref)
Example #19
def test_learning_policy_step():
    base_learning_rate = 1.0
    drop_factor = 0.1
    step = 20

    lr_params = {'name': 'step',
                 'base_lr': base_learning_rate,
                 'gamma': drop_factor,
                 'step': step}

    iteration = ng.placeholder((), dtype=np.dtype(np.uint32))
    lro = LearningRateOptimizer(learning_rate=lr_params, iteration=iteration)

    with ExecutorFactory() as ex:
        stepped_learning_rate = ex.transformer.computation(lro.lrate, iteration)

        for iter_input in [10, 50, 90, 6, 15]:
            baseline_value = stepped_learning_rate(iter_input)
            reference_value = base_learning_rate * (drop_factor ** (iter_input // step))

            ng.testing.assert_allclose(baseline_value, reference_value, rtol=1e-5)
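Worked through for one of the iterations above: at iter_input = 50 with step = 20, the rate has dropped twice, so

expected = 1.0 * 0.1 ** (50 // 20)  # 50 // 20 == 2, so expected == 0.01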
Example #20
def make_convolution_placeholder(shape=None):
    """
    Create a placeholder op for inputs to a convolution layer

    Arguments:
        shape (tuple): The desired shape of the placeholder,
                       with axes in the order of C, D, H, W, N

    Returns:
        5-D placeholder op
    """

    H = ng.make_axis(name="H", docstring="Height")
    W = ng.make_axis(name="W", docstring="Width")
    D = ng.make_axis(name="D", docstring="Depth")
    C = ng.make_axis(name="C", docstring="Channel")

    x = ng.placeholder(axes=ng.make_axes([C, D, H, W, ax.N]))
    if shape is not None:
        x.axes.set_shape(shape)

    return x
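A usage sketch based on the docstring's axis ordering (the shape values are arbitrary examples):

# 5-D placeholder for a batch of 16 RGB 32x32 images (depth 1)
x = make_convolution_placeholder(shape=(3, 1, 32, 32, 16))  # C, D, H, W, N
assert x.axes.lengths == (3, 1, 32, 32, 16)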
Example #21
def test_dropout_inference(nin, batch_size):
    # set inputs
    N = ng.make_axis(batch_size, name='N')
    F = ng.make_axis(nin, name='F')

    inp = ng.placeholder([F, N])
    layer = Dropout(keep=0.5)
    with Layer.inference_mode_on():
        fprop = layer(inp)

    # create data
    x = np.random.uniform(size=(nin, batch_size))

    # evaluate
    with ExecutorFactory() as ex:
        comp = ex.executor(fprop, inp)
        out = comp(x)
        numpy_out = x * 0.5
        ng.testing.assert_allclose(out, numpy_out, atol=atol, rtol=rtol)
        out1 = out.copy()
        out2 = comp(x)
        ng.testing.assert_allclose(out1, out2, atol=atol, rtol=rtol)

def test_conv1d(transformer_factory, filter_width, num_filters, strides,
                padding, time_steps, feature_dimension, batch_size):

    dilation = 1  # reference conv does not support dilation

    F = ng.make_axis(name='F', length=feature_dimension)
    REC = ng.make_axis(name='REC', length=time_steps)
    N = ng.make_axis(name='N', length=batch_size)
    in_axes = ng.make_axes([F, REC, N])

    inputs = ng.placeholder(axes=in_axes)
    input_vals = np.random.randn(*in_axes.lengths)

    filter_init = GaussianInit()

    conv1d = Convolution((filter_width, num_filters),
                         filter_init,
                         strides=strides,
                         padding=padding,
                         dilation=dilation,
                         bias_init=None,
                         activation=Rectlin(),
                         batch_norm=None)

    result_op = conv1d(inputs, channel_axes='F', spatial_axes={'W': 'REC'})

    with closing(ngt.make_transformer()) as transformer:
        result_comp = transformer.add_computation(
            ng.computation(result_op, inputs))
        filter_vals = transformer.add_computation(ng.computation(
            conv1d.conv.W))()

        result_ng = result_comp(input_vals)
        result_np = np.squeeze(
            reference_conv1d(input_vals, filter_vals,
                             lambda x: np.maximum(0, x)))
        ng.testing.assert_allclose(result_ng, result_np)
Example #23
    def __init__(self, lr_params):
        self.compute_lr_op_creation = None

        if callable(lr_params):
            # If the argument is a function, set it as a callback, which allows
            # the user to define a custom policy. The function should create a
            # subgraph for computing the learning rate; a buffer containing the
            # current iteration number is passed to it as a parameter.
            self.compute_lr_op_creation = lr_params
        else:
            if isinstance(lr_params, numbers.Real):
                # If the argument is a real number, use the fixed policy with
                # the given value as base_lr
                lr_params = {'name': 'fixed', 'base_lr': lr_params}
            policies = lrp.lr_policies
            if lr_params['name'] not in policies:
                raise NotImplementedError('Unsupported learning rate policy: '
                                          '\nGiven: ' + lr_params['name'] +
                                          '\nSupported policies are: ' +
                                          str(policies.keys()))
            else:
                # Check that lr_params contains all parameters required by the
                # selected policy
                required_args = policies[lr_params['name']]['args']
                if all(x in lr_params for x in required_args):
                    self.compute_lr_op_creation = \
                        policies[lr_params['name']]['obj'](lr_params)
                else:
                    raise ValueError(
                        'Too few arguments passed to CommonSGDOptimizer'
                        '\nGiven: ' + str(lr_params.keys()) +
                        '\nExpected: ' + str(required_args))

        self._iteration_buffer = ng.placeholder(axes=(),
                                                dtype=np.dtype(np.uint32))
        self.compute_lr_op = self.compute_lr_op_creation(
            self.get_iter_buffer())
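The constructor therefore accepts three forms of lr_params; a minimal usage sketch of each (CommonSGDOptimizer is the class named in the error message above, and the callable form is assumed to build ngraph ops from the iteration placeholder):

# 1. A plain number: treated as the 'fixed' policy
opt_fixed = CommonSGDOptimizer(0.1)

# 2. A policy dict, as in test_learning_policy_step above
opt_step = CommonSGDOptimizer({'name': 'step', 'base_lr': 1.0,
                               'gamma': 0.1, 'step': 20})

# 3. A callable that builds the learning-rate subgraph itself
opt_custom = CommonSGDOptimizer(lambda iteration: 0.1 / (iteration + 1))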
def test_multiple_computations():
    """
    Create multiple computations for the same value.
    """
    C = ng.make_axis(length=2)
    D = ng.make_axis(length=3)

    x = ng.placeholder([C, D])

    x0_slice = x[0, :]
    x1_slice = x[1, :]

    y1 = x0_slice * 2 + x1_slice * 3

    x_np = np.array([[10, 20, 30], [1, 2, 3]], dtype='float32')
    y1_np = x_np[0, :] * 2 + x_np[1, :] * 3

    with ExecutorFactory() as ex:
        fs = [ex.executor(y1, x) for i in range(5)]
        vals_np = [y1_np for f in fs]
        vals = [f(x_np) for f in fs]
        ng.testing.assert_allclose(vals, vals_np)
Example #25
    def __init__(self, input_placeholder, output_size, RNN, bn_params):

        # Set up axes
        F, T, N = tuple(input_placeholder.axes)
        H = ng.make_axis(length=output_size, name="hidden")
        H2 = ng.make_axis(length=output_size, name="hidden_tmp")

        self.input_placeholder = input_placeholder

        # Make reference placeholder
        self.reference_input = ng.placeholder(axes=[H, T, N])

        # Create weight matrices
        w_rec_axes = ng.make_axes([H, H2])
        w_in_axes = ng.make_axes([H, F])
        self.W_rec = rng.uniform(-1, 1, w_rec_axes)
        self.W_in = rng.uniform(-1, 1, w_in_axes)
        self.W_id = np.eye(output_size).astype("float32")

        self.rnn_args = dict(nout=output_size,
                             init_inner=self.W_rec,
                             return_sequence=True,
                             activation=Tanh())

        self.reference_rnn = RNN(init=self.W_id, **self.rnn_args)
        self.rnn = RNN(init=self.W_in, batch_norm=True, **self.rnn_args)

        if self.has_gates:
            self.batch_norm_dict = self.rnn.batch_norm
        else:
            self.batch_norm_dict = {'gate': self.rnn.batch_norm}

        self.default_gate = list(self.batch_norm_dict.keys())[0]

        for bn in self.batch_norm_dict.values():
            bn.__dict__.update(bn_params)
Example #26
def test_reorder_spatial_toomany_spatial(CDHWN, axis_a):
    tensor = ng.placeholder(CDHWN + axis_a)
    with pytest.raises(IncompatibleAxesError):
        reorder_spatial_axes(tensor, "C", ("D", "H", "W"))
Example #27
def test_reorder_spatial_triple_spatial(CDHWN):
    # Reorder to NCWHD
    tensor = ng.placeholder(
        [CDHWN[-1], CDHWN[0], CDHWN[3], CDHWN[2], CDHWN[1]])
    new_axes = reorder_spatial_axes(tensor, "C", ("D", "H", "W")).axes
    assert new_axes == CDHWN
Example #28
def test_reorder_spatial_no_spatial(CDHWN):
    tensor = ng.placeholder([CDHWN[0], CDHWN[-1]])
    with pytest.raises(IncompatibleAxesError):
        reorder_spatial_axes(tensor, "C", ("D", "H", "W"))
Example #29
def test_reorder_spatial_double_spatial(CDHWN):
    # Reorder to NCWH
    tensor = ng.placeholder([CDHWN[-1], CDHWN[0], CDHWN[3], CDHWN[2]])
    new_axes = reorder_spatial_axes(tensor, "C", ("D", "H", "W")).axes
    assert new_axes == CDHWN
    assert new_axes[1].length == 1  # D has been added with length 1
Example #30
def test_reorder_spatial_no_channel(CDHWN):
    tensor = ng.placeholder(CDHWN[-2:])
    new_axes = reorder_spatial_axes(tensor, "C", ("D", "H", "W")).axes
    assert len(new_axes) == 5
    assert new_axes[0].name == 'C'
    assert new_axes[0].length == 1
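Taken together, Examples #26 through #30 pin down the behavior of reorder_spatial_axes: it normalizes a tensor's axes to canonical C, D, H, W, N order, inserts any missing channel or spatial axes with length 1, and raises IncompatibleAxesError when there are extra spatial axes or no spatial axes at all.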