Example #1
def test_gpu_send_and_recv(hetr_device):
    pytest.xfail(
        "GitHub issue: #2007, Unknown error - investigation is needed")
    # put x+1 on cpu numpy
    with ng.metadata(device='cpu'):
        x = ng.placeholder(())
        x_plus_one = x + 1
    # put x+2 on gpu numpy
    with ng.metadata(device='gpu'):
        x_plus_two = x_plus_one + 1

    with ExecutorFactory() as ex:
        computation = ex.executor(x_plus_two, x)
        for i in [10, 20, 30]:
            assert computation(i) == i + 2

    # put x+1 on gpu numpy
    with ng.metadata(device='gpu'):
        x = ng.placeholder(())
        x_plus_one = x + 1
    # put x+2 on cpu numpy
    with ng.metadata(device='cpu'):
        x_plus_two = x_plus_one + 1

    with ExecutorFactory() as ex:
        computation = ex.executor(x_plus_two, x)
        for i in [10, 20, 30]:
            assert computation(i) == i + 2
Example #2
def test_gpu_send_and_recv():
    # put x+1 on cpu numpy
    with ng.metadata(device='cpu'):
        x = ng.placeholder(())
        x_plus_one = x + 1
    # put x+2 on gpu numpy
    with ng.metadata(device='gpu'):
        x_plus_two = x_plus_one + 1

    with ExecutorFactory() as ex:
        computation = ex.executor(x_plus_two, x)
        for i in [10, 20, 30]:
            assert computation(i) == i + 2

    # put x+1 on gpu numpy
    with ng.metadata(device='gpu'):
        x = ng.placeholder(())
        x_plus_one = x + 1
    # put x+2 on cpu numpy
    with ng.metadata(device='cpu'):
        x_plus_two = x_plus_one + 1

    with ExecutorFactory() as ex:
        computation = ex.executor(x_plus_two, x)
        for i in [10, 20, 30]:
            assert computation(i) == i + 2
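
Both variants above rely on the same mechanism: ng.metadata(device=...) tags every op created inside the context, and the transformer inserts a send/recv pair wherever an edge connects ops with different device tags. A minimal sketch of that pattern, assuming ngraph.testing provides ExecutorFactory and that a 'gpu' transformer is available:

# Minimal sketch of the cross-device pattern used above; assumes
# `ngraph.testing` exposes ExecutorFactory and a 'gpu' transformer exists.
import ngraph as ng
from ngraph.testing import ExecutorFactory

with ng.metadata(device='cpu'):
    x = ng.placeholder(())   # x is placed on the cpu device
with ng.metadata(device='gpu'):
    y = x * 2                # this edge forces a cpu -> gpu send/recv

with ExecutorFactory() as ex:
    f = ex.executor(y, x)
    assert f(3) == 6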
Example #3
def test_LabelCrossEntropy():
    workspace.ResetWorkspace()
    batch = 8
    classes = 16
    y_shape = (batch, classes)
    t_shape = (batch, )
    y_values = np.random.uniform(0, 1, y_shape)
    t_values = np.random.randint(0, classes, t_shape)

    net = core.Net("net")
    Y = net.GivenTensorFill([], "Y", shape=y_shape, values=y_values)
    T = net.GivenTensorIntFill([], "T", shape=t_shape, values=t_values)
    net.LabelCrossEntropy([Y, T], "xent")

    # Execute via Caffe2
    workspace.RunNetOnce(net)

    # Import caffe2 network into ngraph
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)

    # Get handle
    f_ng = importer.get_op_handle("xent")

    # Execute
    with ExecutorFactory() as ex:
        f_result = ex.executor(f_ng)()

        assert(np.allclose(f_result, workspace.FetchBlob("xent"), equal_nan=False))
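
Most of the Caffe2 examples in this collection repeat the same recipe: build a net, run it once through Caffe2 for reference, import the NetDef into ngraph, fetch a handle to the output op, execute it, and compare. A condensed sketch of that recipe, assuming the same imports the examples use (caffe2.python core and workspace, C2Importer, ExecutorFactory, numpy as np):

# Condensed sketch of the recurring Caffe2-to-ngraph comparison recipe;
# imports are assumed to match the surrounding examples.
net = core.Net("net")
net.ConstantFill([], ["Y"], shape=[4], value=1.5, run_once=0, name="Y")

workspace.ResetWorkspace()
workspace.RunNetOnce(net)                           # reference run via Caffe2

importer = C2Importer()
importer.parse_net_def(net.Proto(), verbose=False)  # import the NetDef
f_ng = importer.get_op_handle("Y")                  # handle to the output op

with ExecutorFactory() as ex:
    f_result = ex.executor(f_ng)()                  # execute via ngraph
    assert np.allclose(f_result, workspace.FetchBlob("Y"))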
Example #4
def test_NHWC2NCHW():
    workspace.ResetWorkspace()

    # input shape is in NCHW format (converted to NHWC by the op below)
    shape = [2, 3, 4, 5]
    data1 = [float(i) for i in range(np.prod(shape))]

    net = core.Net("net")
    X = net.GivenTensorFill([], ["X"], shape=shape, values=data1, name="X")
    X.NCHW2NHWC([], ["Y"], name="Y")

    # Execute via Caffe2
    workspace.RunNetOnce(net)

    # Import caffe2 network into ngraph
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)

    # Get handle
    f_ng = importer.get_op_handle("Y")

    # Execute
    with ExecutorFactory() as ex:
        f_result = ex.executor(f_ng)()

        # compare Caffe2 and ngraph results
        assert (np.array_equal(f_result, workspace.FetchBlob("Y")))
Example #5
def test_variable_init(transformer_factory, C):
    w_init = np.random.rand(C.length)
    W = ng.variable(ng.make_axes([C]), initial_value=w_init)

    with ExecutorFactory() as ex:
        result = ex.executor(W)()
    ng.testing.assert_allclose(result, w_init)
Example #6
def test_multiple_computations(transformer_factory):
    """
    Create multiple computations for the same value.

    Args:
        transformer_factory:

    Returns:

    """
    C = ng.make_axis(length=2)
    D = ng.make_axis(length=3)

    x = ng.placeholder([C, D])

    x0_slice = x[0, :]
    x1_slice = x[1, :]

    y1 = x0_slice * 2 + x1_slice * 3

    x_np = np.array([[10, 20, 30], [1, 2, 3]], dtype='float32')
    y1_np = x_np[0, :] * 2 + x_np[1, :] * 3

    with ExecutorFactory() as ex:
        fs = [ex.executor(y1, x) for i in range(5)]
        vals_np = [y1_np for f in fs]
        vals = [f(x_np) for f in fs]
        # print(vals_np)
        # print(vals)
        ng.testing.assert_allclose(vals, vals_np)
Example #7
def test_dropout_train(nin, batch_size, keep, transformer_factory):

    # set inputs
    N = ng.make_axis(batch_size, name='N')
    F = ng.make_axis(nin, name='F')

    inp = ng.placeholder([F, N])
    layer = Dropout(keep=keep)
    fprop = layer(inp)

    # create data
    x = np.random.uniform(size=(nin, batch_size))

    # evaluate
    with ExecutorFactory() as ex:
        comp = ex.executor([fprop, layer.mask], inp)
        out, mask = comp(x)
        numpy_out = x * mask[:, None]
        ng.testing.assert_allclose(out, numpy_out, atol=atol, rtol=rtol)

        if keep < 1.0:
            out1, mask1 = out.copy(), mask.copy()
            out2, mask2 = comp(x)
            assert (out1 != out2).any()
            assert (mask1 != mask2).any()
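
The check out == x * mask works because the layer exposes the mask it applied. Under the common inverted-dropout convention (an assumption here, not verified against this Dropout implementation), the mask already folds in the 1/keep scaling, e.g.:

# Sketch of an inverted-dropout mask in numpy; the 1/keep scaling is an
# assumption about the convention, not this library's verified behavior.
import numpy as np

def inverted_dropout_mask(n, keep):
    keep_flags = np.random.uniform(size=n) < keep  # keep each unit w.p. `keep`
    return keep_flags.astype('float32') / keep     # scale survivors by 1/keep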
Example #8
def test_AveragedLoss():
    workspace.ResetWorkspace()
    shape = (32, )

    net = core.Net("net")
    X = net.GivenTensorFill([],
                            "Y",
                            shape=shape,
                            values=np.random.uniform(-1, 1, shape))
    X.AveragedLoss([], ["loss"])

    # Execute via Caffe2
    workspace.RunNetOnce(net)

    # Import caffe2 network into ngraph
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)

    # Get handle
    f_ng = importer.get_op_handle("loss")

    # Execute
    with ExecutorFactory() as ex:
        f_result = ex.executor(f_ng)()

        assert (np.allclose(f_result,
                            workspace.FetchBlob("loss"),
                            equal_nan=False))
Example #9
def test_maxpool():
    workspace.ResetWorkspace()

    # shape is in NCHW format
    # [[shape], kernel, stride] #TODO: add padding
    param_list = [[[1, 3, 10, 10], 2, 2], [[2, 3, 5, 5], 1, 1],
                  [[2, 2, 7, 7], 3, 2], [[8, 5, 8, 8], 4, 4]]

    for param_iter in param_list:
        shape, kernel, stride = param_iter
        data1 = [random.gauss(mu=0, sigma=10) for i in range(np.prod(shape))]

        net = core.Net("net")
        X = net.GivenTensorFill([], ["X"], shape=shape, values=data1, name="X")
        net.MaxPool(X, 'Y', kernel=kernel, stride=stride)

        # Execute via Caffe2
        workspace.RunNetOnce(net)

        # Import caffe2 network into ngraph
        importer = C2Importer()
        importer.parse_net_def(net.Proto(), verbose=False)

        # Get handle
        f_ng = importer.get_op_handle("Y")

        # Execute
        with ExecutorFactory() as ex:
            f_result = ex.executor(f_ng)()

            # compare Caffe2 and ngraph results
            assert (np.array_equal(f_result, workspace.FetchBlob("Y")))
Example #10
def test_sum():
    workspace.ResetWorkspace()

    shape = [2, 10]
    data1 = [random.gauss(mu=0, sigma=10) for i in range(np.prod(shape))]
    data2 = [random.gauss(mu=0, sigma=10) for i in range(np.prod(shape))]
    net = core.Net("net")
    X1 = net.GivenTensorFill([], "X1", shape=shape, values=data1, name="X1")
    X2 = net.GivenTensorFill([], "X2", shape=shape, values=data2, name="X2")
    net.Sum([X1, X2], ["Y"], name="Y")

    # Execute via Caffe2
    workspace.RunNetOnce(net)

    # Import caffe2 network into ngraph
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)

    # Get handle
    f_ng = importer.get_op_handle("Y")

    # Execute
    with ExecutorFactory() as ex:
        f_result = ex.executor(f_ng)()

        # compare Caffe2 and ngraph results
        assert (np.array_equal(f_result, workspace.FetchBlob("Y")))
Example #11
def test_constant():
    workspace.ResetWorkspace()

    shape = [10, 10]
    val = random.random()
    net = core.Net("net")
    net.ConstantFill([], ["Y"], shape=shape, value=val, run_once=0, name="Y")

    # Execute via Caffe2
    workspace.RunNetOnce(net)

    # Import caffe2 network into ngraph
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)

    # Get handle
    f_ng = importer.get_op_handle("Y")

    # Execute
    with ExecutorFactory() as ex:
        f_result = ex.executor(f_ng)()

        # compare Caffe2 and ngraph results
        assert (np.ma.allequal(f_result, workspace.FetchBlob("Y")))
        assert (np.isclose(f_result[0][0], val, atol=1e-6, rtol=0))
Example #12
def test_giventensorintfill():
    workspace.ResetWorkspace()

    shape = [10, 10]
    data1 = np.random.randint(-100, 101, shape)  # high bound is exclusive

    net = core.Net("net")
    net.GivenTensorIntFill([], ["Y"], shape=shape, values=data1, name="Y")

    # Execute via Caffe2
    workspace.RunNetOnce(net)

    # Import caffe2 network into ngraph
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)

    # Get handle
    f_ng = importer.get_op_handle("Y")

    # Execute
    with ExecutorFactory() as ex:
        f_result = ex.executor(f_ng)()

        # compare Caffe2 and ngraph results
        assert (np.ma.allequal(f_result, workspace.FetchBlob("Y")))
        assert (np.ma.allequal(f_result, data1))
Example #13
def check_communication_pass(ops_to_transform, expected_recv_nodes):
    """
    The communication pass should insert send/recv nodes wherever
    the metadata[transformer] differs between nodes.
    This checks that the recv nodes are inserted in the right place, and counts
    that the expected number of send
    nodes are found.

    :param ops_to_transform: list of ops to do the garph traversal
    :param expected_recv_nodes: lits of ops where receive nodes are expected to
           be inserted after the communication pass
    """
    with ExecutorFactory():
        send_nodes = OrderedSet()
        obj = CommunicationPass(send_nodes)
        obj.do_pass(ops_to_transform)

        # Check that the communication pass inserted the expected number of send nodes
        assert len(send_nodes) == len(expected_recv_nodes)

        # Verify that recv nodes are inserted in the right place
        recv_op_types = (ng.op_graph.comm_nodes.CPUMlslRecvOp,
                         ng.op_graph.comm_nodes.CPUMlslGatherRecvOp,
                         ng.op_graph.comm_nodes.CPUMlslScatterRecvOp)
        for op in expected_recv_nodes:
            assert any(type(arg) in recv_op_types for arg in op.args)
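
A hypothetical call to this helper, assuming a graph whose ops carry different device_id metadata so that the pass must insert a recv in front of the consuming op; the device_id values and the expected recv location are illustrative assumptions:

# Hypothetical usage sketch for check_communication_pass; the device ids and
# the expectation that the add op receives a recv arg are assumptions.
with ng.metadata(device_id='1'):
    x = ng.placeholder(())
with ng.metadata(device_id='2'):
    y = x + 1  # crosses the device boundary, so y should gain a recv arg

check_communication_pass(ops_to_transform=[y], expected_recv_nodes=[y])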
Example #14
def test_fc():
    workspace.ResetWorkspace()

    shape = [10, 10]
    data1 = [random.gauss(mu=0, sigma=10) for i in range(np.prod(shape))]
    data2 = [random.gauss(mu=0, sigma=10) for i in range(np.prod(shape))]

    net = core.Net("net")
    X = net.GivenTensorFill([], ["X"], shape=shape, values=data1, name="X")
    W = net.GivenTensorFill([], ["W"], shape=shape, values=data2, name="W")
    b = net.ConstantFill([], ["b"], shape=[shape[0]], value=1.0, run_once=0, name="b")
    net.FC([X, W, b], ["Y"], name="Y")

    # Execute via Caffe2
    workspace.RunNetOnce(net)

    # Import caffe2 network into ngraph
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)

    # Get handle
    f_ng = importer.get_op_handle("Y")

    # Execute
    with ExecutorFactory() as ex:
        f_result = ex.executor(f_ng)()

        # compare Caffe2 and ngraph results
        assert(np.allclose(f_result, workspace.FetchBlob("Y"), atol=1e-4, rtol=1e-3,
                           equal_nan=False))
Example #15
def test_stack():
    W = ng.make_axis(length=4)
    H = ng.make_axis(length=5)
    I = ng.make_axis(length=3)

    axes = ng.make_axes([W, H])

    rng = RandomTensorGenerator(0, np.float32)

    a_v = [rng.uniform(0, 1, axes) for i in range(I.length)]

    for pos in range(len(axes) + 1):
        a = [ng.placeholder(axes, initial_value=p) for p in a_v]

        s = ng.stack(a, I, pos)

        with ExecutorFactory() as ex:
            num_funs = [
                ex.numeric_derivative(s, p, delta,
                                      *(q for q in a if q is not p))
                for p in a
            ]
            sym_funs = [
                ex.derivative(s, p, *(q for q in a if q is not p))
                for p in a
            ]

            for n_fun, s_fun, a_i in zip(num_funs, sym_funs, a_v):
                na_is = list(na_i for na_i in a_v if na_i is not a_i)
                d_n = n_fun(a_i, *na_is)
                d_s = s_fun(a_i, *na_is)
                ng.testing.assert_allclose(d_n, d_s, rtol=rtol, atol=atol)
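
ex.numeric_derivative approximates the gradient by finite differences and serves as the reference for the symbolic ex.derivative. A standalone numpy illustration of the central-difference idea behind it, a sketch of the technique rather than the ExecutorFactory implementation:

# Standalone central-difference gradient check for a scalar-valued f;
# illustrates the idea behind numeric_derivative, not its actual code.
import numpy as np

def numeric_grad(f, x, delta=1e-3):
    g = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        i = it.multi_index
        saved = x[i]
        x[i] = saved + delta
        f_plus = f(x)
        x[i] = saved - delta
        f_minus = f(x)
        x[i] = saved  # restore before moving to the next element
        g[i] = (f_plus - f_minus) / (2 * delta)
        it.iternext()
    return g

x0 = np.array([1.0, 2.0, 3.0])
assert np.allclose(numeric_grad(lambda v: (v ** 2).sum(), x0), 2 * x0)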
Example #16
def test_softmax(transformer_factory, input_tensor):
    """TODO."""
    p_x = input_tensor
    N = p_x.axes.batch_axes()[0]
    W = p_x.axes.sample_axes()[0]
    # set up some distributions
    u = rng.uniform(0, 1, p_x.axes)
    u = u / np.sum(u, 0).reshape(1, N.length)

    # Put them in pre-softmax form
    x = np.log(u) + rng.uniform(-5000, 5000, ng.make_axes([N])).reshape(
        1, N.length)

    with ExecutorFactory() as ex:
        smax_w_fun = ex.executor(
            ng.softmax(p_x, normalization_axes=ng.make_axes([W])), p_x)
        smax_fun = ex.executor(ng.softmax(p_x), p_x)

        s = smax_w_fun(x)
        ng.testing.assert_allclose(s, u, atol=1e-6, rtol=1e-3)

        x = rng.uniform(-5000, 5000, p_x.axes)
        u = np_softmax(x, 0)
        s = smax_w_fun(x)
        ng.testing.assert_allclose(s, u, atol=1e-6, rtol=1e-3)

        # Test with softmax_axis default
        s = smax_fun(x)
        ng.testing.assert_allclose(s, u, atol=1e-6, rtol=1e-3)
Example #17
def test_setting(M):
    with ExecutorFactory() as ex:
        axes = ng.make_axes([M])

        np_x = np.array([1, 2, 3], dtype=np.float32)
        np_y = np.array([1, 3, 5], dtype=np.float32)

        x = ng.constant(np_x, axes)
        y = ng.constant(np_y, axes)

        v = ng.variable(axes, initial_value=x)

        f_v = ex.executor(v)

        vset = ng.sequential([ng.assign(v, v + y), v])
        f_v1 = ex.executor(vset)

        f_v2 = ex.executor(v)

        e_v = f_v().copy()
        assert ng.testing.allclose(e_v, np_x)
        e_v1 = f_v1().copy()
        assert ng.testing.allclose(e_v1, np_x + np_y)
        e_v2 = f_v2().copy()
        assert ng.testing.allclose(e_v2, np_x + np_y)
Example #18
def test_cast_axes(transformer_factory):
    C = ng.make_axis(length=2)
    D = ng.make_axis(length=3)

    x = ng.placeholder([C, D])

    with pytest.raises(ValueError):
        ng.cast_axes(x, [D, C])

    x_slice = x[1, :]
    # Cast back to known axes
    x_cast = ng.cast_axes(x_slice, [D])

    # Verify that the tensor broadcasts along D
    y = (x + x_cast).named('y')
    with ExecutorFactory() as ex:
        y_fun = ex.executor(y, x)
        num_deriv_fun = ex.numeric_derivative(y, x, delta)
        sym_deriv_fun = ex.derivative(y, x)

        x_np = np.array([[10, 20, 30], [1, 2, 3]], dtype='float32')
        y_fun_np = np.array([[11, 22, 33], [2, 4, 6]], dtype='float32')
        y_fun_ng = y_fun(x_np)
        assert ng.testing.allclose(y_fun_ng, y_fun_np)

        deriv_num = num_deriv_fun(x_np)
        deriv_sym = sym_deriv_fun(x_np)
        assert ng.testing.allclose(deriv_num, deriv_sym, rtol=rtol, atol=atol)
Example #19
def compare_f_at_x(f_be, x_be, f_np, x, **kwargs):
    """
    Compare op_graph implementation of a function with numpy implementation

    Arguments:
        f_be: op_graph function
        x_be: argument to op_graph
        f_np: numpy function
        x: value to pass in to both implementations of f
        kwargs: used to pass rtol/atol on to assert_allclose
    """
    # op_graph
    with ExecutorFactory() as ex:

        # if x_be and x are not tuples or lists, put them in lists with length 1
        if isinstance(x_be, (tuple, list)):
            assert len(x_be) == len(x)
        else:
            x_be = [x_be]
            x = [x]

        # numpy
        val_np = f_np(*x)

        val_be = ex.executor(f_be, *x_be)(*x)

        # compare numpy and op_graph
        ng.testing.assert_allclose(val_np, val_be, **kwargs)
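
A hypothetical call to compare_f_at_x, assuming a one-axis placeholder; the axis length and the ng.exp/np.exp pair are illustrative choices:

# Hypothetical usage of compare_f_at_x; the axis length and the exp pair
# are illustrative assumptions.
import numpy as np
import ngraph as ng

N = ng.make_axis(length=8)
p_x = ng.placeholder(ng.make_axes([N]))
x_val = np.linspace(-1.0, 1.0, N.length, dtype=np.float32)
compare_f_at_x(ng.exp(p_x), p_x, np.exp, x_val, rtol=1e-5)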
Example #20
def test_stack(transformer_factory):
    if transformer_factory.name == flex_gpu_transformer_name:
        pytest.skip("Allowed to fail until PR2")

    W = ng.make_axis(length=4)
    H = ng.make_axis(length=5)
    I = ng.make_axis(length=3)

    axes = ng.make_axes([W, H])

    rng = RandomTensorGenerator(0, np.float32)

    a_v = [rng.uniform(0, 1, axes) for i in range(I.length)]

    for pos in range(len(axes) + 1):
        a = [ng.placeholder(axes, initial_value=p) for p in a_v]

        s = ng.stack(a, I, pos)

        with ExecutorFactory() as ex:
            num_funs = [ex.numeric_derivative(s, p, delta) for p in a]
            sym_funs = [ex.derivative(s, p) for p in a]

            for n_fun, s_fun, a_i in zip(num_funs, sym_funs, a_v):
                d_n = n_fun(a_i)
                d_s = s_fun(a_i)
                ng.testing.assert_allclose(d_n, d_s, rtol=rtol, atol=atol)
Example #21
def test_exit_condition(transformer_factory):
    bsz = 16
    class_num = 10

    # Limit the maximum absolute value of tensor elements to 7.9.
    #
    # np.random.randn fills the tensors with random values; it can return any value,
    # but values above 5 are highly improbable. A limit of 7.9 therefore almost never
    # modifies the tested tensor, yet it prevents sporadic failures when the test runs
    # in a continuous integration environment. 7.9 is the approximate upper bound of
    # the range [4, 8), whose numbers can all be expressed by flexpoint values sharing
    # the same dec (exponent).
    # Why not 15.9, the approximate upper bound of [8, 16)? Values above 8 are highly
    # improbable, and when they do appear they reduce the accuracy of every number in
    # the tensor (most numbers in a normal distribution are close to 0), causing
    # sporadic failures.

    is_flex = is_flex_factory(transformer_factory)
    clip_val = 7.9 if is_flex else 0

    N, Y = ng.make_axis(bsz), ng.make_axis(class_num)
    y_val = rng.randn_abs_clip(ng.make_axes([N, Y]), clip_max=clip_val)
    y = ng.constant(y_val, ng.make_axes([N, Y]))

    likelihood = ng.log(ng.softmax(y, normalization_axes=y.axes[1]))

    with ExecutorFactory() as ex:
        comp = ex.executor(likelihood)

        val1 = comp()
        val2 = comp()
        ng.testing.assert_allclose(val1, val2, atol=0, rtol=0)
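
rng.randn_abs_clip is an ngraph test utility; assuming it draws |N(0, 1)| samples and clips them at clip_max (with 0 meaning no clipping, as the non-flex branch above suggests), a rough numpy equivalent would be:

# Rough numpy equivalent of the assumed randn_abs_clip semantics; this is an
# assumption about the utility, not its actual source.
import numpy as np

def randn_abs_clip_np(shape, clip_max):
    vals = np.abs(np.random.randn(*shape))
    return np.minimum(vals, clip_max) if clip_max > 0 else vals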
Example #22
def test_elementwise_unary_ops_matched_args(
    transformer_factory,
    elementwise_unary_op,
    symmetric_tensor
):
    """TODO."""
    delta = .001
    np_op = getattr(np, elementwise_unary_op)
    be_op = getattr(ng, elementwise_unary_op)

    p_u = symmetric_tensor
    u = rng.uniform(1.0, 2.0, p_u.axes)
    u_np = np_op(u)
    result_op = be_op(p_u)

    with ExecutorFactory() as ex:
        fun = ex.executor(result_op, p_u)
        dudunum_fun = ex.numeric_derivative(result_op, p_u, delta)
        dudut_fun = ex.derivative(result_op, p_u)

        u_t = fun(u)
        ng.testing.assert_allclose(u_np, u_t, atol=1e-4, rtol=1e-4)
        dudunum = dudunum_fun(u)
        dudut = dudut_fun(u)
        ng.testing.assert_allclose(dudunum, dudut, atol=1e-3, rtol=1e-3)
Example #23
def mnist_mlp_ns(args):
    # write tensorflow models
    x = ns.placeholder(np.float32, [args.batch_size, 784])
    t = ns.placeholder(np.float32, [args.batch_size, 10])
    w = ns.Variable(np.zeros([784, 10]))
    b = ns.Variable(np.zeros([10]))
    y = ns.add(ns.matmul(x, w), b)
    cost = ns.reduce_mean(
        -ns.reduce_sum(ns.multiply(t, ns.log(ns.softmax(y))), axis=[1]))
    # transformer and computations
    with ExecutorFactory() as ex:
        updates = CommonSGDOptimizer(args.lrate).minimize(
            cost, cost.variables())
        train_comp = ex.executor(ng.sequential([updates, cost]), x, t)
        ex.transformer.initialize()

        # train
        if args.random_data is not None:
            mnist = args.random_data
            mnist.reset(0)
        else:
            mnist = input_data.read_data_sets(args.data_dir, one_hot=True)

        ng_cost_vals = []
        for idx in range(args.max_iter):
            batch_xs, batch_ys = mnist.train.next_batch(args.batch_size)
            cost_val = train_comp(batch_xs, batch_ys)
            ng_cost_vals.append(float(cost_val))
            print("[Iter %s] Cost = %s" % (idx, cost_val))

    return ng_cost_vals
Example #24
def test_placeholder(transformer_factory):
    W = ng.make_axis(length=10)
    H = ng.make_axis(length=20)

    # Pass array through a placeholder
    aaxes = ng.make_axes([W, H])
    ashape = aaxes.lengths
    asize = aaxes.size
    aval = np.arange(asize, dtype=np.float32).reshape(ashape)

    x = ng.placeholder(aaxes)
    d = 2 * x
    d2 = ng.squared_L2(x, out_axes=None)

    with ExecutorFactory() as ex:
        # Return placeholder, param is placeholder
        placeholder_fun = ex.executor(x, x)
        prod_fun = ex.executor([d, d2], x)

        cval = placeholder_fun(aval)
        ng.testing.assert_allclose(cval, aval)

        # Pass a different array through
        u = rng.uniform(-1.0, 1.0, aaxes)
        cval = placeholder_fun(u)
        ng.testing.assert_allclose(cval, u)

        cval, s = prod_fun(aval)
        ng.testing.assert_allclose(cval, aval * 2)
        ng.testing.assert_allclose(s[()], np.dot(aval.flatten(), aval.flatten()))

        cval, s = prod_fun(u)
        u2 = u * 2
        ng.testing.assert_allclose(cval, u2)
        ng.testing.assert_allclose(s[()], np.dot(u.flatten(), u.flatten()))
Example #25
def test_flat_tensor_dot_tensor():
    """
    Ensure that a flattened argument axis is not unflattened in the result.
    """
    H = ng.make_axis(2)
    W = ng.make_axis(7)
    C = ng.make_axis(3)
    K = ng.make_axis(11)

    axes_a = ng.make_axes([H, W, C])
    a = ng.constant(np.ones(axes_a.lengths), axes=axes_a)
    flat_a = ng.flatten_at(a, 2)

    axes_b = ng.make_axes([C, K])
    b = ng.constant(np.ones(axes_b.lengths), axes=axes_b)

    result = ng.dot(b, flat_a)

    with ExecutorFactory() as factory:
        result_fun = factory.executor(result)
        result_val = result_fun()

    result_correct = np.ones_like(result_val) * C.length
    ng.testing.assert_allclose(result_val, result_correct)
Example #26
def mnist_mlp(args):
    # write tensorflow models
    x = tf.placeholder(tf.float32, [args.batch_size, 784])
    t = tf.placeholder(tf.float32, [args.batch_size, 10])
    w = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    y = tf.matmul(x, w) + b
    cost = tf.reduce_mean(-tf.reduce_sum(
        t * tf.log(tf.nn.softmax(y)), reduction_indices=[1]))
    init = tf.global_variables_initializer()

    # import graph_def
    importer = TFImporter()
    importer.import_graph_def(tf.get_default_graph().as_graph_def())

    # get handle of ngraph ops
    x_ng, t_ng, cost_ng, init_op_ng = importer.get_op_handle([x, t, cost, init])

    # transformer and computations
    with ExecutorFactory() as ex:
        updates = CommonSGDOptimizer(args.lrate).minimize(cost_ng, cost_ng.variables())
        train_comp = ex.executor(ng.sequential([updates, cost_ng]), x_ng, t_ng)
        init_comp = ex.executor(init_op_ng)
        ex.transformer.initialize()

        # train
        if args.random_data is not None:
            mnist = args.random_data
            mnist.reset(0)
        else:
            mnist = input_data.read_data_sets(args.data_dir, one_hot=True)

        init_comp()
        ng_cost_vals = []
        for idx in range(args.max_iter):
            batch_xs, batch_ys = mnist.train.next_batch(args.batch_size)
            cost_val = train_comp(batch_xs, batch_ys)
            ng_cost_vals.append(float(cost_val))
            print("[Iter %s] Cost = %s" % (idx, cost_val))

    # train in tensorflow as comparison
    with tf.Session() as sess:
        # train in tensorflow
        train_step = tf.train.GradientDescentOptimizer(args.lrate).minimize(cost)
        sess.run(init)
        if args.random_data is not None:
            mnist = args.random_data
            mnist.reset(0)
        else:
            mnist = input_data.read_data_sets(args.data_dir, one_hot=True)
        tf_cost_vals = []
        for idx in range(args.max_iter):
            batch_xs, batch_ys = mnist.train.next_batch(args.batch_size)
            cost_val, _ = sess.run([cost, train_step],
                                   feed_dict={x: batch_xs, t: batch_ys})
            tf_cost_vals.append(float(cost_val))
            print("[Iter %s] Cost = %s" % (idx, cost_val))

    return ng_cost_vals, tf_cost_vals
Example #27
def test_initial_value(transformer_factory):
    # Test work-around for issue #1138
    w = [3, 4, 5]
    x = ng.constant(w)
    y = ng.variable([ng.make_axis(length=len(w))], initial_value=x)
    with ExecutorFactory() as ex:
        result = ex.executor(y)()
    ng.testing.assert_allclose(result, np.asarray(w, dtype=np.float32))
Example #28
def test_ctc(data_args):
    """
    test ctc fprop and bprop
    """
    with ExecutorFactory() as ex:

        nout, bsz, max_utt_len, max_lbl_len = data_args
        V = ng.make_axis(nout)
        L = ng.make_axis(max_lbl_len * bsz)
        ax.N.length = bsz
        ax.REC.length = max_utt_len

        ax_activs = ng.make_axes([ax.REC, ax.N, V])
        ax_lbls = ng.make_axes([L])
        ax_utt_lens = ng.make_axes([ax.N])
        ax_lbl_lens = ng.make_axes([ax.N])

        activs = ng.placeholder(ax_activs)
        lbls = ng.placeholder(ax_lbls, dtype=np.dtype(np.int32))
        utt_lens = ng.placeholder(ax_utt_lens, dtype=np.dtype(np.int32))
        lbl_lens = ng.placeholder(ax_lbl_lens, dtype=np.dtype(np.int32))

        # fprop
        ctc_cost = ng.ctc(activs, lbls, utt_lens, lbl_lens)
        costfun = ex.executor(ctc_cost, activs, lbls, utt_lens, lbl_lens)

        # bprop
        grad_costfun = ex.derivative(ctc_cost, activs, lbls, utt_lens,
                                     lbl_lens)

        # provide numerical values and execute the graph
        activs_val = rng.uniform(-1, 1, activs.axes)
        lbls_val = np.random.randint(1,
                                     nout,
                                     lbls.axes.lengths,
                                     dtype=np.int32)
        lbl_lens_val = np.random.randint(1,
                                         max_lbl_len + 1,
                                         lbl_lens.axes.lengths,
                                         dtype=np.int32)
        utt_lens_val = ((2 * lbl_lens_val + 1) / float(max_utt_len)) * 100
        utt_lens_val = utt_lens_val.astype(np.int32)
        fprop_ctc = costfun(activs_val, lbls_val, utt_lens_val,
                            lbl_lens_val).copy()
        bprop_ctc = grad_costfun(activs_val, lbls_val, utt_lens_val,
                                 lbl_lens_val).copy()

        # compare with reference values
        costs_ref, grads_ref = ctc_ref(activs_val, lbls_val, utt_lens_val,
                                       lbl_lens_val)
        ng.testing.assert_allclose(fprop_ctc,
                                   costs_ref,
                                   rtol=1.0e-5,
                                   atol=1.0e-5)
        ng.testing.assert_allclose(bprop_ctc[0],
                                   grads_ref,
                                   rtol=1.0e-5,
                                   atol=1.0e-5)
Example #29
def test_fill_state():
    with ExecutorFactory() as ex:
        N = ng.make_axis(3, name='N')
        x_np = np.ones((N.length)) * 4
        x = ng.variable([N], initial_value=x_np).named('x')
        val = ng.sequential([ng.fill(x, -1), x])
        f = ex.executor(val)
        x_val = f()
        assert np.allclose(-1, x_val)
Example #30
def test_from_device(transformer_factory):
    with ng.metadata(device_id='1'):
        x = ng.placeholder(())
    x_plus_one = x + 1

    with ExecutorFactory() as ex:
        computation = ex.executor(x_plus_one, x)
        for i in [10, 20, 30]:
            assert computation(i) == i + 1