Example #1
def test_giventensorintfill():
    workspace.ResetWorkspace()

    shape = [10, 10]
    data1 = np.random.randint(-100, 101, size=shape)  # inclusive [-100, 100], as the deprecated random_integers was

    net = core.Net("net")
    net.GivenTensorIntFill([], ["Y"], shape=shape, values=data1, name="Y")

    # Execute via Caffe2
    workspace.RunNetOnce(net)

    # Import caffe2 network into ngraph
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)

    # Get handle
    f_ng = importer.get_op_handle("Y")

    # Execute
    with ExecutorFactory() as ex:
        f_result = ex.executor(f_ng)()

        # compare Caffe2 and ngraph results
        assert (np.ma.allequal(f_result, workspace.FetchBlob("Y")))
        assert (np.ma.allequal(f_result, data1))
Example #2
def test_maxpool():
    workspace.ResetWorkspace()

    # shape is in NCHW format
    # [[shape], kernel, stride] #TODO: add padding
    param_list = [[[1, 3, 10, 10], 2, 2], [[2, 3, 5, 5], 1, 1],
                  [[2, 2, 7, 7], 3, 2], [[8, 5, 8, 8], 4, 4]]

    for param_iter in param_list:
        shape, kernel, stride = param_iter
        data1 = [random.gauss(mu=0, sigma=10) for i in range(np.prod(shape))]

        net = core.Net("net")
        X = net.GivenTensorFill([], ["X"], shape=shape, values=data1, name="X")
        net.MaxPool(X, 'Y', kernel=kernel, stride=stride)

        # Execute via Caffe2
        workspace.RunNetOnce(net)

        # Import caffe2 network into ngraph
        importer = C2Importer()
        importer.parse_net_def(net.Proto(), verbose=False)

        # Get handle
        f_ng = importer.get_op_handle("Y")

        # Execute
        with ExecutorFactory() as ex:
            f_result = ex.executor(f_ng)()

            # compare Caffe2 and ngraph results
            assert (np.array_equal(f_result, workspace.FetchBlob("Y")))
Example #3
def test_LabelCrossEntropy():
    workspace.ResetWorkspace()
    batch = 8
    classes = 16
    y_shape = (batch, classes)
    t_shape = (batch, )
    y_values = np.random.uniform(0, 1, y_shape)
    t_values = np.random.randint(0, classes, t_shape)

    net = core.Net("net")
    Y = net.GivenTensorFill([], "Y", shape=y_shape, values=y_values)
    T = net.GivenTensorIntFill([], "T", shape=t_shape, values=t_values)
    net.LabelCrossEntropy([Y, T], "xent")

    # Execute via Caffe2
    workspace.RunNetOnce(net)

    # Import caffe2 network into ngraph
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)

    # Get handle
    f_ng = importer.get_op_handle("xent")

    # Execute
    with ExecutorFactory() as ex:
        f_result = ex.executor(f_ng)()

        assert(np.allclose(f_result, workspace.FetchBlob("xent"), equal_nan=False))
Example #4
def test_fc():
    workspace.ResetWorkspace()

    shape = [10, 10]
    data1 = [random.gauss(mu=0, sigma=10) for i in range(np.prod(shape))]
    data2 = [random.gauss(mu=0, sigma=10) for i in range(np.prod(shape))]

    net = core.Net("net")
    X = net.GivenTensorFill([], ["X"], shape=shape, values=data1, name="X")
    W = net.GivenTensorFill([], ["W"], shape=shape, values=data2, name="W")
    b = net.ConstantFill([], ["b"], shape=[shape[0]], value=1.0, run_once=0, name="b")
    net.FC([X, W, b], ["Y"], name="Y")

    # Execute via Caffe2
    workspace.RunNetOnce(net)

    # Import caffe2 network into ngraph
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)

    # Get handle
    f_ng = importer.get_op_handle("Y")

    # Execute
    with ExecutorFactory() as ex:
        f_result = ex.executor(f_ng)()

        # compare Caffe2 and ngraph results
        assert(np.allclose(f_result, workspace.FetchBlob("Y"), atol=1e-4, rtol=1e-3,
                           equal_nan=False))
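
Caffe2's FC op computes Y = X·Wᵀ + b, with W stored as (output_dim, input_dim); that is why the bias shape above is [shape[0]], the output dimension. A small NumPy check of the convention:

import numpy as np

x = np.random.randn(10, 10).astype(np.float32)
w = np.random.randn(10, 10).astype(np.float32)  # (output_dim, input_dim)
b = np.ones(10, dtype=np.float32)

y = x.dot(w.T) + b   # FC semantics: Y = X . W^T + b
assert y.shape == (10, 10)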
Example #5
def test_sum():
    workspace.ResetWorkspace()

    shape = [2, 10]
    data1 = [random.gauss(mu=0, sigma=10) for i in range(np.prod(shape))]
    data2 = [random.gauss(mu=0, sigma=10) for i in range(np.prod(shape))]
    net = core.Net("net")
    X1 = net.GivenTensorFill([], "X1", shape=shape, values=data1, name="X1")
    X2 = net.GivenTensorFill([], "X2", shape=shape, values=data2, name="X2")
    net.Sum([X1, X2], ["Y"], name="Y")

    # Execute via Caffe2
    workspace.RunNetOnce(net)

    # Import caffe2 network into ngraph
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)

    # Get handle
    f_ng = importer.get_op_handle("Y")

    # Execute
    with ExecutorFactory() as ex:
        f_result = ex.executor(f_ng)()

        # compare Caffe2 and ngraph results
        assert (np.array_equal(f_result, workspace.FetchBlob("Y")))
Example #6
def fc_example():
    # Caffe2 - network creation
    net = core.Net("net")

    X = net.GivenTensorFill([], "X", shape=[2, 2], values=[1.0, 2.0, 3.0, 4.0], name="X")
    W = net.GivenTensorFill([], "W", shape=[1, 2], values=[5.0, 6.0], name="W")
    b = net.ConstantFill([], ["b"], shape=[1, ], value=0.5, run_once=0, name="b")
    X.FC([W, b], ["Y"], name="Y")

    # Execute via Caffe2
    workspace.ResetWorkspace()
    workspace.RunNetOnce(net)

    # Import caffe2 network into ngraph
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)

    # Get handle
    f_ng = importer.get_op_handle("Y")

    # Execute
    f_result = ngt.make_transformer().computation(f_ng)()

    # compare Caffe2 and ngraph results
    print("Caffe2 result: {}:\n{}".format("Y", workspace.FetchBlob("Y")))
    print("ngraph result: {}:\n{}".format("Y", f_result))
    assert(np.array_equal(f_result, workspace.FetchBlob("Y")))
Example #7
def test_constant():
    workspace.ResetWorkspace()

    shape = [10, 10]
    val = random.random()
    net = core.Net("net")
    net.ConstantFill([], ["Y"], shape=shape, value=val, run_once=0, name="Y")

    # Execute via Caffe2
    workspace.RunNetOnce(net)

    # Import caffe2 network into ngraph
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)

    # Get handle
    f_ng = importer.get_op_handle("Y")

    # Execute
    with ExecutorFactory() as ex:
        f_result = ex.executor(f_ng)()

        # compare Caffe2 and ngraph results
        assert (np.ma.allequal(f_result, workspace.FetchBlob("Y")))
        assert (np.isclose(f_result[0][0], val, atol=1e-6, rtol=0))
Example #8
def test_NHWC2NCHW():
    workspace.ResetWorkspace()

    # NCHW input for the NCHW2NHWC op below
    shape = [2, 3, 4, 5]
    data1 = [float(i) for i in range(np.prod(shape))]

    net = core.Net("net")
    X = net.GivenTensorFill([], ["X"], shape=shape, values=data1, name="X")
    X.NCHW2NHWC([], ["Y"], name="Y")

    # Execute via Caffe2
    workspace.RunNetOnce(net)

    # Import caffe2 network into ngraph
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)

    # Get handle
    f_ng = importer.get_op_handle("Y")

    # Execute
    with ExecutorFactory() as ex:
        f_result = ex.executor(f_ng)()

        # compare Caffe2 and ngraph results
        assert (np.array_equal(f_result, workspace.FetchBlob("Y")))
Example #9
def test_AveragedLoss():
    workspace.ResetWorkspace()
    shape = (32, )

    net = core.Net("net")
    Y = net.GivenTensorFill([],
                            "Y",
                            shape=shape,
                            values=np.random.uniform(-1, 1, shape))
    Y.AveragedLoss([], ["loss"])

    # Execute via Caffe2
    workspace.RunNetOnce(net)

    # Import caffe2 network into ngraph
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)

    # Get handle
    f_ng = importer.get_op_handle("loss")

    # Execute
    with ExecutorFactory() as ex:
        f_result = ex.executor(f_ng)()

        assert (np.allclose(f_result,
                            workspace.FetchBlob("loss"),
                            equal_nan=False))
Example #10
def test_convolution_nhwc():
    workspace.ResetWorkspace()

    # shape is in NCHW format
    # [batch, input_feature_map, spatial, output_feature_map, kernel, stride, c2_pad_type]
    param_list = [
        [1, 3, 2, 1, 2, 2, caffe2_legacy_pb2.NOTSET],
        [1, 1, 4, 1, 2, 2, caffe2_legacy_pb2.NOTSET],
        [2, 3, 8, 1, 2, 2, caffe2_legacy_pb2.NOTSET],
        [8, 2, 5, 4, 3, 1, caffe2_legacy_pb2.NOTSET],
        [1, 2, 5, 2, 3, 1, caffe2_legacy_pb2.NOTSET],
        [8, 3, 4, 4, 3, 3, caffe2_legacy_pb2.VALID],
        [12, 6, 5, 5, 4, 3, caffe2_legacy_pb2.VALID],
        [8, 3, 4, 4, 3, 3, caffe2_legacy_pb2.SAME],
        [12, 6, 5, 5, 4, 3, caffe2_legacy_pb2.SAME]
    ]

    for param_iter in param_list:
        n, ifm, spatial, ofm, kernel, stride, pad_type = param_iter

        shape_x = (n, spatial, spatial, ifm)
        shape_w = (ofm, kernel, kernel, ifm)
        shape_b = (ofm, )

        data_x = [random.gauss(mu=0, sigma=10) for i in range(np.prod(shape_x))]
        data_w = [random.gauss(mu=0, sigma=10) for i in range(np.prod(shape_w))]
        data_b = [random.gauss(mu=0, sigma=10) for i in range(np.prod(shape_b))]

        net = core.Net("net")
        X = net.GivenTensorFill([], ["X"], shape=shape_x, values=data_x, name="X")
        W = net.GivenTensorFill([], ["W"], shape=shape_w, values=data_w, name="W")
        B = net.GivenTensorFill([], ["B"], shape=shape_b, values=data_b, name="B")

        net.Conv([X, W, B], 'Y', kernel=kernel, stride=stride, order='NHWC', legacy_pad=pad_type)

        # Execute via Caffe2
        workspace.RunNetOnce(net)

        # Import caffe2 network into ngraph
        importer = C2Importer()
        importer.parse_net_def(net.Proto(), verbose=False)

        # Get handle
        f_ng = importer.get_op_handle("Y")

        # Execute
        with ExecutorFactory() as ex:
            f_result = ex.executor(f_ng)()

            # print("Caffe2 result: {}:\n{}".format("Y", workspace.FetchBlob("Y")))
            # print("ngraph result: {}:\n{}".format("Y", f_result))
            # compare Caffe2 and ngraph results
            assert(np.allclose(f_result, workspace.FetchBlob("Y"), atol=1e-4, rtol=1e-3,
                               equal_nan=False))
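
For sizing intuition: with no effective padding (the NOTSET cases here pass no pads, and VALID pads with zeros) the spatial output is floor((spatial - kernel) / stride) + 1, while SAME pads so that the output is ceil(spatial / stride), assuming TensorFlow-style SAME semantics. A small sketch of that arithmetic:

import math

def conv_out_dim(spatial, kernel, stride, same_pad=False):
    # Spatial output size of a 2-D convolution.
    # NOTSET (no pads) / VALID -> floor((spatial - kernel) / stride) + 1
    # SAME                     -> ceil(spatial / stride)
    if same_pad:
        return int(math.ceil(spatial / float(stride)))
    return (spatial - kernel) // stride + 1

# e.g. the [8, 3, 4, 4, 3, 3, VALID] case above: 4x4 input, kernel 3, stride 3 -> 1x1
assert conv_out_dim(4, 3, 3) == 1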
Example #11
def sum_example():
    # Caffe2 - network creation
    net = core.Net("net")
    shape = (2, 2, 2)

    A = net.GivenTensorFill([],
                            "A",
                            shape=shape,
                            values=np.random.uniform(-5, 5, shape),
                            name="A")
    B = net.GivenTensorFill([],
                            "B",
                            shape=shape,
                            values=np.random.uniform(-5, 5, shape),
                            name="B")
    C = net.GivenTensorFill([],
                            "C",
                            shape=shape,
                            values=np.random.uniform(-5, 5, shape),
                            name="C")
    A.Sum([B, C], ["Y"], name="Y")

    # Execute via Caffe2
    workspace.ResetWorkspace()
    workspace.RunNetOnce(net)

    # Execute in numpy
    a = workspace.FetchBlob("A")
    b = workspace.FetchBlob("B")
    c = workspace.FetchBlob("C")

    np_result = np.sum([a, b, c], axis=0)

    # Import caffe2 network into ngraph
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)

    # Get handle
    f_ng = importer.get_op_handle("Y")

    # Execute in ngraph
    f_result = ngt.make_transformer().computation(f_ng)()

    # compare numpy, Caffe2 and ngraph results
    print("Caffe2 result: \n{}\n".format(workspace.FetchBlob("Y")))
    print("ngraph result: \n{}\n".format(f_result))
    print("numpy result: \n{}\n".format(np_result))

    assert (np.allclose(f_result, workspace.FetchBlob("Y")))
    assert (np.allclose(f_result, np_result))
Example #12
def test_avgpool():
    workspace.ResetWorkspace()

    # shape is in NCHW format
    # [[shape], kernel, stride, caffe_padding_type]
    param_list = [[[1, 3, 10, 10], 2, 2, caffe2_legacy_pb2.NOTSET],
                  [[2, 3, 5, 5], 1, 1, caffe2_legacy_pb2.NOTSET],
                  [[2, 2, 7, 7], 3, 2, caffe2_legacy_pb2.NOTSET],
                  [[8, 5, 8, 8], 4, 4, caffe2_legacy_pb2.NOTSET],
                  [[8, 3, 4, 4], 3, 3, caffe2_legacy_pb2.VALID],
                  [[12, 6, 5, 5], 4, 3, caffe2_legacy_pb2.VALID],
                  [[8, 3, 4, 4], 3, 3, caffe2_legacy_pb2.SAME],
                  [[12, 6, 5, 5], 4, 3, caffe2_legacy_pb2.SAME]]

    for param_iter in param_list:
        shape, kernel, stride, pad_type = param_iter
        data1 = [random.gauss(mu=0, sigma=10) for i in range(np.prod(shape))]

        net = core.Net("net")
        net.GivenTensorFill([], ["X"], shape=shape, values=data1, name="X")
        net.AveragePool('X',
                        'Y',
                        kernel=kernel,
                        stride=stride,
                        legacy_pad=pad_type)

        # Execute via Caffe2
        workspace.RunNetOnce(net)

        # Import caffe2 network into ngraph
        importer = C2Importer()
        importer.parse_net_def(net.Proto(), verbose=False)

        # Get handle
        f_ng = importer.get_op_handle("Y")

        # Execute
        with ExecutorFactory() as ex:
            f_result = ex.executor(f_ng)()

            # compare Caffe2 and ngraph results
            assert (np.allclose(f_result,
                                workspace.FetchBlob("Y"),
                                atol=1e-4,
                                rtol=1e-3,
                                equal_nan=False))
Example #13
def test_convolution_nchw_no_pad_no_bias():
    workspace.ResetWorkspace()

    # shape is in NCHW format
    # [batch, input_feature_map, spatial, output_feature_map, kernel, stride]
    n, ifm, spatial, ofm, kernel, stride = [2, 3, 8, 1, 2, 2]

    shape_x = (n, ifm, spatial, spatial)
    shape_w = (ofm, ifm, kernel, kernel)
    shape_b = (ofm, )

    data_x = [random.gauss(mu=0, sigma=10) for i in range(np.prod(shape_x))]
    data_w = [random.gauss(mu=0, sigma=10) for i in range(np.prod(shape_w))]
    data_b = [0. for i in range(np.prod(shape_b))]

    net = core.Net("net")
    X = net.GivenTensorFill([], ["X"], shape=shape_x, values=data_x, name="X")
    W = net.GivenTensorFill([], ["W"], shape=shape_w, values=data_w, name="W")
    B = net.GivenTensorFill([], ["B"], shape=shape_b, values=data_b, name="B")

    net.Conv([X, W, B], 'Y', kernel=kernel, stride=stride, order='NCHW')

    # Execute via Caffe2
    workspace.RunNetOnce(net)

    # Import caffe2 network into ngraph
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)

    # Get handle
    f_ng = importer.get_op_handle("Y")

    # Execute
    with ExecutorFactory() as ex:
        f_result = ex.executor(f_ng)()

        # compare Caffe2 and ngraph results
        assert (np.allclose(f_result,
                            workspace.FetchBlob("Y"),
                            atol=1e-4,
                            rtol=1e-3,
                            equal_nan=False))
Example #14
def test_gaussianfill():
    workspace.ResetWorkspace()

    # Size of test matrix
    N = 100
    shape = [N, N]

    net = core.Net("net")
    net.GaussianFill([], ["Y"], shape=shape, mean=0.0, std=1.0, name="Y")

    # Execute via Caffe2
    workspace.RunNetOnce(net)

    # Import caffe2 network into ngraph
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)

    # Get handle
    f_ng = importer.get_op_handle("Y")

    # Execute
    with ExecutorFactory() as ex:
        f_result = ex.executor(f_ng)()

        # get caffe result
        caffe_res = workspace.FetchBlob("Y")

        # Elementwise difference of the two random matrices
        difference_res = caffe_res - f_result

        # standard deviation of the difference matrix
        diffe_res_std = difference_res.std()

        # the test can only be statistical, so in rare cases it may fail!!
        # (if it fails once, re-run a couple of times to confirm there is a real problem)
        # the difference must still be gaussian, with P(|m'-m| < 3*std) = 99.73%,
        # and std(m) = std/N, since there are N*N elements
        assert (np.isclose(difference_res.mean(),
                           0,
                           atol=3 * diffe_res_std / N,
                           rtol=0))
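
The bound in the assert follows because the mean of N*N i.i.d. samples has standard deviation std/N (sqrt(N*N) = N in the denominator). A standalone NumPy illustration of the same 3-sigma check:

import numpy as np

N = 100
# The difference of two independent N(0, 1) matrices is N(0, sqrt(2)) elementwise,
# so the mean of its N*N entries has standard deviation sqrt(2) / N.
diff = np.random.randn(N, N) - np.random.randn(N, N)
tolerance = 3 * diff.std() / N        # the 3-sigma bound used in the test
print(abs(diff.mean()) < tolerance)   # True ~99.73% of the time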
Example #15
def run_all_close_compare_initiated_with_random_gauss(c2_op_name,
                                                      shape=None,
                                                      data=None,
                                                      expected=None):
    workspace.ResetWorkspace()
    if not shape:
        shape = [2, 7]
    if not data:
        data = [random.gauss(mu=0, sigma=10) for i in range(np.prod(shape))]

    net = core.Net("net")
    net.GivenTensorFill([], "X", shape=shape, values=data, name="X")
    getattr(net, c2_op_name)(["X"], ["Y"], name="Y")

    # Execute via Caffe2
    workspace.RunNetOnce(net)

    # Import caffe2 network into ngraph
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)

    # Get handle
    f_ng = importer.get_op_handle("Y")

    # Execute
    with ExecutorFactory() as ex:
        f_result = ex.executor(f_ng)()

        c2_y = workspace.FetchBlob("Y")

        # compare Caffe2 and ngraph results
        assert (np.allclose(f_result, c2_y, atol=1e-4, rtol=0,
                            equal_nan=False))

        # compare expected results and ngraph results
        if expected:
            assert (np.allclose(f_result,
                                expected,
                                atol=1e-3,
                                rtol=0,
                                equal_nan=False))
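
A typical call to this helper (illustrative values of my own, not from the original suite) exercises a unary Caffe2 op such as Relu and optionally pins the expected output:

# Hypothetical usage: check Caffe2's Relu against ngraph and against known values.
run_all_close_compare_initiated_with_random_gauss(
    'Relu',
    shape=[1, 4],
    data=[-1.0, 0.0, 2.0, -3.5],
    expected=[[0.0, 0.0, 2.0, 0.0]])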
Example #16
def test_uniformfill():
    workspace.ResetWorkspace()

    # Size of test matrix
    N = 100
    shape = [N, N]
    net = core.Net("net")
    net.UniformFill([], ["Y"], shape=shape, min=-2., max=2., name="Y")

    # Execute via Caffe2
    workspace.RunNetOnce(net)

    # Import caffe2 network into ngraph
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)

    # Get handle
    f_ng = importer.get_op_handle("Y")

    # Execute
    with ExecutorFactory() as ex:
        f_result = ex.executor(f_ng)()

        # get caffe result
        caffe_res = workspace.FetchBlob("Y")

        # Elementwise difference of the two random matrices
        difference_res = caffe_res - f_result

        # standard deviation of the difference matrix
        diffe_res_std = difference_res.std()

        # the test can only be statistical, so it may occasionally fail!!
        # the approach mimics the gaussian test, but this time the multiplier is set to 5
        # to account for the distortion away from a gaussian distribution
        # (if it fails once, re-run a couple of times to confirm there is a real problem)
        assert (np.isclose(difference_res.mean(),
                           0,
                           atol=5 * diffe_res_std / N,
                           rtol=0))
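
For intuition on the tolerance: Var(U(-2, 2)) = (b - a)^2 / 12 = 4/3, so the elementwise difference of two independent fills has std sqrt(8/3) ≈ 1.63, and its mean over N*N entries has std ≈ 0.016. A quick NumPy sanity check:

import numpy as np

N = 100
diff = np.random.uniform(-2, 2, (N, N)) - np.random.uniform(-2, 2, (N, N))
print(diff.std(), np.sqrt(8.0 / 3.0))        # empirical vs. theoretical elementwise std
print(abs(diff.mean()), 5 * diff.std() / N)  # mean should fall inside the 5-sigma bound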
Example #17
File: fc.py  Project: kkasravi/ngraph
X = net.GivenTensorFill([],
                        "X",
                        shape=[2, 2],
                        values=[1.0, 2.0, 3.0, 4.0],
                        name="X")
W = net.GivenTensorFill([], "W", shape=[1, 2], values=[5.0, 6.0], name="W")
b = net.ConstantFill([], ["b"], shape=[
    1,
], value=0.5, run_once=0, name="b")
Y = X.FC([W, b], ["Y"], name="Y")

# Execute via Caffe2
workspace.ResetWorkspace()
workspace.RunNetOnce(net)

# Import caffe2 network into ngraph
importer = C2Importer()
importer.parse_net_def(net.Proto(), verbose=False)

# Get handle
f_ng = importer.get_op_handle("Y")

# Execute
f_result = ngt.make_transformer().computation(f_ng)()

# compare Caffe2 and ngraph results
print("Caffe2 result: {}:\n{}".format("Y", workspace.FetchBlob("Y")))
print("ngraph result: {}:\n{}".format("Y", f_result))
assert (np.array_equal(f_result, workspace.FetchBlob("Y")))
Example #18
def linear_regression(iter_num, lrate, gamma, step_size, noise_scale):
    # data multiplier
    m = 3
    # batch_len and data
    xs_np = np.array(
        [[0, 0], [1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [-1.0, -1.0]], dtype='f')
    ys_np = np.array([[0.5 * m], [2.5 * m], [4.5 * m], [6.5 * m], [-1.5 * m]],
                     dtype='f')
    batch_len = len(ys_np)

    # with these values, the following target weight and bias
    # should be approximated after training:
    target_b = 0.5 * m
    target_w = np.array([1.0, 1.0]) * m

    # noise amplitude and noise generation
    noise_l = np.array(noise_scale * np.random.randn(batch_len), dtype='f')
    noise = [[i] for i in noise_l]

    # caffe2 init network
    init_net = core.Net("init")
    ONE = init_net.ConstantFill([], "ONE", shape=[1], value=1.)
    ITER = init_net.ConstantFill([],
                                 "ITER",
                                 shape=[1],
                                 value=0,
                                 dtype=core.DataType.INT32)

    # for the parameters to be learned: we randomly initialize the weights;
    # the output is a scalar, W is 1x2, and each input x is a 2-vector
    W = init_net.UniformFill([], "W", shape=[1, 2], min=-1., max=1.)
    B = init_net.ConstantFill([], "B", shape=[1], value=0.0)
    print('Created init net.')

    # caffe2 train net
    train_net = core.Net("train")

    # definition of external inputs: X, ground truth and noisy version of truth
    workspace.FeedBlob('X', xs_np)
    workspace.FeedBlob('Y_gt', ys_np)
    workspace.FeedBlob('Y_noise', ys_np + noise)
    train_net.AddExternalInput("X")
    train_net.AddExternalInput("Y_noise")
    train_net.AddExternalInput("Y_gt")

    # now, for the normal linear regression prediction, this is all we need.
    Y_pred = train_net.FC(["X", W, B], "Y_pred")

    # when computing the loss, we refer to the noisy version of the truth:
    dist = train_net.SquaredL2Distance(["Y_noise", Y_pred], "dist")
    loss = dist.AveragedLoss([], ["loss"])

    # Create the initialization and training nets in Caffe2, so that the objects exist
    # and handles to them can be obtained by the importer
    workspace.CreateNet(init_net)
    workspace.CreateNet(train_net)

    # import the caffe2 network into ngraph
    print("\n\n---------------------ngraph behaviour:")
    importer = C2Importer()
    importer.parse_net_def(net_def=train_net.Proto(),
                           init_net_def=init_net.Proto(),
                           c2_workspace=workspace)

    # Get handles to the various objects we are interested in for the ngraph computation
    y_gt_ng, x_ng, w_ng, b_ng, y_pred_ng, dist_ng, loss_ng =  \
        importer.get_op_handle(['Y_noise', 'X', 'W', 'B', 'Y_pred', 'dist', 'loss'])

    # set the learning rate for ngraph to match the one used for caffe2 below
    lr_params = {
        'name': 'step',
        'base_lr': lrate,
        'gamma': gamma,
        'step': step_size
    }

    SGD = util.CommonSGDOptimizer(lr_params)
    parallel_update = SGD.minimize(loss_ng, [w_ng, b_ng])
    transformer = ngt.make_transformer()
    update_fun = transformer.computation(
        [loss_ng, w_ng, b_ng, parallel_update], x_ng, y_gt_ng,
        SGD.get_iter_buffer())

    true_iter = [0]
    # ngraph actual computation
    for i in range(iter_num // batch_len):
        for xs, ys in zip(xs_np, ys_np + noise):
            loss_val, w_val, b_val, _ = update_fun(xs, ys, i)
            # print("N it: %s W: %s, B: %s loss %s " % (i, w_val, b_val, loss_val))
            true_iter[0] += 1

    print("Ngraph loss %s " % (loss_val))

    # end of ngraph part

    # caffe2 backward pass and computation to compare results with ngraph
    gradient_map = train_net.AddGradientOperators([loss])

    # Increment the iteration by one.
    train_net.Iter(ITER, ITER)

    # Caffe2 backward pass and computation
    # Get gradients for all the computations above and do the weighted sum
    LR = train_net.LearningRate(ITER,
                                "LR",
                                base_lr=-lrate,
                                policy="step",
                                stepsize=step_size,
                                gamma=gamma)
    train_net.WeightedSum([W, ONE, gradient_map[W], LR], W)
    train_net.WeightedSum([B, ONE, gradient_map[B], LR], B)
    workspace.RunNetOnce(init_net)
    workspace.CreateNet(train_net)

    for i in range(iter_num):
        workspace.RunNet(train_net.Proto().name)
        # print("During training, loss is: {}".format(workspace.FetchBlob("loss")))

    print("Caffe2 loss is: {}".format(workspace.FetchBlob("loss")))
    # end of caffe2 part

    # printing out results
    print(
        "Done {} iterations over the batch data, with noise coefficient set to {}"
        .format(iter_num, noise_scale))
    print("Caffe2 after training, W is: {}".format(workspace.FetchBlob("W")))
    print("Caffe2 after training, B is: {}".format(workspace.FetchBlob("B")))
    print("Ngraph after training, W is: {}".format(w_val))
    print("Ngraph after training, B is: {}".format(b_val))
    print("Target W was: {}".format(target_w))
    print("Target B was: {}".format(target_b))

    assert (workspace.FetchBlob("loss") < 0.01)
    assert (loss_val < 0.01)
Example #19
def mnist_mlp(args):
    mnist = input_data.read_data_sets(args.data_dir, one_hot=False)

    train_x, train_y = mnist.train.next_batch(args.batch)
    # we have to feed the blobs some data to give them a valid shape,
    # because ngraph will import this shape
    workspace.FeedBlob('train_x', train_x)
    # currently caffe2 accepts only the int32 data type
    workspace.FeedBlob('train_y', train_y.astype('int32'))

    init_net = core.Net('init')
    main_net = core.Net('main')

    # layer sizes: input, two hidden layers, and output
    fc_size = [784, 512, 128, 10]
    init_net.UniformFill([], 'fc_w1', shape=[fc_size[1], fc_size[0]], min=-.5, max=.5)
    init_net.UniformFill([], 'fc_w2', shape=[fc_size[2], fc_size[1]], min=-.5, max=.5)
    init_net.UniformFill([], 'fc_w3', shape=[fc_size[3], fc_size[2]], min=-.5, max=.5)
    init_net.UniformFill([], 'fc_b1', shape=[fc_size[1]], min=-.5, max=.5)
    init_net.UniformFill([], 'fc_b2', shape=[fc_size[2]], min=-.5, max=.5)
    init_net.UniformFill([], 'fc_b3', shape=[fc_size[3]], min=-.5, max=.5)

    main_net.FC(['train_x', 'fc_w1', 'fc_b1'], 'FC1')
    main_net.Relu('FC1', 'activ1')
    main_net.FC(['activ1', 'fc_w2', 'fc_b2'], 'FC2')
    main_net.Relu('FC2', 'activ2')
    main_net.FC(['activ2', 'fc_w3', 'fc_b3'], 'FC3')
    main_net.Softmax('FC3', 'softmax')
    main_net.LabelCrossEntropy(['softmax', 'train_y'], 'xent')
    main_net.AveragedLoss('xent', 'loss')

    # Ngraph part
    if ng_on:
        print('>>>>>>>>>>>>>> Ngraph')
        # import graph_def
        importer = C2Importer()
        importer.parse_net_def(net_def=main_net.Proto(),
                               init_net_def=init_net.Proto(),
                               c2_workspace=workspace)

        # get handle of ngraph ops
        x_train_ng, y_train_ng, loss_ng, \
            fc_w1_ng, fc_w2_ng, fc_w3_ng, fc_b1_ng, fc_b2_ng, fc_b3_ng = importer.get_op_handle(
                ['train_x', 'train_y', 'loss',
                 'fc_w1', 'fc_w2', 'fc_w3', 'fc_b1', 'fc_b2', 'fc_b3'])

        # set the learning rate for ngraph
        # to match the one used for caffe2 below
        alpha = ng.placeholder(axes=(), initial_value=[args.lrate])

        # transformer and computations
        parallel_update = util.CommonSGDOptimizer(args.lrate) \
            .minimize(loss_ng, [fc_w1_ng, fc_w2_ng, fc_w3_ng, fc_b1_ng, fc_b2_ng, fc_b3_ng])
        transformer = ngt.make_transformer()
        update_fun = transformer.computation(
            [loss_ng, parallel_update], alpha, x_train_ng, y_train_ng)

        # train
        # ngraph actual computation
        for i in range(args.max_iter):
            train_x, train_y = mnist.train.next_batch(args.batch)
            lr = args.lrate * (1 + args.gamma * i) ** (-args.power)
            loss_val, _ = update_fun(lr, train_x, train_y)
            if args.verbose and i % log_interval == 0:
                print('iter %s, loss %s ' % (i, loss_val))
    # ======================================
    if c2_on:
        mnist = input_data.read_data_sets(args.data_dir, one_hot=False)
        print('>>>>>>>>>>>>>> Caffe')
        # caffe2 backward pass and computation to compare results with ngraph
        init_net.ConstantFill([], 'ONE', shape=[1], value=1.)
        init_net.ConstantFill([], 'ITER', shape=[1], value=0, dtype=core.DataType.INT32)
        gradient_map = main_net.AddGradientOperators(['loss'])

        # Increment the iteration by one.
        main_net.Iter('ITER', 'ITER')

        # Caffe2 backward pass and computation
        # Get gradients for all the computations above and do the weighted sum
        main_net.LearningRate('ITER', 'LR', base_lr=-args.lrate, policy='inv',
                              power=args.power, gamma=args.gamma)

        main_net.WeightedSum(['fc_w1', 'ONE', gradient_map['fc_w1'], 'LR'], 'fc_w1')
        main_net.WeightedSum(['fc_w2', 'ONE', gradient_map['fc_w2'], 'LR'], 'fc_w2')
        main_net.WeightedSum(['fc_w3', 'ONE', gradient_map['fc_w3'], 'LR'], 'fc_w3')
        main_net.WeightedSum(['fc_b1', 'ONE', gradient_map['fc_b1'], 'LR'], 'fc_b1')
        main_net.WeightedSum(['fc_b2', 'ONE', gradient_map['fc_b2'], 'LR'], 'fc_b2')
        main_net.WeightedSum(['fc_b3', 'ONE', gradient_map['fc_b3'], 'LR'], 'fc_b3')
        workspace.RunNetOnce(init_net)
        workspace.CreateNet(main_net)

        for i in range(args.max_iter):
            train_x, train_y = mnist.train.next_batch(args.batch)
            workspace.FeedBlob('train_x', train_x)
            workspace.FeedBlob('train_y', train_y.astype('int32'))
            workspace.RunNet(main_net.Proto().name)
            if args.verbose and i % log_interval == 0:
                print('Iter: {}, C2 loss is: {}'.format(i, workspace.FetchBlob('loss')))
        # end of caffe2 part

    if ng_on:
        print('Ngraph loss is: %s' % loss_val)
    if c2_on:
        print('Caffe2 loss is: {}'.format(workspace.FetchBlob('loss')))
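
The learning-rate expression in the ngraph loop and the Caffe2 LearningRate op both implement the Caffe 'inv' policy, lr = base_lr * (1 + gamma * iter)^(-power); a minimal sketch:

def inv_lr(base_lr, gamma, power, it):
    # Caffe-style 'inv' policy: smooth polynomial decay from base_lr toward zero.
    return base_lr * (1 + gamma * it) ** (-power)

assert inv_lr(0.1, 0.001, 0.75, 0) == 0.1  # no decay at iteration 0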