def assert_cntk_ngraph_array_equal(cntk_op):
    """Evaluate *cntk_op* natively and through the ngraph importer, and
    assert that both backends produce exactly equal arrays."""
    expected = cntk_op.eval()

    imported_op, _ = CNTKImporter().import_model(cntk_op)
    actual = ng.transformers.make_transformer().computation(imported_op)()

    assert np.array_equal(expected, actual)
def assert_cntk_ngraph_flat_isclose(cntk_op):
    """Evaluate *cntk_op* natively and through the ngraph importer, and
    assert the flattened results agree within np.isclose tolerances."""
    expected = cntk_op.eval()

    imported_op, _ = CNTKImporter().import_model(cntk_op)
    actual = ng.transformers.make_transformer().computation(imported_op)()

    assert np.isclose(expected.flatten(), actual.flatten()).all()
# Example 3
def test_element_times_2():
    """Element-wise product of a 2x3 matrix with a broadcast 1x3 row.

    Delegates to the file's shared helper so the CNTK-vs-ngraph comparison
    lives in one place instead of being re-implemented per test.
    """
    assert_cntk_ngraph_array_equal(
        C.element_times([[1, 2, 3], [4, 5, 6]], [7, 8, 9]))
# Example 4
def test_minus_3():
    """Subtraction of a 1x3 row (broadcast) from a 2x3 matrix.

    Uses the shared equality helper rather than duplicating the
    eval/import/compare boilerplate.
    """
    assert_cntk_ngraph_array_equal(
        C.minus([1, 2, 3], [[4, 5, 6], [7, 8, 9]]))
# Example 5
def test_times_6():
    """Matrix product of a 3x2 by a 2x3 matrix.

    Compared with np.array_equiv, which accepts broadcast-compatible shape
    differences between the two backends' outputs.
    """
    product_op = C.times([[1, 2], [3, 4], [5, 6]],
                         [[7, 8, 9], [10, 11, 12]])
    expected = product_op.eval()

    imported_op, _ = CNTKImporter().import_model(product_op)
    transformer = ng.transformers.make_transformer()
    actual = transformer.computation(imported_op)()

    assert np.array_equiv(expected, actual)
# Example 6
def test_times_4():
    """Matrix product of a 2x3 matrix by a 3x1 column vector.

    Compared with np.array_equiv, which accepts broadcast-compatible shape
    differences between the two backends' outputs.
    """
    product_op = C.times([[1, 2, 3], [4, 5, 6]], [[7], [8], [9]])
    expected = product_op.eval()

    imported_op, _ = CNTKImporter().import_model(product_op)
    transformer = ng.transformers.make_transformer()
    actual = transformer.computation(imported_op)()

    assert np.array_equiv(expected, actual)
# Example 7
def test_tanh_2():
    """tanh of a single zero; results compared within isclose tolerance.

    Uses the file's shared isclose helper instead of duplicating the
    eval/import/compare boilerplate.
    """
    assert_cntk_ngraph_flat_isclose(C.tanh([0.]))
# Example 8
def test_exp_1():
    """exp over a small symmetric range; compared within isclose tolerance
    via the file's shared helper."""
    assert_cntk_ngraph_flat_isclose(C.exp([-2, -1., 0., 1., 2.]))
# Example 9
def test_exp_3():
    """exp over ten values in [-0.9, 0]; compared within isclose tolerance
    via the file's shared helper."""
    assert_cntk_ngraph_flat_isclose(
        C.exp([-0.9, -0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1, 0.]))
# Example 10
def test_relu_1():
    """ReLU over a symmetric 1-D range; exact equality expected, checked
    via the file's shared helper."""
    assert_cntk_ngraph_array_equal(C.relu([-2, -1., 0., 1., 2.]))
# Example 11
def test_relu_5():
    """ReLU over a 2x3 matrix mixing negative and positive entries; exact
    equality expected, checked via the file's shared helper."""
    assert_cntk_ngraph_array_equal(C.relu([[-3, -2, -1], [1, 2, 3]]))
# Example 12
def test_relu_3():
    """ReLU over ten non-positive values in [-0.9, 0]; exact equality
    expected, checked via the file's shared helper."""
    assert_cntk_ngraph_array_equal(C.relu(
        [-0.9, -0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1, 0.]))
# Example 13
def _create_model_and_execute_test(params):
    """Build the model described by *params* in CNTK, mirror it in ngraph,
    train one minibatch on each, and return both loss values.

    Mutates *params* in place (adds 'input_var', 'act_fun', 'init_fun')
    before handing it to params['create_model'].

    :param params: dict with at least 'input_dim', 'label_dim',
        'batch_size' and a 'create_model' callable.
    :return: (cntk_loss, ngraph_loss) for the same random minibatch.
    """
    # Create CNTK model
    input_var = C.input_variable(params['input_dim'], np.float32)
    params['input_var'] = input_var
    params['act_fun'] = C.layers.blocks.identity
    params['init_fun'] = C.glorot_uniform()

    model = params['create_model'](params)

    label_var = C.input_variable((params['label_dim']), np.float32)
    loss = C.cross_entropy_with_softmax(model, label_var)
    eval_error = C.classification_error(model, label_var)

    # Same fixed learning rate (0.05) is used for the ngraph optimizer
    # below so both losses are comparable.
    lr_schedule = C.learning_rate_schedule(0.05, C.UnitType.minibatch)
    learner = C.sgd(model.parameters, lr_schedule)
    trainer = C.Trainer(model, (loss, eval_error), [learner])

    input_value, label_value = _generate_random_sample(
        params['batch_size'],
        params['input_dim'],
        params['label_dim']
    )

    # Import to ngraph
    ng_loss, placeholders = CNTKImporter(batch_size=params['batch_size']).import_model(loss)
    parallel_update = CommonSGDOptimizer(0.05).minimize(ng_loss, ng_loss.variables())

    transformer = ng.transformers.make_transformer()
    update_fun = transformer.computation([ng_loss, parallel_update], *placeholders)

    # Execute on CNTK
    trainer.train_minibatch({input_var: input_value, label_var: label_value})
    cntk_ret = trainer.previous_minibatch_loss_average

    # Execute on ngraph
    # Move the batch axis last: the importer expects batch-last layout,
    # while CNTK samples are batch-first.
    input_value = np.moveaxis(input_value, 0, -1)
    label_value = np.moveaxis(label_value, 0, -1)
    ng_ret = update_fun(input_value, label_value)[0]

    return cntk_ret, ng_ret
def load_and_score(cifar_dir, model_file):
    """Score a trained CIFAR network with both CNTK and its ngraph import.

    Prints either a single accuracy (when both backends agree on every
    prediction) or one accuracy per backend.

    Fix: the per-backend ngraph accuracy previously printed
    ``cntk_results`` again instead of ``ng_results``.

    :param cifar_dir: directory containing 'test_map.txt' and the model.
    :param model_file: filename of the trained network inside *cifar_dir*.
    :raises RuntimeError: when the dataset or the trained model is missing.
    """
    map_file = os.path.join(cifar_dir, 'test_map.txt')
    if not os.path.exists(map_file):
        raise RuntimeError("This example require prepared dataset. \
         Please run cifar_prepare.py example.")

    trained_network = os.path.join(cifar_dir, model_file)
    if not os.path.exists(trained_network):
        raise RuntimeError("This example require trained network. \
         Please run cifar_training.py example.")

    cntk_model = C.ops.functions.load_model(trained_network)
    ng_model, ng_placeholders = CNTKImporter().import_model(cntk_model)
    eval_fun = ng.transformers.make_transformer().computation(
        ng_model, *ng_placeholders)

    cntk_results = []
    ng_results = []
    same_predictions = []
    for line in open(map_file):
        image_file, label = line.split()

        # Channels-first, contiguous array as the models expect.
        rgb_image = np.asarray(Image.open(image_file), dtype="float32")
        pic = np.ascontiguousarray(np.moveaxis(rgb_image, 2, 0))

        cntk_predictions = np.squeeze(
            cntk_model.eval({cntk_model.arguments[0]: [pic]}))
        cntk_results.append(int(np.argmax(cntk_predictions)) == int(label))

        ng_predictions = eval_fun(pic)
        ng_results.append(int(np.argmax(ng_predictions)) == int(label))

        same_predictions.append(np.allclose(cntk_predictions, ng_predictions))

    test_size = len(ng_results)
    if np.all(same_predictions):
        print(
            'CNTK and ngraph predictions identical. Prediction correctness - {0:.2f}%.'
            .format(np.count_nonzero(ng_results) * 100 / test_size))
    else:
        print('CNTK prediction correctness - {0:.2f}%'.format(
            np.count_nonzero(cntk_results) * 100 / test_size))
        # Bug fix: report the ngraph accuracy from ng_results, not a
        # duplicate of the CNTK figure.
        print('ngraph prediction correctness - {0:.2f}%'.format(
            np.count_nonzero(ng_results) * 100 / test_size))
    print("")
def load_and_score(mnist_dir):
    """Evaluate a trained MNIST network on jpg samples with CNTK and with
    its ngraph-imported copy, printing the accuracy of each backend.

    :param mnist_dir: directory with a 'jpg/' subdir and 'MNIST.dnn'.
    :raises RuntimeError: when the jpg dataset or trained model is missing.
    """
    jpg_files = glob.glob(os.path.join(mnist_dir, 'jpg/*.jpg'))
    if not jpg_files:
        raise RuntimeError(
            "This example require a dataset. Please download and extract the MNIST jpg files."
        )

    trained_network = os.path.join(mnist_dir, "MNIST.dnn")
    if not os.path.exists(trained_network):
        raise RuntimeError(
            "This example require trained network. Please run mnist_training.py example."
        )

    cntk_model = C.ops.functions.load_model(trained_network)

    ng_model, ng_placeholders = CNTKImporter().import_model(cntk_model)
    transformer = ng.transformers.make_transformer()
    eval_fun = transformer.computation(ng_model, *ng_placeholders)

    cntk_hits = []
    ng_hits = []
    for filename in jpg_files:
        # Ground-truth digit is encoded in the filename: <idx>_<label>_...
        expected_digit = int(os.path.basename(filename).split("_")[1])

        pixels = np.asarray(Image.open(filename), dtype="float32")
        flat_picture = np.ascontiguousarray(pixels).flatten()

        cntk_scores = np.squeeze(
            cntk_model.eval({cntk_model.arguments[0]: [flat_picture]})
        )
        cntk_hits.append(int(np.argmax(cntk_scores) == expected_digit))

        ng_scores = eval_fun(flat_picture)
        ng_hits.append(int(np.argmax(ng_scores) == expected_digit))

    sample_count = len(jpg_files)
    print('CNTK prediction correctness - {0:.2f}%'.format(
        np.count_nonzero(cntk_hits) * 100 / sample_count
    ))
    print('ngraph prediction correctness - {0:.2f}%'.format(
        np.count_nonzero(ng_hits) * 100 / sample_count
    ))
# Example 16
def load_and_score(mnist_jpg_dir, model_path="/tmp/data/MNIST/MNIST.dnn"):
    """Print CNTK vs ngraph digit predictions for every jpg in a directory.

    Generalized: the trained-model location, previously hard-coded inside
    the body, is now a keyword parameter whose default preserves the old
    behavior for existing callers.

    :param mnist_jpg_dir: directory scanned for '*.jpg' sample images.
    :param model_path: path to the trained CNTK model to load.
    """
    cntk_model = C.ops.functions.load_model(model_path)

    ng_model, ng_placeholders = CNTKImporter().import_model(cntk_model)
    eval_fun = ng.transformers.make_transformer().computation(
        ng_model, *ng_placeholders)

    for filename in glob.glob(os.path.join(mnist_jpg_dir, '*.jpg')):
        rgb_image = np.asarray(Image.open(filename), dtype="float32")
        pic = np.ascontiguousarray(rgb_image).flatten()

        cntk_predictions = np.squeeze(
            cntk_model.eval({cntk_model.arguments[0]: [pic]}))
        cntk_top_class = np.argmax(cntk_predictions)

        ng_predictions = eval_fun(pic)
        ng_top_class = np.argmax(ng_predictions)

        # Ground-truth digit is the second underscore-separated filename field.
        actual_number = os.path.basename(filename).split("_")[1]
        print("Digit in jpg file: " + actual_number)
        print("\tCNTK prediction:   " + str(cntk_top_class))
        print("\tngraph prediction: " + str(ng_top_class))
        print("")
# Example 17
    label = C.blocks.Input(num_output_classes)

    z = fully_connected_classifier_net(input, num_output_classes,
                                       hidden_layers_dim, num_hidden_layers,
                                       C.sigmoid)
    loss = C.cross_entropy_with_softmax(z, label)
    eval_error = C.ops.classification_error(z, label)

    learning_rate = 0.5
    lr_schedule = C.learning_rate_schedule(learning_rate, C.UnitType.sample)

    learner = C.learner.sgd(z.parameters, lr_schedule)
    trainer = C.Trainer(z, (loss, eval_error), [learner])

    # ngraph
    L, placeholders = CNTKImporter().import_model(loss)
    parallel_update = CommonSGDOptimizer(learning_rate).minimize(L, L.variables())

    transformer = ng.transformers.make_transformer()
    update_fun = transformer.computation([L, parallel_update], *placeholders)

    # CNTK training
    for i in range(0, number_of_iterations):
        for xs, ys in zip(features, labels):
            trainer.train_minibatch({input: [xs], label: [ys]})
        training_loss = C.utils.get_train_loss(trainer)
        print("cntk iteration {0} -> loss: {1}".format(i, training_loss))

    # ngraph training
    for i in range(0, number_of_iterations):
        for xs, ys in zip(features, labels):
    output_dim = num_output_classes
    z = linear_layer(input, output_dim)

    label = C.input_variable((num_output_classes), np.float32)
    loss = C.cross_entropy_with_softmax(z, label)
    eval_error = C.classification_error(z, label)

    learning_rate = 0.05
    lr_schedule = C.learning_rate_schedule(learning_rate, C.UnitType.sample)

    learner = C.sgd(z.parameters, lr_schedule)
    trainer = C.Trainer(z, (loss, eval_error), [learner])

    # ngraph
    L, placeholders = CNTKImporter().import_model(loss)
    parallel_update = CommonSGDOptimizer(learning_rate).minimize(
        L, L.variables())

    transformer = ng.transformers.make_transformer()
    update_fun = transformer.computation([L, parallel_update], *placeholders)

    # CNTK training
    for i in range(0, number_of_iterations):
        for xs, ys in zip(features, labels):
            trainer.train_minibatch({input: [xs], label: [ys]})
        training_loss = trainer.previous_minibatch_loss_average
        print("cntk iteration {0} -> loss: {1}".format(i, training_loss))

    # ngraph training
    for i in range(0, number_of_iterations):
# Example 19
def _minibatch_to_ngraph_batches(data, input_var, label_var, minibatch_size,
                                 num_classes):
    # Convert one CNTK minibatch to batch-last dense arrays for ngraph:
    # the batch axis is moved to the end and the sparse label matrix is
    # sliced to (minibatch_size, num_classes) and densified.
    features_batch = np.moveaxis(np.squeeze(data[input_var].asarray()), 0, -1)
    labels_batch = np.moveaxis(
        data[label_var].data.data.slice_view(
            [0, 0, 0], [minibatch_size, num_classes]).asarray().todense(),
        0, -1)
    return features_batch, labels_batch


def train_and_evaluate(reader_train, reader_test, max_epochs, model_func):
    """Train *model_func* with CNTK while mirroring every minibatch through
    the ngraph-imported copy, then report both backends' test scores.

    Fix: the evaluation loop previously hard-coded the label slice as
    ``[64, 10]`` instead of ``[minibatch_size, num_classes]``; both loops
    now share one conversion helper so the constants cannot drift apart.

    :return: the trained CNTK model wrapped in a softmax.
    """
    # ======================================================================================
    # Creating
    # ======================================================================================
    input_var = C.input((num_channels, image_height, image_width))
    feature_scale = 1.0 / 256.0
    input_var_norm = C.element_times(feature_scale, input_var)

    cntk_model = model_func(input_var_norm, num_classes)

    label_var = C.input((num_classes))
    loss = C.cross_entropy_with_softmax(cntk_model, label_var)
    error = C.classification_error(cntk_model, label_var)

    minibatch_size = 64
    learning_rate = 0.01
    momentum = 0.9

    lr_schedule = C.learning_rate_schedule(learning_rate, C.UnitType.minibatch)
    learner = C.momentum_sgd(cntk_model.parameters, lr_schedule,
                             C.momentum_schedule(momentum))
    trainer = C.Trainer(cntk_model, (loss, error), [learner])

    # Mirror the model in ngraph; labels get their own placeholder since
    # only the forward model is imported.
    ng_model, ng_placeholders = CNTKImporter(
        batch_size=minibatch_size).import_model(cntk_model)
    ng_labels = ng.placeholder(
        [ng.make_axis(num_classes),
         ng.make_axis(minibatch_size, 'N')])
    ng_placeholders.append(ng_labels)

    transformer = ng.transformers.make_transformer()

    ng_loss = create_loss_and_learner(ng_model, ng_labels, learning_rate,
                                      momentum)
    training_fun = transformer.computation(ng_loss, *ng_placeholders)

    ng_error = classification_error(ng_model, ng_labels)
    test_fun = transformer.computation(ng_error, *ng_placeholders)

    # ======================================================================================
    # Training
    # ======================================================================================
    epoch_size = 50000
    input_map = {
        input_var: reader_train.streams.features,
        label_var: reader_train.streams.labels
    }

    num_minibatches_to_train = (epoch_size * max_epochs) / minibatch_size
    for _ in range(0, int(num_minibatches_to_train)):
        data = reader_train.next_minibatch(minibatch_size, input_map=input_map)
        trainer.train_minibatch(data)

        features_batch, labels_batch = _minibatch_to_ngraph_batches(
            data, input_var, label_var, minibatch_size, num_classes)
        training_fun(features_batch, labels_batch)

    # ======================================================================================
    # Evaluation
    # ======================================================================================
    cntk_results = 0.0
    ng_results = 0.0
    epoch_size = 10000
    input_map = {
        input_var: reader_test.streams.features,
        label_var: reader_test.streams.labels
    }

    num_minibatches_to_test = epoch_size // minibatch_size
    for _ in range(num_minibatches_to_test):
        data = reader_test.next_minibatch(minibatch_size, input_map=input_map)
        cntk_results += trainer.test_minibatch(data)

        features_batch, labels_batch = _minibatch_to_ngraph_batches(
            data, input_var, label_var, minibatch_size, num_classes)
        ng_results += test_fun(features_batch, labels_batch)

    print("CNTK results: {0:.2f}%".format(
        (cntk_results * 100.0) / num_minibatches_to_test))
    print("ngraph results: {0:.2f}%".format(
        (ng_results * 100.0) / num_minibatches_to_test))
    print("")

    return C.softmax(cntk_model)
def train_and_test(data_dir):
    """Train an MNIST classifier in CNTK and its ngraph-imported mirror,
    compare both test errors, and save the CNTK model.

    Fix: the local name ``ng_error`` was bound first to the ngraph error
    op and then rebound as a float accumulator, shadowing the op; the
    accumulator is now named ``ng_result`` (mirroring ``cntk_result``).

    :param data_dir: directory holding the CNTK-format MNIST text files.
    """
    train_file = os.path.join(data_dir, "Train-28x28_cntk_text.txt")
    test_file = os.path.join(data_dir, "Test-28x28_cntk_text.txt")

    input_dim = 784
    output_dim = 10

    input_var = C.input(input_dim)
    label_var = C.input(output_dim)

    cntk_model = create_model(input_var / 256.0, 2, 400, output_dim)

    cntk_loss = C.cross_entropy_with_softmax(cntk_model, label_var)
    cntk_error = C.classification_error(cntk_model, label_var)

    learning_rate = 0.2
    lr_schedule = C.learning_rate_schedule(learning_rate, C.UnitType.minibatch)
    learner = C.sgd(cntk_model.parameters, lr_schedule)
    trainer = C.Trainer(cntk_model, (cntk_loss, cntk_error), [learner])

    batch_size = 64

    # ngraph import begin ==================================================================
    ng_model, ng_placeholders = CNTKImporter(batch_size=batch_size).import_model(cntk_model)

    ng_labels = ng.placeholder([ng.make_axis(output_dim), ng.make_axis(batch_size, 'N')])
    ng_placeholders.append(ng_labels)

    transformer = ng.transformers.make_transformer()

    ng_loss = cross_entropy_with_softmax(ng_model, ng_labels)
    parallel_update = CommonSGDOptimizer(learning_rate).minimize(ng_loss, ng_loss.variables())
    training_fun = transformer.computation([ng_loss, parallel_update], *ng_placeholders)

    ng_error = classification_error(ng_model, ng_labels)
    test_fun = transformer.computation(ng_error, *ng_placeholders)
    # ngraph import end ====================================================================

    reader_train = create_reader(train_file, True, input_dim, output_dim)
    train_input_map = {
        input_var: reader_train.streams.features,
        label_var: reader_train.streams.labels
    }

    num_samples = 60000
    num_epochs = 10
    num_minibatches_to_train = (num_samples * num_epochs) / batch_size
    for _ in range(0, int(num_minibatches_to_train)):
        data = reader_train.next_minibatch(batch_size, input_map=train_input_map)
        trainer.train_minibatch(data)

        # ngraph train: batch axis moved last for the imported graph.
        features_batch = np.moveaxis(np.squeeze(data[input_var].asarray()), 0, -1)
        labels_batch = np.moveaxis(np.squeeze(data[label_var].asarray()), 0, -1)
        training_fun(features_batch, labels_batch)

    reader_test = create_reader(test_file, False, input_dim, output_dim)
    test_input_map = {
        input_var: reader_test.streams.features,
        label_var: reader_test.streams.labels
    }

    cntk_result = 0.0
    ng_result = 0.0
    num_samples = 10000
    num_minibatches_to_test = num_samples // batch_size
    for _ in range(num_minibatches_to_test):
        data = reader_test.next_minibatch(batch_size, input_map=test_input_map)
        cntk_result += trainer.test_minibatch(data)

        # ngraph test
        features_batch = np.moveaxis(np.squeeze(data[input_var].asarray()), 0, -1)
        labels_batch = np.moveaxis(np.squeeze(data[label_var].asarray()), 0, -1)
        ng_result += test_fun(features_batch, labels_batch)

    print("Average CNTK test error: {0:.2f}%".format(cntk_result * 100 / num_minibatches_to_test))
    print("Average ngraph test error: {0:.2f}%".format(ng_result * 100 / num_minibatches_to_test))

    # NOTE(review): saves under the module-level MNIST directory rather
    # than data_dir — confirm this is intended.
    C.softmax(cntk_model).save(os.path.join(MNIST, "MNIST.dnn"))
# Example 21
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import print_function

import cntk as C

import ngraph as ng
from ngraph.frontends.cntk.cntk_importer.importer import CNTKImporter

# Demo: import a constant CNTK subtraction graph into ngraph and print
# the result of evaluating it.
cntk_op = C.minus([1, 2, 3], [4, 5, 6])

ng_op, ng_placeholders = CNTKImporter().import_model(cntk_op)
transformer = ng.transformers.make_transformer()
results = transformer.computation(ng_op)
print(results())