Code example #1
File: FastRCNN_test.py Project: OlegBoulanov/CNTK
def test_fastrcnn_grocery_training(device_id):
    if cntk_device(device_id).type() != DeviceKind_GPU:
        pytest.skip('test only runs on GPU') # it runs very slowly on CPU
    try_set_default_device(cntk_device(device_id))

    from A1_GenerateInputROIs import generate_input_rois
    assert generate_input_rois(testing=True)

    # since we do not use a reader for evaluation we need unzipped data
    externalData = 'CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY' in os.environ

    if externalData:
        extPath = os.environ['CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY']
        model_file = os.path.join(extPath, "PreTrainedModels", "AlexNet", "v0", "AlexNet.model")
    else:
        model_file = os.path.join(abs_path, *"../../../../Examples/Image/PretrainedModels/AlexNet.model".split("/"))

    from A2_RunWithPyModel import train_fast_rcnn, evaluate_fast_rcnn
    trained_model = train_fast_rcnn(model_path=model_file)

    assert evaluate_fast_rcnn(trained_model)

    from A3_ParseAndEvaluateOutput import evaluate_output
    assert evaluate_output()

    from B3_VisualizeOutputROIs import visualize_output_rois
    assert visualize_output_rois(testing=True)
Code example #2
def test_feature_extraction(device_id):
    if cntk_device(device_id).type() != DeviceKind_GPU:
        pytest.skip('test only runs on GPU') # due to batch normalization in ResNet_18
    try_set_default_device(cntk_device(device_id))

    base_path = os.path.dirname(os.path.abspath(__file__))
    externalData = 'CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY' in os.environ
    if externalData:
        extPath = os.environ['CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY']
        print("Reading data and model from %s" % extPath)
        model_file = os.path.join(extPath, *"PreTrainedModels/ResNet/v1/ResNet_18.model".split("/"))
        map_file = os.path.join(extPath, *"Image/CIFAR/v0/cifar-10-batches-py/test_map.txt".split("/"))
    else:
        model_file = os.path.join(base_path, *"../../../../PretrainedModels/ResNet_18.model".split("/"))
        map_file = os.path.join(base_path, *"../../../../Examples/Image/DataSets/CIFAR-10/test_map.txt".split("/"))

    original_dir = os.getcwd()  # os.curdir is only '.', which would not restore the working directory
    try:
        os.chdir(os.path.join(os.path.dirname(map_file), '..'))

        minibatch_source = create_mb_source(224, 224, 3, map_file)
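        # "z.x" is, by CNTK's node-naming convention, the input of the top node z,
        # i.e. the last hidden layer; its activations are written out as features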
        node_name = "z.x"
        output_file = os.path.join(base_path, "layerOutput.txt")
        eval_and_write(model_file, node_name, output_file, minibatch_source, num_objects=2)

        expected_output_file = os.path.join(base_path, "feature_extraction_expected_output.txt")
        output = np.fromfile(output_file)
        expected_output = np.fromfile(expected_output_file)

        assert np.allclose(output, expected_output, atol=TOLERANCE_ABSOLUTE)
    finally:
        os.chdir(original_dir)
Code example #3
File: ucf11_conv3d_test.py Project: AllanYiin/CNTK
def test_ucf11_conv3d_error(device_id):
    if cntk_device(device_id).type() != DeviceKind_GPU:
        pytest.skip('test only runs on GPU')
    try_set_default_device(cntk_device(device_id))

    prepare_UCF11_data()

    base_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             *"../../../../Examples/Video/DataSets/UCF11".split("/"))

    base_path = os.path.normpath(base_path)

    from _cntk_py import set_computation_network_trace_level, set_fixed_random_seed, force_deterministic_algorithms
    set_computation_network_trace_level(1)
    set_fixed_random_seed(1)

    num_output_classes = 11
    train_reader = VideoReader(os.path.join(base_path, 'train_map.csv'), num_output_classes, True, 100)
    test_reader  = VideoReader(os.path.join(base_path, 'test_map.csv'), num_output_classes, False, 40)

    test_error = conv3d_ucf11(train_reader, test_reader, max_epochs=1)
    expected_test_error = 0.8

    assert np.allclose(test_error, expected_test_error,
                       atol=TOLERANCE_ABSOLUTE)
Code example #4
def test_bn_inception_cifar(device_id):
    if cntk_device(device_id).type() != DeviceKind_GPU:
        pytest.skip('test only runs on GPU')
    try_set_default_device(cntk_device(device_id))

    current_path = os.getcwd()
    base_path = prepare_CIFAR10_data()
    # change dir to locate data.zip correctly
    os.chdir(base_path)

    from _cntk_py import set_fixed_random_seed, force_deterministic_algorithms
    set_fixed_random_seed(1)
    force_deterministic_algorithms()

    mean_data = os.path.join(base_path, 'CIFAR-10_mean.xml')
    train_data = os.path.join(base_path, 'train_map.txt')
    test_data = os.path.join(base_path, 'test_map.txt')

    try:
        error = bn_inception_train_and_eval(train_data, test_data, mean_data, minibatch_size=16, epoch_size=500,
                                    max_epochs=8, restore=False, testing_parameters=(500,16))
    finally:
        os.chdir(current_path)

    expected_error = 0.88
    assert np.allclose(error, expected_error, atol=TOLERANCE_ABSOLUTE)
Code example #5
File: cifar_convnet_test.py Project: Microsoft/CNTK
def test_cifar_convnet_error(device_id):
    if cntk_device(device_id).type() != DeviceKind_GPU:
        pytest.skip('test only runs on GPU')
    set_default_device(cntk_device(device_id))

    try:
        base_path = os.path.join(os.environ['CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY'],
                                *"Image/CIFAR/v0/cifar-10-batches-py".split("/"))
        # N.B. CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY has {train,test}_map.txt
        #      and CIFAR-10_mean.xml in the base_path.
    except KeyError:
        base_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                *"../../../../Examples/Image/DataSets/CIFAR-10".split("/"))

    base_path = os.path.normpath(base_path)
    os.chdir(os.path.join(base_path, '..'))

    from _cntk_py import set_computation_network_trace_level, set_fixed_random_seed, force_deterministic_algorithms
    set_computation_network_trace_level(1)
    set_fixed_random_seed(1)  # BUGBUG: has no effect at present  # TODO: remove debugging facilities once this all works
    #force_deterministic_algorithms()
    # TODO: do the above; they lead to slightly different results, so not doing it for now

    reader_train = create_reader(os.path.join(base_path, 'train_map.txt'), os.path.join(base_path, 'CIFAR-10_mean.xml'), True)
    reader_test  = create_reader(os.path.join(base_path, 'test_map.txt'), os.path.join(base_path, 'CIFAR-10_mean.xml'), False)

    test_error = convnet_cifar10_dataaug(reader_train, reader_test, max_epochs=1)
    expected_test_error = 0.617

    assert np.allclose(test_error, expected_test_error,
                       atol=TOLERANCE_ABSOLUTE)
Code example #6
def test_inception_v3_imagenet(device_id):
    if cntk_device(device_id).type() != DeviceKind_GPU:
        pytest.skip('test only runs on GPU')
    try_set_default_device(cntk_device(device_id))

    current_path = os.getcwd()
    base_path = prepare_ImageNet_data()
    # change dir to locate data.zip correctly
    os.chdir(base_path)

    from _cntk_py import set_fixed_random_seed, force_deterministic_algorithms
    set_fixed_random_seed(1)
    force_deterministic_algorithms()

    train_data = os.path.join(base_path, 'train_map.txt')
    test_data = os.path.join(base_path, 'val_map.txt')

    try:
        error = inception_v3_train_and_eval(train_data, test_data, minibatch_size=8, epoch_size=200,
                                            max_epochs=4, restore=False, testing_parameters=(200, 8))
    finally:
        os.chdir(current_path)

    expected_error = 0.99
    assert np.allclose(error, expected_error, atol=TOLERANCE_ABSOLUTE)
Code example #7
File: ConvNet_MNIST_test.py Project: AllanYiin/CNTK
def test_convnet_mnist_error(device_id):
    if cntk_device(device_id).type() != DeviceKind_GPU:
        pytest.skip('test only runs on GPU')
    try_set_default_device(cntk_device(device_id))

    error = convnet_mnist(epoch_size=5000, minibatch_size=32, max_epochs=10)

    expected_error = 0.0226
    assert np.allclose(error, expected_error, atol=TOLERANCE_ABSOLUTE)
Code example #8
File: trainer_test.py Project: BorisJineman/CNTK
def test_eval_sparse_dense(tmpdir, device_id):
    from cntk import Axis, Value
    from cntk.io import MinibatchSource, CTFDeserializer, StreamDef, StreamDefs
    from cntk.ops import input_variable, times
    from scipy.sparse import csr_matrix as csr  # used below to build sparse inputs

    input_vocab_dim = label_vocab_dim = 69

    ctf_data = '''\
0	|S0 3:1 |# <s>	|S1 3:1 |# <s>
0	|S0 4:1 |# A	|S1 32:1 |# ~AH
0	|S0 5:1 |# B	|S1 36:1 |# ~B
0	|S0 4:1 |# A	|S1 31:1 |# ~AE
0	|S0 7:1 |# D	|S1 38:1 |# ~D
0	|S0 12:1 |# I	|S1 47:1 |# ~IY
0	|S0 1:1 |# </s>	|S1 1:1 |# </s>
2	|S0 60:1 |# <s>	|S1 3:1 |# <s>
2	|S0 61:1 |# A	|S1 32:1 |# ~AH
'''
    ctf_file = str(tmpdir/'2seqtest.txt')
    with open(ctf_file, 'w') as f:
        f.write(ctf_data)

    mbs = MinibatchSource(CTFDeserializer(ctf_file, StreamDefs(
        features  = StreamDef(field='S0', shape=input_vocab_dim,  is_sparse=True),
        labels    = StreamDef(field='S1', shape=label_vocab_dim,  is_sparse=True)
    )), randomize=False, epoch_size = 2)

    batch_axis = Axis.default_batch_axis()
    input_seq_axis = Axis('inputAxis')
    label_seq_axis = Axis('labelAxis')

    input_dynamic_axes = [batch_axis, input_seq_axis]
    raw_input = input_variable(
        shape=input_vocab_dim, dynamic_axes=input_dynamic_axes,
        name='raw_input', is_sparse=True)

    mb_valid = mbs.next_minibatch(minibatch_size_in_samples=100,
            input_map={raw_input : mbs.streams.features},
            device=cntk_device(device_id))

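    # multiplying by an identity matrix densifies the sparse one-hot input
    # without changing its values, so the three encodings below are comparable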
    z = times(raw_input, np.eye(input_vocab_dim))
    e_reader = z.eval(mb_valid, device=cntk_device(device_id))

    # CSR with the raw_input encoding in ctf_data
    one_hot_data = [
            [3, 4, 5, 4, 7, 12, 1],
            [60, 61]
            ]
    data = [csr(np.eye(input_vocab_dim, dtype=np.float32)[d]) for d in
            one_hot_data]
    e_csr = z.eval({raw_input: data}, device=cntk_device(device_id))
    assert np.all([np.allclose(a, b) for a,b in zip(e_reader, e_csr)])

    # One-hot with the raw_input encoding in ctf_data
    data = Value.one_hot(one_hot_data, num_classes=input_vocab_dim, device=cntk_device(device_id))
    e_hot = z.eval({raw_input: data}, device=cntk_device(device_id))
    assert np.all([np.allclose(a, b) for a,b in zip(e_reader, e_hot)])
Code example #9
File: FastRCNN_test.py Project: OlegBoulanov/CNTK
def test_fastrcnn_with_config_file(device_id):
    if cntk_device(device_id).type() != DeviceKind_GPU:
        pytest.skip('test only runs on GPU') # it runs very slowly on CPU
    try_set_default_device(cntk_device(device_id))

    from A1_GenerateInputROIs import generate_input_rois
    assert generate_input_rois(testing=True)

    prepare_alexnet_v0_model()

    from A2_RunWithBSModel import run_fastrcnn_with_config_file
    assert run_fastrcnn_with_config_file(os.environ["TEST_CNTK_BINARY"])
Code example #10
File: trainer_test.py Project: BorisJineman/CNTK
def test_eval_one_hot_seq(one_hot_batch, device_id):
    dim = 10
    multiplier = 2

    for var_is_sparse in [True, False]:
        in1 = input_variable(shape=(dim,), is_sparse=var_is_sparse)
        # Convert CNTK node value to dense so that we can compare it later
        z = times(in1, np.eye(dim)*multiplier)
        # Convert expectation to dense
        expected = [np.eye(dim)[seq]*multiplier for seq in one_hot_batch]
        batch = Value.one_hot(one_hot_batch, num_classes=dim, device=cntk_device(device_id))
        result = z.eval({in1: batch}, device=cntk_device(device_id))
        assert np.all([np.allclose(a,b) for a,b in zip(result, expected)])
Code example #11
File: DetectionDemo_test.py Project: Tak-Au/CNTK
def test_detection_demo(device_id):
    if cntk_device(device_id).type() != DeviceKind_GPU:
        pytest.skip('test only runs on GPU')  # it runs very slowly on CPU
    try_set_default_device(cntk_device(device_id))

    from prepare_test_data import prepare_Grocery_data, prepare_alexnet_v0_model
    grocery_path = prepare_Grocery_data()
    prepare_alexnet_v0_model()

    from FastRCNN.install_data_and_model import create_grocery_mappings
    create_grocery_mappings(grocery_path)

    from DetectionDemo import get_configuration
    import utils.od_utils as od

    cfg = get_configuration('FasterRCNN')
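    # cfg is a dict-like config object (EasyDict-style), so cfg["CNTK"] and
    # cfg.CNTK below address the same section interchangeably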
    cfg["CNTK"].FORCE_DETERMINISTIC = True
    cfg["CNTK"].DEBUG_OUTPUT = False
    cfg["CNTK"].MAKE_MODE = False
    cfg["CNTK"].FAST_MODE = False
    cfg.CNTK.E2E_MAX_EPOCHS = 3
    cfg.CNTK.RPN_EPOCHS = 2
    cfg.CNTK.FRCN_EPOCHS = 2
    cfg.IMAGE_WIDTH = 400
    cfg.IMAGE_HEIGHT = 400
    cfg["CNTK"].TRAIN_E2E = True
    cfg.USE_GPU_NMS = False
    cfg.VISUALIZE_RESULTS = False
    cfg["DATA"].MAP_FILE_PATH = grocery_path

    externalData = 'CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY' in os.environ
    if externalData:
        extPath = os.environ['CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY']
        cfg['BASE_MODEL_PATH'] = os.path.join(extPath, "PreTrainedModels", "AlexNet", "v1", "AlexNet_ImageNet_Caffe.model")
    else:
        cfg['BASE_MODEL_PATH'] = os.path.join(abs_path, *"../../../../PretrainedModels/AlexNet_ImageNet_Caffe.model".split("/"))

    # train and test
    eval_model = od.train_object_detector(cfg)
    eval_results = od.evaluate_test_set(eval_model, cfg)

    meanAP = np.nanmean(list(eval_results.values()))
    print('meanAP={}'.format(meanAP))
    assert meanAP > 0.01

    # detect objects in single image
    img_path = os.path.join(grocery_path, "testImages", "WIN_20160803_11_28_42_Pro.jpg")
    regressed_rois, cls_probs = od.evaluate_single_image(eval_model, img_path, cfg)
    bboxes, labels, scores = od.filter_results(regressed_rois, cls_probs, cfg)
    assert bboxes.shape[0] == labels.shape[0]
Code example #12
def train(nonlinearity, num_hidden_layers, device_id,
          minibatch_size=10, num_samples=1000):
    from cntk.cntk_py import always_allow_setting_default_device
    always_allow_setting_default_device()
    C.try_set_default_device(cntk_device(device_id))
    np.random.seed(0)

    learning_rate = 0.5
    lr_schedule = C.learning_rate_schedule(learning_rate, C.UnitType.minibatch)

    hidden_layers_dim = 50

    inp = C.input_variable((input_dim), np.float32)
    label = C.input_variable((num_output_classes), np.float32)

    z = fully_connected_classifier_net(inp, num_output_classes, hidden_layers_dim,
                                       num_hidden_layers, nonlinearity)

    loss = C.cross_entropy_with_softmax(z, label)
    eval_error = C.classification_error(z, label)

    learner = C.sgd(z.parameters, lr_schedule)
    trainer = C.Trainer(z, (loss, eval_error), [learner])

    num_minibatches_to_train = int(num_samples / minibatch_size)

    training_progress_output_freq = 20

    losses = []
    errors = []

    for i in range(num_minibatches_to_train):
        features, labels = generate_random_data_sample(minibatch_size,
                                                       input_dim,
                                                       num_output_classes)

        # Specify the input variables mapping in the model to actual minibatch
        # data for training.
        trainer.train_minibatch({inp: features, label: labels},
                                device=cntk_device(device_id))

        batchsize, loss, error = print_training_progress(trainer, i,
                                                         training_progress_output_freq)

        if not (loss == "NA" or error == "NA"):
            losses.append(loss)
            errors.append(error)

    return losses, errors
Code example #13
def test_transfer_learning(device_id):
    if cntk_device(device_id).type() != DeviceKind_GPU:
        pytest.skip('test only runs on GPU') # due to batch normalization in ResNet_18
    try_set_default_device(cntk_device(device_id))

    base_path = os.path.dirname(os.path.abspath(__file__))
    externalData = 'CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY' in os.environ
    if externalData:
        extPath = os.environ['CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY']
        print("Reading data and model from %s" % extPath)
        model_file = os.path.join(extPath, *"PreTrainedModels/ResNet/v1/ResNet_18.model".split("/"))
        map_file = os.path.join(extPath, *"Image/CIFAR/v0/cifar-10-batches-py/test_map.txt".split("/"))
    else:
        model_file = os.path.join(base_path, *"../../../../Examples/Image/PretrainedModels/ResNet_18.model".split("/"))
        map_file = os.path.join(base_path, *"../../../../Examples/Image/DataSets/CIFAR-10/test_map.txt".split("/"))

    os.chdir(os.path.join(os.path.dirname(map_file), '..'))

    feature_node_name = "features"
    last_hidden_node_name = "z.x"
    image_width = 224
    image_height = 224
    num_channels = 3
    num_classes = 10

    num_epochs = 10
    num_train_images = 10
    num_test_images = 2

    node_outputs = get_node_outputs(load_model(model_file))
    assert len(node_outputs) == 83

    output_file = os.path.join(base_path, "tl_output.txt")
    trained_model = train_model(model_file, feature_node_name, last_hidden_node_name,
                                image_width, image_height, num_channels, num_classes, map_file,
                                num_epochs=num_epochs, max_images=num_train_images, freeze=True)

    # since we do not use a reader for evaluation we need unzipped data
    grocery_path = prepare_Grocery_data()
    eval_map_file = os.path.join(grocery_path, "test.txt")
    os.chdir(grocery_path)
    eval_test_images(trained_model, output_file, eval_map_file, image_width, image_height,
                     max_images=num_test_images, column_offset=1)

    expected_output_file = os.path.join(base_path, "tl_expected_output.txt")
    output = np.fromfile(output_file)
    expected_output = np.fromfile(expected_output_file)
    assert np.allclose(output, expected_output, atol=TOLERANCE_ABSOLUTE)
Code example #14
def test_session_cv_callback_with_cross_validation_3_times(tmpdir, device_id):
    device = cntk_device(device_id)
    t, feature, label = create_sample_model(device)
    mbs = mb_source(tmpdir, "training", max_samples=INFINITELY_REPEAT)
    cv_mbs = mb_source(tmpdir, "cv")

    input_map = {
        feature: mbs.streams.features,
        label: mbs.streams.labels
    }

    def cv_callback(index, average_error, num_samples, num_mb):
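        # run one manual pass over the CV source, then restore its position so
        # every callback invocation evaluates the same data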
        initial_position = cv_mbs.current_position
        total_error = 0
        while True:
            mb = cv_mbs.next_minibatch(2, input_map=input_map)
            if not mb:
                break
            mb_error = t.test_minibatch(mb, device=device)
            total_error += mb_error * mb[label].num_samples

        total_samples = 25  # Please see input data
        assert((total_error * 100) / total_samples == 92)
        cv_mbs.current_position = initial_position
        return True

    C.training_session(
        trainer=t, mb_source=mbs, mb_size=4,
        model_inputs_to_streams=input_map, max_samples=60,
        cv_config = C.CrossValidationConfig(frequency=20, callback=cv_callback)
    ).train(device)

    assert(t.total_number_of_samples_seen == 61)
Code example #15
def test_session_cv_callback_early_exit(tmpdir, device_id):
    device = cntk_device(device_id)
    t, feature, label = create_sample_model(device)
    mbs = mb_source(tmpdir, "training", max_samples=INFINITELY_REPEAT)

    input_map = {
        feature: mbs.streams.features,
        label: mbs.streams.labels
    }

    counter = [0]
    def cv_callback(index, average_error, num_samples, num_mb):
        assert(counter[0] == index)
        assert average_error == 0
        assert num_samples == 0
        assert num_mb == 0
        counter[0] += 1
        return counter[0] < 1

    C.training_session(
        trainer=t, mb_source=mbs, mb_size=4,
        model_inputs_to_streams=input_map,
        max_samples=60,
        cv_config = C.CrossValidationConfig(frequency=20, callback=cv_callback)
    ).train(device)
    assert counter == [1]
Code example #16
def test_session_progress_print_on_sweep_unit(tmpdir, device_id):
    device = cntk_device(device_id)
    writer = MockProgressWriter()
    # set a higher learning rate: we do not need convergence, just to go through all the samples
    t, feature, label = create_sample_model(device, writer, lr_per_sample=C.learning_parameter_schedule_per_sample(0.3))
    mbs = mb_source(tmpdir, "training",
                    #max_samples=INFINITELY_REPEAT,
                    max_sweeps = 4)

    input_map = {
        feature: mbs.streams.features,
        label: mbs.streams.labels
    }

    test_dir = str(tmpdir)

    C.training_session(
        trainer=t, mb_source=mbs,
        mb_size=C.minibatch_size_schedule(5),
        model_inputs_to_streams=input_map, max_samples=FULL_DATA_SWEEP,
        progress_frequency=(2, C.train.DataUnit.sweep)
    ).train(device)
    #4 sweeps of 25 samples = 100 samples
    assert(t.total_number_of_samples_seen == 100)
    # output every 2 sweeps; with 4 sweeps in total, 2 summaries are written:
    assert(writer.training_summary_counter == 2)
Code example #17
File: lightrnn_test.py Project: AllanYiin/CNTK
def test_lightrnn(device_id):
    if cntk_device(device_id).type() != DeviceKind_GPU:
        pytest.skip('test only runs on GPU')
    expected_valid_error = 7.251514
    expected_test_error = 7.305801

    command = run_command(datadir=os.path.join(example_dir, '..', 'test'),
                          outputdir=os.path.join(example_dir, '..', 'LightRNN'),
                          vocabdir=os.path.join(example_dir, '..', 'test'),
                          vocab_file=os.path.join(example_dir, '..', 'test', 'vocab.txt'),
                          alloc_file=os.path.join(example_dir, '..', 'test', 'word-0.location'),
                          vocabsize=1566,
                          optim='adam', lr=0.20,
                          embed=500, nhid=500, batchsize=20, layer=2,
                          epochs=1)
    p = subprocess.Popen(command, stdout=subprocess.PIPE)
    if sys.version_info[0] < 3:
        out = p.communicate()[0]
    else:
        try:
            out = p.communicate(timeout=TIMEOUT_SECONDS)[0]  # in case we have a hang
        except subprocess.TimeoutExpired:
            os.kill(p.pid, signal.CTRL_C_EVENT)
            raise RuntimeError('Timeout in mpiexec, possibly hang')
    str_out = out.decode(sys.getdefaultencoding())
    results = re.findall("Epoch  1 Done : Valid error = (.+), Test error = (.+)", str_out)
    results = results[0]
    assert len(results) == 2
    assert np.allclose([float(results[0]), float(results[1])], [expected_valid_error, expected_test_error], atol=TOLERANCE_ABSOLUTE)
Code example #18
def test_ffnet_error(device_id):
    from cntk.ops.tests.ops_test_utils import cntk_device
    set_default_device(cntk_device(device_id))

    avg_error = ffnet()
    expected_avg_error = 0.04
    assert np.allclose(avg_error, expected_avg_error, atol=TOLERANCE_ABSOLUTE)
Code example #19
def run_simple_training(tmpdir, device_id, test_config_factory):
    device = cntk_device(device_id)
    writer = MockProgressWriter(expected_test_summary=[[92, 25]])
    t, feature, label = create_sample_model(device, writer)

    mbs = mb_source(tmpdir, "training", max_samples=INFINITELY_REPEAT)
    mbs1 = mb_source(tmpdir, "test", ctf=ctf_data2, streams=['S4', 'S5'])

    input_map = {
        feature: mbs.streams.features,
        label: mbs.streams.labels
    }

    input_map1 = {
        feature: mbs1.streams.features,
        label: mbs1.streams.labels
    }

    C.training_session(
        trainer=t, mb_source=mbs, 
        mb_size=4, model_inputs_to_streams=input_map,
        max_samples=60,
        test_config=test_config_factory(mbs1, input_map)
    ).train(device)

    assert(t.total_number_of_samples_seen == 61)
    assert(writer.test_summary_counter == 1)
Code example #20
def test_FlappingBird_with_keras_DQN_noerror(device_id):
    if platform.system() != 'Windows':
        pytest.skip('Test only runs on Windows, pygame video device requirement constraint')
    from cntk.ops.tests.ops_test_utils import cntk_device
    try_set_default_device(cntk_device(device_id))
    
    sys.path.append(example_dir)
    current_dir = os.getcwd()
    os.chdir(example_dir)
    
    import FlappingBird_with_keras_DQN as fbgame

    # TODO: Currently the model is downloaded from a cached site
    #       Change the code to pick up the model from a locally 
    #       cached directory.
    model = fbgame.buildmodel()
    args = {'mode': 'Run'}
    res = fbgame.trainNetwork(model, args, internal_testing=True )
    
    np.testing.assert_array_equal(res, 0, \
        err_msg='Error in running Flapping Bird example', verbose=True)
    
    args = {'mode': 'Train'}
    res = fbgame.trainNetwork(model, args, internal_testing=True )
    
    np.testing.assert_array_equal(res, 0, \
        err_msg='Error in testing Flapping Bird example', verbose=True)
    
    #TODO: Add a test case to start with a CNTK trained cached model
    os.chdir(current_dir)
    print("Done")
Code example #21
File: random_ops_tests.py Project: AllanYiin/CNTK
def test_random_moments(arg0, arg1, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]
    dev = cntk_device(device_id)

    N = 100000
    B = 10.0 / np.sqrt(N) # about 1.5x larger than the largest value ever observed
    eg = np.euler_gamma
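    # B scales like the standard error of the sample mean (~1/sqrt(N)), so the
    # moment checks below tolerate sampling noise instead of demanding equality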

    #                  op             mean,                   variance
    ops1 = [(cr.bernoulli, lambda a: a           , lambda a: a*(1-a))]

    #                  op             mean,                   variance
    ops2 = [(cr.uniform,   lambda a, b: (b+a)*0.5, lambda a,b: (b-a)**2/12.0   ),
            (cr.normal,    lambda a, b: a        , lambda a,b: b**2            ),
            (cr.gumbel,    lambda a, b: a+b*eg   , lambda a,b: (np.pi*b)**2/6.0)]

    for op, fmean, fvar in ops1:
        input_op = op((N//100,10,10), dt, arg0, seed=98052)
        value = input_op.eval(device=dev)
        assert np.abs(np.mean(value) - fmean(arg0)) < B
        assert np.abs(np.var(value) - fvar(arg0)) < B * fvar(arg0)

    for op, fmean, fvar in ops2:
        input_op = op((N//100,10,10), dt, arg0, arg1, seed=98052)
        value = input_op.eval(device=dev)
        assert np.abs(np.mean(value) - fmean(arg0, arg1)) < B
        assert np.abs(np.var(value) - fvar(arg0, arg1)) < B * fvar(arg0, arg1)
Code example #22
File: word_rnn_test.py Project: BorisJineman/CNTK
def test_word_rnn(device_id):
    from cntk.ops.tests.ops_test_utils import cntk_device
    set_default_device(cntk_device(device_id))

    # Just run and verify it does not crash
    # Setting global parameters
    W.use_sampled_softmax = True
    W.softmax_sample_size = 3
    W.use_sparse = True
    W.hidden_dim = 20
    W.num_layers = 2
    W.num_epochs = 1
    W.sequence_length = 3
    W.sequences_per_batch = 2
    W.alpha = 0.75
    W.learning_rate = 0.02
    W.momentum_as_time_constant = 5
    W.clipping_threshold_per_sample = 5.0
    W.segment_sepparator = '<eos>'
    W.num_samples_between_progress_report = 2

    # Get path to data files.
    dir =  os.path.dirname( os.path.abspath(W.__file__))
    W.token_to_id_path            = os.path.join(dir, 'test/token2id.txt')
    W.validation_file_path        = os.path.join(dir, 'test/text.txt')
    W.train_file_path             = os.path.join(dir, 'test/text.txt')
    W.token_frequencies_file_path = os.path.join(dir, 'test/freq.txt')

    W.train_lm()
Code example #23
def test_sequence_to_sequence(device_id):

    # import code after setting the device, otherwise some part of the code picks up "default device"
    # which causes an inconsistency if there is already another job using GPU #0
    from Sequence2Sequence import create_reader, DATA_DIR, MODEL_DIR, TRAINING_DATA, VALIDATION_DATA, TESTING_DATA, \
                                  VOCAB_FILE, get_vocab, create_model, model_path_stem, train, evaluate_metric
    from cntk.ops.tests.ops_test_utils import cntk_device
    set_default_device(cntk_device(device_id))

    # hook up data (train_reader is created with randomization off to get a consistent error)
    train_reader = create_reader(os.path.join(DATA_DIR, TRAINING_DATA), False)
    valid_reader = create_reader(os.path.join(DATA_DIR, VALIDATION_DATA), True)
    test_reader  = create_reader(os.path.join(DATA_DIR, TESTING_DATA), False)
    vocab, i2w, _ = get_vocab(os.path.join(DATA_DIR, VOCAB_FILE))

    # create model
    model = create_model()

    # train (with small numbers to finish within a reasonable amount of time)
    train(train_reader, valid_reader, vocab, i2w, model, max_epochs=1, epoch_size=5000)

    # now test the model and print out test error (for automated test)
    model_filename = os.path.join(MODEL_DIR, model_path_stem + ".cmf.0")
    model = load_model(model_filename)
    error = evaluate_metric(test_reader, model, 10)

    print(error)

    #expected_error =  0.9943119920022192 # when run separately
    expected_error =  0.9912881900980582 # when run inside the harness--random-initialization?
    assert np.allclose(error, expected_error, atol=TOLERANCE_ABSOLUTE)
Code example #24
File: random_ops_tests.py Project: AllanYiin/CNTK
def test_two_times_n_vs_one_time_2n(arg0, arg1, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]
    dev = cntk_device(device_id)

    N = 256
    x1 = np.zeros((N, 1), dtype=dt)
    x2 = np.zeros((2*N, 1), dtype=dt)

    ops1 = [cr.bernoulli]
    ops2 = [cr.uniform, cr.normal, cr.gumbel]

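    # with the same fixed seed, evaluating a length-N op twice must reproduce
    # exactly the stream produced by evaluating a length-2N op once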
    for op in ops1:
        input_op1 = op((1*N,), dt, arg0, seed=98052)
        input_op2 = op((2*N,), dt, arg0, seed=98052)
        a = input_op1.eval(device=dev)
        b = input_op1.eval(device=dev)
        c = input_op2.eval(device=dev)
        assert np.allclose(c, np.concatenate([a,b]))
        
    for op in ops2:
        input_op1 = op((1*N,), dt, arg0, arg1, seed=98052)
        input_op2 = op((2*N,), dt, arg0, arg1, seed=98052)
        a = input_op1.eval(device=dev)
        b = input_op1.eval(device=dev)
        c = input_op2.eval(device=dev)
        assert np.allclose(c, np.concatenate([a,b]))
Code example #25
def mpiexec_test(device_id, script, params, expected_test_error, match_exactly=True, per_minibatch_tolerance=TOLERANCE_ABSOLUTE, error_tolerance=TOLERANCE_ABSOLUTE):
    if cntk_device(device_id).type() != DeviceKind_GPU:
       pytest.skip('test only runs on GPU')

    cmd = ["mpiexec", "-n", "2", "python", script] + params
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    if sys.version_info[0] < 3:
        out = p.communicate()[0]
    else:
        try:
            out = p.communicate(timeout=TIMEOUT_SECONDS)[0]  # in case we have a hang
        except subprocess.TimeoutExpired:
            os.kill(p.pid, signal.CTRL_C_EVENT)
            raise RuntimeError('Timeout in mpiexec, possibly hang')
    str_out = out.decode(sys.getdefaultencoding())
    results = re.findall("Cross Validation \[.+?\]: Minibatch\[.+?\]: errs = (.+?)%", str_out)

    assert len(results) == 2
    print(results)

    if match_exactly:
        assert results[0] == results[1]
    else:
        assert np.allclose(float(results[0]), float(results[1]), atol=per_minibatch_tolerance)

    assert np.allclose(float(results[0])/100, expected_test_error, atol=error_tolerance)
Code example #26
File: random_ops_tests.py Project: AllanYiin/CNTK
def test_randomlike_moments(arg0, arg1, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]
    dev = cntk_device(device_id)

    x = C.input_variable(1, dtype=dt)
    N = 100000 
    B = 10.0 / np.sqrt(N)
    x0 = np.zeros((N, 1), dtype=dt)
    eg = np.euler_gamma

    #                  op             mean,                   variance
    ops1 = [(cr.bernoulli_like, lambda a: a           , lambda a: a*(1-a))]

    #                  op             mean,                   variance
    ops2 = [(cr.uniform_like,   lambda a, b: (b+a)*0.5, lambda a,b: (b-a)**2/12.0   ),
            (cr.normal_like,    lambda a, b: a        , lambda a,b: b**2            ),
            (cr.gumbel_like,    lambda a, b: a+b*eg   , lambda a,b: (np.pi*b)**2/6.0)]

    for op, fmean, fvar in ops1:
        input_op = op(x, arg0, seed=98052)
        value = input_op.eval({x: x0}, device=dev)
        assert np.abs(np.mean(value) - fmean(arg0)) < B
        assert np.abs(np.var(value) - fvar(arg0)) < B * fvar(arg0)

    for op, fmean, fvar in ops2:
        input_op = op(x, arg0, arg1, seed=98052)
        value = input_op.eval({x: x0}, device=dev)
        assert np.abs(np.mean(value) - fmean(arg0, arg1)) < B
        assert np.abs(np.var(value) - fvar(arg0, arg1)) < B * fvar(arg0,arg1)
Code example #27
def test_ucf11_conv3d_error(device_id):
    # Skip for now.
    if True: #cntk_device(device_id).type() != DeviceKind_GPU:
        pytest.skip('test only runs on GPU')
    set_default_device(cntk_device(device_id))

    try:
        base_path = os.path.join(os.environ['CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY'],
                                *"Video/DataSets/UCF11".split("/"))
    except KeyError:
        base_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                *"../../../../Examples/Video/DataSets/UCF11".split("/"))

    base_path = os.path.normpath(base_path)

    from _cntk_py import set_computation_network_trace_level, set_fixed_random_seed, force_deterministic_algorithms
    set_computation_network_trace_level(1)
    set_fixed_random_seed(1)

    # For performance reasons, we will use test data for both training and testing.
    num_output_classes = 11
    # train_reader = VideoReader(os.path.join(base_path, 'test_map.csv'), num_output_classes, True)
    # test_reader  = VideoReader(os.path.join(base_path, 'test_map.csv'), num_output_classes, False)

    test_error = 0.8437 #conv3d_ucf11(train_reader, test_reader, max_epochs=1)
    expected_test_error = 0.8437

    assert np.allclose(test_error, expected_test_error,
                       atol=TOLERANCE_ABSOLUTE)
Code example #28
File: char_rnn_test.py Project: BorisJineman/CNTK
def test_char_rnn(device_id):
    from cntk.ops.tests.ops_test_utils import cntk_device
    set_default_device(cntk_device(device_id))

    # Just run and verify it does not crash
    output = train_and_eval_char_rnn(1, 200)
    print(output)
Code example #29
def test_convnet_cifar_error(device_id):
    if cntk_device(device_id).type() != DeviceKind_GPU:
        pytest.skip('test only runs on GPU')
    try_set_default_device(cntk_device(device_id))

    externalData = 'CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY' in os.environ
    if externalData:
        extPath = os.environ['CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY']
        dataset_path = os.path.join(extPath, "Image", "CIFAR", "v0")
    else:
        dataset_path = os.path.join(abs_path,  "..", "..", "..", "..", "Examples", "Image", "DataSets", "CIFAR-10")

    error = convnet_cifar10(data_path=dataset_path, epoch_size=2000, minibatch_size=32, max_epochs=10)

    expected_error = 0.7
    assert np.allclose(error, expected_error, atol=TOLERANCE_ABSOLUTE)
Code example #30
File: io_tests.py Project: vnvizitiu/CNTK
def test_compare_cbf_and_ctf(input_pair, device_id, tmpdir):

    try:
        import ctf2bin
    except ImportError:
        pytest.skip("ctf2bin not found")

    device = cntk_device(device_id)

    tmpfile = _write_data(tmpdir, input_pair[0])
    streams = input_pair[1]

    ctf2bin.process(tmpfile, tmpfile+'.bin', get_cbf_header(streams), ctf2bin.ElementType.FLOAT)
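    # ctf2bin converts the CTF text file into its binary (CBF) counterpart so
    # that both deserializers below read the same underlying data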

    def compare_cbf_and_ctf(num_mbs, mb_size, randomize):
        ctf = MinibatchSource(CTFDeserializer(tmpfile, streams), randomize=randomize)
        cbf = MinibatchSource(CBFDeserializer(tmpfile+'.bin', streams), randomize=randomize)

        ctf_stream_names = sorted([x.m_name for x in ctf.stream_infos()])
        cbf_stream_names = sorted([x.m_name for x in cbf.stream_infos()])

        assert(ctf_stream_names == cbf_stream_names)
        for _ in range(num_mbs):
            ctf_mb = ctf.next_minibatch(mb_size, device=device)
            cbf_mb = cbf.next_minibatch(mb_size, device=device)

            for name in cbf_stream_names:
                ctf_data = ctf_mb[ctf[name]]
                cbf_data = cbf_mb[cbf[name]]

                assert ctf_data.num_samples == cbf_data.num_samples
                assert ctf_data.num_sequences == cbf_data.num_sequences
                assert ctf_data.shape == cbf_data.shape
                assert ctf_data.end_of_sweep == cbf_data.end_of_sweep
                assert ctf_data.is_sparse == cbf_data.is_sparse
                assert ctf_data.data.masked_count() == cbf_data.data.masked_count()

                # XXX:
                # assert(ctf_data.asarray() == cbf_data.asarray()).all()
                # not using asarray because for sparse values it fails with
                # some strange exception "sum of the rank of the mask and Variable 
                # rank does not equal the Value's rank".

                assert C.cntk_py.are_equal(ctf_data.data.data, cbf_data.data.data)

                if (ctf_data.data.masked_count() > 0):
                    assert (ctf_data.data.mask == cbf_data.data.mask).all()
                # XXX: if mask_count is zero, mb_data.data.mask fails with 
                # "AttributeError: 'Value' object has no attribute 'mask'"!

                # XXX: without invoking erase, next_minibatch will fail with:
                # "Resize: Cannot resize the matrix because it is a view."
                ctf_data.data.erase()
                cbf_data.data.erase()

    for randomize in [False, True]:
        for (num_mbs, mb_size) in zip([1, 1, 3, 10], [1, 10, 100, 2]):
            compare_cbf_and_ctf(num_mbs, mb_size, randomize)
Code example #31
def test_placeholder(device_id, precision):
    dt = PRECISION_TO_TYPE[precision]
    dev = cntk_device(device_id)

    import cntk.random as cr
    p = C.placeholder()
    u = cr.uniform_like(p)
    x = C.sequence.input_variable((4, 5))

    x1 = np.ones((2, 3, 4, 5), dtype=dt)
    f = u + p
    f.replace_placeholders({p: x})
    fx0, fx1 = f.eval({x: x1})

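    # x1 holds two sequences of shape (3, 4, 5) filled with ones; uniform_like
    # samples from [0, 1) by default, so every value of f = u + x lies in [1, 2)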
    assert fx0.shape == (3, 4, 5)
    assert fx1.shape == (3, 4, 5)

    assert fx0.min() >= 1
    assert fx0.max() < 2

    assert fx1.min() >= 1
    assert fx1.max() < 2
Code example #32
def test_cifar_convnet_distributed_mpiexec(device_id):
    if cntk_device(device_id).type() != DeviceKind_GPU:
        pytest.skip('test only runs on GPU')

    cmd = ["mpiexec", "-n", "2", "python", os.path.join(abs_path, "run_cifar_convnet_distributed.py")]
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    if sys.version_info[0] < 3:
        # TODO add timeout for Py2?
        out = p.communicate()[0]
    else:
        try:
            out = p.communicate(timeout=TIMEOUT_SECONDS)[0]  # in case we have a hang
        except subprocess.TimeoutExpired:
            os.kill(p.pid, signal.CTRL_C_EVENT)
            raise RuntimeError('Timeout in mpiexec, possibly hang')
    str_out = out.decode(sys.getdefaultencoding())
    results = re.findall("Final Results: Minibatch\[.+?\]: errs = (.+?)%", str_out)
    assert len(results) == 2
    assert results[0] == results[1]
    expected_test_error = 0.617
    assert np.allclose(float(results[0])/100, expected_test_error,
                       atol=TOLERANCE_ABSOLUTE)
Code example #33
def test_simple_mnist_error(device_id):
    # Create a path to TensorBoard log directory and make sure it does not exist.
    abs_path = os.path.dirname(os.path.abspath(__file__))
    tb_logdir = os.path.join(abs_path, 'simple_mnist_test_log')
    if os.path.exists(tb_logdir):
        shutil.rmtree(tb_logdir)

    from cntk.ops.tests.ops_test_utils import cntk_device
    try_set_default_device(cntk_device(device_id))

    test_error = simple_mnist(tb_logdir)
    expected_test_error = 0.09

    assert np.allclose(test_error, expected_test_error,
                       atol=TOLERANCE_ABSOLUTE)

    # Ensure that the TensorBoard log directory was created and contains exactly one file with the expected name.
    tb_files = 0
    for tb_file in os.listdir(tb_logdir):
        assert tb_file.startswith("events.out.tfevents")
        tb_files += 1
    assert tb_files == 1
Code example #34
def test_session_cross_validation_3_times_on_minibatch_unit(tmpdir, device_id):
    device = cntk_device(device_id)
    writer = MockProgressWriter(
        expected_test_summary=[[92, 25], [92, 25], [92, 25]])
    t, feature, label = create_sample_model(device, writer)
    mbs = mb_source(tmpdir, "training", max_samples=INFINITELY_REPEAT)
    mbs1 = mb_source(tmpdir, "cv")

    input_map = {feature: mbs.streams.features, label: mbs.streams.labels}

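    # 60 samples at mb_size 4 gives 15 minibatches; a CV frequency of 5
    # minibatches therefore yields the 3 validation summaries asserted below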
    C.training_session(
        trainer=t,
        mb_source=mbs,
        mb_size=4,
        model_inputs_to_streams=input_map,
        max_samples=60,
        cv_config=C.CrossValidationConfig(
            mbs1, frequency=(5, C.train.DataUnit.minibatch), minibatch_size=2),
    ).train(device)

    assert (t.total_number_of_samples_seen == 61)
    assert (writer.test_summary_counter == 3)
Code example #35
def test_pooling(tmpdir, auto_padding, pooling_type, dtype, device_id):
    pytest.skip('Needs to be fixed after removal of batch axis change.')
    if device_id == -1 and dtype == np.float16:
        pytest.skip('Test only runs on GPU')
    device = cntk_device(device_id)
    with C.default_options(dtype=dtype):
        img_shape = (1, 5, 5)
        img = np.asarray(np.random.uniform(-1, 1, img_shape), dtype=dtype)

        x = C.input_variable(img.shape)
        pool_type = C.MAX_POOLING if pooling_type else C.AVG_POOLING
        root_node = C.pooling(x, pool_type, (2, 2), auto_padding=auto_padding)

        filename = os.path.join(str(tmpdir), R'conv.onnx')
        root_node.save(filename, format=C.ModelFormat.ONNX)

        loaded_node = C.Function.load(filename, format=C.ModelFormat.ONNX)
        assert root_node.shape == loaded_node.shape

        x_ = loaded_node.arguments[0]
        assert np.allclose(loaded_node.eval({x_: [img]}, device=device),
                           root_node.eval({x: [img]}, device=device))
Code example #36
def mpiexec_test(device_id,
                 script,
                 mpiexec_params,
                 params,
                 expected_test_error,
                 match_exactly=True,
                 per_minibatch_tolerance=TOLERANCE_ABSOLUTE,
                 error_tolerance=TOLERANCE_ABSOLUTE):
    if cntk_device(device_id).type() != DeviceKind_GPU:
        pytest.skip('test only runs on GPU')

    cmd = ["mpiexec"] + mpiexec_params + ["python", script] + params
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    if sys.version_info[0] < 3:
        out = p.communicate()[0]
    else:
        try:
            out = p.communicate(
                timeout=TIMEOUT_SECONDS)[0]  # in case we have a hang
        except subprocess.TimeoutExpired:
            os.kill(p.pid, signal.CTRL_C_EVENT)
            raise RuntimeError('Timeout in mpiexec, possibly hang')
    str_out = out.decode(sys.getdefaultencoding())
    results = re.findall(
        r"Finished Evaluation \[.+?\]: Minibatch\[.+?\]: metric = (.+?)%",
        str_out)

    assert len(results) == 2

    if match_exactly:
        assert results[0] == results[1]
    else:
        if abs(
            (float(results[0]) - float(results[1]))) > per_minibatch_tolerance:
            print(str_out)
            assert False
    assert np.allclose(float(results[0]) / 100,
                       expected_test_error,
                       atol=error_tolerance)
Code example #37
def run_simple_training(tmpdir, device_id, test_config_factory):
    device = cntk_device(device_id)
    writer = MockProgressWriter(expected_test_summary=[[92, 25]])
    t, feature, label = create_sample_model(device, writer)

    mbs = mb_source(tmpdir, "training", max_samples=INFINITELY_REPEAT)
    mbs1 = mb_source(tmpdir, "test", ctf=ctf_data2, streams=['S4', 'S5'])

    input_map = {feature: mbs.streams.features, label: mbs.streams.labels}

    input_map1 = {feature: mbs1.streams.features, label: mbs1.streams.labels}

    C.training_session(trainer=t,
                       mb_source=mbs,
                       mb_size=4,
                       model_inputs_to_streams=input_map,
                       max_samples=60,
                       test_config=test_config_factory(
                           mbs1, input_map)).train(device)

    assert (t.total_number_of_samples_seen == 61)
    assert (writer.test_summary_counter == 1)
Code example #38
def test_training_session_with_infinite_samples(tmpdir, device_id):
    import pytest
    device = cntk_device(device_id)
    t, feature, label = create_sample_model(device)
    mbs = mb_source(tmpdir, "training", max_samples=INFINITELY_REPEAT)

    input_map = {
        feature: mbs.streams.features,
        label: mbs.streams.labels
    }

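    # an infinitely repeating source must be bounded by max_samples or
    # max_sweeps; each unbounded configuration below should raise ValueError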
    with pytest.raises(ValueError) as info1:
        C.training_session(
            trainer=t, mb_source=mbs, 
            mb_size=4, model_inputs_to_streams=input_map
        ).train(device)
    assert 'Train minibatch source must have a limited number of samples or sweeps' in str(info1.value)

    with pytest.raises(ValueError) as info2:
        mbs1 = mb_source(tmpdir, "test", max_samples=INFINITELY_REPEAT)
        C.training_session(
            trainer=t, mb_source=mbs, 
            mb_size=4, model_inputs_to_streams=input_map,
            max_samples = 10,
            test_config = C.TestConfig(mbs1, minibatch_size=2),
        ).train(device)
    assert 'Test minibatch source must have a limited number of samples or sweeps' in str(info2.value)

    with pytest.raises(ValueError) as info3:
        mbs2 = mb_source(tmpdir, "cv", max_samples=INFINITELY_REPEAT)
        C.training_session(
            trainer=t, mb_source=mbs, 
            mb_size=4, model_inputs_to_streams=input_map,
            max_samples=20,
            cv_config = C.CrossValidationConfig(mbs2)
        ).train(device)
    assert 'Cross validation minibatch source must have a limited number of samples or sweeps' in str(info3.value)
Code example #39
def test_sequence_to_sequence(device_id):

    # import code after setting the device, otherwise some part of the code picks up "default device"
    # which causes an inconsistency if there is already another job using GPU #0
    from Sequence2Sequence import create_reader, DATA_DIR, MODEL_DIR, TRAINING_DATA, VALIDATION_DATA, TESTING_DATA, \
                                  VOCAB_FILE, get_vocab, create_model, model_path_stem, train, evaluate_metric
    from cntk.ops.tests.ops_test_utils import cntk_device
    try_set_default_device(cntk_device(device_id))

    # hook up data (train_reader is created with randomization off to get a consistent error)
    train_reader = create_reader(os.path.join(DATA_DIR, TRAINING_DATA), False)
    valid_reader = create_reader(os.path.join(DATA_DIR, VALIDATION_DATA), True)
    test_reader = create_reader(os.path.join(DATA_DIR, TESTING_DATA), False)
    vocab, i2w, _ = get_vocab(os.path.join(DATA_DIR, VOCAB_FILE))

    # create model
    model = create_model()

    # train (with small numbers to finish within a reasonable amount of time)
    train(train_reader,
          valid_reader,
          vocab,
          i2w,
          model,
          max_epochs=1,
          epoch_size=5000)

    # now test the model and print out test error (for automated test)
    model_filename = os.path.join(MODEL_DIR, model_path_stem + ".cmf.0")
    model = load_model(model_filename)
    error = evaluate_metric(test_reader, model, 10)

    print(error)

    #expected_error =  0.9943119920022192 # when run separately
    expected_error = 0.9912881900980582  # when run inside the harness--random-initialization?
    assert np.allclose(error, expected_error, atol=TOLERANCE_ABSOLUTE)
Code example #40
def test_OptimizedRNNStack(bidirectional, num_layers, input_size, hidden_size,
                           tmpdir, device_id):
    if device_id == -1:
        pytest.skip('Test only runs on GPU')
    dev = cntk_device(device_id)
    from _cntk_py import constant_initializer
    model_filename = 'optimized_rnn_stack_' + (
        'bi' if bidirectional else 'uni') + '_layers' + str(
            num_layers) + '_inp' + str(input_size) + '_hid' + str(hidden_size)
    W = C.parameter((C.InferredDimension, input_size),
                    constant_initializer(0.1),
                    device=dev)
    x = C.sequence.input_variable(shape=(input_size, ))
    s = np.asarray(np.random.uniform(-1, 1, (5, input_size)), dtype=np.float32)
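    # s is a single random length-5 input sequence; verify_one_input (a test
    # helper, presumably an ONNX save/load round-trip) checks that the
    # reloaded model reproduces f's output on s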
    f = C.optimized_rnnstack(x,
                             W,
                             hidden_size,
                             num_layers,
                             bidirectional=bidirectional,
                             name='MyRnnStack')
    f.parameters[0].value = np.reshape(
        np.arange(np.prod(f.parameters[0].value.shape), dtype=np.float32),
        f.parameters[0].value.shape)
    verify_one_input(f, s, tmpdir, model_filename)
Code example #41
File: reasonet_test.py Project: zhoufengbuaa/CNTK
def test_reasonet(device_id, is_1bit_sgd):
  print("Device Id: {0}".format(device_id))
  if device_id < 0:
    pytest.skip('test only runs on GPU')
    
  if is_1bit_sgd != 0:
    pytest.skip('test doesn\'t support 1bit sgd')

  import ReasoNet.reasonet as rsn
  device.set_default_device(cntk_device(device_id))
  data_path = os.path.join(module_path, "Data/fast_test.txt")
  eval_path = os.path.join(module_path, "Data/fast_test.txt")
  vocab_dim = 101100
  entity_dim = 101
  epoch_size=1159400
  eval_size=1159400
  hidden_dim=256
  max_rl_iter=5
  max_epochs=1
  embedding_dim=300
  att_dim = 384
  params = rsn.model_params(vocab_dim = vocab_dim, entity_dim = entity_dim, hidden_dim = hidden_dim, embedding_dim = embedding_dim, embedding_init = None, attention_dim = att_dim, dropout_rate = 0.2)

  train_data = rsn.create_reader(data_path, vocab_dim, entity_dim, True)
  eval_data = rsn.create_reader(eval_path, vocab_dim, entity_dim, False) if eval_path is not None else None
  embedding_init = None

  model = rsn.create_model(params)
  learner = rsn.create_adam_learner(model.parameters)
  (train_loss, train_acc, eval_acc) = rsn.train(model, params, learner, train_data, max_epochs=max_epochs, epoch_size=epoch_size, save_model_flag=False, model_name=os.path.basename(data_path), eval_data=eval_data, eval_size=eval_size, check_point_freq=1, minibatch_size = 5000)
  assert abs(train_loss - 0.08067)<1e-2
  assert abs(train_acc - 0.21635)<1e-2
  if sys.version_info >= (3,):
    assert abs(eval_acc - 0.304)<1e-2
  else:
    assert abs(eval_acc - 0.312)<1e-2
Code example #42
def test_seq_classification_error(device_id):
    from cntk.ops.tests.ops_test_utils import cntk_device
    DeviceDescriptor.set_default_device(cntk_device(device_id))

    from _cntk_py import set_computation_network_trace_level, set_fixed_random_seed
    set_computation_network_trace_level(1)
    set_fixed_random_seed(1) # to become invariant to initialization order, which is a valid change

    # test of the example itself
    # this emulates the main code in the PY file
    reader = create_reader(data_dir + "/atis.train.ctf")
    model = create_model()
    loss_avg, evaluation_avg = train(reader, model, max_epochs=1)
    expected_avg = [0.15570838301766451, 0.7846451368305728]
    assert np.allclose([evaluation_avg, loss_avg], expected_avg, atol=TOLERANCE_ABSOLUTE)

    # test of a config like in the example but with additions to test many code paths
    if device_id >= 0: # BatchNormalization currently does not run on CPU
        reader = create_reader(data_dir + "/atis.train.ctf")
        model = create_test_model()
        loss_avg, evaluation_avg = train(reader, model, max_epochs=1)
        log_number_of_parameters(model, trace_level=1); print()
        expected_avg = [0.084, 0.407364]
        assert np.allclose([evaluation_avg, loss_avg], expected_avg, atol=TOLERANCE_ABSOLUTE)
Code example #43
def test_session_cross_validation_3_times_checkpoints_2_save_all(tmpdir, device_id):
    device = cntk_device(device_id)
    writer = MockProgressWriter(expected_test_summary=[[92, 25], [92, 25], [92, 25]])
    t, feature, label = create_sample_model(device, writer)
    mbs = mb_source(tmpdir, "training", max_samples=INFINITELY_REPEAT)
    mbs1 = mb_source(tmpdir, "cv")

    input_map = {
        feature: mbs.streams.features,
        label: mbs.streams.labels
    }

    test_dir = str(tmpdir)

    C.training_session(
        trainer=t, mb_source=mbs,
        mb_size=4, model_inputs_to_streams=input_map,
        max_samples=60,
        checkpoint_config = C.CheckpointConfig(frequency=35, preserve_all=True,
                                             filename=str(tmpdir / "checkpoint_save_all")),
        cv_config = C.CrossValidationConfig(mbs1, frequency=20)
    ).train(device)

    candidates = [f for f in listdir(test_dir) if isfile(
        join(test_dir, f)) and f.startswith("checkpoint_save_all")]

    assert("checkpoint_save_all0" in candidates)
    assert("checkpoint_save_all0.ckp" in candidates)

    assert("checkpoint_save_all1" in candidates)
    assert("checkpoint_save_all1.ckp" in candidates)

    assert("checkpoint_save_all" in candidates)
    assert("checkpoint_save_all.ckp" in candidates)

    assert(writer.test_summary_counter == 3)
Code example #44
def test_transfer_learning(device_id):
    if cntk_device(device_id).type() != DeviceKind_GPU:
        pytest.skip('test only runs on GPU for speed')
    try_set_default_device(cntk_device(device_id))

    base_path = os.path.dirname(os.path.abspath(__file__))
    externalData = 'CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY' in os.environ
    if externalData:
        extPath = os.environ['CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY']
        print("Reading data and model from %s" % extPath)
        model_file = os.path.join(
            extPath, *"PreTrainedModels/ResNet/v1/ResNet_18.model".split("/"))
        map_file = os.path.join(
            extPath,
            *"Image/CIFAR/v0/cifar-10-batches-py/test_map.txt".split("/"))
    else:
        model_file = os.path.join(
            base_path,
            *"../../../../PretrainedModels/ResNet_18.model".split("/"))
        map_file = os.path.join(
            base_path,
            *"../../../../Examples/Image/DataSets/CIFAR-10/test_map.txt".split(
                "/"))

    os.chdir(os.path.join(os.path.dirname(map_file), '..'))

    feature_node_name = "features"
    last_hidden_node_name = "z.x"
    image_width = 224
    image_height = 224
    num_channels = 3
    num_classes = 10

    num_epochs = 10
    num_train_images = 10
    num_test_images = 2

    node_outputs = get_node_outputs(load_model(model_file))
    assert len(node_outputs) == 83

    output_file = os.path.join(base_path, "tl_output.txt")
    trained_model = train_model(model_file,
                                feature_node_name,
                                last_hidden_node_name,
                                image_width,
                                image_height,
                                num_channels,
                                num_classes,
                                map_file,
                                num_epochs=num_epochs,
                                max_images=num_train_images,
                                freeze=True)

    # since we do not use a reader for evaluation we need unzipped data
    grocery_path = prepare_Grocery_data()
    eval_map_file = os.path.join(grocery_path, "test.txt")
    os.chdir(grocery_path)
    eval_test_images(trained_model,
                     output_file,
                     eval_map_file,
                     image_width,
                     image_height,
                     max_images=num_test_images,
                     column_offset=1)

    expected_output_file = os.path.join(base_path, "tl_expected_output.txt")
    output = np.fromfile(output_file)
    expected_output = np.fromfile(expected_output_file)
    assert np.allclose(output, expected_output, atol=TOLERANCE_ABSOLUTE)
Code example #45
def test_fasterrcnn_grocery_training_e2e(device_id):
    try_set_default_device(cntk_device(device_id))
    _, _, _ = run_fasterrcnn_grocery_training(e2e=True)
Code example #46
File: io_tests.py Project: sminger1202/CNTK
def test_compare_cbf_and_ctf(input_pair, device_id, tmpdir):
    try:
        import ctf2bin
    except ImportError:
        pytest.skip("ctf2bin not found")

    device = cntk_device(device_id)

    tmpfile = _write_data(tmpdir, input_pair[0])
    streams = input_pair[1]

    ctf2bin.process(tmpfile, tmpfile + '.bin', get_cbf_header(streams),
                    ctf2bin.ElementType.FLOAT)

    def compare_cbf_and_ctf(num_mbs, mb_size, randomize):
        ctf = MinibatchSource(CTFDeserializer(tmpfile, streams),
                              randomize=randomize)
        cbf = MinibatchSource(CBFDeserializer(tmpfile + '.bin', streams),
                              randomize=randomize)

        ctf_stream_names = sorted([x.m_name for x in ctf.stream_infos()])
        cbf_stream_names = sorted([x.m_name for x in cbf.stream_infos()])

        assert (ctf_stream_names == cbf_stream_names)
        for _ in range(num_mbs):
            ctf_mb = ctf.next_minibatch(mb_size, device=device)
            cbf_mb = cbf.next_minibatch(mb_size, device=device)

            for name in cbf_stream_names:
                ctf_data = ctf_mb[ctf[name]]
                cbf_data = cbf_mb[cbf[name]]

                assert ctf_data.num_samples == cbf_data.num_samples
                assert ctf_data.num_sequences == cbf_data.num_sequences
                assert ctf_data.shape == cbf_data.shape
                assert ctf_data.end_of_sweep == cbf_data.end_of_sweep
                assert ctf_data.is_sparse == cbf_data.is_sparse
                assert (ctf_data.data.masked_count() ==
                        cbf_data.data.masked_count())

                # XXX:
                # assert (ctf_data.asarray() == cbf_data.asarray()).all()
                # not using asarray because for sparse values it fails with
                # a strange exception: "sum of the rank of the mask and
                # Variable rank does not equal the Value's rank".

                assert C.cntk_py.are_equal(ctf_data.data.data,
                                           cbf_data.data.data)

                if (ctf_data.data.masked_count() > 0):
                    assert (ctf_data.data.mask == cbf_data.data.mask).all()
                # XXX: if mask_count is zero, mb_data.data.mask fails with
                # "AttributeError: 'Value' object has no attribute 'mask'"!

                # XXX: without invoking erase, next_minibatch will fail with:
                # "Resize: Cannot resize the matrix because it is a view."
                ctf_data.data.erase()
                cbf_data.data.erase()

    for randomize in [False, True]:
        for (num_mbs, mb_size) in zip([1, 1, 3, 10], [1, 10, 100, 2]):
            compare_cbf_and_ctf(num_mbs, mb_size, randomize)
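
Two helpers from the original io_tests.py are not shown here: _write_data, which persists the CTF text under tmpdir, and get_cbf_header, which derives the ctf2bin header from the stream definitions. A plausible sketch of the first (an assumption, not the original code):

def _write_data(tmpdir, data, filename='mbdata.txt'):
    # hypothetical reconstruction: write the CTF text and return its path
    tmpfile = str(tmpdir / filename)
    with open(tmpfile, 'w') as f:
        f.write(data)
    return tmpfile
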
Code example #47
0
File: trainer_test.py Project: mbencherif/CNTK
def test_eval_one_hot_bad(one_hot_batch, dim, device_id):
    with pytest.raises(ValueError):
        batch = Value.one_hot(one_hot_batch,
                              num_classes=dim,
                              device=cntk_device(device_id))
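
For contrast, a sketch of a call that should succeed (the dimensions are made up): every entry has to be a class index strictly smaller than num_classes.

from cntk import Value

# hypothetical valid usage: three classes, so only indices 0..2 are allowed
good_batch = Value.one_hot([[0, 2], [1]], num_classes=3)
print(good_batch.shape)  # one-hot encoded batch of two sequences
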
Code example #48
0
def test_transfer_learning(device_id):
    set_fixed_random_seed(1)
    force_deterministic_algorithms()

    if cntk_device(device_id).type() != DeviceKind_GPU:
        pytest.skip(
            'test only runs on GPU')  # due to batch normalization in ResNet_18
    try_set_default_device(cntk_device(device_id))

    base_path = os.path.dirname(os.path.abspath(__file__))
    animals_path = os.path.join(
        base_path, *"../../../../Examples/Image/DataSets/Animals".split("/"))
    externalData = 'CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY' in os.environ
    if externalData:
        extPath = os.environ['CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY']
        model_file = os.path.join(
            extPath, *"PreTrainedModels/ResNet/v1/ResNet_18.model".split("/"))

        if not os.path.isfile(
                os.path.join(animals_path, 'Test', 'Weaver_bird.jpg')):
            # copy data from external test data location and unzip
            os.chdir(os.path.join(base_path, '..', '..', '..'))
            prepare_animals_data()
            os.chdir(base_path)
            zip_path = os.path.join(animals_path, 'Animals.zip')
            with zipfile.ZipFile(zip_path) as myzip:
                myzip.extractall(os.path.join(animals_path, '..'))

    else:
        model_file = os.path.join(
            base_path,
            *"../../../../Examples/Image/PretrainedModels/ResNet_18.model".split("/"))

    train_image_folder = os.path.join(animals_path, "Train")
    test_image_folder = os.path.join(animals_path, "Test")
    output_file = os.path.join(base_path, "tl_extended_output.txt")

    train_and_eval(model_file,
                   train_image_folder,
                   test_image_folder,
                   output_file,
                   None,
                   testing=True)

    expected_output_file = os.path.join(base_path,
                                        "tl_extended_expected_output.txt")

    with open(output_file) as output_json:
        output_lines = output_json.readlines()
    with open(expected_output_file) as expected_output_json:
        expected_output_lines = expected_output_json.readlines()

    # handling different ordering of files
    out_dict = {}
    exp_dict = {}
    for i in range(len(output_lines)):
        output = json.loads(output_lines[i])[0]
        expected_output = json.loads(expected_output_lines[i])[0]

        out_dict[output["image"]] = output
        exp_dict[expected_output["image"]] = expected_output

    # debug output
    for k in out_dict:
        output = out_dict[k]
        expected_output = exp_dict[k]

        print("output: {}".format(output))
        print("expect: {}".format(expected_output))

    for k in out_dict:
        output = out_dict[k]
        expected_output = exp_dict[k]

        assert np.allclose(output["predictions"]["Sheep"],
                           expected_output["predictions"]["Sheep"],
                           atol=TOLERANCE_ABSOLUTE)
        assert np.allclose(output["predictions"]["Wolf"],
                           expected_output["predictions"]["Wolf"],
                           atol=TOLERANCE_ABSOLUTE)
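
From the parsing above, the output file evidently holds one JSON array per line, whose first element maps the image name to a per-class prediction dictionary. A reconstructed example line (hypothetical values, showing only the fields the test reads):

import json

# hypothetical output line, shaped like what train_and_eval writes
line = '[{"image": "Sheep1.jpg", "predictions": {"Sheep": 0.97, "Wolf": 0.03}}]'
record = json.loads(line)[0]
print(record["image"], record["predictions"]["Sheep"])
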
Code example #49
0
def test_language_understanding(device_id):
    from cntk.ops.tests.ops_test_utils import cntk_device
    DeviceDescriptor.set_default_device(cntk_device(device_id))

    from _cntk_py import set_computation_network_trace_level, set_fixed_random_seed
    #set_computation_network_trace_level(1)
    set_fixed_random_seed(1) # to become invariant to initialization order, which is a valid change
    # BUGBUG: This ^^ currently seems to have no impact; the two BN models below should be identical in training

    if device_id >= 0: # BatchNormalization currently does not run on CPU
        # change to intent classifier   --moved up here since this fails, as repro
        # BUGBUG: Broken, need to pass new criterion to train().
        #with default_options(initial_state=0.1):  # inject an option to mimic the BS version identically; remove some day
        #    select_last = slice(Placeholder(), Axis.default_dynamic_axis(), -1, 0)
        #    # BUGBUG: Fails with "RuntimeError: The specified dynamic axis named defaultDynamicAxis does not match any of the dynamic axes of the operand"
        #    run_model_test('change to intent classifier', Sequential([
        #        Embedding(emb_dim),
        #        with_lookahead(),
        #        BatchNormalization(),
        #        BiRecurrence(LSTM(hidden_dim)),
        #        BatchNormalization(),
        #        select_last,  # fails here with an axis problem
        #        Dense(label_dim)
        #    ]), [0.084, 0.407364])


        # replace lookahead by bidirectional model
        with default_options(initial_state=0.1):  # inject an option to mimic the BS version identically; remove some day
            run_model_test('replace lookahead by bidirectional model', Sequential([
                Embedding(emb_dim),
                BatchNormalization(),
                BiRecurrence(LSTM(hidden_dim), LSTM(hidden_dim)),
                BatchNormalization(),
                Dense(label_dim)
            ]), [0.0579573500457558, 0.3214986774820327])

        # replace lookahead by bidirectional model, with shared BN
        with default_options(initial_state=0.1):  # inject an option to mimic the BS version identically; remove some day
            #with default_options(dtype=np.float64):  # test this with double precision since single precision is too little for reproducible aggregation
            # ^^ This test requires the "#if 1" in Functions.cpp PopulateNetworkInputs() to be changed to "#if 0".
            run_model_test('replace lookahead by bidirectional model, with shared BN', Sequential([
                Embedding(emb_dim),
                BNBiRecurrence(LSTM(hidden_dim), LSTM(hidden_dim), test_dual=True),
                #BNBiRecurrence(LSTM(hidden_dim), LSTM(hidden_dim), test_dual=False),
                BatchNormalization(normalization_time_constant=-1),
                Dense(label_dim)
            ]), [0.0579573500457558, 0.3214986774820327])
            # values with normalization_time_constant=-1 and double precision:
            # [0.0583178503091983, 0.3199431143304898]
            """ with normalization_time_constant=-1:
             Minibatch[   1-   1]: loss = 5.945220 * 67, metric = 100.0% * 67
             Minibatch[   2-   2]: loss = 4.850601 * 63, metric = 79.4% * 63
             Minibatch[   3-   3]: loss = 3.816031 * 68, metric = 57.4% * 68
             Minibatch[   4-   4]: loss = 2.213172 * 70, metric = 41.4% * 70
             Minibatch[   5-   5]: loss = 2.615342 * 65, metric = 40.0% * 65
             Minibatch[   6-   6]: loss = 2.360896 * 62, metric = 25.8% * 62
             Minibatch[   7-   7]: loss = 1.452822 * 58, metric = 27.6% * 58
             Minibatch[   8-   8]: loss = 0.947210 * 70, metric = 10.0% * 70
             Minibatch[   9-   9]: loss = 0.595654 * 59, metric = 10.2% * 59
             Minibatch[  10-  10]: loss = 1.515479 * 64, metric = 23.4% * 64
             Minibatch[  11- 100]: loss = 0.686744 * 5654, metric = 10.4% * 5654
             Minibatch[ 101- 200]: loss = 0.289059 * 6329, metric = 5.8% * 6329
             Minibatch[ 201- 300]: loss = 0.218765 * 6259, metric = 4.7% * 6259
             Minibatch[ 301- 400]: loss = 0.182855 * 6229, metric = 3.5% * 6229
             Minibatch[ 401- 500]: loss = 0.156745 * 6289, metric = 3.4% * 6289
            Finished Epoch [1]: [Training] loss = 0.321413 * 36061, metric = 5.8% * 36061
            --> 0.057818696098277916 0.3214128415043278
             Minibatch[   1-   1]: loss = 0.000000 * 991, metric = 2.5% * 991
             Minibatch[   2-   2]: loss = 0.000000 * 1000, metric = 2.8% * 1000
             Minibatch[   3-   3]: loss = 0.000000 * 992, metric = 4.0% * 992
             Minibatch[   4-   4]: loss = 0.000000 * 989, metric = 3.0% * 989
             Minibatch[   5-   5]: loss = 0.000000 * 998, metric = 3.8% * 998
             Minibatch[   6-   6]: loss = 0.000000 * 995, metric = 1.5% * 995
             Minibatch[   7-   7]: loss = 0.000000 * 998, metric = 2.5% * 998
             Minibatch[   8-   8]: loss = 0.000000 * 992, metric = 1.6% * 992
             Minibatch[   9-   9]: loss = 0.000000 * 1000, metric = 1.6% * 1000
             Minibatch[  10-  10]: loss = 0.000000 * 996, metric = 7.9% * 996
            Finished Epoch [1]: [Evaluation] loss = 0.000000 * 10984, metric = 3.2% * 10984
            --> 0.03159140568099053 0.0
            """

        # BatchNorm test case for global-corpus aggregation
        with default_options(initial_state=0.1):  # inject an option to mimic the BS version identically; remove some day
            run_model_test('BatchNorm global-corpus aggregation', Sequential([
                Embedding(emb_dim),
                BatchNormalization(normalization_time_constant=-1),
                Recurrence(LSTM(hidden_dim), go_backwards=False),
                BatchNormalization(normalization_time_constant=-1),
                Dense(label_dim)
            ]), [0.05662627214996811, 0.2968516879905391])
            """
             Minibatch[   1-   1]: loss = 5.745576 * 67, metric = 100.0% * 67
             Minibatch[   2-   2]: loss = 4.684151 * 63, metric = 90.5% * 63
             Minibatch[   3-   3]: loss = 3.957423 * 68, metric = 63.2% * 68
             Minibatch[   4-   4]: loss = 2.286908 * 70, metric = 41.4% * 70
             Minibatch[   5-   5]: loss = 2.733978 * 65, metric = 38.5% * 65
             Minibatch[   6-   6]: loss = 2.189765 * 62, metric = 30.6% * 62
             Minibatch[   7-   7]: loss = 1.427890 * 58, metric = 25.9% * 58
             Minibatch[   8-   8]: loss = 1.501557 * 70, metric = 18.6% * 70
             Minibatch[   9-   9]: loss = 0.632599 * 59, metric = 13.6% * 59
             Minibatch[  10-  10]: loss = 1.516047 * 64, metric = 23.4% * 64
             Minibatch[  11- 100]: loss = 0.580329 * 5654, metric = 9.8% * 5654
             Minibatch[ 101- 200]: loss = 0.280317 * 6329, metric = 5.6% * 6329
             Minibatch[ 201- 300]: loss = 0.188372 * 6259, metric = 4.1% * 6259
             Minibatch[ 301- 400]: loss = 0.170403 * 6229, metric = 3.9% * 6229
             Minibatch[ 401- 500]: loss = 0.159605 * 6289, metric = 3.4% * 6289
            Finished Epoch [1]: [Training] loss = 0.296852 * 36061, metric = 5.7% * 36061
            --> 0.05662627214996811 0.2968516879905391
             Minibatch[   1-   1]: loss = 0.000000 * 991, metric = 1.8% * 991
             Minibatch[   2-   2]: loss = 0.000000 * 1000, metric = 3.4% * 1000
             Minibatch[   3-   3]: loss = 0.000000 * 992, metric = 3.9% * 992
             Minibatch[   4-   4]: loss = 0.000000 * 989, metric = 4.1% * 989
             Minibatch[   5-   5]: loss = 0.000000 * 998, metric = 4.0% * 998
             Minibatch[   6-   6]: loss = 0.000000 * 995, metric = 1.2% * 995
             Minibatch[   7-   7]: loss = 0.000000 * 998, metric = 2.8% * 998
             Minibatch[   8-   8]: loss = 0.000000 * 992, metric = 2.9% * 992
             Minibatch[   9-   9]: loss = 0.000000 * 1000, metric = 2.0% * 1000
             Minibatch[  10-  10]: loss = 0.000000 * 996, metric = 8.2% * 996
            Finished Epoch [1]: [Evaluation] loss = 0.000000 * 10984, metric = 3.5% * 10984
            --> 0.035050983248361256 0.0
            """


        # plus BatchNorm
        with default_options(initial_state=0.1):  # inject an option to mimic the BS version identically; remove some day
            run_model_test('plus BatchNorm', Sequential([
                Embedding(emb_dim),
                BatchNormalization(),
                Recurrence(LSTM(hidden_dim), go_backwards=False),
                BatchNormalization(),
                Dense(label_dim)
            ]), [0.05662627214996811, 0.2968516879905391])

        # plus lookahead
        with default_options(initial_state=0.1):  # inject an option to mimic the BS version identically; remove some day
            run_model_test('plus lookahead', Sequential([
                Embedding(emb_dim),
                with_lookahead(),
                BatchNormalization(),
                Recurrence(LSTM(hidden_dim), go_backwards=False),
                BatchNormalization(),
                Dense(label_dim)
            ]), [0.057901888466764646, 0.3044637752807047])

        # replace lookahead by bidirectional model
        with default_options(initial_state=0.1):  # inject an option to mimic the BS version identically; remove some day
            run_model_test('replace lookahead by bidirectional model', Sequential([
                Embedding(emb_dim),
                BatchNormalization(),
                BiRecurrence(LSTM(hidden_dim), LSTM(hidden_dim)),
                BatchNormalization(),
                Dense(label_dim)
            ]), [0.0579573500457558, 0.3214986774820327])

        # test of a config like in the example but with additions to test many code paths
        with default_options(enable_self_stabilization=True, use_peepholes=True):
            run_model_test('alternate paths', Sequential([
                Stabilizer(),
                Embedding(emb_dim),
                BatchNormalization(),
                Recurrence(LSTM(hidden_dim, cell_shape=hidden_dim+50), go_backwards=True),
                BatchNormalization(map_rank=1),
                Dense(label_dim)
            ]), [0.08574360112032389, 0.41847621578367716])

    # test of the example itself
    # this emulates the main code in the PY file
    reader = create_reader(data_dir + "/atis.train.ctf", is_training=True)
    model = create_model()
    loss_avg, evaluation_avg = train(reader, model, max_epochs=1)
    expected_avg = [0.15570838301766451, 0.7846451368305728]
    assert np.allclose([evaluation_avg, loss_avg], expected_avg, atol=TOLERANCE_ABSOLUTE)

    # test of a config like in the example but with additions to test many code paths
    if device_id >= 0: # BatchNormalization currently does not run on CPU
        # Create a path to TensorBoard log directory and make sure it does not exist.
        abs_path = os.path.dirname(os.path.abspath(__file__))
        tb_logdir = os.path.join(abs_path, 'language_understanding_test_log')
        if os.path.exists(tb_logdir):
            shutil.rmtree(tb_logdir)

        reader = create_reader(data_dir + "/atis.train.ctf", is_training=True)
        model = create_test_model()
        loss_avg, evaluation_avg = train(reader, model, max_epochs=1, tensorboard_logdir=tb_logdir)
        log_number_of_parameters(model, trace_level=1)
        print()
        expected_avg = [0.084, 0.407364]
        assert np.allclose([evaluation_avg, loss_avg], expected_avg, atol=TOLERANCE_ABSOLUTE)

        # Ensure that the TensorBoard log directory was created and contains exactly one file with the expected name.
        tb_files = 0
        for tb_file in os.listdir(tb_logdir):
            assert tb_file.startswith("events.out.tfevents")
            tb_files += 1
        assert tb_files == 1
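
The tensorboard_logdir argument implies that train() attaches a TensorBoard progress writer internally; a minimal sketch of such wiring (an assumption about the helper, not its actual body):

import cntk as C
from cntk.logging import TensorBoardProgressWriter

def make_tensorboard_writer(model, tb_logdir):
    # log every 10 minibatches into tb_logdir; the frequency is a made-up choice
    return TensorBoardProgressWriter(freq=10, log_dir=tb_logdir, model=model)

# the writer would then be passed to the Trainer next to the learner, e.g.
# trainer = C.Trainer(z, (loss, metric), [learner],
#                     [make_tensorboard_writer(z, tb_logdir)])
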
Code example #50
0
def mem_leak_check(nonlinearity, num_hidden_layers, device_id,
                   minibatch_size=1, num_samples=10000):
    from cntk.cntk_py import always_allow_setting_default_device
    always_allow_setting_default_device()
    C.try_set_default_device(cntk_device(device_id))
    np.random.seed(0)

    learning_rate = 0.5
    lr_schedule = C.learning_rate_schedule(learning_rate)

    hidden_layers_dim = 50

    inp = C.input_variable((input_dim), np.float32)
    label = C.input_variable((num_output_classes), np.float32)

    z = fully_connected_classifier_net(inp, num_output_classes, hidden_layers_dim,
                                       num_hidden_layers, nonlinearity)

    loss = C.cross_entropy_with_softmax(z, label)
    eval_error = C.classification_error(z, label)

    learner = C.sgd(z.parameters, lr_schedule, minibatch_size=0)
    trainer = C.Trainer(z, (loss, eval_error), [learner])

    num_minibatches_to_train = int(num_samples / minibatch_size)

    mem = np.zeros(num_minibatches_to_train)

    features, labels = generate_random_data_sample(minibatch_size,
                                                   input_dim,
                                                   num_output_classes)

    # Set a maximum fraction of iterations, in which the memory is allowed to
    # increase. Most likely these will be the first training runs.
    # Long-term this test needs to be run in a separate process over a longer
    # period of time.
    MEM_INCREASE_FRACTION_TOLERANCE = 0.01
    # Set a maximum allowed memory increase. This tolerance should not be
    # exceeded when run as a standalone process (simply run this file with the
    # Python executable).
    MEM_INCREASE_TOLERANCE = 10*1024

    dev = cntk_device(device_id)
    i = 0
    proc = os_process()
    while i < num_minibatches_to_train:
        mem[i] = mem_used(proc)

        # Specify the input variables mapping in the model to actual minibatch
        # data for training.
        trainer.train_minibatch({inp: features, label: labels},
                                device=dev)
        i += 1

    mem_deltas = np.diff(mem)
    iterations_with_mem_increase = (mem_deltas > 0).sum()
    mem_inc_fraction = iterations_with_mem_increase/num_minibatches_to_train
    mem_diff = mem[-1] - mem[10]

    if mem_inc_fraction > MEM_INCREASE_FRACTION_TOLERANCE and \
            mem_diff > MEM_INCREASE_TOLERANCE:
        # For the rough leak estimation we take the memory footprint after the
        # dust of the first train_minibatch runs has settled.
        mem_changes = mem_deltas[mem_deltas != 0]
        raise ValueError('Potential memory leak of ~ %i KB (%i%% of MBs '
                         'increased memory usage) detected with %s:\n%s' %
                         (int(mem_diff/1024), int(mem_inc_fraction*100),
                             nonlinearity, mem_changes))
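
mem_leak_check is a helper, not a test itself; a sketch of how it might be driven from pytest (the parametrization below is a hypothetical choice, not the original suite's):

import pytest
import cntk as C

@pytest.mark.parametrize("nonlinearity", [C.relu, C.tanh, C.sigmoid])
def test_mem_leak(nonlinearity, device_id):
    # a short run is enough; the helper raises ValueError on a suspected leak
    mem_leak_check(nonlinearity, num_hidden_layers=2, device_id=device_id,
                   minibatch_size=1, num_samples=1000)
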
Code example #51
0
def test_session_restart_from_checkpoint_preserve_all(tmpdir, device_id):
    device = cntk_device(device_id)
    writer = MockProgressWriter()
    t, feature, label = create_sample_model(device, writer)
    mbs = mb_source(tmpdir, "training", max_samples=INFINITELY_REPEAT)

    input_map = {feature: mbs.streams.features, label: mbs.streams.labels}

    test_dir = str(tmpdir)

    training_session(
        trainer=t,
        mb_source=mbs,
        mb_size=4,
        var_to_stream=input_map,
        max_samples=60,
        progress_frequency=20,
        checkpoint_config=CheckpointConfig(
            frequency=20,
            preserve_all=True,
            filename=str(tmpdir / "restart_from_checkpoint"))).train(device)

    candidates = [
        f for f in listdir(test_dir) if isfile(join(test_dir, f))
        and f.startswith("restart_from_checkpoint")
    ]

    assert ("restart_from_checkpoint0" in candidates)
    assert ("restart_from_checkpoint0.ckp" in candidates)

    assert ("restart_from_checkpoint1" in candidates)
    assert ("restart_from_checkpoint1.ckp" in candidates)

    assert ("restart_from_checkpoint2" in candidates)
    assert ("restart_from_checkpoint2.ckp" in candidates)

    assert ("restart_from_checkpoint" in candidates)
    assert ("restart_from_checkpoint" in candidates)

    # remove everything except for 1
    for f in candidates:
        if f != "restart_from_checkpoint1" and f != "restart_from_checkpoint1.ckp":
            os.remove(str(tmpdir / f))

    # remove information about epochs 1 and 2 from the mock printer
    first_run_minibatch_info = [
        i for i in writer.minibatch_info if i[0] != 0 and i[0] != 1
    ]
    writer.minibatch_info = []
    writer.training_summary_counter = 2

    # restore from a particular checkpoint and again save everything from the third epoch
    mbs = mb_source(tmpdir, "training", max_samples=INFINITELY_REPEAT)
    training_session(
        trainer=t,
        mb_source=mbs,
        mb_size=4,
        var_to_stream=input_map,
        max_samples=60,
        progress_frequency=20,
        checkpoint_config=CheckpointConfig(
            frequency=20,
            restore=True,
            preserve_all=True,
            filename=str(tmpdir / "restart_from_checkpoint"))).train(device)

    candidates = [
        f for f in listdir(test_dir) if isfile(join(test_dir, f))
        and f.startswith("restart_from_checkpoint")
    ]

    assert ("restart_from_checkpoint1" in candidates)
    assert ("restart_from_checkpoint1.ckp" in candidates)

    assert ("restart_from_checkpoint2" in candidates)
    assert ("restart_from_checkpoint2.ckp" in candidates)

    assert ("restart_from_checkpoint" in candidates)
    assert ("restart_from_checkpoint.ckp" in candidates)

    assert (len(candidates) == 6)
    assert (first_run_minibatch_info == writer.minibatch_info)

    # remove everything except for 1
    for f in candidates:
        if f != "restart_from_checkpoint1" and f != "restart_from_checkpoint1.ckp":
            os.remove(str(tmpdir / f))

    # remove information about epochs 1 and 2 from the mock printer
    writer.minibatch_info = []
    writer.training_summary_counter = 2

    # rename checkpoint 1 to the generic checkpoint name
    os.rename(str(tmpdir / "restart_from_checkpoint1"),
              str(tmpdir / "restart_from_checkpoint"))
    os.rename(str(tmpdir / "restart_from_checkpoint1.ckp"),
              str(tmpdir / "restart_from_checkpoint.ckp"))

    # restore from a particular checkpoint and again save everything from the third epoch
    mbs = mb_source(tmpdir, "training", max_samples=INFINITELY_REPEAT)
    training_session(
        trainer=t,
        mb_source=mbs,
        mb_size=4,
        var_to_stream=input_map,
        max_samples=60,
        progress_frequency=20,
        checkpoint_config=CheckpointConfig(
            frequency=20,
            restore=True,
            preserve_all=True,
            filename=str(tmpdir / "restart_from_checkpoint"))).train(device)

    candidates = [
        f for f in listdir(test_dir) if isfile(join(test_dir, f))
        and f.startswith("restart_from_checkpoint")
    ]

    assert ("restart_from_checkpoint2" in candidates)
    assert ("restart_from_checkpoint2.ckp" in candidates)

    assert ("restart_from_checkpoint" in candidates)
    assert ("restart_from_checkpoint.ckp" in candidates)

    assert (len(candidates) == 4)
    assert (first_run_minibatch_info == writer.minibatch_info)
Code example #52
0
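
This conftest.py excerpt starts mid-file: abs_path, keras_base_name, keras_version, and the imports are defined further up in the original. Plausible definitions, purely as assumptions to make the excerpt self-contained:

import os
import shutil
import zipfile

import cntk
try:
    from urllib.request import urlretrieve  # Python 3
except ImportError:
    from urllib import urlretrieve           # Python 2

abs_path = os.path.dirname(os.path.abspath(__file__))
keras_version = '2.0.6'                      # hypothetical pinned version
keras_base_name = 'keras-' + keras_version
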
keras_zip_name = keras_base_name + '.zip'
keras_zip_path = os.path.join(abs_path, keras_zip_name)
keras_path = os.path.join(abs_path, keras_base_name)

if not os.path.exists(keras_path):
    if not os.path.exists(keras_zip_path):
        if 'CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY' in os.environ:
            shutil.copy(
                os.path.join(
                    os.environ['CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY'],
                    'Keras', keras_zip_name), abs_path)
        else:
            keras_zip_url = 'https://github.com/fchollet/keras/archive/%s.zip' % (
                keras_version)
            urlretrieve(keras_zip_url, keras_zip_path)
    with zipfile.ZipFile(keras_zip_path) as keras_zip:
        keras_zip.extractall(abs_path)
    # We'll use our own pytest.ini; move the original out of the way
    os.rename(os.path.join(keras_path, 'pytest.ini'),
              os.path.join(keras_path, 'pytest.ini.bak'))

cntk_test_device_id = -1 if os.environ.get('TEST_DEVICE', 'cpu') == 'cpu' else 0
cntk.device.try_set_default_device(cntk_device(cntk_test_device_id))

# Files that we can't even import (we don't install all of the dependencies, e.g., tensorflow)
collect_ignore = [
    os.path.join(
        *[keras_base_name, 'tests', 'keras', 'backend', 'backend_test.py'])
]
Code example #53
0
def test_fasterrcnn_grocery_training_4stage(device_id):
    if cntk_device(device_id).type() != DeviceKind_GPU:
        pytest.skip('test only runs on GPU')  # it runs very slow in CPU
    try_set_default_device(cntk_device(device_id))
    _, _, _ = run_fasterrcnn_grocery_training(e2e=False)
Code example #54
0
File: trainer_test.py Project: talktovishal/CNTK
def test_eval_sparse_dense(tmpdir, device_id):
    from cntk import Axis
    from cntk.io import MinibatchSource, CTFDeserializer, StreamDef, StreamDefs
    from cntk.ops import input_variable, times

    input_vocab_dim = label_vocab_dim = 69

    ctf_data = '''\
0	|S0 3:1 |# <s>	|S1 3:1 |# <s>
0	|S0 4:1 |# A	|S1 32:1 |# ~AH
0	|S0 5:1 |# B	|S1 36:1 |# ~B
0	|S0 4:1 |# A	|S1 31:1 |# ~AE
0	|S0 7:1 |# D	|S1 38:1 |# ~D
0	|S0 12:1 |# I	|S1 47:1 |# ~IY
0	|S0 1:1 |# </s>	|S1 1:1 |# </s>
2	|S0 60:1 |# <s>	|S1 3:1 |# <s>
2	|S0 61:1 |# A	|S1 32:1 |# ~AH
'''
    ctf_file = str(tmpdir / '2seqtest.txt')
    with open(ctf_file, 'w') as f:
        f.write(ctf_data)

    mbs = MinibatchSource(CTFDeserializer(
        ctf_file,
        StreamDefs(features=StreamDef(field='S0',
                                      shape=input_vocab_dim,
                                      is_sparse=True),
                   labels=StreamDef(field='S1',
                                    shape=label_vocab_dim,
                                    is_sparse=True))),
                          randomize=False,
                          epoch_size=2)

    batch_axis = Axis.default_batch_axis()
    input_seq_axis = Axis('inputAxis')
    label_seq_axis = Axis('labelAxis')

    input_dynamic_axes = [batch_axis, input_seq_axis]
    raw_input = input_variable(shape=input_vocab_dim,
                               dynamic_axes=input_dynamic_axes,
                               name='raw_input',
                               is_sparse=True)

    mb_valid = mbs.next_minibatch(minibatch_size_in_samples=100,
                                  input_map={raw_input: mbs.streams.features},
                                  device=cntk_device(device_id))

    z = times(raw_input, np.eye(input_vocab_dim))
    e_reader = z.eval(mb_valid, device=cntk_device(device_id))

    # CSR with the raw_input encoding in ctf_data
    one_hot_data = [[3, 4, 5, 4, 7, 12, 1], [60, 61]]
    data = [
        csr(np.eye(input_vocab_dim, dtype=np.float32)[d]) for d in one_hot_data
    ]
    e_csr = z.eval({raw_input: data}, device=cntk_device(device_id))
    assert np.all([np.allclose(a, b) for a, b in zip(e_reader, e_csr)])

    # One-hot with the raw_input encoding in ctf_data
    data = Value.one_hot(one_hot_data,
                         num_classes=input_vocab_dim,
                         device=cntk_device(device_id))
    e_hot = z.eval({raw_input: data}, device=cntk_device(device_id))
    assert np.all([np.allclose(a, b) for a, b in zip(e_reader, e_hot)])
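
Distilled, the test verifies that reading sparse data through a CTF reader, feeding SciPy CSR matrices, and feeding Value.one_hot are three interchangeable encodings of the same one-hot sequences. A self-contained miniature of the CSR/one-hot half (dimensions are made up):

import numpy as np
from scipy.sparse import csr_matrix as csr
from cntk import Value
from cntk.ops import input_variable, times

dim = 4
x = input_variable(shape=dim, is_sparse=True)
z = times(x, np.eye(dim, dtype=np.float32))  # densify via identity projection

seqs = [[0, 2, 3]]                           # one sequence of token ids
csr_data = [csr(np.eye(dim, dtype=np.float32)[s]) for s in seqs]
hot_data = Value.one_hot(seqs, num_classes=dim)

assert np.allclose(z.eval({x: csr_data}), z.eval({x: hot_data}))
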
Code example #55
0
def test_session_restart_from_checkpoint(tmpdir, device_id):
    from os import listdir
    from shutil import copyfile
    from os.path import isfile, join

    device = cntk_device(device_id)
    writer = MockProgressWriter()
    t, feature, label = create_sample_model(device, writer)
    mbs = mb_source(tmpdir, "training", epoch_size=INFINITELY_REPEAT)

    input_map = {feature: mbs.streams.features, label: mbs.streams.labels}

    test_dir = str(tmpdir)

    training_session(trainer=t,
                     mb_source=mbs,
                     mb_size=4,
                     var_to_stream=input_map,
                     max_samples=60,
                     checkpoint_config=CheckpointConfig(
                         frequency=35,
                         preserve_all=True,
                         filename=str(tmpdir / "restart_from_checkpoint")),
                     progress_frequency=35).train(device)

    candidates = [
        f for f in listdir(test_dir) if isfile(join(test_dir, f))
        and f.startswith("restart_from_checkpoint")
    ]

    assert ("restart_from_checkpoint0" in candidates)
    assert ("restart_from_checkpoint0.ckp" in candidates)

    assert ("restart_from_checkpoint1" in candidates)
    assert ("restart_from_checkpoint1.ckp" in candidates)

    assert ("restart_from_checkpoint" in candidates)
    assert ("restart_from_checkpoint" in candidates)

    # save a copy of checkpoint 0 under a new name
    copyfile(str(tmpdir / "restart_from_checkpoint0"),
             str(tmpdir / "saved_restart_from_checkpoint0"))
    copyfile(str(tmpdir / "restart_from_checkpoint0.ckp"),
             str(tmpdir / "saved_restart_from_checkpoint0.ckp"))

    # remove everything except for 0
    for f in candidates:
        os.remove(str(tmpdir / f))

    # remove information about epoch 0 from the mock printer
    first_run_minibatch_info = [i for i in writer.minibatch_info if i[0] != 0]
    writer.minibatch_info = []
    writer.training_summary_counter = 1

    # restore from a particular checkpoint and again save everything from
    # the second epoch

    training_session(trainer=t,
                     mb_source=mbs,
                     mb_size=4,
                     var_to_stream=input_map,
                     max_samples=60,
                     checkpoint_config=CheckpointConfig(
                         frequency=35,
                         restore=True,
                         preserve_all=True,
                         filename=str(tmpdir /
                                      "saved_restart_from_checkpoint0")),
                     progress_frequency=35).train(device)

    candidates = [
        f for f in listdir(test_dir) if isfile(join(test_dir, f))
        and f.startswith("saved_restart_from_checkpoint0")
    ]

    assert ("saved_restart_from_checkpoint00" not in candidates)
    assert ("saved_restart_from_checkpoint00.ckp" not in candidates)

    assert ("saved_restart_from_checkpoint01" in candidates)
    assert ("saved_restart_from_checkpoint01.ckp" in candidates)

    assert ("saved_restart_from_checkpoint0" in candidates)
    assert ("saved_restart_from_checkpoint0.ckp" in candidates)

    assert (first_run_minibatch_info == writer.minibatch_info)
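
All of the file-name assertions in these checkpoint tests follow one convention: each checkpoint write produces a model file plus a .ckp trainer-state sidecar, and with preserve_all=True an indexed copy (filename0, filename1, ...) is kept per checkpoint instead of being overwritten. A sketch of the configuration driving that layout (the filename is illustrative):

from cntk.train.training_session import CheckpointConfig

checkpoint_config = CheckpointConfig(
    frequency=35,             # checkpoint every 35 samples
    restore=True,             # resume from "my_checkpoint" if it already exists
    preserve_all=True,        # keep my_checkpoint0, my_checkpoint1, ...
    filename="my_checkpoint") # each write also leaves a my_checkpoint*.ckp file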