Example #1
def cifar_resnet():
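    # Trains a ResNet image classifier on CIFAR-style data using the legacy CNTK v2 beta Python API.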
    dev = 0
    cntk_dev = cntk_device(dev)
    epoch_size = sys.maxsize
    mbs = create_mb_source(epoch_size)
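    # Locate the 'features' and 'labels' streams exposed by the minibatch source.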
    stream_infos = mbs.stream_infos()
    for si in stream_infos:
        if si.m_name == 'features':
            features_si = si
        elif si.m_name == 'labels':
            labels_si = si

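    # Reorder the reported sample layout so the last dimension (channels) comes first.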
    image_shape = features_si.m_sample_layout.dimensions()
    image_shape = (image_shape[2], image_shape[0], image_shape[1])

    num_classes = labels_si.m_sample_layout.dimensions()[0]

    image_input = variable(image_shape,
                           features_si.m_element_type,
                           needs_gradient=False,
                           name="Images")
    classifier_output = resnet_classifer(image_input, num_classes, dev,
                                         "classifierOutput")
    label_var = variable((num_classes,),
                         labels_si.m_element_type,
                         needs_gradient=False,
                         name="Labels")

    ce = cross_entropy_with_softmax(classifier_output, label_var)
    pe = classification_error(classifier_output, label_var)

    #TODO: add save and load module code
    image_classifier = combine([ce, pe, classifier_output], "ImageClassifier")

    lr = learning_rates_per_sample(0.0078125)

    mb_size = 32
    num_mbs = 1000

    trainer = Trainer(classifier_output, ce, pe,
                      [sgdlearner(classifier_output.owner.parameters(), lr)])

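    # Training loop: draw a minibatch, bind it to the input variables, and run one update step.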
    for i in range(0, num_mbs):
        mb = mbs.get_next_minibatch(mb_size, cntk_dev)

        arguments = dict()
        arguments[image_input] = mb[features_si].m_data
        arguments[label_var] = mb[labels_si].m_data

        trainer.train_minibatch(arguments, cntk_dev)
        freq = 20
        if i % freq == 0:
            training_loss = get_train_loss(trainer)
            eval_crit = get_train_eval_criterion(trainer)
            print(
                "Minibatch: {}, Train Loss: {}, Train Evaluation Criterion: {}"
                .format(i, training_loss, eval_crit))
Example #2
def ffnet():
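    # Trains a two-hidden-layer fully-connected classifier on the Simple2d example data.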
    input_dim = 2
    num_output_classes = 2
    num_hidden_layers = 2
    hidden_layers_dim = 50
    epoch_size = sys.maxsize
    minibatch_size = 25
    num_samples_per_sweep = 10000
    num_sweeps_to_train_with = 2
    num_minibatches_to_train = (num_samples_per_sweep * num_sweeps_to_train_with) // minibatch_size
    lr = learning_rates_per_sample(0.02)
    input_var = variable((input_dim,), np.float32, needs_gradient=False, name="features")
    label = variable((num_output_classes,), np.float32, needs_gradient=False, name="labels")
    dev = -1
    cntk_dev = cntk_device(dev)
    netout = fully_connected_classifier_net(input_var, num_output_classes, hidden_layers_dim, num_hidden_layers, dev, sigmoid)

    ce = cross_entropy_with_softmax(netout, label)
    pe = classification_error(netout, label)
    #TODO: add save and load module code
    ffnet_model = combine([ce, pe, netout], "classifier_model")

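    # Build a CNTK text-format minibatch source over the Simple2d training file.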
    rel_path = r"../../../../Examples/Other/Simple2d/Data/SimpleDataTrain_cntk_text.txt"
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), rel_path)
    cm = create_text_mb_source(path, input_dim, num_output_classes, epoch_size)

    stream_infos = cm.stream_infos()

    for si in stream_infos:
        if si.m_name == 'features':
            features_si = si
        elif si.m_name == 'labels':
            labels_si = si

    trainer = Trainer(netout, ce, pe, [sgdlearner(netout.owner.parameters(), lr)])

    for i in range(0, num_minibatches_to_train):
        mb = cm.get_next_minibatch(minibatch_size, cntk_dev)

        arguments = dict()
        arguments[input_var] = mb[features_si].m_data
        arguments[label] = mb[labels_si].m_data

        trainer.train_minibatch(arguments, cntk_dev)
        freq = 20
        if i % freq == 0:
            training_loss = get_train_loss(trainer)
            eval_crit = get_train_eval_criterion(trainer)
            print("Minibatch: {}, Train Loss: {}, Train Evaluation Criterion: {}".format(i, training_loss, eval_crit))
Example #3
    # loop over minibatches in the epoch
    while sample_count < num_train_samples:

        minibatch = train_minibatch_source.next_minibatch(min(train_minibatch_size, num_train_samples - sample_count))

        # Specify the mapping of input variables in the model to actual minibatch data to be trained with
        data = {input_vars: minibatch[training_features],
                labels: minibatch[training_labels]}
        trainer.train_minibatch(data)

        sample_count += data[labels].num_samples
        num_minibatch += 1

        # Print the training progress data
        if num_minibatch % training_progress_output_freq == 0:
            training_loss = cntk.get_train_loss(trainer)
            eval_error = cntk.get_train_eval_criterion(trainer)
            t = perf_counter() - training_start_time
            print("(%d s) Epoch %d  |  # of Samples: %6d  |  Loss: %.6f  |  Error: %.6f" % (t, epoch, sample_count, training_loss, eval_error))

t_minute = (perf_counter() - training_start_time) / 60
print("Training Completed in %f minutes." % t_minute, end="\n\n")


'''
-------------------
Classification Test
-------------------
'''
test_minibatch_size = 1000
Example #4
def train_sequence_classifier(device):
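    # Trains an LSTM-based sequence classifier over sparse text input.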
    input_dim = 2000
    cell_dim = 25
    hidden_dim = 25
    embedding_dim = 50
    num_output_classes = 5

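    # The input is declared sparse: each sample is a sparse vector of dimension input_dim (e.g. a one-hot word encoding).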
    features = variable(shape=input_dim, is_sparse=True, name="features")
    classifier_output = LSTM_sequence_classifer_net(features,
                                                    num_output_classes,
                                                    embedding_dim, hidden_dim,
                                                    cell_dim, device)

    label = variable(num_output_classes,
                     dynamic_axes=[Axis.default_batch_axis()],
                     name="labels")
    ce = cross_entropy_with_softmax(classifier_output, label)
    pe = classification_error(classifier_output, label)

    #TODO: add save and load module code
    lstm_net = combine([ce, pe, classifier_output], "classifier_model")

    rel_path = r"../../../../Tests/EndToEndTests/Text/SequenceClassification/Data/Train.ctf"
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), rel_path)
    cm = create_text_mb_source(path, input_dim, num_output_classes, 0, True,
                               False, "x", "y")

    stream_infos = cm.stream_infos()

    for si in stream_infos:
        if si.m_name == 'features':
            features_si = si
        elif si.m_name == 'labels':
            labels_si = si

    minibatch_size = 200
    lr = learning_rates_per_sample(0.0005)

    trainer = Trainer(classifier_output, ce, pe,
                      [sgdlearner(classifier_output.owner.parameters(), lr)])

    freq = 1
    i = 0
    cntk_dev = cntk_device(device)
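    # Train until the minibatch source is exhausted; an empty minibatch signals the end of the data.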
    while True:
        mb = cm.get_next_minibatch(minibatch_size, cntk_dev)
        if len(mb) == 0:
            break
        arguments = dict()
        arguments[features] = mb[features_si].m_data
        arguments[label] = mb[labels_si].m_data

        trainer.train_minibatch(arguments, cntk_dev)

        if i % freq == 0:
            training_loss = get_train_loss(trainer)
            eval_crit = get_train_eval_criterion(trainer)
            print(
                "Minibatch: {}, Train Loss: {}, Train Evaluation Criterion: {}"
                .format(i, training_loss, eval_crit))

        i += 1