Code Example #1
File: nn.py  Project: hahatt/CNTK
def print_training_progress(trainer, mb, frequency):

    if mb % frequency == 0:
        training_loss = get_train_loss(trainer)
        eval_crit = get_train_eval_criterion(trainer)
        print("Minibatch: {}, Train Loss: {}, Train Evaluation Criterion: {}".format(
            mb, training_loss, eval_crit))
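Note: the snippets on this page assume helper functions get_train_loss and get_train_eval_criterion defined elsewhere in the same files. In the CNTK tutorials they are usually thin wrappers around the Trainer object; a minimal sketch (not taken from any of the projects listed here) would be:

def get_train_loss(trainer):
    # average loss over the last minibatch processed by the trainer
    return trainer.previous_minibatch_loss_average

def get_train_eval_criterion(trainer):
    # average evaluation criterion (e.g. classification error) over the last minibatch
    return trainer.previous_minibatch_evaluation_average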
Code Example #2
def print_training_progress(trainer, mb, frequency):

    if mb % frequency == 0:
        training_loss = get_train_loss(trainer)
        eval_crit = get_train_eval_criterion(trainer)
        print("Minibatch: {}, Train Loss: {}, Train Evaluation Criterion: {}".
              format(mb, training_loss, eval_crit))
Code Example #3
def print_training_progress(trainer, mb, frequency):
    training_loss = "NA"
    eval_error = "NA"

    if mb % frequency == 0:
        training_loss = get_train_loss(trainer)
        eval_error = get_train_eval_criterion(trainer)

    return mb, training_loss, eval_error
Code Example #4
def print_training_progress(trainer, mb, frequency):
    training_loss = "NA"
    eval_error = "NA"

    if mb % frequency == 0:
        training_loss = get_train_loss(trainer)
        eval_error = get_train_eval_criterion(trainer)

    return mb, training_loss, eval_error
Code Example #5
File: mnist_logistic_regression.py  Project: m3rik/nn
def print_training_progress(trainer, mb, frequency, verbose=1):
    training_loss, eval_error = "NA", "NA"

    if mb % frequency == 0:
        training_loss = get_train_loss(trainer)
        eval_error = get_train_eval_criterion(trainer)
        if verbose:
            print ("Minibatch: {0}, Loss: {1:.4f}, Error: {2:.2f}".format(mb, training_loss, eval_error))

    return mb, training_loss, eval_error
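A possible calling pattern for the version above, collecting the returned values for later plotting. The trainer, input map, data arrays, and frequency below are placeholders for illustration, not code from the project above:

plotdata = {"batchsize": [], "loss": [], "error": []}
for mb in range(num_minibatches_to_train):
    trainer.train_minibatch({features: train_x[mb], label: train_y[mb]})
    batch, loss, error = print_training_progress(trainer, mb, frequency=50, verbose=1)
    if loss != "NA":
        # keep only the minibatches where fresh statistics were reported
        plotdata["batchsize"].append(batch)
        plotdata["loss"].append(loss)
        plotdata["error"].append(error)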
Code Example #6
TtrainS = datetime.datetime.now()
for epoch in range(1, EPOCH + 1):
    # Specify the input variables mapping in the model to actual minibatch data for training
    for i in range(int(len(X_train) / minibatch_size)):
        #i=0
        trainer.train_minibatch({
            Xs:
            X_train[i * minibatch_size:(i + 1) * minibatch_size],
            ys:
            y_train[i * minibatch_size:(i + 1) * minibatch_size]
        })
    #trainer.train_minibatch({Xs:X_train, ys: y_train})

    if (epoch % SHOW_FREQ == 0):
        cur_loss = get_train_loss(trainer)
        acc = get_train_eval_criterion(trainer)
        print("{}/{}, loss = {}, acc = {}".format(epoch, EPOCH, cur_loss,
                                                  1 - acc))
        #print("{}/{}, loss = {}".format(epoch, EPOCH, trainer.test_minibatch({Xs : X_train[:20], ys : y_train[:20]}) ))

    if (epoch % TEST_FREQ == 0):
        print(1 - trainer.test_minibatch({Xs: X_test1, ys: y_test1}))
        acc2 = 1 - trainer.test_minibatch({Xs: X_test2, ys: y_test2})
        print(acc2)
        acc_all.append(acc2)
        if acc2 > max_acc2:
            max_acc2 = acc2
TtrainE = datetime.datetime.now()

plt.figure(figsize=(15, 9))
plt.plot(np.array(range(len(acc_all))) * 20, acc_all, linewidth=1.5)
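A small sketch for finishing the figure started above; the axis labels are assumptions (acc_all holds accuracies computed as 1 - error from test_minibatch, and the * 20 spacing on the x-axis suggests TEST_FREQ is 20, which is not shown in the excerpt):

plt.xlabel("epoch")          # assumes one entry in acc_all every 20 epochs
plt.ylabel("test accuracy")  # values appended above are 1 - test error
plt.grid(True)
plt.show()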
Code Example #7
def train(train_reader, valid_reader, vocab, i2w, model, max_epochs):

    # do some hooks that we won't need in the future
    label_sequence = model.find_by_name('label_sequence')
    decoder_history_hook = model.find_by_name('decoder_history_hook')

    # Criterion nodes
    ce = cross_entropy_with_softmax(model, label_sequence)
    errs = classification_error(model, label_sequence)

    def clone_and_hook():
        # network output for decoder history
        net_output = hardmax(model)

        # make a clone of the graph where the ground truth is replaced by the network output
        return model.clone(CloneMethod.share,
                           {decoder_history_hook.output: net_output.output})

    # get a new model that uses the past network output as input to the decoder
    new_model = clone_and_hook()

    # Instantiate the trainer object to drive the model training
    lr_per_sample = learning_rate_schedule(0.007, UnitType.sample)
    minibatch_size = 72
    momentum_time_constant = momentum_as_time_constant_schedule(1100)
    clipping_threshold_per_sample = 2.3
    gradient_clipping_with_truncation = True
    learner = momentum_sgd(
        model.parameters,
        lr_per_sample,
        momentum_time_constant,
        gradient_clipping_threshold_per_sample=clipping_threshold_per_sample,
        gradient_clipping_with_truncation=gradient_clipping_with_truncation)
    trainer = Trainer(model, ce, errs, learner)

    # Get minibatches of sequences to train with and perform model training
    i = 0
    mbs = 0

    # Set epoch size to a larger number for lower training error
    epoch_size = 5000 if isFast else 908241

    training_progress_output_freq = 100

    # bind inputs to data from readers
    train_bind = {
        find_arg_by_name('raw_input', model): train_reader.streams.features,
        find_arg_by_name('raw_labels', model): train_reader.streams.labels
    }
    valid_bind = {
        find_arg_by_name('raw_input', new_model):
        valid_reader.streams.features,
        find_arg_by_name('raw_labels', new_model): valid_reader.streams.labels
    }

    for epoch in range(max_epochs):
        loss_numer = 0
        metric_numer = 0
        denom = 0

        while i < (epoch + 1) * epoch_size:
            # get next minibatch of training data
            mb_train = train_reader.next_minibatch(minibatch_size,
                                                   input_map=train_bind)
            trainer.train_minibatch(mb_train)

            # collect epoch-wide stats
            samples = trainer.previous_minibatch_sample_count
            loss_numer += trainer.previous_minibatch_loss_average * samples
            metric_numer += trainer.previous_minibatch_evaluation_average * samples
            denom += samples

            # every N MBs evaluate on a test sequence to visually show how we're doing; also print training stats
            if mbs % training_progress_output_freq == 0:

                print(
                    "Minibatch: {0}, Train Loss: {1:2.3f}, Train Evaluation Criterion: {2:2.3f}"
                    .format(mbs, get_train_loss(trainer),
                            get_train_eval_criterion(trainer)))

                mb_valid = valid_reader.next_minibatch(minibatch_size,
                                                       input_map=valid_bind)
                e = new_model.eval(mb_valid)
                print_sequences(e, i2w)

            i += mb_train[find_arg_by_name('raw_labels', model)].num_samples
            mbs += 1

        print("--- EPOCH %d DONE: loss = %f, errs = %f ---" %
              (epoch, loss_numer / denom, 100.0 * (metric_numer / denom)))
        return 100.0 * (metric_numer / denom)
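The per-epoch loss and error printed above are sample-weighted averages over all minibatches in the epoch. A self-contained illustration of the same bookkeeping, with made-up numbers purely for demonstration:

# (samples, minibatch loss average, minibatch metric average) -- illustrative values only
minibatch_stats = [(72, 1.90, 0.55), (72, 1.45, 0.48), (36, 1.20, 0.40)]
loss_numer = metric_numer = denom = 0
for samples, loss_avg, metric_avg in minibatch_stats:
    loss_numer += loss_avg * samples
    metric_numer += metric_avg * samples
    denom += samples
print("loss = %f, errs = %f" % (loss_numer / denom, 100.0 * (metric_numer / denom)))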
Code Example #8
File: mnist.py  Project: itoupeter/CNTK
lr_schedule = learning_rate_schedule(learning_rate, UnitType.minibatch)
learner = sgd(z.parameters, lr_schedule)
trainer = Trainer(z, (loss, error), [learner])

# training
num_sweeps = 13
minibatch_size = 50
num_train_samples = 60000
num_test_samples = 10000

for i in range(num_sweeps):
    for j in range(0, num_train_samples, minibatch_size):
        trainer.train_minibatch({
            input: train_features[j:j + minibatch_size, :],
            label: train_labels[j:j + minibatch_size, :]
        })
    train_error = get_train_eval_criterion(trainer)

    test_error = 0.
    for j in range(0, num_test_samples, minibatch_size):
        test_data = {
            input: test_features[j:j + minibatch_size, :],
            label: test_labels[j:j + minibatch_size, :]
        }
        test_error = test_error + trainer.test_minibatch(test_data)
    test_error = test_error / (num_test_samples / minibatch_size)

    print('sweep {0} train error: {1:.4f} test error: {2:.4f}'.format(
        i, train_error, test_error),
          flush=True)
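The excerpt above assumes that the network z, the loss/error criteria, and the input and label variables are defined earlier in the file. A rough sketch of such a setup for MNIST (layer size, normalization, and learning rate are assumptions, not taken from the project):

from cntk import input_variable, cross_entropy_with_softmax, classification_error
from cntk.layers import Dense

input = input_variable(784)                    # flattened 28x28 pixel images
label = input_variable(10)                     # one-hot digit labels
z = Dense(10, activation=None)(input / 255.0)  # linear layer on normalized pixels
loss = cross_entropy_with_softmax(z, label)
error = classification_error(z, label)
learning_rate = 0.2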
Code Example #9
def save_metrics(trainer, filename):
    training_loss = get_train_loss(trainer)
    eval_error = get_train_eval_criterion(trainer)
    # use a context manager so the file is always closed after writing
    with open(filename, 'w') as f:
        f.write("Loss: {0:.4f}, Error: {1:.2f}%".format(training_loss,
                                                        eval_error * 100))
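A hypothetical usage sketch; the trainer is assumed to come from one of the training loops above, and the file name is arbitrary:

save_metrics(trainer, "training_metrics.txt")
with open("training_metrics.txt") as f:
    print(f.read())  # e.g. "Loss: 0.1234, Error: 1.23%"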