def train(cl, dataset):
    """Train ``cl``'s model for 5 epochs and sanity-check the run.

    Snapshots the model's weight tensors before training, trains, then
    prints the name of every weight tensor that the optimization actually
    modified.  Finally evaluates on the test set, prints one prediction,
    and plots a few samples plus the training history.

    Parameters
    ----------
    cl: object
        a pynet-style classifier exposing ``model``, ``training`` and
        ``testing``.
    dataset: object
        currently unused; kept for interface compatibility with callers.

    Notes
    -----
    Relies on the module-level ``manager``, ``data``, ``np``,
    ``plot_data`` and ``plot_history`` names.
    """
    # Snapshot the weights BEFORE training.  ``state_dict`` returns
    # references to the live parameter tensors and the optimizer updates
    # them in place, so without ``clone()`` this snapshot would silently
    # track the updates and the comparison below could never fire.
    state = dict(
        (key, val.clone())
        for key, val in cl.model.state_dict().items()
        if key.endswith(".weight"))
    test_history, train_history = cl.training(
        manager=manager,
        nb_epochs=5,
        checkpointdir=None,
        fold_index=0,
        with_validation=False)
    train_state = dict(
        (key, val)
        for key, val in cl.model.state_dict().items()
        if key.endswith(".weight"))
    # Report every weight tensor that training modified.
    for key, val in state.items():
        if not np.allclose(val, train_state[key]):
            print("--", key)

    # Evaluate on the test set and inspect a single prediction.
    idx = 0
    y_pred_prob, X, y_true, loss, values = cl.testing(
        manager=manager,
        with_logit=True,
        predict=False)
    y_pred = np.argmax(y_pred_prob, axis=1)
    print(" ** true label      : ", y_true[idx])
    print(" ** predicted label : ", y_pred[idx])
    titles = ["{0}-{1}".format(data.labels[it1], data.labels[it2])
              for it1, it2 in zip(y_pred, y_true)]
    plot_data(X, labels=titles, nb_samples=5)
    plot_history(train_history)
# Example #2
# Halve the learning rate whenever the monitored quantity stops
# improving for more than 4 epochs, never dropping below 1e-7.
scheduler = lr_scheduler.ReduceLROnPlateau(
    optimizer=net.optimizer,
    mode="min",
    factor=0.5,
    patience=4,
    verbose=True,
    min_lr=1e-7)
# Run the optimization: a single epoch under CI, a long run otherwise.
# No checkpoint directory is given, so nothing is saved to disk.
train_history, valid_history = net.training(
    manager=manager,
    nb_epochs=(1 if "CI_MODE" in os.environ else 150000),
    checkpointdir=None,
    fold_index=0,
    scheduler=scheduler,
    with_validation=True)
for history in (train_history, valid_history):
    print(history)
plot_history(train_history)

#############################################################################
# Testing
# -------
#
# Finally use the testing set and check the results.

y_pred, X, y_true, loss, values = net.testing(
    manager=manager,
    with_logit=False,
    predict=False,
    concat_layer_outputs=["flow"])
print(y_pred.shape, X.shape, y_true.shape)
# Example #3
# Train for 3 epochs, checkpointing each epoch under /tmp/pynet so the
# optimization history can be reloaded later.
test_history, train_history = cl.training(
    manager=manager,
    nb_epochs=3,
    checkpointdir="/tmp/pynet",
    fold_index=0,
    with_validation=True)

#############################################################################
# You can reload the optimization history at any time and any step.

from pprint import pprint
from pynet.history import History
from pynet.plotting import plot_history

history = History.load("/tmp/pynet/train_0_epoch_2.pkl")
print(history)
plot_history(history)

#############################################################################
# And now predict the labels on the test set.

import numpy as np
from sklearn.metrics import classification_report
from pynet.plotting import plot_data

y_pred, X, y_true, loss, values = cl.testing(
    manager=manager,
    with_logit=True,
    predict=True)
pprint(data.labels)
print(classification_report(y_true, y_pred, target_names=data.labels.values()))
titles = [
    "{0}-{1}".format(data.labels[it1], data.labels[it2])