def main():
    # data loaders
    tr_loader, va_loader, te_loader, _ = get_train_val_test_loaders(
        num_classes=config('autoencoder.classifier.num_classes'))

    ae_classifier = AutoencoderClassifier(config('autoencoder.ae_repr_dim'),
                                          config('autoencoder.classifier.num_classes'))
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(ae_classifier.parameters(),
                                 lr=config('autoencoder.classifier.learning_rate'))

    # freeze the weights of the encoder
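    # (fc1 and fc2 are assumed to be the encoder layers; only the classifier head trains)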
    for name, param in ae_classifier.named_parameters():
        if 'fc1.' in name or 'fc2.' in name:
            param.requires_grad = False

    # Attempt to restore the latest checkpoint if one exists
    print('Loading autoencoder...')
    ae_classifier, _, _ = restore_checkpoint(ae_classifier,
                                             config('autoencoder.checkpoint'), force=True, pretrain=True)
    print('Loading autoencoder classifier...')
    ae_classifier, start_epoch, stats = restore_checkpoint(ae_classifier,
                                                           config('autoencoder.classifier.checkpoint'))

    axes = utils.make_cnn_training_plot(name='Autoencoder Classifier')

    # Evaluate the randomly initialized model
    _evaluate_epoch(axes, tr_loader, va_loader, ae_classifier, criterion,
                    start_epoch, stats)

    # Loop over the entire dataset multiple times
    for epoch in range(start_epoch, config('autoencoder.classifier.num_epochs')):
        # Train model
        _train_epoch(tr_loader, ae_classifier, criterion, optimizer)

        # Evaluate model
        _evaluate_epoch(axes, tr_loader, va_loader, ae_classifier, criterion,
                        epoch + 1, stats)

        # Save model parameters
        save_checkpoint(ae_classifier, epoch + 1,
                        config('autoencoder.classifier.checkpoint'), stats)

    print('Finished Training')
    with torch.no_grad():
        y_true, y_pred = [], []
        for X, y in va_loader:
            output = ae_classifier(X)
            predicted = predictions(output.data)
            y_true.extend(y)
            y_pred.extend(predicted)
        print("Validation data accuracies:")
        print(confusion_matrix(y_true, y_pred))


    # Save figure and keep plot open
    utils.save_cnn_training_plot(name='ae_clf')
    utils.hold_training_plot()
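Note: the predictions() helper used above is external to this listing. A minimal sketch consistent with how it is called (raw logits in, predicted class indices out) might look as follows; the argmax behavior is an assumption:

import torch

def predictions(logits):
    # Assumed behavior, inferred from calls like predictions(output.data):
    # return the most probable class along the class dimension.
    return torch.argmax(logits, dim=1)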
Example #2
def main():
    # Data loaders
    tr_loader, va_loader, te_loader, _ = get_train_val_test_loaders(
        num_classes=config('cnn.num_classes'))

    # Model
    model = CNN()

    # TODO: define loss function, and optimizer
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-4)
    #

    print('Number of float-valued parameters:', count_parameters(model))

    # Attempt to restore the latest checkpoint if one exists
    print('Loading cnn...')
    model, start_epoch, stats = restore_checkpoint(model,
                                                   config('cnn.checkpoint'))

    axes = utils.make_cnn_training_plot()

    # Evaluate the randomly initialized model
    _evaluate_epoch(axes, tr_loader, va_loader, model, criterion, start_epoch,
                    stats)

    # Loop over the entire dataset multiple times
    for epoch in range(start_epoch, config('cnn.num_epochs')):
        # Train model
        _train_epoch(tr_loader, model, criterion, optimizer)

        # Evaluate model
        _evaluate_epoch(axes, tr_loader, va_loader, model, criterion,
                        epoch + 1, stats)

        # Save model parameters
        save_checkpoint(model, epoch + 1, config('cnn.checkpoint'), stats)

    print('Finished Training')

    y_true, y_pred = [], []
    correct, total = 0, 0
    running_loss = []
    for X, y in va_loader:
        with torch.no_grad():
            output = model(X)
            predicted = predictions(output.data)
            y_true.extend(y)
            y_pred.extend(predicted)
            total += y.size(0)
            correct += (predicted == y).sum().item()
            running_loss.append(criterion(output, y).item())
    print("Validation data accuracies:")
    print(confusion_matrix(y_true, y_pred))

    # Save figure and keep plot open
    utils.save_cnn_training_plot()
    utils.hold_training_plot()
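Note: confusion_matrix is not defined in these snippets. Its call signature matches scikit-learn's, so the snippets presumably rely on an import along the lines of:

from sklearn.metrics import confusion_matrix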
Example #3
def main():
    # Data loaders
    tr_loader, va_loader, te_loader, get_semantic_labels = get_train_val_test_loaders(
        num_classes=config('cnn.num_classes'))

    # Model
    model = CNN()

    # TODO: define loss function, and optimizer
    params = list(model.conv1.parameters()) + list(
        model.conv2.parameters()) + list(model.conv3.parameters())
    params = params + list(model.fc1.parameters()) + list(
        model.fc2.parameters()) + list(model.fc3.parameters())
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(params, lr=0.0001)
    #

    print('Number of float-valued parameters:', count_parameters(model))

    # Attempt to restore the latest checkpoint if one exists
    print('Loading cnn...')
    model, start_epoch, stats = restore_checkpoint(model,
                                                   config('cnn.checkpoint'))

    fig, axes = utils.make_cnn_training_plot()

    # Evaluate the randomly initialized model
    _evaluate_epoch(axes, tr_loader, va_loader, model, criterion, start_epoch,
                    stats)

    # Loop over the entire dataset multiple times
    for epoch in range(start_epoch, config('cnn.num_epochs')):
        # Train model
        _train_epoch(tr_loader, model, criterion, optimizer)

        # Evaluate model
        _evaluate_epoch(axes, tr_loader, va_loader, model, criterion,
                        epoch + 1, stats)

        # Save model parameters
        save_checkpoint(model, epoch + 1, config('cnn.checkpoint'), stats)

    print('Finished Training')

    model, _, _ = restore_checkpoint(model, config('cnn.checkpoint'))

    dataset = get_data_by_label(va_loader)
    evaluate_cnn(dataset, model, criterion, get_semantic_labels)

    # Save figure and keep plot open
    utils.save_cnn_training_plot(fig)
    utils.hold_training_plot()
Example #4
def main():
    print('building model...')
    images, labels = supervised_placeholders()
    logits = cnn(images)
    acc = accuracy(labels, logits)
    loss = cross_entropy_loss(labels, logits)
    train_op = supervised_optimizer(loss)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver, save_path = utils.restore(sess, get('cnn.checkpoint'))
        clothes = ClothesDataset(get('cnn.num_classes'))
        train_cnn(sess, saver, save_path, images, labels, loss, train_op, acc,
                  clothes)
        print('saving trained model...\n')
        saver.save(sess, save_path)
        utils.hold_training_plot()
Example #5
def main():
    print('building model...')
    images, labels, keep_prob = placeholders()
    logits = cnn(images, keep_prob)
    acc = accuracy(labels, logits)
    loss = cross_entropy_loss(labels, logits)
    train_op = optimizer(loss)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver, save_path = utils.restore(sess, './checkpoints/cnn/')
        dataset = Dataset()
        report_test_accuracy(sess, images, labels, keep_prob, acc, dataset)
        train_cnn(sess, saver, save_path, images, labels, keep_prob, loss,
                  train_op, acc, dataset)
        print('saving trained model...\n')
        saver.save(sess, save_path)
        utils.hold_training_plot()
Example #6
def main():
    # Data loaders
    tr_loader, va_loader, te_loader, _ = get_train_val_test_loaders(
        num_classes=config('cnn.num_classes'))

    # Model
    model = CNN()

    # TODO: define loss function, and optimizer
    import torch.optim as op
    import torch.nn as nn
    criterion = nn.CrossEntropyLoss()
    optimizer = op.Adam(model.parameters(), lr=config('cnn.learning_rate'))
    #

    print('Number of float-valued parameters:', count_parameters(model))

    # Attempt to restore the latest checkpoint if one exists
    print('Loading cnn...')
    model, start_epoch, stats = restore_checkpoint(model,
        config('cnn.checkpoint'))

    axes = utils.make_cnn_training_plot()

    # Evaluate the randomly initialized model
    _evaluate_epoch(axes, tr_loader, va_loader, model, criterion, start_epoch,
        stats)
    
    # Loop over the entire dataset multiple times
    for epoch in range(start_epoch, config('cnn.num_epochs')):
        # Train model
        _train_epoch(tr_loader, model, criterion, optimizer)
        
        # Evaluate model
        _evaluate_epoch(axes, tr_loader, va_loader, model, criterion, epoch+1,
            stats)

        # Save model parameters
        save_checkpoint(model, epoch+1, config('cnn.checkpoint'), stats)

    print('Finished Training')

    # Save figure and keep plot open
    utils.save_cnn_training_plot()
    utils.hold_training_plot()
Example #7
def main(ff_type, bi, input_size):
    print('======building model...======')
    if ff_type == 'magnitude':
        images, labels = regressor_placeholders(input_size * input_size, 1)
        if bi == 'true':
            logits = birnn(images, dim_out=1, partition=ff_type)
        else:
            logits = rnn(images,
                         lstm_size=256,
                         dim_out=1,
                         num_layers=3,
                         partition=ff_type)
        loss = mean_squared_error(labels, logits)
    else:
        images, labels = regressor_placeholders(input_size * input_size * 2, 2)
        if bi == 'true':
            logits = birnn(images, dim_out=2, partition=ff_type)
        else:
            logits = rnn(images,
                         lstm_size=256,
                         dim_out=2,
                         num_layers=2,
                         partition=ff_type)
        loss = mean_squared_error(labels, logits)
    acc = regressor_accuracy(labels, logits, partition=ff_type)
    train_op = unsupervised_optimizer(loss, lr=3e-4)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        if bi == 'true':
            saver, save_path = utils.restore(
                sess, get('birnn.' + ff_type + '_checkpoint'))
            data = RNNDataSet(input_size=input_size,
                              partition=ff_type,
                              bidirection=True)
        else:
            saver, save_path = utils.restore(
                sess, get('rnn.' + ff_type + '_checkpoint'))
            data = RNNDataSet(input_size=input_size, partition=ff_type)
        train_rnn(sess, saver, save_path, images, labels, loss, train_op, acc,
                  data)
        print('=======saving trained model...======\n')
        saver.save(sess, save_path)
        utils.hold_training_plot()
Example #8
def main():
    # data loaders
    tr_loader, va_loader, te_loader, _ = get_train_val_test_loaders(
        num_classes=config('autoencoder.num_classes'))

    # Model
    model = Autoencoder(config('autoencoder.ae_repr_dim'))

    # TODO: define loss function, and optimizer
    criterion = torch.nn.MSELoss()
    params = list(model.pool.parameters()) + list(
        model.fc1.parameters()) + list(model.fc2.parameters())
    params = params + list(model.fc3.parameters()) + list(
        model.deconv.parameters())
    optimizer = torch.optim.Adam(params, lr=0.0001)
    #

    # Attempt to restore the latest checkpoint if one exists
    print('Loading autoencoder...')
    model, start_epoch, stats = restore_checkpoint(
        model, config('autoencoder.checkpoint'))

    fig, axes = utils.make_ae_training_plot()

    # Loop over the entire dataset multiple times
    for epoch in range(start_epoch, config('autoencoder.num_epochs')):
        # Evaluate model
        _evaluate_epoch(axes, tr_loader, va_loader, model, criterion,
                        epoch + 1, stats)

        # Train model
        _train_epoch(tr_loader, model, criterion, optimizer)
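        # note: the test loader is also used for training below; since the
        # autoencoder is unsupervised (MSE reconstruction), no labels leak,
        # but this is an unusual choice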
        _train_epoch(te_loader, model, criterion, optimizer)

        # Save model parameters
        save_checkpoint(model, epoch + 1, config('autoencoder.checkpoint'),
                        stats)

    print('Finished Training')

    # Save figure and keep plot open
    utils.save_ae_training_plot(fig)
    utils.hold_training_plot()
Example #9
def main():
    # data loaders
    tr_loader, va_loader, te_loader, _ = get_train_val_test_loaders(
        num_classes=config('challenge.num_classes'))

    # TODO: define model, loss function, and optimizer
    model = Challenge()
    params = list(model.conv1.parameters()) + list(model.conv2.parameters()) + list(model.conv3.parameters())
    params = params + list(model.fc1.parameters()) + list(model.fc2.parameters()) + list(model.fc3.parameters())
    params = params + list(model.fc4.parameters()) + list(model.fc5.parameters())
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(params, lr=config('challenge.learning_rate'))
    #

    # Attempt to restore the latest checkpoint if one exists
    print('Loading challenge...')
    model, start_epoch, stats = restore_checkpoint(model,
        config('challenge.checkpoint'))

    fig, axes = utils.make_cnn_training_plot(name='Challenge')

    # Evaluate model
    _evaluate_epoch(axes, tr_loader, va_loader, model, criterion, start_epoch,
        stats)

    # Loop over the entire dataset multiple times
    for epoch in range(start_epoch, config('challenge.num_epochs')):
        # Train model
        _train_epoch(tr_loader, model, criterion, optimizer)

        # Evaluate model
        _evaluate_epoch(axes, tr_loader, va_loader, model, criterion, epoch+1,
            stats)

        # Save model parameters
        save_checkpoint(model, epoch+1, config('challenge.checkpoint'), stats)

    print('Finished Training')

    # Save figure and keep plot open
    utils.save_cnn_training_plot(fig, name='challenge')
    utils.hold_training_plot()
Example #10
def train(tr_loader, va_loader, te_loader, model, model_name, num_layers=0):
    """Train transfer learning model."""
    # TODO: define loss function and optimizer
    criterion = torch.nn.CrossEntropyLoss()  # assumption: cross-entropy, as in the sibling examples
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)  # assumption: Adam with a typical lr
    #

    print("Loading target model with", num_layers, "layers frozen")
    model, start_epoch, stats = restore_checkpoint(model, model_name)

    axes = utils.make_training_plot("Target Training")

    evaluate_epoch(
        axes,
        tr_loader,
        va_loader,
        te_loader,
        model,
        criterion,
        start_epoch,
        stats,
        include_test=True,
    )

    # initial val loss for early stopping
    prev_val_loss = stats[0][1]
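    # (assumption: each stats row stores the validation loss at index 1)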

    # TODO: patience for early stopping
    patience = 5       # assumption: a small patience, matching the other examples
    curr_patience = 0
    #

    # Loop over the entire dataset multiple times
    epoch = start_epoch
    while curr_patience < patience:
        # Train model
        train_epoch(tr_loader, model, criterion, optimizer)

        # Evaluate model
        evaluate_epoch(
            axes,
            tr_loader,
            va_loader,
            te_loader,
            model,
            criterion,
            epoch + 1,
            stats,
            include_test=True,
        )

        # Save model parameters
        save_checkpoint(model, epoch + 1, model_name, stats)

        curr_patience, prev_val_loss = early_stopping(
            stats, curr_patience, prev_val_loss
        )
        epoch += 1

    print("Finished Training")

    # Save figure and keep plot open
    utils.save_tl_training_plot(num_layers)
    utils.hold_training_plot()
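Note: the early_stopping() helper is called throughout this listing but never shown. A minimal sketch, assuming each stats row stores the validation loss at index 1 (the same layout implied by prev_val_loss = stats[0][1]):

def early_stopping(stats, curr_patience, prev_val_loss):
    # Most recent validation loss (assumed row layout: loss at index 1).
    val_loss = stats[-1][1]
    if val_loss >= prev_val_loss:
        curr_patience += 1        # no improvement this epoch
    else:
        curr_patience = 0         # improvement: reset the patience counter
        prev_val_loss = val_loss  # and remember the new best loss
    return curr_patience, prev_val_loss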
Example #11
def main():
    filename = config("savefilename")
    lr = 0.0001

    this_config = dict(csv_file=config("csv_file"),
                       img_path=config("image_path"),
                       learning_rate=lr,
                       num_classes=4,
                       batchsize=64)

    wandb.init(project="prob_fix", name=filename, config=this_config)

    tr_loader, va_loader, te_loader, _ = get_train_val_test_loaders(
        task="default", batch_size=config("net.batch_size"))

    print('Successfully loaded data!')

    model = Source()
    criterion = torch.nn.CrossEntropyLoss()

    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    print("Number of float-valued parameters:", count_parameters(model))

    model, start_epoch, stats = restore_checkpoint(model,
                                                   config("cnn.checkpoint"))

    axes = utils.make_training_plot()
    prolist = []

    evaluate_epoch(axes,
                   tr_loader,
                   va_loader,
                   te_loader,
                   model,
                   criterion,
                   start_epoch,
                   stats,
                   prolist,
                   multiclass=True)

    # initial val loss for early stopping
    prev_val_loss = stats[0][1]

    # TODO: define patience for early stopping
    patience = 5
    curr_patience = 0
    #

    # Loop over the entire dataset multiple times
    # for epoch in range(start_epoch, config('cnn.num_epochs')):
    epoch = start_epoch

    lowest_val_loss = float("inf")  # track the best validation loss seen so far
    lowest_round = epoch
    while curr_patience < patience:
        # Train model
        train_epoch(tr_loader, model, criterion, optimizer)

        # Evaluate model
        evaluate_epoch(axes,
                       tr_loader,
                       va_loader,
                       te_loader,
                       model,
                       criterion,
                       epoch + 1,
                       stats,
                       prolist,
                       multiclass=True)

        # Save model parameters
        save_checkpoint(model, epoch + 1, config("net.checkpoint"), stats)

        # update early stopping parameters
        curr_patience, prev_val_loss = early_stopping(stats, curr_patience,
                                                      prev_val_loss)

        epoch += 1
        if prev_val_loss < lowest_val_loss:
            lowest_val_loss = prev_val_loss
            lowest_round = epoch

    pickle.dump(prolist, open("base_pro.pck", "wb"))
    print("Finished Training")
    # Save figure and keep plot open
    print("the lowest round: ", lowest_round)
    # utils.save_cnn_training_plot()
    # utils.save_cnn_other()
    utils.hold_training_plot()
Example #12
def main():
    # data loaders
    tr_loader, va_loader, te_loader, _ = get_train_val_test_loaders(
        num_classes=config('autoencoder.classifier.num_classes'))

    ae_classifier = AutoencoderClassifier(config('autoencoder.ae_repr_dim'),
        config('autoencoder.classifier.num_classes'))
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(ae_classifier.parameters(),
        lr=config('autoencoder.classifier.learning_rate'))

    # freeze the weights of the encoder
    for name, param in ae_classifier.named_parameters():
        if 'fc1.' in name or 'fc2.' in name:
            param.requires_grad = False

    # Attempt to restore the latest checkpoint if one exists
    print('Loading autoencoder...')
    ae_classifier, _, _ = restore_checkpoint(ae_classifier,
        config('autoencoder.checkpoint'), force=True, pretrain=True)
    print('Loading autoencoder classifier...')
    ae_classifier, start_epoch, stats = restore_checkpoint(ae_classifier,
        config('autoencoder.classifier.checkpoint'))

    fig, axes = utils.make_cnn_training_plot(name='Autoencoder Classifier')

    # Evaluate the randomly initialized model
    _evaluate_epoch(axes, tr_loader, va_loader, ae_classifier, criterion,
        start_epoch, stats)

    # Loop over the entire dataset multiple times
    for epoch in range(start_epoch, config('autoencoder.classifier.num_epochs')):
        # Train model
        _train_epoch(tr_loader, ae_classifier, criterion, optimizer)

        # Evaluate model
        _evaluate_epoch(axes, tr_loader, va_loader, ae_classifier, criterion,
            epoch+1, stats)

        # per-class validation accuracy (computed on the first epoch only)
        if epoch == start_epoch:
            r = [[], [], [], [], []]
            for X, y in va_loader:
                with torch.no_grad():
                    output = ae_classifier(X)
                    predict_res = predictions(output.data)
                    for y_sub, pred_out in zip(y, predict_res):
                        r[y_sub.item()].append(pred_out == y_sub)

            for i in range(5):
                print("Class", i, "gives accuracy", np.mean(np.array(r[i])))

        # Save model parameters
        save_checkpoint(ae_classifier, epoch+1,
            config('autoencoder.classifier.checkpoint'), stats)

    print('Finished Training')

    # Save figure and keep plot open
    utils.save_cnn_training_plot(fig, name='ae_clf')
    utils.hold_training_plot()
Example #13
def main():
    # Data loaders
    if check_for_augmented_data("./data"):
        tr_loader, va_loader, te_loader, _ = get_train_val_test_loaders(
            task="augment",
            batch_size=config("challenge.batch_size"),
        )
    else:
        tr_loader, va_loader, te_loader, _ = get_train_val_test_loaders(
            task="target",
            batch_size=config("challenge.batch_size"),
        )
    # Model
    model = Challenge()

    # TODO: define loss function, and optimizer
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    #

    # Attempt to restore the latest checkpoint if one exists
    print("Loading challenge...")
    model, start_epoch, stats = restore_checkpoint(model, config("challenge.checkpoint"))

    axes = utils.make_cnn_training_plot()

    # Evaluate the randomly initialized model
    _evaluate_epoch(
        axes, tr_loader, va_loader, te_loader, model, criterion, start_epoch, stats
    )

    # initial val loss for early stopping
    prev_val_loss = stats[0][1]

    # TODO: define patience for early stopping
    patience = 5
    curr_patience = 0
    #

    # Loop over the entire dataset multiple times
    epoch = start_epoch
    while curr_patience < patience:
        # Train model
        _train_epoch(tr_loader, model, criterion, optimizer)

        # Evaluate model
        _evaluate_epoch(
            axes, tr_loader, va_loader, te_loader, model, criterion, epoch + 1, stats
        )

        # Save model parameters
        save_checkpoint(model, epoch + 1, config("challenge.checkpoint"), stats)

        # TODO: implement early stopping
        curr_patience, prev_val_loss = early_stopping(
            stats, curr_patience, prev_val_loss
        )
        #
        epoch += 1
    print("Finished Training")
    # Save figure and keep plot open
    utils.save_challenge_training_plot()
    utils.hold_training_plot()
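Note: check_for_augmented_data() is also external. A plausible sketch, in which the "augmented" subdirectory name is an assumption, simply tests whether an augmented dataset has already been generated under the data root:

import os

def check_for_augmented_data(data_dir):
    # Assumption: the augmentation step writes its output to <data_dir>/augmented.
    return os.path.isdir(os.path.join(data_dir, "augmented"))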
Example #14
def main():
    """Train source model on multiclass data."""
    # Data loaders
    tr_loader, va_loader, te_loader, _ = get_train_val_test_loaders(
        task="source",
        batch_size=config("source.batch_size"),
    )

    # Model
    model = Source()

    # TODO: define loss function, and optimizer
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=1e-3,
                                 weight_decay=0.01)
    #

    print("Number of float-valued parameters:", count_parameters(model))

    # Attempt to restore the latest checkpoint if one exists
    print("Loading source...")
    model, start_epoch, stats = restore_checkpoint(model,
                                                   config("source.checkpoint"))

    axes = utils.make_training_plot("Source Training")

    # Evaluate the randomly initialized model
    evaluate_epoch(
        axes,
        tr_loader,
        va_loader,
        te_loader,
        model,
        criterion,
        start_epoch,
        stats,
        multiclass=True,
    )

    # initial val loss for early stopping
    prev_val_loss = stats[0][1]

    # TODO: patience for early stopping
    patience = 10
    curr_patience = 0
    #

    # Loop over the entire dataset multiple times
    epoch = start_epoch
    while curr_patience < patience:
        # Train model
        train_epoch(tr_loader, model, criterion, optimizer)

        # Evaluate model
        evaluate_epoch(
            axes,
            tr_loader,
            va_loader,
            te_loader,
            model,
            criterion,
            epoch + 1,
            stats,
            multiclass=True,
        )

        # Save model parameters
        save_checkpoint(model, epoch + 1, config("source.checkpoint"), stats)

        curr_patience, prev_val_loss = early_stopping(stats, curr_patience,
                                                      prev_val_loss)
        epoch += 1

    # Save figure and keep plot open
    print("Finished Training")
    utils.save_source_training_plot()
    utils.hold_training_plot()
Example #15
def main():
    """Train CNN and show training plots."""
    # Data loaders
    if check_for_augmented_data("./data"):
        tr_loader, va_loader, te_loader, _ = get_train_val_test_loaders(
            task="target", batch_size=config("cnn.batch_size"), augment=True)
    else:
        tr_loader, va_loader, te_loader, _ = get_train_val_test_loaders(
            task="target",
            batch_size=config("cnn.batch_size"),
        )
    # Model
    model = Target()

    # TODO: define loss function, and optimizer
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    #

    print("Number of float-valued parameters:", count_parameters(model))

    # Attempt to restore the latest checkpoint if one exists
    print("Loading cnn...")
    model, start_epoch, stats = restore_checkpoint(model,
                                                   config("cnn.checkpoint"))

    axes = utils.make_training_plot()

    # Evaluate the randomly initialized model
    evaluate_epoch(axes, tr_loader, va_loader, te_loader, model, criterion,
                   start_epoch, stats)

    # initial val loss for early stopping
    prev_val_loss = stats[0][1]

    # TODO: define patience for early stopping
    patience = 5
    curr_patience = 0
    #

    # Loop over the entire dataset multiple times
    # for epoch in range(start_epoch, config('cnn.num_epochs')):
    epoch = start_epoch
    while curr_patience < patience:
        # Train model
        train_epoch(tr_loader, model, criterion, optimizer)

        # Evaluate model
        evaluate_epoch(axes, tr_loader, va_loader, te_loader, model, criterion,
                       epoch + 1, stats)

        # Save model parameters
        save_checkpoint(model, epoch + 1, config("cnn.checkpoint"), stats)

        # update early stopping parameters
        curr_patience, prev_val_loss = early_stopping(stats, curr_patience,
                                                      prev_val_loss)

        epoch += 1
    print("Finished Training")
    # Save figure and keep plot open
    utils.save_cnn_training_plot()
    utils.hold_training_plot()
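Note: every example relies on save_checkpoint() and restore_checkpoint(). A minimal sketch of the save side, assuming each checkpoint bundles the model weights, the epoch number, and the stats list (the file-name pattern is an assumption):

import os
import torch

def save_checkpoint(model, epoch, checkpoint_dir, stats):
    # Bundle everything restore_checkpoint would need to resume training.
    state = {
        "epoch": epoch,
        "state_dict": model.state_dict(),
        "stats": stats,
    }
    os.makedirs(checkpoint_dir, exist_ok=True)
    filename = os.path.join(checkpoint_dir,
                            "epoch={}.checkpoint.pth.tar".format(epoch))
    torch.save(state, filename)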