Esempio n. 1
0
def create_plots(trainer: Trainer, name: str):
    """Evaluate on the test set and save loss/accuracy curves.

    Renders a two-panel figure (cross-entropy loss, accuracy) from the
    trainer's recorded histories and writes it to plots/<name>_final_.png.

    Args:
        trainer: trained Trainer holding model, dataloaders, loss criterion
            and the per-step train/validation history mappings.
        name: filename prefix for the saved figure.
    """
    test_loss, test_acc = compute_loss_and_accuracy(trainer.dataloader_test,
                                                    trainer.model,
                                                    trainer.loss_criterion)
    print(test_loss, test_acc)
    plot_path = pathlib.Path("plots")
    plot_path.mkdir(exist_ok=True)
    # Save plots and show them
    plt.figure(figsize=(20, 8))
    plt.subplot(1, 2, 1)
    plt.title("Cross Entropy Loss")
    # npoints_to_average smooths the noisy per-step training loss curve.
    utils.plot_loss(trainer.train_history["loss"],
                    label="Training loss",
                    npoints_to_average=10)
    utils.plot_loss(trainer.validation_history["loss"],
                    label="Validation loss")
    plt.legend()
    plt.subplot(1, 2, 2)
    plt.title("Accuracy")
    utils.plot_loss(trainer.validation_history["accuracy"],
                    label="Validation Accuracy")
    utils.plot_loss(trainer.train_history["accuracy"],
                    label="Training Accuracy")

    # BUGFIX: the original used .popitem(last=True), which *removes* the final
    # entry from trainer.train_history — plotting silently corrupted trainer
    # state. Peek at the last (step, value) pair without mutating instead;
    # next(reversed(d.items())) returns the same (key, value) tuple.
    print(next(reversed(trainer.train_history["accuracy"].items())), " train acc")
    print(next(reversed(trainer.train_history["loss"].items())), " train loss")
    plt.legend()
    plt.savefig(plot_path.joinpath(f"{name}_final_.png"))
    plt.show()
Esempio n. 2
0
 def validation_step(self) -> None:
     """
         Compute loss/accuracy on the validation set and record it.

         Stores the result in validation_history keyed by the current
         global step, prints a progress line, and restores train mode.
         (Only the validation split is evaluated here, not all three
         datasets as the original docstring claimed.)
     """
     # Disable training-specific behavior (e.g. dropout) during evaluation.
     self.model.eval()
     validation_loss, validation_acc = compute_loss_and_accuracy(
         self.dataloader_val, self.model, self.loss_criterion
     )
     # Histories are keyed by global step so curves can be plotted over steps.
     self.validation_history["loss"][self.global_step] = validation_loss
     self.validation_history["accuracy"][self.global_step] = validation_acc
     # Wall-clock time since training started; used for the throughput figure.
     used_time = time.time() - self.start_time
     print(
         f"Epoch: {self.epoch:>1}",
         f"Batches per seconds: {self.global_step / used_time:.2f}",
         f"Global step: {self.global_step:>6}",
         f"Validation Loss: {validation_loss:.2f}",
         f"Validation Accuracy: {validation_acc:.3f}",
         sep=", ")
     # Switch back to train mode so subsequent training steps behave normally.
     self.model.train()
Esempio n. 3
0
    plt.savefig(plot_path.joinpath(f"{name}_plot.png"))
    plt.show()


if __name__ == "__main__":
    # Seed every random generator (parameters, shuffling, etc.) so runs are
    # repeatable; change the seed to check how stable the result is.
    utils.set_seed(0)
    epochs = 10
    batch_size = 64
    learning_rate = 5e-2
    early_stop_count = 4
    dataloaders = load_cifar10(batch_size)
    model = ExampleModel(image_channels=3, num_classes=10)
    trainer = Trainer(batch_size, learning_rate, early_stop_count, epochs,
                      model, dataloaders)
    trainer.train()
    create_plots(trainer, "task2")
    # Measure final accuracy on every split, in train/val/test order.
    final_accuracies = []
    for loader in (trainer.dataloader_train,
                   trainer.dataloader_val,
                   trainer.dataloader_test):
        _, accuracy = compute_loss_and_accuracy(loader,
                                                trainer.model,
                                                trainer.loss_criterion)
        final_accuracies.append(accuracy)
    train_acc, val_acc, test_acc = final_accuracies
    print("Train final accuracy = ", train_acc.item())
    print("Validation final accuracy = ", val_acc.item())
    print("Test final accuracy = ", test_acc.item())
Esempio n. 4
0
    # You can try to change this and check if you still get the same result!

    # --- Hyperparameters for this run ---
    utils.set_seed(0)
    epochs = 10
    batch_size = 64
    learning_rate = 5e-2
    early_stop_count = 4
    # load_cifar10 yields the loaders in (train, val, test) order
    # (see the tuple unpacking in the commented line below).
    dataloaders = load_cifar10(batch_size)
    #train_set,val_set,test_set = dataloaders
    model = ExampleModel(image_channels=3, num_classes=10)
    trainer = Trainer(
        batch_size,
        learning_rate,
        early_stop_count,
        epochs,
        model,
        dataloaders
    )
    # Run the full training loop (presumably with early stopping after
    # `early_stop_count` non-improving validation checks -- confirm in Trainer).
    trainer.train()
    #trainer.load_best_model()
    test_set = trainer.dataloader_test
    val_set = trainer.dataloader_val
    train_set = trainer.dataloader_train

    #create_plots(trainer, "task2_1")
    # Switch to eval mode before measuring final test performance.
    model.eval()
    # NOTE(review): `model` is the same object the trainer optimized, so this
    # reports the final weights' test metrics (load_best_model is commented out,
    # so these are not necessarily the best-validation weights).
    print(compute_loss_and_accuracy(dataloader=test_set,model = model, loss_criterion=nn.CrossEntropyLoss()))
    


Esempio n. 5
0
    # You can try to change this and check if you still get the same result!
    utils.set_seed(0)
    # Hyperparameters for the baseline run.
    epochs = 10
    batch_size = 64
    learning_rate = 5e-2
    early_stop_count = 4
    dataloaders = load_cifar10(batch_size)
    model = ExampleModel(image_channels=3, num_classes=10)
    trainer = Trainer(batch_size, learning_rate, early_stop_count, epochs,
                      model, dataloaders)
    trainer.train()

    # Save loss/accuracy curves for this run (written under plots/).
    create_plots(trainer, "task2")

    # Task 2b
    # Presumably restores the best-validation checkpoint before the final
    # evaluation -- confirm against Trainer.load_best_model.
    trainer.load_best_model()
    # Loaders come back in (train, val, test) order.
    dataloader_train, dataloader_val, dataloader_test = dataloaders

    train_loss, train_accuracy = compute_loss_and_accuracy(
        dataloader_train, trainer.model, nn.CrossEntropyLoss())
    val_loss, val_accuracy = compute_loss_and_accuracy(dataloader_val,
                                                       trainer.model,
                                                       nn.CrossEntropyLoss())
    test_loss, test_accuracy = compute_loss_and_accuracy(
        dataloader_test, trainer.model, nn.CrossEntropyLoss())

    # Tabs roughly align the accuracy values in the console output.
    print("\nAccuracies of best model")
    print("Training accuracy:\t", train_accuracy)
    print("Validation accuracy:\t", val_accuracy)
    print("Test accuracy:\t\t", test_accuracy)
Esempio n. 6
0
    # Hyperparameters shared by both trainers below.
    epochs = 10
    batch_size = 64
    learning_rate = 5e-2  # 5e-3
    early_stop_count = 10
    dataloaders = load_cifar10(batch_size)
    # Two variants of the same architecture, with and without batch
    # normalization (judging by the names -- confirm in the model classes).
    model = ConvModel1(image_channels=3, num_classes=10)
    model_nobatch = ConvModel1_nobatch(image_channels=3, num_classes=10)
    trainer = Trainer(batch_size, learning_rate, early_stop_count, epochs,
                      model, dataloaders)
    trainer.train()

    # Second trainer uses the same dataloaders for a direct comparison.
    trainer2 = Trainer(batch_size, learning_rate, early_stop_count, epochs,
                       model_nobatch, dataloaders)
    trainer2.train()
    # Final test metrics are reported for the first (batch-norm) model only.
    test_loss, test_acc = compute_loss_and_accuracy(trainer.dataloader_test,
                                                    trainer.model,
                                                    trainer.loss_criterion)
    print(test_loss, test_acc)
    plot_path = pathlib.Path("plots")
    plot_path.mkdir(exist_ok=True)
    # Save plots and show them
    plt.figure(figsize=(20, 8))
    plt.subplot(1, 2, 1)
    plt.title("Cross Entropy Loss")
    # npoints_to_average smooths the noisy per-step training loss curve.
    utils.plot_loss(trainer.train_history["loss"],
                    label="Training loss",
                    npoints_to_average=10)
    utils.plot_loss(trainer.validation_history["loss"],
                    label="Validation loss")

    utils.plot_loss(trainer2.train_history["loss"],
Esempio n. 7
0

if __name__ == "__main__":
    # Seed all random generators (parameters, shuffling, etc.) so the run is
    # reproducible; try another seed to check whether the result is stable.
    utils.set_seed(0)
    epochs = 10
    batch_size = 64
    learning_rate = 5e-2
    early_stop_count = 4
    dataloaders = load_cifar10(batch_size)
    optimizer = optim.SGD
    weight_decay = 0
    model = ExampleModel(image_channels=3, num_classes=10)
    trainer = Trainer(batch_size, learning_rate, early_stop_count, epochs,
                      model, dataloaders, weight_decay, optimizer)
    trainer.train()
    create_plots(trainer, "task3_1")
    dataloader_train, dataloader_val, dataloader_test = dataloaders
    model.eval()
    # Final accuracy on each split, computed in train/val/test order with a
    # fresh cross-entropy criterion per call.
    split_accuracies = []
    for loader in (dataloader_train, dataloader_val, dataloader_test):
        _, acc = compute_loss_and_accuracy(loader, model,
                                           torch.nn.CrossEntropyLoss())
        split_accuracies.append(acc)
    train_acc, val_acc, test_acc = split_accuracies

    print("train ", train_acc)
    print("val ", val_acc)
    print("test ", test_acc)
Esempio n. 8
0
        trainer1.dataloader_test, trainer1.model, trainer1.loss_criterion
    )
    print("Final train accuracy = ", train_acc.item())
    print("Final validation accuracy = ", val_acc.item())
    print("Final test accuracy = ", test_acc.item())"""

    # Network 2:
    print("Network 2:")
    model2 = Net2(image_channels=3, num_classes=10)
    trainer2 = Trainer(batch_size, learning_rate, early_stop_count, epochs,
                       model2, dataloaders)
    trainer2.train()
    # Presumably restores the best-validation checkpoint -- confirm in Trainer.
    trainer2.load_best_model()
    #create_plots(trainer2, "task3_net2")
    create_plots(trainer2, "task3_net2_improved")

    _, train_acc = compute_loss_and_accuracy(trainer2.dataloader_train,
                                             trainer2.model,
                                             trainer2.loss_criterion)
    # Dead code disabled via a bare string literal (a no-op at runtime).
    """_, val_acc = compute_loss_and_accuracy(
        trainer2.dataloader_val, trainer2.model, trainer2.loss_criterion
    )
    _, test_acc = compute_loss_and_accuracy(
        trainer2.dataloader_test, trainer2.model, trainer2.loss_criterion
    )"""
    print("Final train accuracy = ", train_acc.item())
    # NOTE(review): popitem() destructively removes the last history entry
    # while reading it. Harmless at the very end of the script, but the
    # histories are mutated afterwards.
    print("Final validation accuracy = ",
          trainer2.validation_history["accuracy"].popitem()[1].item())
    print("Final test accuracy = ",
          trainer2.test_history["accuracy"].popitem()[1].item())