Example #1
def train(model,
          optimizer,
          dataset,
          n_epoch,
          batch_size,
          use_gpu=True,
          scheduler=None,
          criterion=None):
    history = History()

    if criterion is None:
        criterion = nn.CrossEntropyLoss()

    dataset.transform = ToTensor()
    train_loader, val_loader = train_valid_loaders(dataset,
                                                   batch_size=batch_size)

    for i in range(n_epoch):
        start = time.time()
        do_epoch(criterion, model, optimizer, scheduler, train_loader, use_gpu)
        end = time.time()

        train_acc, train_loss = validate(model, train_loader, use_gpu)
        val_acc, val_loss = validate(model, val_loader, use_gpu)
        history.save(train_acc, val_acc, train_loss, val_loss,
                     optimizer.param_groups[0]['lr'])
        print(
            'Epoch {} - Train acc: {:.2f} - Val acc: {:.2f} - Train loss: {:.4f} - Val loss: {:.4f} - Training time: {:.2f}s'
            .format(i, train_acc, val_acc, train_loss, val_loss, end - start))

    return history
Example #2
def train(network, optimizer, dataset, n_epoch, batch_size, *, use_gpu=True, criterion=None, callbacks=None):
    """
    Entraîne un réseau de neurones PyTorch avec Poutyne. On suppose que la sortie du réseau est compatible avec
    la fonction cross-entropy de PyTorch pour calculer l'exactitude (accuracy).

    Args:
        network (nn.Module): Un réseau de neurones PyTorch
        optimizer (torch.optim.Optimizer): Un optimiseur PyTorch
        dataset (torch.utils.data.Dataset): Un jeu de données PyTorch
        n_epoch (int): Le nombre d'epochs d'entraînement désiré
        batch_size (int): La taille de batch désirée
        use_gpu (bool): Si on veut utiliser le GPU. Est vrai par défaut. Un avertissement est lancé s'il n'y a pas de
            GPU.
        criterion: Une fonction de perte compatible avec la cross-entropy de PyTorch.
        callbacks (List[poutyne.Callback]): Une liste de callbacks de Poutyne (utile pour les horaires d'entrainement
            entre autres).

    Returns:
        Retourne un objet de type `deeplib.history.History` contenant l'historique d'entraînement.
    """
    history_callback = HistoryCallback()
    callbacks = [history_callback] if callbacks is None else [history_callback] + callbacks

    dataset.transform = ToTensor()
    train_loader, valid_loader = train_valid_loaders(dataset, batch_size=batch_size)

    model = get_model(network, optimizer, criterion, use_gpu=use_gpu)
    model.fit_generator(train_loader,
                        valid_loader,
                        epochs=n_epoch,
                        progress_options=dict(coloring=False),
                        callbacks=callbacks)

    return history_callback.history
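A minimal usage sketch for this Poutyne-based variant. The names `load_mnist` and `MnistNet` are borrowed from Example #5 below, and the learning rate callback is one plausible choice, not part of the original:

# Illustrative usage only; load_mnist and MnistNet come from Example #5,
# and ExponentialLR is Poutyne's callback wrapper around the torch scheduler.
import torch.optim as optim
from poutyne import ExponentialLR

mnist_train, _ = load_mnist()
network = MnistNet()
optimizer = optim.SGD(network.parameters(), lr=0.1)

history = train(network,
                optimizer,
                mnist_train,
                n_epoch=5,
                batch_size=64,
                use_gpu=False,
                callbacks=[ExponentialLR(gamma=0.9)])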
Example #3
def train(model,
          optimizer,
          dataset,
          n_epoch,
          batch_size,
          use_gpu=True,
          scheduler=None,
          criterion=None):
    history = History()

    if criterion is None:
        criterion = nn.CrossEntropyLoss()

    # wanted_size is assumed to be defined at module level; the mean/std below
    # are the standard ImageNet normalization statistics.
    dataset.transform = transforms.Compose([
        transforms.Resize([wanted_size, wanted_size]),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    train_loader, val_loader = train_valid_loaders(dataset,
                                                   batch_size=batch_size)

    for i in range(n_epoch):
        start = time.time()
        do_epoch(criterion, model, optimizer, scheduler, train_loader, use_gpu)
        end = time.time()

        train_acc, train_loss, covid_recall_train, covid_accuracy_train = validate(
            model, train_loader, use_gpu)
        val_acc, val_loss, covid_recall_valid, covid_accuracy_valid = validate(
            model, val_loader, use_gpu)
        history.save(train_acc, val_acc, train_loss, val_loss,
                     optimizer.param_groups[0]['lr'], i, model,
                     covid_recall_train, covid_recall_valid,
                     covid_accuracy_train, covid_accuracy_valid)
        print(
            'Epoch {} - Train acc: {:.2f} - Val acc: {:.2f} - Train loss: {:.4f} - Val loss: {:.4f} - Training time: {:.2f}s'
            .format(i, train_acc, val_acc, train_loss, val_loss, end - start))
        print(
            'Covid information - Train recall: {:.2f} - Val recall: {:.2f} - Train precision: {:.2f} - Val precision: {:.2f}'
            .format(covid_recall_train, covid_recall_valid,
                    covid_accuracy_train, covid_accuracy_valid))
        print('Train f1 score: {:.2f} - Val f1 score: {:.2f}'.format(
            history.history["covid_f1_train"][-1],
            history.history["covid_f1_valid"][-1]))
        print("")

    return history
Example #4
def train(model,
          optimizer,
          dataset,
          n_epoch,
          batch_size,
          use_gpu=True,
          scheduler=None,
          criterion=None,
          pruner=None,
          best_result_save_path=None,
          batch_count=None,
          should_validate=True):
    history = History()

    if criterion is None:
        criterion = torch.nn.CrossEntropyLoss()
    """
    if someone provides a transform upstream.
     there is chances this person what the transform and not silently override it.
    """
    if dataset.transform is None:
        dataset.transform = ToTensor()

    train_loader, val_loader = train_valid_loaders(dataset,
                                                   batch_size=batch_size)

    highest_score = 0.0
    for i in range(n_epoch):
        start = time.time()
        do_epoch(criterion,
                 model,
                 optimizer,
                 scheduler,
                 train_loader,
                 use_gpu,
                 pruner=pruner,
                 count=batch_count)
        end = time.time()

        if should_validate:
            train_acc, train_loss = validate(model, train_loader, use_gpu)
            val_acc, val_loss = validate(model, val_loader, use_gpu)
            train_time = end - start
            history.save(train_acc, val_acc, train_loss, val_loss,
                         optimizer.param_groups[0]['lr'], train_time)
            print(
                'Epoch {} - Train acc: {:.2f} - Val acc: {:.2f} - Train loss: {:.4f} - Val loss: {:.4f} - Training time: {:.2f}s'
                .format(i, train_acc, val_acc, train_loss, val_loss,
                        train_time))

        # val_acc is only defined when should_validate is True, so guard on it
        # to avoid a NameError when validation is disabled.
        if should_validate and best_result_save_path is not None \
                and val_acc > highest_score:
            highest_score = val_acc
            if os.path.isfile(best_result_save_path):
                copyfile(best_result_save_path, best_result_save_path + ".old")

            basedir = os.path.dirname(best_result_save_path)
            if basedir and not os.path.exists(basedir):
                os.makedirs(basedir)
            torch.save(model.state_dict(), best_result_save_path)

    return history
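When `best_result_save_path` is set, the loop above keeps only the weights with the best validation accuracy (backing up the previous file as `.old`). A minimal sketch of reloading that checkpoint afterwards, using the standard PyTorch API; the path and model class here are illustrative:

# Illustrative reload of the saved best checkpoint; the path is hypothetical
# and MnistNet is borrowed from Example #5.
import torch

best_model = MnistNet()
best_model.load_state_dict(torch.load('best_model.pt'))
best_model.eval()  # switch to evaluation mode before inference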
Example #5
if __name__ == "__main__":
    mnist, mnist_test = load_mnist()

    mnist.transform = ToTensor()
    mnist_test.transform = ToTensor()

    dataset = get_256_sample(mnist)
    dataset_loader = DataLoader(dataset, batch_size=256)

    dataset = next(iter(dataset_loader))  # a single batch of 256 examples (inputs, targets)

    epoch = 20
    batch_size = 64
    learning_rate = 0.001

    train_loader, valid_loader = train_valid_loaders(mnist,
                                                     batch_size=batch_size)
    test_loader = DataLoader(mnist_test, batch_size=batch_size)

    net = MnistNet()

    #weights_init_constant(net)
    #weights_init_uniform(net)
    #weights_init_normal(net)
    #weights_init_xavier_normal(net)
    weights_init_kaiming_uniform(net)

    # Proportion of dead neurons measured on the sample batch (identifier kept as in source).
    porcentage_dead_neuron = porcentage_dead(net, dataset)
    print(porcentage_dead_neuron)

    optimizer = optim.Adam(net.parameters(), lr=learning_rate)