Example #1
# Note: History and validate are assumed to come from the accompanying
# deeplib helpers (deeplib.history / deeplib.training).
import random

import torch
import torch.nn as nn
import torch.optim as optim

from deeplib.history import History


def train(model, dataset, batch_size, n_epoch, learning_rate):
    history = History()

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    for i in range(n_epoch):
        model.train()
        inputs, targets = dataset
        zip_io = list(zip(inputs, targets))
        random.shuffle(zip_io)
        inputs, targets = zip(*zip_io)
        optimizer.zero_grad()
        inputs = list(inputs)
        # TODO: my tensor is missing a dimension??? Figure out why and where.
        for j, inp in enumerate(inputs):
            # Each sequence is assumed to be padded to a fixed length of 60.
            const_tens = torch.full((len(inp),), 60, dtype=torch.long)
            inputs[j] = torch.nn.utils.rnn.pack_padded_sequence(
                inp, const_tens, batch_first=True)
        output = model(inputs, [2, len(inputs), 50])

        # CrossEntropyLoss expects a LongTensor of class indices, not a list.
        loss = criterion(output, torch.tensor(targets, dtype=torch.long))
        loss.backward()
        optimizer.step()

        train_acc, train_loss = validate(model, dataset)
        history.save(train_acc, train_loss, learning_rate)
        print('Epoch {} - Train acc: {:.2f} - Train loss: {:.4f}'.format((i+1), train_acc, train_loss))

    return history
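The fix above hinges on what nn.CrossEntropyLoss expects: raw logits of shape (batch, n_classes) and a LongTensor of class indices, not a Python list. A minimal self-contained check (batch size and class count are arbitrary):

import torch
import torch.nn as nn

logits = torch.randn(4, 10)          # batch of 4 samples, 10 classes
labels = torch.tensor([1, 0, 9, 3])  # class indices; dtype inferred as int64
loss = nn.CrossEntropyLoss()(logits, labels)
print(loss.item())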
Example #2
# Note: History, train_valid_loaders, validate and do_epoch are assumed to
# come from the accompanying deeplib helpers; ToTensor is torchvision's.
import time

import torch.nn as nn
from torchvision.transforms import ToTensor

from deeplib.history import History


def train(model,
          optimizer,
          dataset,
          n_epoch,
          batch_size,
          use_gpu=True,
          scheduler=None,
          criterion=None):
    history = History()

    if criterion is None:
        criterion = nn.CrossEntropyLoss()

    dataset.transform = ToTensor()
    train_loader, val_loader = train_valid_loaders(dataset,
                                                   batch_size=batch_size)

    for i in range(n_epoch):
        start = time.time()
        do_epoch(criterion, model, optimizer, scheduler, train_loader, use_gpu)
        end = time.time()

        train_acc, train_loss = validate(model, train_loader, use_gpu)
        val_acc, val_loss = validate(model, val_loader, use_gpu)
        history.save(train_acc, val_acc, train_loss, val_loss,
                     optimizer.param_groups[0]['lr'])
        print(
            'Epoch {} - Train acc: {:.2f} - Val acc: {:.2f} - Train loss: {:.4f} - Val loss: {:.4f} - Training time: {:.2f}s'
            .format(i, train_acc, val_acc, train_loss, val_loss, end - start))

    return history
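Examples #2 through #6 rely on a train_valid_loaders helper that is never shown. A minimal sketch of what such a helper commonly looks like, assuming a random 80/20 split (the ratio and the absence of a fixed seed are assumptions):

import numpy as np
from torch.utils.data import DataLoader, SubsetRandomSampler

def train_valid_loaders(dataset, batch_size, train_split=0.8):
    # Shuffle the indices once, then carve out train/validation subsets.
    indices = np.arange(len(dataset))
    np.random.shuffle(indices)
    split = int(train_split * len(dataset))
    train_sampler = SubsetRandomSampler(indices[:split])
    valid_sampler = SubsetRandomSampler(indices[split:])
    train_loader = DataLoader(dataset, batch_size=batch_size, sampler=train_sampler)
    valid_loader = DataLoader(dataset, batch_size=batch_size, sampler=valid_sampler)
    return train_loader, valid_loader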
Example #3
# Note: History, train_valid_loaders and validate are assumed to come from
# the accompanying deeplib helpers.
import time

import torch
import torch.nn as nn
import torch.optim as optim

from deeplib.history import History


def train(model, dataset, n_epoch, batch_size, learning_rate, use_gpu=False):
    history = History()

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=learning_rate)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.3)

    train_loader, val_loader = train_valid_loaders(dataset, batch_size=batch_size)
    for i in range(n_epoch):
        t = time.time()
        model.train()
        for j, batch in enumerate(train_loader):
            (inputs, targets), inputs_len = batch
            if use_gpu:
                inputs = inputs.cuda()
                targets = targets.cuda()
            # Variable is deprecated; tensors handle autograd directly.
            inputs = nn.utils.rnn.pack_padded_sequence(inputs, inputs_len,
                                                       batch_first=True)
            optimizer.zero_grad()
            output = model(inputs)

            loss = criterion(output, targets)
            loss.backward()
            optimizer.step()

        # Since PyTorch 1.1, the scheduler steps after the optimizer.
        scheduler.step()
        print('Time: {:.2f}s'.format(time.time() - t))
        train_cm, train_acc, train_loss = validate(model, train_loader, use_gpu)
        val_cm, val_acc, val_loss = validate(model, val_loader, use_gpu)
        history.save(train_acc, val_acc, train_loss, val_loss)
        print('Epoch {} - Train acc: {:.2f} - Val acc: {:.2f} - Train loss: {:.4f} - Val loss: {:.4f}'.format(i,
                                                                                                              train_acc,
                                                                                                              val_acc,
                                                                                                              train_loss,
                                                                                                              val_loss))
        print('Train confusion matrix')
        print(train_cm)
        print('Valid confusion matrix')
        print(val_cm)
        with open('resultats.txt', 'a') as f:
            f.write('Epoch ' + str(i))
            f.write('\n')
            f.write('Train confusion matrix')
            f.write('\n')
            f.write(str(train_cm))
            f.write('\n')
            f.write('Valid confusion matrix')
            f.write('\n')
            f.write(str(val_cm))
            f.write('\n')
    return history
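Examples #1 and #3 wrap their batches with pack_padded_sequence; a minimal illustration of its contract (the shapes are arbitrary):

import torch
import torch.nn as nn

# With batch_first=True the input is (batch, max_len, features); lengths holds
# each sequence's true length. enforce_sorted=False (PyTorch >= 1.1) accepts
# batches in any order; otherwise they must be sorted by decreasing length.
seqs = torch.randn(3, 5, 8)
lengths = torch.tensor([5, 3, 2])
packed = nn.utils.rnn.pack_padded_sequence(seqs, lengths, batch_first=True,
                                           enforce_sorted=False)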
Example #4
# Note: pt is the Poutyne package (import poutyne as pt); History comes from
# deeplib.history.
import poutyne as pt

from deeplib.history import History


class HistoryCallback(pt.Callback):
    """
    A Poutyne callback that saves the learning rate in a
    `deeplib.history.History` object, in addition to the other training
    metrics returned by `Model.fit_generator`.

    Attributes:
        history (deeplib.history.History): The deeplib History object.
    """
    def __init__(self):
        super().__init__()
        self.history = History()

    def on_epoch_end(self, epoch_number, logs):
        self.history.save(
            dict(**logs, lr=self.model.optimizer.param_groups[0]['lr']))
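A sketch of how this callback plugs into a Poutyne run; net, optimizer and the loaders are placeholders:

callback = HistoryCallback()
poutyne_model = pt.Model(net, optimizer, 'cross_entropy',
                         batch_metrics=['accuracy'])
poutyne_model.fit_generator(train_loader, valid_loader, epochs=10,
                            callbacks=[callback])
callback.history.display()  # deeplib's History is assumed to expose display()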
Example #5
# Note: as in Example #2, History, train_valid_loaders, validate and do_epoch
# are assumed to come from the accompanying deeplib helpers; transforms is
# torchvision.transforms.
def train(model,
          optimizer,
          dataset,
          n_epoch,
          batch_size,
          use_gpu=True,
          scheduler=None,
          criterion=None):
    history = History()

    if criterion is None:
        criterion = nn.CrossEntropyLoss()

    # wanted_size is assumed to be a module-level constant (the input
    # resolution expected by the network).
    dataset.transform = transforms.Compose([
        transforms.Resize([wanted_size, wanted_size]),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    train_loader, val_loader = train_valid_loaders(dataset,
                                                   batch_size=batch_size)

    for i in range(n_epoch):
        start = time.time()
        do_epoch(criterion, model, optimizer, scheduler, train_loader, use_gpu)
        end = time.time()

        train_acc, train_loss, covid_recall_train, covid_accuracy_train = validate(
            model, train_loader, use_gpu)
        val_acc, val_loss, covid_recall_valid, covid_accuracy_valid = validate(
            model, val_loader, use_gpu)
        history.save(train_acc, val_acc, train_loss, val_loss,
                     optimizer.param_groups[0]['lr'], i, model,
                     covid_recall_train, covid_recall_valid,
                     covid_accuracy_train, covid_accuracy_valid)
        print(
            'Epoch {} - Train acc: {:.2f} - Val acc: {:.2f} - Train loss: {:.4f} - Val loss: {:.4f} - Training time: {:.2f}s'
            .format(i, train_acc, val_acc, train_loss, val_loss, end - start))
        print(
            'Covid information - Train recall: {:.2f} - Val recall: {:.2f} - Train precision: {:.2f} - Val precision: {:.2f}'
            .format(covid_recall_train, covid_recall_valid,
                    covid_accuracy_train, covid_accuracy_valid))
        print('Train f1 score: {:.2f} - Val f1 score: {:.2f}'.format(
            history.history["covid_f1_train"][-1],
            history.history["covid_f1_valid"][-1]))
        print("")

    return history
Example #6
# Note: calcul_metric_concours and History are assumed to come from the
# project's helper modules.
import copy
import os

import torch

from deeplib.history import History


def train_model(model,
                train_loader,
                val_loader,
                n_epoch,
                scheduler,
                optimizer,
                criterion,
                use_gpu=False,
                path_save=None,
                path_start_from_existing_model=None):

    if path_start_from_existing_model is not None and os.path.isfile(
            path_start_from_existing_model):

        # Loading state
        checkpoint = torch.load(path_start_from_existing_model)
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        next_epoch = checkpoint['epoch'] + 1
        loss = checkpoint['loss']
        history = checkpoint["history"]
        best_acc = checkpoint["best_acc"]
        best_model_weights = checkpoint["best_model_weights"]
        scheduler.load_state_dict(checkpoint["lr_scheduler_state"])

        print("Modèle chargé pour entraînement")

    else:
        best_model_weights = copy.deepcopy(model.state_dict())
        history = History()
        next_epoch = 0
        best_acc = 0
        print("No model loaded for training")

    # Training; resume from the checkpoint's epoch when one was loaded.
    for epoch in range(next_epoch, n_epoch):
        model.train()
        for j, batch in enumerate(train_loader):

            inputs, targets = batch
            if use_gpu:
                inputs = inputs.cuda()
                targets = targets.cuda()

            optimizer.zero_grad()
            output = model(inputs)

            loss = criterion(output, targets)
            loss.backward()
            optimizer.step()

        # Since PyTorch 1.1, the scheduler steps after the optimizer.
        scheduler.step()

        train_acc, train_loss, train_top3_score, train_conf_mat, train_acc_per_class = calcul_metric_concours(
            model, train_loader, use_gpu, show_acc_per_class=True)
        val_acc, val_loss, val_top3_score, val_conf_mat, val_acc_per_class = calcul_metric_concours(
            model, val_loader, use_gpu, show_acc_per_class=True)

        # Current LR
        for param_group in optimizer.param_groups:
            current_lr = param_group["lr"]

        history.save(train_acc, val_acc, train_loss, val_loss, current_lr)
        print(
            'Epoch {} - Train acc: {:.2f} - Val acc: {:.2f} - Train loss: {:.4f} - Val loss: {:.4f} - Val top3 score: {:.4f}'
            .format(epoch, train_acc, val_acc, train_loss, val_loss,
                    val_top3_score))

        print(val_acc_per_class)

        # Best model
        if val_acc > best_acc:
            best_acc = val_acc
            best_model_weights = copy.deepcopy(model.state_dict())

        # Save a checkpoint
        if path_save is not None:
            torch.save(
                {
                    'epoch': epoch,
                    'model_state_dict': model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'loss': loss,
                    "history": history,
                    "best_acc": best_acc,
                    "best_model_weights": best_model_weights,
                    "lr_scheduler_state": scheduler.state_dict()
                }, path_save)

    return history
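A sketch of driving this function so that the first run starts fresh and later runs resume from disk; the path and hyperparameters are placeholders, and nn is torch.nn:

history = train_model(model,
                      train_loader,
                      val_loader,
                      n_epoch=30,
                      scheduler=scheduler,
                      optimizer=optimizer,
                      criterion=nn.CrossEntropyLoss(),
                      use_gpu=True,
                      path_save='checkpoint.pt',
                      path_start_from_existing_model='checkpoint.pt')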
Example #7
    def __init__(self):
        super().__init__()
        self.history = History()
Example #8
# Note: History and validate are assumed to come from the accompanying
# deeplib helpers.
import copy

import torch

from deeplib.history import History


def train_model(model,
                train_loader,
                val_loader,
                n_epoch,
                scheduler,
                optimizer,
                criterion,
                use_gpu=False,
                path_save=None):

    history = History()
    next_epoch = 0
    best_acc = 0
    best_model_weights = copy.deepcopy(model.state_dict())

    # Training
    for epoch in range(next_epoch, n_epoch):
        model.train()
        for j, batch in enumerate(train_loader):

            inputs, targets = batch
            if use_gpu:
                inputs = inputs.cuda()
                targets = targets.cuda()

            optimizer.zero_grad()
            output = model(inputs)

            loss = criterion(output, targets)
            loss.backward()
            optimizer.step()

        # Since PyTorch 1.1, the scheduler steps after the optimizer.
        scheduler.step()

        train_acc, train_loss = validate(model, train_loader, use_gpu)
        val_acc, val_loss = validate(model, val_loader, use_gpu)

        # Current LR
        for param_group in optimizer.param_groups:
            current_lr = param_group["lr"]

        history.save(train_acc, val_acc, train_loss, val_loss, current_lr)
        print(
            'Epoch {} - Train acc: {:.2f} - Val acc: {:.2f} - Train loss: {:.4f} - Val loss: {:.4f}'
            .format(epoch, train_acc, val_acc, train_loss, val_loss))

        # Best model
        if val_acc > best_acc:
            best_acc = val_acc
            best_model_weights = copy.deepcopy(model.state_dict())

        # Save a checkpoint
        if path_save is not None:
            torch.save(
                {
                    'epoch': epoch,
                    'model_state_dict': model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'loss': loss,
                    "history": history,
                    "best_acc": best_acc,
                    "best_model_weights": best_model_weights,
                    "lr_scheduler_state": scheduler.state_dict()
                }, path_save)

            print("Epoch {} sauvegardée".format(epoch))


    return history, model
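A sketch of calling this variant with a StepLR schedule like the one in Example #3; the model, loaders and hyperparameters are placeholders, and nn/optim are torch.nn/torch.optim:

optimizer = optim.SGD(model.parameters(), lr=0.01)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.3)
history, model = train_model(model,
                             train_loader,
                             val_loader,
                             n_epoch=30,
                             scheduler=scheduler,
                             optimizer=optimizer,
                             criterion=nn.CrossEntropyLoss(),
                             use_gpu=True,
                             path_save='checkpoint.pt')
history.display()  # deeplib's History is assumed to expose display()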
Example #9
    # Excerpt from a script in which mnist and mnist_test are torchvision
    # MNIST datasets created earlier.
    mnist_test.transform = ToTensor()

    epoch = 50
    batch_size = 64
    learning_rate = 0.001

    train_loader = DataLoader(mnist, batch_size=batch_size)
    test_loader = DataLoader(mnist_test, batch_size=batch_size)

    net = MnistNet()
    optimizer = optim.Adam(net.parameters(), lr=learning_rate)
    scheduler = pt.ReduceLROnPlateau(monitor='acc',
                                     mode='max',
                                     patience=1,
                                     factor=0.5,
                                     verbose=True)

    model = pt.Model(net,
                     optimizer,
                     'cross_entropy',
                     batch_metrics=['accuracy'])
    # Note: the test set also serves as the validation set in this example.
    history = model.fit_generator(train_loader,
                                  test_loader,
                                  epochs=epoch,
                                  callbacks=[scheduler])

    History(history).display()

    test_loss, test_acc = model.evaluate_generator(test_loader)
    print('test_loss: {:.4f} test_acc: {:.2f}'.format(test_loss, test_acc))