Example #1
        train_acc_history = []
        val_acc_history = []
        loss_history = []

        optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=reg)
        scheduler = ReduceLROnPlateau(optimizer,
                                      patience=adaptive_lr_patience,
                                      cooldown=2,
                                      verbose=1,
                                      min_lr=1e-5 * lr,
                                      factor=adaptive_lr_factor)

        for epoch in range(1, num_epochs + 1):
            print("Begin epoch {}/{}".format(epoch, num_epochs))
            epoch_losses, epoch_f2 = train_epoch(
                train_loader,
                model,
                loss_fn,
                optimizer,
                dtype,
                sigmoid_threshold=sigmoid_threshold,
                print_every=20)
            ## step the LR scheduler on the mean training loss for the epoch
            ## (the extra epoch argument to step() is deprecated)
            scheduler.step(np.mean(epoch_losses))
            ## f2 score for validation dataset
            f2_acc = validate_epoch(model,
                                    val_loader,
                                    dtype,
                                    sigmoid_threshold=sigmoid_threshold)
            ## store results
            train_acc_history += epoch_f2
            val_acc_history.append(f2_acc)
            loss_history += epoch_losses
            ## overwrite the model .pkl file every epoch
            torch.save(model.state_dict(), save_model_path)
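
These excerpts rely on ReduceLROnPlateau from torch.optim.lr_scheduler. Below is a minimal, self-contained sketch of the same scheduler pattern; the model and the metric are placeholders standing in for the real training loop above.

    import torch.nn as nn
    import torch.optim as optim
    from torch.optim.lr_scheduler import ReduceLROnPlateau

    model = nn.Linear(10, 1)                           # placeholder model
    optimizer = optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-4)
    scheduler = ReduceLROnPlateau(optimizer, factor=0.5, patience=2,
                                  cooldown=2, min_lr=1e-8)

    for epoch in range(5):
        # ... run one training epoch and collect the mean loss here ...
        mean_loss = 1.0 / (epoch + 1)                  # dummy metric for illustration
        scheduler.step(mean_loss)                      # LR is cut by `factor` when the metric plateaus
        print(optimizer.param_groups[0]["lr"])
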
Example #2
    loss_fn = nn.MultiLabelSoftMarginLoss().type(dtype)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    scheduler = ReduceLROnPlateau(optimizer,
                                  patience=1,
                                  factor=0.5,
                                  min_lr=0.01 * lr)

    acc_history = []
    loss_history = []
    ## don't load model params from file - instead retrain the model
    if not from_pickle:
        for epoch in range(num_epochs):
            print("Begin epoch {}/{}".format(epoch + 1, num_epochs))
            epoch_loss = train_epoch(train_loader,
                                     model,
                                     loss_fn,
                                     optimizer,
                                     dtype,
                                     print_every=10)
            ## step the LR scheduler on the last batch loss of the epoch
            ## (the extra epoch argument to step() is deprecated)
            scheduler.step(epoch_loss[-1])
            ## f2 score for validation dataset
            acc = validate_epoch(model, val_loader, dtype)
            acc_history.append(acc)
            loss_history += epoch_loss
            print("END epoch {}/{}: F2 score = {:.02f}".format(
                epoch + 1, num_epochs, acc))
        ## serialize model data and save as .pkl file
        torch.save(model.state_dict(), save_model_path)
        print("model saved as {}".format(os.path.abspath(save_model_path)))
        ## save loss and accuracy as .mat file
        savemat(save_mat_path, {
            "acc": acc_history,
            "loss": loss_history,
        })
Example #3
            optimizer_1 = optim.Adam(model.fc.parameters(),
                                     lr=lr_1,
                                     weight_decay=reg_1)

            train_acc_history_1 = []
            val_acc_history_1 = []
            loss_history_1 = []
            print("training {} fully connected layer".format(model_names[i]))
            for epoch in range(num_epochs_1):
                print("Begin {} epoch {}/{}".format(model_names[i], epoch + 1,
                                                    num_epochs_1))
                epoch_losses, epoch_f2 = train_epoch(
                    train_loaders[i],
                    model,
                    loss_fn,
                    optimizer_1,
                    dtype,
                    print_every=20,
                    sigmoid_threshold=sigmoid_threshold)
                ## f2 score for validation dataset
                f2_acc = validate_epoch(model,
                                        val_loaders[i],
                                        dtype,
                                        sigmoid_threshold=sigmoid_threshold)
                ## store results
                train_acc_history_1 += epoch_f2
                val_acc_history_1.append(f2_acc)
                loss_history_1 += epoch_losses
                print("END {} epoch {}/{}: Val F2 score = {:.02f}".format(
                    model_names[i], epoch + 1, num_epochs_1, f2_acc))
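
validate_epoch is not shown in these excerpts, but the F2 metric it reports can be reproduced for multi-label outputs with scikit-learn. A hedged sketch (the helper name, the sigmoid threshold, and the dummy data are assumptions, not taken from the original code):

    import numpy as np
    from sklearn.metrics import fbeta_score

    def f2_score(logits, targets, sigmoid_threshold=0.5):
        """F2 for multi-label outputs: sigmoid, threshold, then fbeta with beta=2."""
        probs = 1.0 / (1.0 + np.exp(-logits))                # sigmoid
        preds = (probs > sigmoid_threshold).astype(int)
        return fbeta_score(targets, preds, beta=2, average="samples")

    logits = np.array([[2.0, -1.0, 0.3], [-0.5, 1.5, 2.2]])  # dummy model outputs
    targets = np.array([[1, 0, 1], [0, 1, 1]])               # dummy ground-truth labels
    print(f2_score(logits, targets))
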
Example #4
        for param in model.fc.parameters():
            param.requires_grad = True

        optimizer_1 = optim.Adam(model.fc.parameters(),
                                 lr=lr_1,
                                 weight_decay=reg_1)

        train_acc_history_1 = []
        val_acc_history_1 = []
        loss_history_1 = []
        print("training final fully connected layer")
        for epoch in range(num_epochs_1):
            print("Begin epoch {}/{}".format(epoch + 1, num_epochs_1))
            epoch_losses, epoch_f2 = train_epoch(train_loader,
                                                 model,
                                                 loss_fn,
                                                 optimizer_1,
                                                 dtype,
                                                 print_every=10)
            ## f2 score for validation dataset
            f2_acc = validate_epoch(model, val_loader, dtype)
            ## store results
            train_acc_history_1 += epoch_f2
            val_acc_history_1.append(f2_acc)
            loss_history_1 += epoch_losses
            print("END epoch {}/{}: validation F2 score = {:.02f}".format(
                epoch + 1, num_epochs_1, f2_acc))

        ## now fine-tune the whole parameter set on our data
        for param in model.parameters():
            param.requires_grad = True
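
Example #4 follows the usual two-stage transfer-learning recipe: train only the new fc head while the pretrained backbone stays frozen, then unfreeze everything and fine-tune end to end. A minimal sketch of the freeze/unfreeze steps the excerpt implies; the backbone choice and the 17-label head are assumptions for illustration only.

    import torch.nn as nn
    import torchvision.models as models

    model = models.resnet18()                          # any backbone works; load pretrained weights in practice

    # Stage 1: freeze every backbone weight ...
    for param in model.parameters():
        param.requires_grad = False

    # ... then swap in a fresh classification head (its new weights are trainable by default)
    model.fc = nn.Linear(model.fc.in_features, 17)

    # Stage 2, once the head has converged: unfreeze everything and fine-tune end to end
    for param in model.parameters():
        param.requires_grad = True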