Example #1
def test_cls_tta(model, test_loader):
    probs_tta = []
    prs = [0, 1]
    for p1 in prs:
        for p2 in prs:
            test_loader.dataset.transforms.transforms[1].p = p1  # pr(horizontal flip)
            test_loader.dataset.transforms.transforms[2].p = p2  # pr(vertical flip)
            # validate one epoch, note no optimizer is passed
            with torch.no_grad():
                test_preds, test_probs, test_labels = run_one_epoch_cls(
                    test_loader, model)
                probs_tta.append(test_probs)

    probs_tta = np.mean(np.array(probs_tta), axis=0)
    preds_tta = np.argmax(probs_tta, axis=1)

    test_k, test_auc, test_acc = eval_predictions_multi(
        test_labels, preds_tta, probs_tta)
    print('Test Kappa: {:.4f} -- AUC: {:.4f} -- Balanced Acc: {:.4f}'.format(
        test_k, test_auc, test_acc))

    del model
    torch.cuda.empty_cache()
    return probs_tta, preds_tta, test_labels
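The flip-averaging idea above can also be sketched without mutating the DataLoader's transform list, by flipping the input batch directly and averaging the softmax outputs. Below is a minimal, self-contained sketch; the toy model, tensor shapes and the `flip_tta_probs` name are assumptions made for illustration, not part of the example above.

import torch
import torch.nn.functional as F

def flip_tta_probs(model, images):
    """Average softmax probabilities over the four horizontal/vertical flip combinations.

    `images` is assumed to be an (N, C, H, W) float tensor already on the model's
    device; flips are applied along the spatial dimensions.
    """
    model.eval()
    probs = []
    with torch.no_grad():
        for flip_h in (False, True):
            for flip_v in (False, True):
                x = images
                if flip_h:
                    x = torch.flip(x, dims=[-1])  # horizontal flip
                if flip_v:
                    x = torch.flip(x, dims=[-2])  # vertical flip
                probs.append(F.softmax(model(x), dim=1))
    return torch.stack(probs).mean(dim=0)

# Toy usage with an assumed 3-class model and random images
toy_model = torch.nn.Sequential(torch.nn.AdaptiveAvgPool2d(1),
                                torch.nn.Flatten(),
                                torch.nn.Linear(3, 3))
avg_probs = flip_tta_probs(toy_model, torch.rand(4, 3, 64, 64))
preds = avg_probs.argmax(dim=1)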
Example #2
def test_cls_tta_dihedral(model, test_loader, n=3):
    probs_tta = []
    prs = [0, 1]
    import torchvision
    # insert a fixed rotation just before the last transform; it ends up at index -2
    test_loader.dataset.transforms.transforms.insert(-1, torchvision.transforms.RandomRotation(0))
    rotations = np.array([i * 360 // n for i in range(n)])
    for angle in rotations:
        for p2 in prs:
            test_loader.dataset.transforms.transforms[2].p = p2  # pr(vertical flip)
            test_loader.dataset.transforms.transforms[-2].degrees = [angle, angle]
            # validate one epoch, note no optimizer is passed
            with torch.no_grad():
                test_preds, test_probs, test_labels = run_one_epoch_cls(test_loader, model)
                probs_tta.append(test_probs)

    probs_tta = np.mean(np.array(probs_tta), axis=0)
    preds_tta = np.argmax(probs_tta, axis=1)
    try:
        test_k, test_auc, test_acc = eval_predictions_multi(test_labels, preds_tta, probs_tta)
        print('Test Kappa: {:.4f} -- AUC: {:.4f} -- Balanced Acc: {:.4f}'.format(test_k, test_auc, test_acc))
    except Exception:
        print('Test Kappa: {:.4f} -- AUC: {:.4f} -- Balanced Acc: {:.4f}'.format(0, 0, 0))

    del model
    torch.cuda.empty_cache()
    return probs_tta, preds_tta, test_labels
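Example #2's rotation-plus-flip scheme can be mirrored in the same tensor-level style. The sketch below assumes a torchvision version whose functional `rotate` accepts batched tensors; the `rotation_flip_tta_probs` name and shapes are illustrative, not taken from the example.

import torch
import torch.nn.functional as F
import torchvision.transforms.functional as TF

def rotation_flip_tta_probs(model, images, n=3):
    """Average probabilities over n rotations combined with {identity, vertical flip}."""
    model.eval()
    angles = [i * 360 / n for i in range(n)]  # e.g. 0, 120, 240 degrees for n=3
    probs = []
    with torch.no_grad():
        for angle in angles:
            rotated = TF.rotate(images, angle)  # rotate the whole batch
            for flip_v in (False, True):
                x = torch.flip(rotated, dims=[-2]) if flip_v else rotated
                probs.append(F.softmax(model(x), dim=1))
    return torch.stack(probs).mean(dim=0)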
Example #3
def test_cls(model, test_loader):
    # validate one epoch, note no optimizer is passed
    with torch.no_grad():
        preds, probs, labels = run_one_epoch_cls(test_loader, model)
    vl_k, vl_auc, vl_acc = eval_predictions_multi(labels, preds, probs)
    print('Test Kappa: {:.4f} -- AUC: {:.4f}'.format(vl_k, vl_auc).rstrip('0'))

    del model
    torch.cuda.empty_cache()
    return probs, preds, labels
Example #4
def train_cls(model, optimizer, train_criterion, val_criterion, train_loader,
              val_loader, oversample, n_epochs, metric, patience, decay_f,
              exp_path):
    counter_since_checkpoint = 0
    tr_losses, tr_aucs, tr_ks, vl_losses, vl_aucs, vl_ks = [], [], [], [], [], []
    stats = {}
    is_better, best_monitoring_metric = compare_op(metric)
    best_kappa, best_auc = 0, 0
    for epoch in range(n_epochs):
        print('\n EPOCH: {:d}/{:d}'.format(epoch + 1, n_epochs))
        if oversample == [1, 1, 1]:
            tr_preds, tr_probs, tr_labels, tr_loss = run_one_epoch_cls(
                train_loader, model, train_criterion, optimizer)
        else:
            csv_train_path = train_loader.dataset.csv_path
            train_loader_MOD = modify_dataset(train_loader,
                                              csv_train_path=csv_train_path,
                                              keep_samples=oversample)
            # train one epoch
            tr_preds, tr_probs, tr_labels, tr_loss = run_one_epoch_cls(
                train_loader_MOD, model, train_criterion, optimizer)

        # validate one epoch, note no optimizer is passed
        with torch.no_grad():
            vl_preds, vl_probs, vl_labels, vl_loss = run_one_epoch_cls(
                val_loader, model, val_criterion)
        tr_k, tr_auc, tr_acc = eval_predictions_multi(tr_labels, tr_preds,
                                                      tr_probs)
        print('\n')
        vl_k, vl_auc, vl_acc = eval_predictions_multi(vl_labels, vl_preds,
                                                      vl_probs)
        print(
            'Train/Val. Loss: {:.4f}/{:.4f} -- Kappa: {:.4f}/{:.4f} -- AUC: {:.4f}/{:.4f} -- LR={:.6f}'
            .format(tr_loss, vl_loss, tr_k, vl_k, tr_auc, vl_auc,
                    get_lr(optimizer)).rstrip('0'))
        # store performance for this epoch
        tr_losses.append(tr_loss)
        tr_aucs.append(tr_auc)
        tr_ks.append(tr_k)
        vl_losses.append(vl_loss)
        vl_aucs.append(vl_auc)
        vl_ks.append(vl_k)

        #  smooth val values with a moving average before comparing
        vl_auc = ewma(vl_aucs, window=3)[-1]
        vl_loss = ewma(vl_losses, window=3)[-1]
        vl_k = ewma(vl_ks, window=3)[-1]

        # check if performance was better than anyone before and checkpoint if so
        if metric == 'auc':
            monitoring_metric = vl_auc
        elif metric == 'loss':
            monitoring_metric = vl_loss
        elif metric == 'kappa':
            monitoring_metric = vl_k
        elif metric == 'kappa_auc_avg':
            monitoring_metric = 0.5 * (vl_k + vl_auc)
        else:
            sys.exit('Not a suitable metric for this task')

        if is_better(monitoring_metric, best_monitoring_metric):
            print('Best (smoothed) val {} attained. {:.4f} --> {:.4f}'.format(
                metric, best_monitoring_metric, monitoring_metric))
            best_auc, best_kappa = vl_auc, vl_k
            if exp_path is not None:
                print(15 * '-', ' Checkpointing ', 15 * '-')
                write_model(exp_path, model, optimizer, stats)

            best_monitoring_metric = monitoring_metric
            stats['tr_losses'], stats['vl_losses'] = tr_losses, vl_losses
            stats['tr_aucs'], stats['vl_aucs'] = tr_aucs, vl_aucs
            stats['tr_ks'], stats['vl_ks'] = tr_ks, vl_ks
            counter_since_checkpoint = 0  # reset patience
        else:
            counter_since_checkpoint += 1

        if decay_f != 0 and counter_since_checkpoint == 3 * patience // 4:
            reduce_lr(optimizer, epoch, factor=decay_f, verbose=False)
            print(8 * '-', ' Reducing LR now ', 8 * '-')

        # early stopping if no improvement happened for `patience` epochs
        if counter_since_checkpoint == patience:
            print('\n Early stopping the training, trained for {:d} epochs'.
                  format(epoch + 1))
            del model
            torch.cuda.empty_cache()
            return best_kappa, best_auc

    del model
    torch.cuda.empty_cache()
    return best_kappa, best_auc
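Example #4 smooths the validation curves with an `ewma` helper that is not shown in these snippets. A minimal stand-in is sketched below, assuming pandas-style span smoothing with alpha = 2 / (window + 1); the real helper may differ.

import numpy as np

def ewma(values, window=3):
    """Exponentially weighted moving average of a 1-D sequence (assumed behaviour)."""
    alpha = 2.0 / (window + 1)
    out = np.empty(len(values), dtype=float)
    smoothed = values[0]
    for i, v in enumerate(values):
        smoothed = alpha * v + (1 - alpha) * smoothed
        out[i] = smoothed
    return out

# e.g. ewma([0.70, 0.74, 0.73, 0.78], window=3)[-1] gives the smoothed latest AUC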