Example #1
def benchmark_dataset(dataset, title, fname, testname, xlabels, ylabels=None):
    global benchmark_data
    print(f'Benchmarking {title}.. ')

    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=10,
        shuffle=False, num_workers=4,
    )

    tracked_metrics = [
        metrics.Accuracy(),
        metrics.RocAuc(),
        metrics.FScore()
    ]

    logs, cm = trainer.test(
        model=model, test_dataloader=dataloader,
        criterion=criterion, metrics=tracked_metrics, device=device
    )

    with open(f'logs/{vars.corda_version}/{name}/{fname}-metric.txt', 'w') as f:
        f.write(f'{fname}: ' + trainer.summarize_metrics(logs) + '\n')

    ax = sns.heatmap(
        cm.get(normalized=True), annot=True, fmt=".2f",
        xticklabels=xlabels, yticklabels=ylabels or xlabels,
        vmin=0., vmax=1.
    )
    ax.set_title(title)
    plt.xlabel('predicted')
    plt.ylabel('ground')
    hm = ax.get_figure()
    hm.savefig(f'logs/{vars.corda_version}/{name}/{fname}.png')
    hm.clf()

    fpr, tpr, thresholds = tracked_metrics[1].get_curve()
    auc = tracked_metrics[1].get()
    f = plt.figure()
    plt.plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (auc = {auc:.2f})')
    plt.title(f'{title} ROC')
    plt.legend(loc='lower right')
    plt.savefig(f'logs/{vars.corda_version}/{name}/{fname}-roc.png')
    plt.clf()
    plt.cla()
    plt.close()

    # Row-normalized 2x2 confusion matrix (rows = ground truth, columns = predictions)
    # flattens to (TN-rate, FP-rate, FN-rate, TP-rate) = (specificity, fpr, fnr, sensitivity).
    specificity, fpr, fnr, sensitivity = cm.get(normalized=True).ravel()
    dor = (sensitivity * specificity) / ((1 - sensitivity) * (1 - specificity))  # diagnostic odds ratio
    fscore = tracked_metrics[2].get()
    ba = (sensitivity + specificity) / 2.  # balanced accuracy

    data = {
        'arch': args.arch, 'pretrain': args.pretrain, 'train': args.train.upper(),
        'test': testname, 'accuracy': tracked_metrics[0].get(), 'auc': auc,
        'sensitivity': sensitivity, 'specificity': specificity, 'fscore': fscore,
        'ba': ba, 'missrate': fnr, 'dor': dor
    }

    for k, v in data.items():
        benchmark_data[k].append(v)
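
Assuming `cm.get(normalized=True)` returns a row-normalized 2x2 confusion matrix as unpacked above, the derived statistics in Example #1 can be checked in isolation with plain NumPy (the matrix values here are made up for illustration):

import numpy as np

# Hypothetical row-normalized confusion matrix:
# rows = ground truth (negative, positive), columns = predictions.
cm_norm = np.array([[0.90, 0.10],   # true negatives: 90% kept, 10% false positives
                    [0.20, 0.80]])  # true positives: 20% missed, 80% detected

specificity, fpr, fnr, sensitivity = cm_norm.ravel()
dor = (sensitivity * specificity) / ((1 - sensitivity) * (1 - specificity))  # diagnostic odds ratio
ba = (sensitivity + specificity) / 2.                                        # balanced accuracy

print(f'sensitivity={sensitivity:.2f} specificity={specificity:.2f} dor={dor:.1f} ba={ba:.2f}')
# sensitivity=0.80 specificity=0.90 dor=36.0 ba=0.85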
Example #2
def benchmark_dataset(dataset, title, fname, xlabels, ylabels=None):
    print(f'Benchmarking {title}.. ', end='', flush=True)

    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=10,
        shuffle=False,
        num_workers=4,
    )

    tracked_metrics = [metrics.Accuracy(), metrics.RocAuc(), metrics.FScore()]

    logs, cm = trainer.test(model=model,
                            test_dataloader=dataloader,
                            criterion=criterion,
                            metrics=tracked_metrics,
                            device=device)

    with open(f'logs/{vars.corda_version}/{name}/{fname}-metric.txt',
              'w') as f:
        f.write(f'{fname}: ' + trainer.summarize_metrics(logs) + '\n')

    ax = sns.heatmap(cm.get(normalized=True),
                     annot=True,
                     fmt=".2f",
                     xticklabels=xlabels,
                     yticklabels=ylabels or xlabels)
    ax.set_title(title)
    plt.xlabel('predicted')
    plt.ylabel('ground')
    hm = ax.get_figure()
    hm.savefig(f'logs/{vars.corda_version}/{name}/{fname}.png')
    hm.clf()

    fpr, tpr, thresholds = tracked_metrics[1].get_curve()
    auc = tracked_metrics[1].get()
    f = plt.figure()
    plt.plot(fpr,
             tpr,
             color='darkorange',
             lw=2,
             label=f'ROC curve (auc = {auc:.2f})')
    plt.title(f'{title} ROC')
    plt.legend(loc='lower right')
    plt.savefig(f'logs/{vars.corda_version}/{name}/{fname}-roc.png')
    plt.clf()
    plt.cla()
    plt.close()
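
A minimal sketch of how this helper might be invoked, assuming the surrounding script has already set up the globals it relies on (model, trainer, criterion, device, name); the dataset objects and label order below are placeholders, not names from the original script:

# Hypothetical call sites for benchmark_dataset (Example #2 signature).
class_labels = ['normal', 'covid']

benchmark_dataset(test_dataset, 'Held-out test set', 'test', class_labels)
benchmark_dataset(external_dataset, 'External cohort', 'external', class_labels)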
Example #3
if args.arch == 'resnet18':
    model = covid_classifier.CovidClassifier(
        encoder=feature_extractor,
        pretrained=False,
        freeze_conv=False
    ).to(device)
elif args.arch == 'resnet50':
    model = covid_classifier.CovidClassifier50(
        encoder=feature_extractor,
        pretrained=False,
        freeze_conv=False
    ).to(device)

#model = covid_classifier.LeNet1024NoPoolingDeep().to(device)

print(f'Using lr {lr}')

# TRAINING
# %%
tracked_metrics = [
    metrics.Accuracy(),
    metrics.RocAuc(),
    metrics.FScore()
]

def focal_loss(output, target, gamma=2., weight=None):
    # Per-element BCE scaled by (1 - pt)^gamma, where pt is the predicted
    # probability of the true class: confident (easy) examples contribute less.
    bce = F.binary_cross_entropy(output, target, reduction='none', weight=weight)
    pt = target*output + (1-target)*(1-output)
    return (torch.pow((1-pt), gamma) * bce).mean()

criterion = focal_loss
optimizer = torch.optim.SGD(model.parameters(), lr=lr, weight_decay=1e-3)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=15, verbose=True)

best_model = trainer.fit(
    model=model, train_dataloader=train_dataloader,
    val_dataloader=val_dataloader, test_dataloader=test_dataloader,
Example #4
if args.arch == 'resnet18':
    model = covid_classifier.CovidClassifier(encoder=feature_extractor,
                                             pretrained=False,
                                             freeze_conv=False).to(device)
elif args.arch == 'resnet50':
    model = covid_classifier.CovidClassifier50(encoder=feature_extractor,
                                               pretrained=False,
                                               freeze_conv=False).to(device)

#model = covid_classifier.LeNet1024NoPoolingDeep().to(device)

print(f'Using lr {lr}')

# TRAINING
# %%
tracked_metrics = [metrics.Accuracy(), metrics.RocAuc(), metrics.FScore()]


def focal_loss(output, target, gamma=2., weight=None):
    bce = F.binary_cross_entropy(output,
                                 target,
                                 reduction='none',
                                 weight=weight)
    pt = target * output + (1 - target) * (1 - output)
    return (torch.pow((1 - pt), gamma) * bce).mean()


criterion = focal_loss
optimizer = torch.optim.SGD(model.parameters(), lr=lr, weight_decay=1e-3)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                          patience=15,
                                                          verbose=True)
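
A self-contained check of the focal term, repeating the focal_loss definition from Examples #3/#4 so it runs standalone and using made-up sigmoid outputs in [0, 1]: with gamma=0 the loss reduces to ordinary BCE, while gamma=2 strongly down-weights confident predictions.

import torch
import torch.nn.functional as F

def focal_loss(output, target, gamma=2., weight=None):
    bce = F.binary_cross_entropy(output, target, reduction='none', weight=weight)
    pt = target * output + (1 - target) * (1 - output)  # probability of the true class
    return (torch.pow(1 - pt, gamma) * bce).mean()

output = torch.tensor([0.95, 0.60, 0.10])  # toy sigmoid outputs
target = torch.tensor([1.00, 1.00, 1.00])  # all positives

print(focal_loss(output, target, gamma=0.))   # equals plain mean BCE
print(F.binary_cross_entropy(output, target))
print(focal_loss(output, target, gamma=2.))   # the easy example (0.95) contributes almost nothing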