Example No. 1
accuracy_weighted = multiclass_acc(
    pd.get_dummies(perch.test['labels']).values, pred_ensemble_weighted)
# accuracy_weighted2=multiclass_acc(pd.get_dummies(perch.test['labels']).values,pred_ensemble_weighted2)

auc = np.mean(
    multilabel_auc(pd.get_dummies(perch.test['labels']).values, pred_ensemble))
auc_weighted = np.mean(
    multilabel_auc(
        pd.get_dummies(perch.test['labels']).values, pred_ensemble_weighted))
# auc_weighted2=np.mean(multilabel_auc(pd.get_dummies(perch.test['labels']).values,pred_ensemble_weighted2))
# Majority vote: each ensemble member casts a vote for its argmax class,
# and the per-class vote fractions serve as pseudo-probabilities.
pred_ensemble_cat = pred_stacked.argmax(axis=2)
pred_cat_prob = np.zeros((len(perch.test['labels']), 5))
for p in range(pred_cat_prob.shape[0]):
    for c in range(5):
        pred_cat_prob[p, c] = np.mean(pred_ensemble_cat[:, p] == c)

# Accuracy of the majority-vote ensemble.
multiclass_acc(pd.get_dummies(perch.test['labels']).values, pred_cat_prob)

save_table3(
    method="Multi-task learning:Ensemble",
    chestray=True,
    model=basemodel.name,
    accuracy=accuracy,
    auc=auc,
    details=details_,
    other=json.dumps({
        'weighted': [accuracy_weighted, auc_weighted],
        # 'weighted_model':[accuracy_weighted2,auc_weighted2],
        'acc_cons_infil': accuracy_consolidation_infiltrates
    }))
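The metric helpers used throughout these examples (multiclass_acc, multilabel_auc) are defined elsewhere in the repository. A minimal sketch of what they plausibly compute, assuming one-hot label matrices and per-class probability matrices of shape (n_samples, n_classes):

import numpy as np
from sklearn.metrics import roc_auc_score


def multiclass_acc(y_onehot, y_prob):
    # Fraction of samples whose predicted argmax class matches the label.
    return np.mean(y_onehot.argmax(axis=1) == y_prob.argmax(axis=1))


def multilabel_auc(y_onehot, y_prob):
    # One-vs-rest ROC AUC per class; callers take np.mean over the list.
    return [roc_auc_score(y_onehot[:, c], y_prob[:, c])
            for c in range(y_onehot.shape[1])]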
Example No. 2
accuracy_weighted = multiclass_acc(
    pd.get_dummies(perch.test['labels']).values, pred_ensemble_weighted)
auc = np.mean(
    multilabel_auc(pd.get_dummies(perch.test['labels']).values, pred_ensemble))
auc_weighted = np.mean(
    multilabel_auc(
        pd.get_dummies(perch.test['labels']).values, pred_ensemble_weighted))
auc_arbitrators = np.mean(
    multilabel_auc(
        pd.get_dummies(perch.test['labels']).values,
        pred_ensemble_arbitrators))
save_table3(method="Supervised Pretraining - Ensemble",
            chestray=with_chestray,
            model=basemodel.name,
            accuracy=accuracy,
            auc=auc,
            details=details_,
            other=json.dumps({
                'weighted': [accuracy_weighted, auc_weighted],
                'acc_cons_infil':
                accuracy_consolidation_infiltrates,
                'arbitrators': [accuracy_arbitrators, auc_arbitrators]
            }))
save_table_reviewers(method="Supervised Pretraining - Ensemble",
                     chestray=with_chestray,
                     model=basemodel.name,
                     accuracy=rev_acc,
                     auc=rev_auc,
                     details=details_)

if display:
    perch_confusion_matrix(perch.test['labels'].values,
                           pred_ensemble_weighted.argmax(axis=1))
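pred_ensemble and pred_ensemble_weighted are built upstream of this snippet. One plausible construction of the weighted variant, assuming pred_stacked of shape (n_members, n_samples, n_classes) and one validation score per member; the weighting scheme is an assumption, not shown in the source:

import numpy as np


def weighted_ensemble(pred_stacked, val_scores):
    # Normalize member scores into convex weights so the weighted average
    # remains a valid probability distribution over classes.
    w = np.asarray(val_scores, dtype=float)
    w = w / w.sum()
    # Contract the member axis: result has shape (n_samples, n_classes).
    return np.tensordot(w, pred_stacked, axes=1)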
Example No. 3
auc = roc_auc_score(test_pred_agg['labels'].values,
                    test_pred_agg[[
                        'pred_class_0', 'pred_class_1', 'pred_class_2',
                        'pred_class_3', 'pred_class_4'
                    ]].values,
                    multi_class='ovr')
aucs = [
    roc_auc_score(test_pred_agg['labels'].values == i,
                  test_pred_agg[f'pred_class_{i}'].values) for i in range(5)
]
accuracy = test_pred_agg['correct'].mean()
site_accuracies = test_pred_agg.groupby('SITE')[['correct']].mean()
age_accuracies = test_pred_agg.groupby('age12m')[['correct']].mean()
print(f"Accuracy: {accuracy:.2f}, AUC: {auc:.2f}")
save_table3(experiment=experiment,
            initialize="Imagenet",
            model=base_model,
            accuracy=accuracy,
            auc=auc,
            config=json.dumps(best_config),
            details=json.dumps({
                'hostname': os.uname()[1],
                'aucs': aucs
            }))
torch.save(best_model.state_dict(),
           os.path.join(result_dir, f"weights/{experiment}.pth"))

embeddings = best_model.embedding.weight.detach().cpu().numpy()
labelList = [
    'REV1-SITE3', 'REV2-SITE2', 'REV3-SITE4', 'REV4-SITE7', 'REV5-SITE5',
    'REV6-SITE6', 'REV7-SITE1', 'REV8-SITE3', 'REV9-SITE2', 'REV10-SITE4',
    'REV11-SITE7', 'REV12-SITE5', 'REV13-SITE6', 'REV14-SITE1', 'ARB1', 'ARB2',
    'ARB3', 'ARB4'
]
Example No. 4
if display:
    plt.show()
else:
    plt.savefig("/home/pmwaniki/Dropbox/tmp/finetune_%s_%s.png" %
                (data_name, details_))

pred_ens = []
for w in swa_callback.weight_list:
    model2.set_weights(w)
    pred_ens.append(model2.predict(test_dataset))
pred_ens = np.stack(pred_ens)

pred_ens_stacked = pred_ens.mean(axis=0)
sep_ens_acc = [
    multiclass_acc(labs_test, pred_ens[i, :, :])
    for i in range(pred_ens.shape[0])
]
sep_ens_auc = [
    np.mean(multilabel_auc(labs_test, pred_ens[i, :, :]))
    for i in range(pred_ens.shape[0])
]

auc = np.mean(multilabel_auc(labs_test, pred_ens_stacked))
acc = multiclass_acc(labs_test, pred_ens_stacked)

save_table3(method="Unsupervised Pretraining",
            chestray=data_name != "perch",
            model=type_,
            accuracy=acc,
            auc=auc,
            details=details_)
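swa_callback.weight_list is assumed to hold one full set of model weights per snapshot. A minimal sketch of such a callback in Keras, storing weights at the end of each epoch from some start epoch onward (the real callback in the source may average or schedule differently):

import tensorflow as tf


class WeightSnapshots(tf.keras.callbacks.Callback):
    """Store a copy of the model's weights at the end of selected epochs."""

    def __init__(self, start_epoch=0):
        super().__init__()
        self.start_epoch = start_epoch
        self.weight_list = []

    def on_epoch_end(self, epoch, logs=None):
        if epoch >= self.start_epoch:
            self.weight_list.append(self.model.get_weights())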
Example No. 5
# Predictions from the single weight-averaged (SWA) model.
pred_ens_swa = model.predict(ds_test)

pred_ens_stacked = pred_ens.mean(axis=0)
sep_ens_acc = [
    multiclass_acc(labs_test, pred_ens[i, :, :])
    for i in range(pred_ens.shape[0])
]
sep_ens_auc = [
    np.mean(multilabel_auc(labs_test, pred_ens[i, :, :]))
    for i in range(pred_ens.shape[0])
]

auc = np.mean(multilabel_auc(labs_test, pred_ens_stacked))
acc = multiclass_acc(labs_test, pred_ens_stacked)
cons_infil_scores = consolidation_infiltrates_acc(labs_test, pred_ens_stacked)

# Metrics for the weight-averaged model, for comparison with the averaged ensemble.
multilabel_auc(labs_test, pred_ens_swa)
multiclass_acc(labs_test, pred_ens_swa)

if display:
    perch_confusion_matrix(labs_test.argmax(axis=1),
                           pred_ens_stacked.argmax(axis=1))

save_table3(method="Supervised Pretraining",
            chestray=with_chestray,
            model=basemodel.name,
            accuracy=acc,
            auc=auc,
            details=details_,
            other=json.dumps({'acc_cons_infil': cons_infil_scores}))
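pred_ens_swa above comes from a single weight-averaged model rather than from averaging predictions. A sketch of how that model could be formed, assuming the same swa_callback.weight_list as in Example No. 4:

import numpy as np

# Average each weight tensor across the stored snapshots, then load the
# result into the model before predicting (stochastic weight averaging).
avg_weights = [np.mean(tensors, axis=0)
               for tensors in zip(*swa_callback.weight_list)]
model.set_weights(avg_weights)
pred_ens_swa = model.predict(ds_test)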
Example No. 6
accuracy = multiclass_acc(
    pd.get_dummies(perch.test['labels']).values, pred_ensemble)
accuracy_weighted = multiclass_acc(
    pd.get_dummies(perch.test['labels']).values, pred_ensemble_weighted)
accuracy_arbitrators = multiclass_acc(
    pd.get_dummies(perch.test['labels']).values, pred_ensemble_arbitrators)
auc = np.mean(
    multilabel_auc(pd.get_dummies(perch.test['labels']).values, pred_ensemble))
auc_weighted = np.mean(
    multilabel_auc(
        pd.get_dummies(perch.test['labels']).values, pred_ensemble_weighted))
auc_arbitrators = np.mean(
    multilabel_auc(
        pd.get_dummies(perch.test['labels']).values,
        pred_ensemble_arbitrators))
accuracy_consolidation_infiltrates = consolidation_infiltrates_acc(
    pd.get_dummies(perch.test['labels']).values, pred_ensemble)

save_table3(
    method="Unsupervised Pretraining:Ensemble",
    chestray=data_name != "perch",
    model=type_,
    accuracy=accuracy,
    auc=auc,
    details=details_,
    other=json.dumps({
        'weighted': [accuracy_weighted, auc_weighted],
        # 'weighted_model':[accuracy_weighted2,auc_weighted2],
        'acc_cons_infil': accuracy_consolidation_infiltrates,
        'arbitrators': [accuracy_arbitrators, auc_arbitrators]
    }))
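save_table3 is the repository's results logger and is not shown here. A hypothetical stand-in that appends one row per call to a CSV file; the path, columns, and signature are illustrative only:

import csv
import os


def save_table3(method=None, chestray=None, model=None, accuracy=None,
                auc=None, details=None, other=None, **kwargs):
    # Append one experiment result per call; create the file with a header
    # row on first use.
    path = "results/table3.csv"
    os.makedirs(os.path.dirname(path), exist_ok=True)
    new_file = not os.path.exists(path)
    with open(path, "a", newline="") as f:
        writer = csv.writer(f)
        if new_file:
            writer.writerow(["method", "chestray", "model", "accuracy",
                             "auc", "details", "other"])
        writer.writerow([method, chestray, model, accuracy, auc,
                         details, other])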
Example No. 7
pred_ens = []
for w in swa_callback.weight_list:
    model.set_weights(w)
    pred_ens.append(model.model_perch.predict(ds_test_perch))
pred_ens = np.stack(pred_ens)

# Average the probabilities of the first five ensemble members only.
pred_ens_stacked = pred_ens[0:5, :, :].mean(axis=0)
sep_ens_acc = [
    multiclass_acc(labs_test_perch, pred_ens[i, :, :])
    for i in range(pred_ens.shape[0])
]
sep_ens_auc = [
    np.mean(multilabel_auc(labs_test_perch, pred_ens[i, :, :]))
    for i in range(pred_ens.shape[0])
]

auc = np.mean(multilabel_auc(labs_test_perch, pred_ens_stacked))
acc = multiclass_acc(labs_test_perch, pred_ens_stacked)
cons_infil_scores = consolidation_infiltrates_acc(labs_test_perch,
                                                  pred_ens_stacked)

test_pred_class = pred_ens_stacked.argmax(axis=1)
test_y_class = labs_test_perch.argmax(axis=1)

# Confusion matrix of true vs predicted classes for the averaged ensemble.
confusion_matrix(test_y_class, test_pred_class)

save_table3(method="Multi-task learning",
            chestray=True,
            model=basemodel.name,
            accuracy=acc,
            auc=auc,
            details=details_,
            other=json.dumps({'acc_cons_infil': cons_infil_scores}))
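A small follow-up one might add after any of these ensemble snippets: compare the averaged-ensemble metrics against the per-member scores computed above, to see how much ensembling actually helps.

# Mean and best single-member scores vs the averaged ensemble.
print(f"member acc: mean={np.mean(sep_ens_acc):.3f}, best={max(sep_ens_acc):.3f}")
print(f"member auc: mean={np.mean(sep_ens_auc):.3f}, best={max(sep_ens_auc):.3f}")
print(f"ensemble acc={acc:.3f}, auc={auc:.3f}")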