Example 1
import numpy as np
import torch
from tqdm import tqdm
# Assumption: ap_score is bound to scikit-learn's average_precision_score.
from sklearn.metrics import average_precision_score as ap_score


def cal_AP(testloader, net, criterion, device):
    '''
    Calculate the Average Precision (AP) of each of the five output
    channels. `criterion` is unused here but kept for interface parity.
    '''
    with torch.no_grad():
        net.eval()
        preds = [[] for _ in range(5)]
        heatmaps = [[] for _ in range(5)]
        for images, labels in tqdm(testloader):
            images = images.to(device)
            labels = labels.to(device)
            output = net(images).cpu().numpy()
            for c in range(5):
                preds[c].append(output[:, c].reshape(-1))
                heatmaps[c].append(labels[:, c].cpu().numpy().reshape(-1))

        aps = []
        for c in range(5):
            preds[c] = np.concatenate(preds[c])
            heatmaps[c] = np.concatenate(heatmaps[c])
            if heatmaps[c].max() == 0:
                # No positive pixels for this class: AP is undefined.
                ap = float('nan')
            else:
                ap = ap_score(heatmaps[c], preds[c])
                aps.append(ap)
            print("AP = {}".format(ap))

    return aps
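None of the examples on this page define `ap_score` themselves; a common binding, assumed throughout, is scikit-learn's `average_precision_score`. A minimal sketch:

# Minimal sketch of the ap_score helper assumed by these examples
# (assumption: it wraps sklearn.metrics.average_precision_score).
import numpy as np
from sklearn.metrics import average_precision_score as ap_score

y_true = np.array([0, 1, 1, 0, 1])             # flattened binary mask
y_score = np.array([0.1, 0.8, 0.4, 0.3, 0.9])  # per-pixel scores
print(ap_score(y_true, y_score))               # average precision in [0, 1]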
Example 2
    # Module-level imports assumed: os, numpy as np, and an image reader
    # bound to `misc` (legacy scipy.misc, or imageio as a drop-in).
    def eval_dir_AP(self, Outpath, GT):
        imlist = []
        imnamelist = []
        for root, _, fnames in sorted(os.walk(GT)):
            for fname in fnames:
                if fname.endswith('.png') and 'M1BS' in fname and not fname.startswith('.'):
                    path = os.path.join(root, fname)
                    imlist.append((path, fname))
                    imnamelist.append(fname)
        with open(os.path.join(Outpath, 'Prec_recall.txt'), 'w') as FILE:
            FILE.write('   AP   |  Prec  |  Recall | NAME\n')
            for path, name in imlist:
                preds = misc.imread(os.path.join(Outpath, name))
                labs = misc.imread(os.path.join(GT, name))
                # Binarize the ground truth (np.float was removed in
                # NumPy 1.24, so the builtin float is used instead).
                labs = (labs == np.amax(labs)).astype(float)

                # AP is computed on the raw 8-bit prediction scores...
                preds = preds.astype(np.uint8)
                AP = ap_score(labs.flatten(), preds.flatten())
                # ...then the predictions are thresholded for the counts.
                preds = (preds >= 50).astype(float)

                tp = np.sum(preds[labs == 1] == 1).astype(float)
                fn = np.sum(preds[labs == 1] == 0).astype(float)
                fp = np.sum(preds[labs == 0] == 1).astype(float)
                tn = np.sum(preds[labs == 0] == 0).astype(float)

                print(tp, fn, fp, tn)
                conf_matrix = [tp / (tp + fn), fp / (tp + fp),
                               fn / (fn + tn), tn / (tn + fn)]
                Prec = tp / (tp + fp)
                Recall = tp / (tp + fn)

                FILE.write(' %02.2f  |  %02.2f  |   %02.2f |  %s \n' % (AP, Prec, Recall, name))
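The hand-rolled tp/fp/fn/tn arithmetic above can be sanity-checked against scikit-learn's metrics; a minimal sketch on toy binary masks (the masks here are illustrative assumptions):

# Cross-check of the hand-computed precision/recall above against
# scikit-learn, on toy binary masks.
import numpy as np
from sklearn.metrics import precision_score, recall_score

labs = np.array([1, 0, 1, 1])
preds = np.array([1, 0, 0, 1])
print(precision_score(labs, preds))  # tp / (tp + fp)
print(recall_score(labs, preds))     # tp / (tp + fn)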
Example 3
def cal_AP(testloader, net, criterion, device, num_obj, opt):
    '''
    Calculate Average Precision.
    Evaluation for the semantic segmentation part.
    '''
    aps = []
    with torch.no_grad():
        net.eval()
        preds = [[] for _ in range(num_obj)]
        heatmaps = [[] for _ in range(num_obj)]
        for data in tqdm(testloader):
            if opt.vertex_reg:
                # Network with the center-voting head: the batch also
                # carries vertex targets/weights and object extents.
                images, labels, vertex_targets, vertex_weights, extents = data
                images = images.to(device)
                labels = labels.long().to(device)
                extents = extents.to(device)
                output_seg, _, _ = net(images, extents)
            else:
                # Segmentation-only network.
                images, labels = data
                images = images.to(device)
                labels = labels.long().to(device)
                output_seg = net(images)
            output = output_seg.cpu().numpy()
            for c in range(num_obj):
                preds[c].append(output[:, c].reshape(-1))
                heatmaps[c].append(labels[:, c].cpu().numpy().reshape(-1))

        for c in range(num_obj):
            preds[c] = np.concatenate(preds[c])
            heatmaps[c] = np.concatenate(heatmaps[c])
            if heatmaps[c].max() == 0:
                # No positive pixels for this class: AP is undefined.
                ap = float('nan')
            else:
                ap = ap_score(heatmaps[c], preds[c])
                aps.append(ap)
            print("AP = {}".format(ap))

    return aps
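A common follow-up, not part of the original snippet, is to collapse the per-class APs into a single mean AP (mAP). A minimal sketch reusing `cal_AP` and the loader/net/opt objects from the example above:

# Hypothetical follow-up: mean AP over the per-class APs returned by
# cal_AP. np.nanmean skips NaN entries if undefined APs are kept.
import numpy as np

aps = cal_AP(testloader, net, criterion, device, num_obj, opt)
print("mAP = {:.4f}".format(np.nanmean(aps)))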
Example 4
            # `ae`, `batch_adj`, `batch_train` and `batch_f` come from the
            # surrounding training loop, which is elided in this fragment.
            res = ae.train_on_batch([batch_adj], [batch_train, batch_f])
        else:
            res = ae.train_on_batch([batch_adj], [batch_train])
        train_loss.append(res)
        curr_iter += 1
        if curr_iter >= num_iters_per_train_epoch:
            break
    train_loss = np.asarray(train_loss)
    train_loss = np.mean(train_loss, axis=0)
    print('Avg. training loss: {:s}'.format(str(train_loss)))
    print('\nEvaluating val set...')
    outputs, predictions = [], []
    # Python 3: range with floor division (the original used Python 2's
    # xrange and true division).
    for step in range(adj.shape[0] // val_batch_size + 1):
        low = step * val_batch_size
        high = low + val_batch_size
        batch_adj = adj[low:high].toarray()
        if batch_adj.shape[0] == 0:
            break
        if dataset in ['conflict', 'metabolic']:
            batch_adj = StandardScaler().fit_transform(batch_adj)
            decoded_lp = ae.predict_on_batch([batch_adj])[0]
        else:
            decoded_lp = ae.predict_on_batch([batch_adj])
        outputs.append(decoded_lp)
    decoded_lp = np.vstack(outputs)
    # Score both (r, c) and (c, r), since the adjacency is symmetric.
    predictions.extend(decoded_lp[test_r, test_c])
    predictions.extend(decoded_lp[test_c, test_r])
    print('Val AUC: {:.6f}'.format(auc_score(labels, predictions)))
    print('Val AP: {:.6f}'.format(ap_score(labels, predictions)))
print('\nAll done.')
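The `auc_score` helper in this fragment is likewise undefined; a plausible binding, again an assumption, is scikit-learn's `roc_auc_score`, used alongside the `ap_score` binding sketched after Example 1:

# Assumption: auc_score wraps sklearn.metrics.roc_auc_score.
from sklearn.metrics import roc_auc_score as auc_score

labels = [1, 0, 1, 1, 0]                 # held-out edge labels
predictions = [0.9, 0.2, 0.6, 0.8, 0.4]  # reconstructed link scores
print('Val AUC: {:.6f}'.format(auc_score(labels, predictions)))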
Example 5
import numpy as np
import matplotlib.pyplot as plt
import joblib
from sklearn import model_selection as ms
from sklearn.metrics import (accuracy_score, confusion_matrix, f1_score,
                             matthews_corrcoef, recall_score)
# Assumption: ap_score is scikit-learn's average_precision_score.
from sklearn.metrics import average_precision_score as ap_score

# `svc` (an sklearn SVC) and the train/test splits are assumed to be
# defined earlier in the script.
Cs = np.logspace(1, 5, 5)      # grid filled in from the variant below
Gammas = np.logspace(1, 5, 5)
parameters = {'C': Cs, 'gamma': Gammas}
cv = ms.StratifiedShuffleSplit(n_splits=5, test_size=0.2,
                               random_state=42)  # 5 stratified 80/20 splits
clf = ms.GridSearchCV(svc,
                      parameters,
                      scoring='average_precision',
                      cv=cv,
                      n_jobs=-1,
                      verbose=49)  # replace scoring with any metric

clf.fit(X_train, y_train)

print(clf.best_params_)  # best parameters from grid search
print(clf.best_score_)   # best cross-validated score from grid search
print(clf.score(X_test, y_test))

y_pred = clf.predict(X_test)  # predict once, reuse for every metric
print(ap_score(y_test, y_pred))
print(f1_score(y_test, y_pred))
print(matthews_corrcoef(y_test, y_pred))
print(accuracy_score(y_test, y_pred))
print(recall_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))

# model persistence
joblib.dump(clf, 'svc.joblib')

# plot
# grid_scores_ was removed in scikit-learn 0.20; cv_results_ holds the
# mean test scores in parameter-grid order (C varies slowest).
scores = np.array(clf.cv_results_['mean_test_score']).reshape(len(Cs), len(Gammas))

for ind, i in enumerate(Cs):
    plt.plot(Gammas, scores[ind], label='C: ' + str(i))
plt.legend()
plt.xlabel('Gamma')
plt.ylabel('Average precision')
# code for SVM with grid search, comment out if tree is used
Cs = np.logspace(1, 5, 5)
Gammas = np.logspace(1, 5, 5)
parameters = {'clf__C': Cs, 'clf__gamma': Gammas}  # 'clf' step of the pipeline
cv = ms.StratifiedShuffleSplit(n_splits=5, test_size=0.2,
                               random_state=42)  # 5 stratified 80/20 splits
clf = ms.GridSearchCV(model,
                      parameters,
                      scoring='average_precision',
                      cv=cv,
                      n_jobs=-1,
                      verbose=49)  # replace scoring with any metric
clf.fit(grpl_X_train, grpl_y_train)
print(clf.best_params_)  # best parameters from grid search
print(clf.best_score_)   # best cross-validated score from grid search

# GridSearchCV fits a clone of `model`, so predict through `clf`;
# the original called the unfitted `model` directly.
y_pred = clf.predict(X_test)
print(ap_score(y_test, y_pred))
print(f1_score(y_test, y_pred))
print(matthews_corrcoef(y_test, y_pred))
print(accuracy_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
print(recall_score(y_test, y_pred))

# plot
scores = np.array(clf.cv_results_['mean_test_score']).reshape(len(Cs), len(Gammas))

for ind, i in enumerate(Cs):
    plt.plot(Gammas, scores[ind], label='C: ' + str(i))
plt.legend()
plt.xlabel('Gamma')
plt.ylabel('Average precision')
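The joblib.dump call earlier persists the fitted grid search; a minimal round-trip sketch, reusing X_test/y_test from the snippet above:

# Round-trip sketch for the joblib persistence step: reload the fitted
# GridSearchCV and reuse its refit best estimator.
import joblib

clf = joblib.load('svc.joblib')
print(clf.best_params_)
print(clf.score(X_test, y_test))  # scores with the refit best estimator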