def eval_contacts(model, test_iterator, use_cuda):
    """Evaluate a contact-prediction model over a test iterator.

    Accumulates per-example logits and targets from ``predict_contacts``,
    then computes aggregate metrics over all contacts.

    Args:
        model: the contact-prediction model passed through to
            ``predict_contacts``.
        test_iterator: iterable yielding ``(x, y)`` minibatches.
        use_cuda: whether to run prediction on GPU (forwarded as-is).

    Returns:
        Tuple ``(loss, precision, recall, f1, aupr)`` of Python floats
        (``aupr`` as returned by ``average_precision``).
    """
    logits = []
    y = []
    for x, y_mb in test_iterator:
        # predict_contacts returns lists of per-example tensors
        logits_this, y_this = predict_contacts(model, x, y_mb, use_cuda)
        logits += logits_this
        y += y_this

    y = torch.cat(y, 0)
    logits = torch.cat(logits, 0)

    loss = F.binary_cross_entropy_with_logits(logits, y).item()

    # torch.sigmoid replaces F.sigmoid, which is deprecated and removed
    # in recent PyTorch releases.
    p_hat = torch.sigmoid(logits)
    # "Soft" confusion counts: probabilities are summed instead of
    # thresholded predictions.
    tp = torch.sum(y * p_hat).item()
    pr = tp / torch.sum(p_hat).item()
    re = tp / torch.sum(y).item()
    # Guard against 0/0 when both precision and recall are zero.
    f1 = 2 * pr * re / (pr + re) if (pr + re) > 0 else 0.0

    y = y.cpu().numpy()
    logits = logits.data.cpu().numpy()
    aupr = average_precision(y, logits)

    return loss, pr, re, f1, aupr
def calc_metrics(logits, y):
    """Compute binary-classification metrics from raw logits.

    Predictions are hard-thresholded at logit 0 (i.e. probability 0.5).

    Args:
        logits: numpy array of raw prediction scores.
        y: numpy array of binary {0, 1} labels, same shape as ``logits``.

    Returns:
        Tuple ``(precision, recall, F1, AUPR)`` where ``AUPR`` comes from
        ``average_precision``.
    """
    y_hat = (logits > 0).astype(np.float32)
    TP = (y_hat * y).sum()

    # Convention: with no predicted positives, precision defaults to 1.0.
    precision = 1.0
    if y_hat.sum() > 0:
        precision = TP / y_hat.sum()

    # Guard recall the same way precision is guarded: without it, an
    # all-negative y yields 0/0 -> NaN (numpy) and poisons F1.
    recall = 0.0
    if y.sum() > 0:
        recall = TP / y.sum()

    F1 = 0
    if precision + recall > 0:
        F1 = 2 * precision * recall / (precision + recall)

    AUPR = average_precision(y, logits)

    return precision, recall, F1, AUPR
def calculate_metrics(scores, y, thresholds):
    """Compute ordinal-level metrics for structural-similarity scores.

    Args:
        scores: numpy array of continuous predicted scores.
        y: numpy array of integer ground-truth levels (0-based).
        thresholds: monotonically increasing bin edges; level i is the
            bin of ``thresholds[1:]`` a score falls into, so the number
            of levels is ``len(thresholds) - 1``.

    Returns:
        Tuple ``(accuracy, r, rho, aupr)`` where ``aupr`` is a float32
        array with one average-precision score per level above 0.
    """
    ## calculate accuracy, r, rho
    # right=True: a score exactly equal to an edge falls in the lower bin
    pred_level = np.digitize(scores, thresholds[1:], right=True)
    accuracy = np.mean(pred_level == y)
    r, _ = pearsonr(scores, y)
    rho, _ = spearmanr(scores, y)

    ## calculate average-precision score for each structural level
    # Generalized from the hard-coded 4 levels: digitize against
    # thresholds[1:] yields levels 0..len(thresholds)-1, so there are
    # len(thresholds)-1 "y > i" targets (4 when len(thresholds) == 5,
    # matching the original behavior).
    n_levels = len(thresholds) - 1
    aupr = np.zeros(n_levels, dtype=np.float32)
    for i in range(n_levels):
        target = (y > i).astype(np.float32)
        aupr[i] = average_precision(target, scores.astype(np.float32))

    return accuracy, r, rho, aupr