def accuracy(self, pred_labels, true_labels):
    # The error count
    if len(pred_labels.shape) == 1:
        pred_labels = np.expand_dims(pred_labels, 1)
    if len(true_labels.shape) == 1:
        true_labels = np.expand_dims(true_labels, 1)
    err_count = np.count_nonzero(pred_labels - true_labels)

    # Overall accuracy
    scores = sk_accuracy(true_labels, pred_labels, normalize=True, sample_weight=None)

    # Initialize the mapping of labels for the confusion matrix
    unique_lbls = np.unique(true_labels)
    num_unique_lbls = len(unique_lbls)
    lbls_map = dict(enumerate(unique_lbls))
    lbls_reverse_map = dict(map(reversed, lbls_map.items()))

    # Confusion matrix
    conf_matrix = np.zeros((num_unique_lbls, num_unique_lbls))
    for i, lbl in lbls_map.items():
        # Find the indices in the true label array for the current label
        positives_idx = np.where(true_labels == lbl)[0]
        # Find the unique values in the predicted labels array for those indices,
        # along with the occurrence count of each value
        u, counts = np.unique(pred_labels[positives_idx], return_counts=True)
        # Map each predicted label back to its column index in the confusion matrix
        insert_idxs = [lbls_reverse_map[val] for val in u]
        # Insert the row-normalized counts into the confusion matrix
        conf_matrix[i, insert_idxs] = counts / np.sum(counts)

    return scores, err_count, conf_matrix
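
# Hedged usage sketch for accuracy() above; this is not from the original file.
# It assumes `np` is numpy and `sk_accuracy` is sklearn.metrics.accuracy_score
# (imports repeated here for self-containment). Since the `self` parameter is
# unused by the method body, None is passed for it directly.
import numpy as np
from sklearn.metrics import accuracy_score as sk_accuracy


def _demo_accuracy():
    preds = np.array([0, 1, 1, 2])
    truth = np.array([0, 1, 2, 2])
    scores, err_count, conf_matrix = accuracy(None, preds, truth)
    # scores == 0.75, err_count == 1, and each confusion-matrix row sums to 1.0
    print(scores, err_count)
    print(conf_matrix)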
def _sk_accuracy(preds, target, subset_accuracy):
    # Bring preds/target into the canonical classification format, then score with sklearn
    sk_preds, sk_target, mode = _input_format_classification(preds, target, threshold=THRESHOLD)
    sk_preds, sk_target = sk_preds.numpy(), sk_target.numpy()

    if mode == DataType.MULTIDIM_MULTICLASS and not subset_accuracy:
        # Treat every position of every sample as an independent prediction
        sk_preds, sk_target = np.transpose(sk_preds, (0, 2, 1)), np.transpose(sk_target, (0, 2, 1))
        sk_preds, sk_target = sk_preds.reshape(-1, sk_preds.shape[2]), sk_target.reshape(-1, sk_target.shape[2])
    elif mode == DataType.MULTIDIM_MULTICLASS and subset_accuracy:
        # Subset accuracy: a sample counts as correct only if all of its positions match
        return np.all(sk_preds == sk_target, axis=(1, 2)).mean()
    elif mode == DataType.MULTILABEL and not subset_accuracy:
        # Score each label of each sample independently
        sk_preds, sk_target = sk_preds.reshape(-1), sk_target.reshape(-1)

    return sk_accuracy(y_true=sk_target, y_pred=sk_preds)
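
# Standalone illustration (an assumption, not part of the original tests) of the
# subset_accuracy branch above: a multi-dimensional multiclass sample only counts
# as correct when every position within it matches.
import numpy as np


def _demo_subset_accuracy():
    demo_preds = np.array([[[0, 1], [1, 0]],   # sample 0: every position matches
                           [[1, 0], [0, 1]]])  # sample 1: one position differs
    demo_target = np.array([[[0, 1], [1, 0]],
                            [[1, 1], [0, 1]]])
    # Same reduction as the MULTIDIM_MULTICLASS + subset_accuracy branch
    assert np.all(demo_preds == demo_target, axis=(1, 2)).mean() == 0.5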
def test_same_input(average):
    preds = _input_miss_class.preds
    target = _input_miss_class.target
    preds_flat = torch.cat(list(preds), dim=0)
    target_flat = torch.cat(list(target), dim=0)

    # Accumulate batch by batch with the class-based metric...
    mc = Accuracy(num_classes=NUM_CLASSES, average=average)
    for i in range(NUM_BATCHES):
        mc.update(preds[i], target[i])
    class_res = mc.compute()

    # ...and compare against the functional metric and sklearn on the flattened inputs
    func_res = accuracy(preds_flat, target_flat, num_classes=NUM_CLASSES, average=average)
    sk_res = sk_accuracy(target_flat, preds_flat)

    assert torch.allclose(class_res, torch.tensor(sk_res).float())
    assert torch.allclose(func_res, torch.tensor(sk_res).float())
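
# Hedged note (not from the original suite): in a pytest file, `average` for
# test_same_input would typically come from a parametrize decorator, e.g.
# @pytest.mark.parametrize("average", ["micro", "macro", "weighted"]); the value
# list here is an assumption. The self-contained sketch below checks the idea
# the test relies on: accumulating correct predictions batch by batch gives the
# same accuracy as scoring the batches concatenated along dim 0.
import torch
from sklearn.metrics import accuracy_score


def _demo_flatten_equivalence():
    batched_preds = [torch.tensor([0, 1, 2]), torch.tensor([2, 1, 0])]
    batched_target = [torch.tensor([0, 1, 1]), torch.tensor([2, 1, 0])]
    # Per-batch accumulation of correct predictions...
    correct = sum(int((p == t).sum()) for p, t in zip(batched_preds, batched_target))
    total = sum(len(t) for t in batched_target)
    # ...matches scoring the flattened stream in one call
    flat_preds = torch.cat(batched_preds, dim=0)
    flat_target = torch.cat(batched_target, dim=0)
    assert abs(correct / total - accuracy_score(flat_target.numpy(), flat_preds.numpy())) < 1e-12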