import torch


def val():
    """Validate the model; return mean batch accuracy and per-class accuracy."""
    myresnet_model.eval()
    val_acc = 0
    val_loss = 0
    with torch.no_grad():
        confusion_matrix = torch.zeros(nb_classes, nb_classes)
        for inputs, labels, _ in valloader:
            inputs, labels = inputs.to(device), labels.to(device)
            logps = myresnet_model(inputs)  # call the module directly rather than .forward()
            batch_loss = criterion(logps, labels)
            val_loss += batch_loss.item()

            ps = torch.exp(logps)  # log-probabilities -> probabilities
            top_p, top_class = ps.topk(1, dim=1)
            equals = top_class == labels.view(*top_class.shape)
            val_acc += torch.mean(equals.type(torch.FloatTensor)).item()

            # Accumulate the confusion matrix: rows are true labels, columns predictions.
            # (np.long was removed from NumPy; tensor.long() does the job.)
            for t, p in zip(labels.view(-1), top_class.view(-1)):
                confusion_matrix[t.long(), p.long()] += 1

    print('confusion_matrix: ', confusion_matrix)
    per_class_acc = confusion_matrix.diag() / confusion_matrix.sum(1)
    print('per_class_acc: ', per_class_acc)
    return val_acc / len(valloader), per_class_acc
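# Aside: the per-sample Python loop above can be vectorized. A minimal sketch;
# the helper name update_confusion_matrix is ours, not from the original code.
import torch

def update_confusion_matrix(cm, labels, preds, nb_classes):
    # Encode each (true, predicted) pair as a single index and histogram them
    # with bincount; rows of the result are true labels, columns predictions.
    idx = labels.view(-1).long() * nb_classes + preds.view(-1).long()
    cm += torch.bincount(idx, minlength=nb_classes ** 2).reshape(nb_classes, nb_classes).float()
    return cm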
def show_per_class_accuracy(self):
    """Print per-class accuracy (in percent) over the test set."""
    confusion_matrix = torch.zeros(self.config['num_classes'], self.config['num_classes'])
    with torch.no_grad():
        for inputs, classes in self.test_loader:
            inputs = inputs.to(self.device)
            classes = classes.to(self.device)
            outputs = self.net(inputs)
            _, preds = torch.max(outputs, 1)
            for t, p in zip(classes.view(-1), preds.view(-1)):
                confusion_matrix[t.long(), p.long()] += 1
    # Per-class accuracy = diagonal (correct) over row sums (all samples of that class).
    class_acc = (100. * confusion_matrix.diag() / confusion_matrix.sum(1)).cpu().numpy()
    for class_name, acc_score in zip(self.classes, class_acc):
        print(f"{class_name}\t\t{acc_score:.4f}")
else:
    erros = erros + 1  # 'erros' counts misclassified samples; 'acertos' counts correct ones

print('acertos: {}\nerros: {}'.format(acertos, erros), file=open(filename, "a"))

nb_classes = 2
confusion_matrix = torch.zeros(nb_classes, nb_classes)
with torch.no_grad():
    for inputs, classes in test_loader:
        inputs = inputs.to(device)
        classes = classes.to(device)
        outputs = model(inputs)
        _, preds = torch.max(outputs, 1)
        for t, p in zip(classes.view(-1), preds.view(-1)):
            confusion_matrix[t.long(), p.long()] += 1

print(confusion_matrix, file=open(filename, "a"))
print(confusion_matrix.diag() / confusion_matrix.sum(1), file=open(filename, "a"))
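# Cross-check sketch: the same matrix and per-class accuracies via scikit-learn.
# The labels/predictions here are made up for illustration; the import is aliased
# so it is not shadowed by the tensor named confusion_matrix above.
from sklearn.metrics import confusion_matrix as sk_confusion_matrix

y_true = [0, 0, 1, 1, 1]   # illustrative ground truth
y_pred = [0, 1, 1, 1, 0]   # illustrative predictions
cm = sk_confusion_matrix(y_true, y_pred)  # rows: true class, columns: predicted class
print(cm)                                 # [[1 1] [1 2]]
print(cm.diagonal() / cm.sum(axis=1))     # per-class accuracy: [0.5, 0.6667]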
def calculate_accuracy(confusion_matrix):
    assert confusion_matrix is not None
    return confusion_matrix.diag().sum() / confusion_matrix.sum()
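# Usage sketch for calculate_accuracy, with a made-up 2x2 confusion matrix
# (40 + 45 correct predictions out of 100 samples):
import torch

cm = torch.tensor([[40., 10.],
                   [5., 45.]])
print(calculate_accuracy(cm))  # tensor(0.8500)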
# Top-k accuracy: a sample counts as correct if its true class appears
# among the top-k predicted classes.
topk_list = output_topk[1].cpu().detach().numpy().tolist()
classes_list = classes.cpu().detach().numpy().tolist()
for cl in range(len(classes_list)):
    if classes_list[cl] in topk_list[cl]:
        correct_topk += 1

predicted_labels.append(preds.cpu().detach().numpy().tolist())
for t, p in zip(classes.view(-1), preds.view(-1)):
    confusion_matrix[t.long(), p.long()] += 1

cm = confusion_matrix.detach().numpy().astype('int')
print(cm)

per_class_accuracies = (confusion_matrix.diag() / confusion_matrix.sum(1)).cpu().detach().numpy().tolist()
print(','.join("{:2.04f}".format(x) for x in per_class_accuracies))

total_correct = 0
total = 0
for i in range(nb_classes):
    total_correct += int(confusion_matrix[i][i].item())
    total += int(confusion_matrix.sum(dim=1)[i].item())
    print("class {:d} --> accuracy: {:.2f}, correct predictions: {:d}, all: {:d}".format(
        i + 1,
        ((confusion_matrix.diag() / confusion_matrix.sum(1))[i] * 100).item(),
        int(confusion_matrix[i][i].item()),
        int(confusion_matrix.sum(dim=1)[i].item())))
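# Caveat: confusion_matrix.diag() / confusion_matrix.sum(1) yields NaN for any
# class with no samples (a zero row). A minimal guard sketch; the helper name
# per_class_accuracy is ours, not from the original code.
import torch

def per_class_accuracy(cm):
    row_sums = cm.sum(dim=1)
    # Empty classes report 0 instead of NaN; clamp avoids division by zero.
    return torch.where(row_sums > 0, cm.diag() / row_sums.clamp(min=1), torch.zeros_like(row_sums))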
from sklearn.metrics import classification_report, roc_auc_score

labels = data.y.to(device)
labels = labels.item()  # batch size 1: extract the scalar label
index = output.data.detach().cpu().numpy().argmax()
predictions1.append(index)
lbls1.append(labels)

# initialize with 2 classes
nb_classes = 2
confusion_matrix = torch.zeros(nb_classes, nb_classes)

# build the confusion matrix from the collected labels and predictions
for t, p in zip(lbls1, predictions1):
    confusion_matrix[t, p] += 1
print('confusion_matrix: ', confusion_matrix)

per_class_acc = confusion_matrix.diag() / confusion_matrix.sum(1)
print('per_class_acc: ', per_class_acc)

# ROC AUC (the task is binary, so hard label predictions are acceptable input)
roc = roc_auc_score(lbls1, predictions1)
print('roc cg: ', roc)

# build and print the classification report
report = classification_report(lbls1, predictions1)
print(report)


"""
Model Second: sparse graph model with cosine sim
"""


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()