import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms

import metrics  # project-local module providing get_confuse_matrix()

# model, criterion, test_loader, writer, args, log_file and f are expected to
# be defined by the surrounding training script.


def test(epoch):
    model.eval()
    test_loss = 0
    correct = 0
    target_arr = []
    predict_arr = []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(test_loader):
            data, target = data.cuda(), target.cuda()
            output = model(data)
            test_loss += criterion(output, target).item()
            _, pred = output.data.max(1)
            batch_correct = pred.eq(target).sum().item()
            correct += batch_correct
            predict_arr.append(pred.cpu().numpy())
            target_arr.append(target.data.cpu().numpy())
            # Log per-batch accuracy to TensorBoard.
            writer.add_scalar('/Acc', 100 * float(batch_correct) / data.size(0),
                              epoch * len(test_loader) + batch_idx)

    # Save this epoch's confusion matrix.
    cm_path = './' + str(epoch) + '_confusematrix'
    cm = metrics.get_confuse_matrix(predict_arr, target_arr)
    np.save(cm_path, cm)

    test_loss /= len(test_loader)
    context = 'Test set: Average loss: {:.6f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / float(len(test_loader.dataset)))
    print(context)
    log_file.write(context + '\r\n')
def test(epoch):
    model.eval()
    test_loss, correct = 0, 0
    target_arr, predict_arr = [], []
    # Per-class counters: ground-truth totals, correct hits and prediction totals.
    data1 = [0] * args.num_classes
    data2 = [0] * args.num_classes
    data3 = [0] * args.num_classes
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(test_loader):
            data, target = data.cuda(), target.cuda()
            output = model(data)
            test_loss += criterion(output, target).item()
            _, pred = output.data.max(1)
            for k in range(target.size(0)):
                data1[target[k]] += 1
                data3[pred[k]] += 1
                if target[k] == pred[k]:
                    data2[target[k]] += 1
            batch_correct = pred.eq(target).sum().item()
            correct += batch_correct
            predict_arr.append(pred.cpu().numpy())
            target_arr.append(target.data.cpu().numpy())
            writer.add_scalar('/Acc', 100 * float(batch_correct) / data.size(0),
                              epoch * len(test_loader) + batch_idx)

    cm_path = args.check_path + '/' + str(epoch) + '_confusematrix'
    cm = metrics.get_confuse_matrix(predict_arr, target_arr)
    np.save(cm_path, cm)

    # Per-class recall and precision.
    for j in range(args.num_classes):
        recall = 0 if data1[j] == 0 else data2[j] / data1[j]
        precision = 0 if data3[j] == 0 else data2[j] / data3[j]
        context = 'Class%1s: recall is %.2f%% (%d in %d), precision is %.2f%% (%d in %d)' % (
            str(j), 100 * recall, data2[j], data1[j],
            100 * precision, data2[j], data3[j])
        print(context)
        f.write(context + '\r\n')

    test_loss /= len(test_loader)
    context = 'Test set: Average loss: {:.6f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / float(len(test_loader.dataset)))
    print(context)
    f.write(context + '\r\n')
def validate(epoch, model, clf, args, f, writer, test=True):
    model.eval()
    transform = transforms.Compose([
        transforms.Resize(32),
        transforms.ToTensor(),
        transforms.Normalize(mean=np.array([0.485, 0.456, 0.406]),
                             std=np.array([0.229, 0.224, 0.225])),
    ])
    test_set = torchvision.datasets.ImageFolder(
        root=args.test_set if test else args.train_set, transform=transform)
    test_loader = torch.utils.data.DataLoader(
        test_set, batch_size=args.test_batch_size, shuffle=True)

    correct, total = 0, 0
    target_arr, predict_arr = [], []
    fea_list, label_list = [], []
    # Per-class counters: ground-truth totals, correct hits and prediction totals.
    data1 = [0] * args.num_classes
    data2 = [0] * args.num_classes
    data3 = [0] * args.num_classes

    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(test_loader):
            data, target = data.cuda(), target.cuda()
            output = model(data)
            # Classify the extracted features with the external classifier.
            predicted = clf.predict(output.data.cpu().numpy())
            predict_arr.append(predicted)
            target_arr.append(target.data.cpu().numpy())
            fea_list.append(output.data.cpu())
            label_list.append(target.data.cpu().float())
            for k in range(target.size(0)):
                data1[target[k]] += 1
                data3[predicted[k]] += 1
                if target[k] == predicted[k]:
                    data2[target[k]] += 1
            correct += (torch.tensor(predicted) == target.data.cpu()).sum().item()
            total += target.size(0)
            writer.add_scalar('/Acc', 100. * correct / float(total),
                              epoch * len(test_loader) + batch_idx)

    # Project the collected features to TensorBoard: even global steps hold the
    # test split, odd steps the train split.
    fea = torch.cat(fea_list)
    l = torch.cat(label_list)
    k_n = (epoch * 2) if test else (epoch * 2 + 1)
    writer.add_embedding(mat=fea, metadata=l, global_step=k_n)

    prefix = 'test' if test else 'train'
    cm_path = args.check_path + '/' + prefix + str(epoch) + '_confusematrix'
    cm = metrics.get_confuse_matrix(predict_arr, target_arr)
    np.save(cm_path, cm)

    for j in range(args.num_classes):
        recall = 0 if data1[j] == 0 else data2[j] / data1[j]
        precision = 0 if data3[j] == 0 else data2[j] / data3[j]
        context = 'Class%1s: recall is %.2f%% (%d in %d), precision is %.2f%% (%d in %d)' % (
            str(j), 100 * recall, data2[j], data1[j],
            100 * precision, data2[j], data3[j])
        print(context)
        f.write(context + '\r\n')

    context = 'Accuracy of model in ' + ('test' if test else 'train') + \
        ' set is {}/{}({:.2f}%)'.format(correct, total,
                                        100. * float(correct) / float(total))
    f.write(context + '\r\n')
    print(context)
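# ---------------------------------------------------------------------------
# Minimal, hypothetical usage sketch for validate(). Nothing below comes from
# the original project: the tiny feature extractor, the sklearn KNN classifier
# and the argparse fields are illustrative assumptions. It reuses the imports
# at the top of this file and only runs if args.train_set / args.test_set
# point at ImageFolder-style directories and a CUDA device is available.
# ---------------------------------------------------------------------------
# import argparse
# import torch.nn as nn
# from sklearn.neighbors import KNeighborsClassifier
# from torch.utils.tensorboard import SummaryWriter
#
# args = argparse.Namespace(
#     train_set='./data/train', test_set='./data/test',
#     test_batch_size=64, num_classes=10, check_path='./checkpoints')
#
# # Stand-in embedding network producing 64-dimensional features.
# model = nn.Sequential(nn.Conv2d(3, 16, 3, stride=2), nn.ReLU(),
#                       nn.AdaptiveAvgPool2d(1), nn.Flatten(),
#                       nn.Linear(16, 64)).cuda()
#
# # Extract features from the train split to fit the external classifier.
# transform = transforms.Compose([
#     transforms.Resize(32), transforms.ToTensor(),
#     transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
# train_loader = torch.utils.data.DataLoader(
#     torchvision.datasets.ImageFolder(args.train_set, transform=transform),
#     batch_size=args.test_batch_size, shuffle=False)
# feats, labels = [], []
# model.eval()
# with torch.no_grad():
#     for x, y in train_loader:
#         feats.append(model(x.cuda()).cpu().numpy())
#         labels.append(y.numpy())
# clf = KNeighborsClassifier(n_neighbors=5)
# clf.fit(np.concatenate(feats), np.concatenate(labels))
#
# # Evaluate on both splits; validate() logs per-class stats to the file.
# writer = SummaryWriter(args.check_path)
# with open(args.check_path + '/validate_log.txt', 'a') as f:
#     validate(0, model, clf, args, f, writer, test=True)
#     validate(0, model, clf, args, f, writer, test=False)
# writer.close()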