def test_loop_fn(loader):
    """Run one evaluation pass over ``loader`` and return top-1 accuracy (%).

    Relies on module-level globals: ``model`` (the network under test),
    ``device`` (used only for reporting), and ``test_utils`` -- TODO confirm
    these are defined at module scope.

    Args:
        loader: iterable yielding ``(step, (data, target))`` pairs --
            presumably an enumerating/parallel loader; the step index is
            unused here.

    Returns:
        float: 100 * correct / total_samples, or 0.0 for an empty loader.
    """
    total_samples = 0
    correct = 0
    model.eval()
    for _, (data, target) in loader:
        output = model(data)
        # Index of the max score along the class dimension (top-1 prediction).
        pred = output.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).sum().item()
        total_samples += data.size(0)
    # Guard against an empty loader to avoid ZeroDivisionError.
    accuracy = 100.0 * correct / total_samples if total_samples else 0.0
    test_utils.print_test_update(device, accuracy)
    return accuracy
def test_loop_fn(model, loader, device, context):
    """Run one evaluation pass and return per-element accuracy (%).

    Args:
        model: network under test, called as ``model(data)``.
        loader: iterable yielding ``(data, target)`` where ``target`` is
            itself a pair ``(images, labels)``; the first unpacked element
            is ignored. NOTE(review): confirm this loader contract against
            the caller -- the original code discards ``data`` immediately.
        device: device identifier, used only for printing/logging.
        context: unused here; presumably kept to match the training-loop
            call signature.

    Returns:
        float: 100 * correct / total element count, or 0.0 for an empty
        loader.
    """
    total_samples = 0
    correct = 0
    model.eval()
    for data, target in loader:
        # The loader packs (images, labels) into `target`; images arrive
        # channels-last (NHWC) and are permuted to NCHW for the model.
        data = target[0].permute(0, 3, 1, 2)
        target = target[1]
        output = model(data)
        _, preds = torch.max(output, 1)
        preds = preds.float()
        correct += preds.eq(target.view_as(preds)).sum().item()
        # BUG FIX: the original added target.shape[1]**2, which assumes a
        # batch size of 1 and square targets, while `correct` counts every
        # compared element -- count all elements so the ratio is consistent.
        total_samples += target.numel()
        print('device: {}, Running Accuracy: {}'.format(device, correct/total_samples))
    # Guard against an empty loader to avoid ZeroDivisionError.
    accuracy = 100.0 * correct / total_samples if total_samples else 0.0
    test_utils.print_test_update(device, accuracy)
    logger.info('TEST: device: {}, accuracy: {}'.format(device, accuracy))
    return accuracy