def validate(val_loader, model, criterion, adaptation):
    """Evaluate `model` over `val_loader`, log aggregate metrics, return per-class accuracy.

    Args:
        val_loader: iterable yielding (images, labels) mini-batches.
        model: project model exposing `test(adaptation, images)` and an
            `.output` attribute holding the resulting predictions.
        criterion: loss function applied as `criterion(output, target)`.
        adaptation: forwarded verbatim to `model.test` — semantics are defined
            by the project model (TODO confirm with model implementation).

    Returns:
        `matrix.all_acc()` — per-class accuracies from the confusion matrix.
    """
    run_time = time.time()
    matrix = ConfusionMatrix(args['label_nums'])
    loss = 0
    # Inference-only pass: `torch.no_grad()` replaces the removed
    # `Variable(..., volatile=True)` API, and `non_blocking=True` replaces
    # `async=True`, which is a syntax error on Python 3.7+ (`async` became
    # a reserved keyword).
    with torch.no_grad():
        for i, (images, labels) in enumerate(val_loader):
            labels = labels.cuda(non_blocking=True)
            model.test(adaptation, images)
            output = model.output
            # Accumulate batch loss scaled by batch size (original scheme kept).
            loss += criterion(output, labels) / args['batch_size']
            matrix = update_confusion_matrix(matrix, output.data, labels)
    loss /= (i + 1)  # average over the number of batches seen
    run_time = time.time() - run_time
    logger.info('=================================================')
    logger.info('val:'
                'loss: {0:.4f}\t'
                'accuracy: {1:.4f}\t'
                'fg_accuracy: {2:.4f}\t'
                'avg_precision: {3:.4f}\t'
                'avg_recall: {4:.4f}\t'
                'avg_f1score: {5:.4f}\t'
                'run_time:{run_time:.2f}\t'
                # `.item()` replaces `loss.data[0]`, which raises on modern
                # PyTorch 0-dim tensors.
                .format(loss.item(), matrix.accuracy(), matrix.fg_accuracy(),
                        matrix.avg_precision(), matrix.avg_recall(),
                        matrix.avg_f1score(), run_time=run_time))
    logger.info('=================================================')
    return matrix.all_acc()
def main():
    """Evaluate a pretrained deeplabG1G2 checkpoint on the domain-B val split.

    Builds a non-shuffled DataLoader over domain B, restores the checkpoint,
    runs inference batch by batch into a confusion matrix, and prints the
    final per-class accuracies and F1 scores.
    """
    # TODO(review): absolute, machine-specific checkpoint path — should come
    # from `args` / CLI; hoisted to a named constant so it is at least visible.
    checkpoint_path = (
        '/home/ben/mathfinder/PROJECT/AAAI2017/our_Method/v3/'
        'deeplab_feature_adaptation/checkpoints/'
        'Lip_to_July_lr_g1=0.00001_lr_g2=0.00000002_interval_g1=5_'
        'interval_d1=5_net_D=lsganMultOutput_D_if_adaptive=True_'
        'resume_decay=g2/best_Ori_on_B_model.pth'
    )
    if len(args['device_ids']) > 0:
        torch.cuda.set_device(args['device_ids'][0])
    test_loader = data.DataLoader(
        imageLabelLoader(args['data_path'], dataName=args['domainB'], phase='val'),
        batch_size=args['batch_size'],
        num_workers=args['num_workers'],
        shuffle=False)  # deterministic order for evaluation
    gym = deeplabG1G2()
    gym.initialize(args)
    gym.load(checkpoint_path)
    gym.eval()
    matrix = ConfusionMatrix(args['label_nums'])
    # Inference-only: `torch.no_grad()` replaces the removed
    # `Variable(..., volatile=True)` API; `non_blocking=True` replaces
    # `async=True`, a syntax error on Python 3.7+.
    with torch.no_grad():
        for i, (image, label) in enumerate(test_loader):
            label = label.cuda(non_blocking=True)
            gym.test(image)
            output = gym.output
            matrix = update_confusion_matrix(matrix, output.data, label)
    # NOTE(review): the collapsed original makes the print placement ambiguous;
    # printed once after the loop as final aggregate metrics — confirm intent.
    print(matrix.all_acc())
    print(matrix.f1score())