def evaluate_withpq(model, data_loader, device,
                    features_path='resnet_imagenet_features/backbone.7.0_test_reconstructed.h5'):
    """Evaluate ``model`` on COCO, feeding it precomputed (quantized) backbone
    features loaded from an HDF5 file keyed by image id.

    Args:
        model: detection model whose forward accepts ``(images, imagepq, targets)``.
        data_loader: iterable yielding ``(images, targets)`` batches; its
            ``.dataset`` must be convertible via ``get_coco_api_from_dataset``.
        device: torch device the targets and precomputed features are moved to.
        features_path: path to the HDF5 file of precomputed features, one
            dataset per image id (defaults to the reconstructed test features).

    Returns:
        The populated ``CocoEvaluator`` after ``accumulate``/``summarize``.
    """
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter=" ")
    header = 'Test:'
    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)
    # no_grad: evaluation only — without it autograd records every forward
    # pass (the sibling evaluate() already does this).  The `with` block also
    # guarantees the HDF5 handle is closed even if the loop raises.
    with torch.no_grad(), h5py.File(features_path, 'r') as test_data_pkl:
        for images, targets in tqdm(data_loader, desc=header):
            # NOTE(review): images are deliberately NOT moved to `device`
            # here (unlike evaluate()) — presumably the model handles that
            # itself for this code path; confirm against the model's forward.
            images = list(image for image in images)
            targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
            # One precomputed feature tensor per image; assumes batch size 1
            # (only targets[0] is consulted) — TODO confirm loader batch size.
            image_id = targets[0]['image_id'].item()
            quantized_x = torch.from_numpy(test_data_pkl[str(image_id)][()])
            imagepq = quantized_x.to(device)
            torch.cuda.synchronize()
            model_time = time.time()
            outputs = model(images, imagepq, targets)
            outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
            model_time = time.time() - model_time
            res = {
                target["image_id"].item(): output
                for target, output in zip(targets, outputs)
            }
            evaluator_time = time.time()
            coco_evaluator.update(res)
            evaluator_time = time.time() - evaluator_time
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()
    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    coco_evaluator.summarize_per_category()
    torch.set_num_threads(n_threads)
    return coco_evaluator
def evaluate(model, data_loader, device):
    """Run COCO-style evaluation of ``model`` over ``data_loader``.

    Moves each batch to ``device``, collects per-image predictions on the CPU,
    feeds them to a ``CocoEvaluator``, and prints the summarized metrics.

    Args:
        model: detection model whose forward accepts ``(images, targets)``.
        data_loader: iterable of ``(images, targets)`` batches; its
            ``.dataset`` must be convertible via ``get_coco_api_from_dataset``.
        device: torch device inputs are moved to for inference.

    Returns:
        The populated ``CocoEvaluator`` after ``accumulate``/``summarize``.
    """
    with torch.no_grad():
        saved_threads = torch.get_num_threads()
        # FIXME remove this and make paste_masks_in_image run on the GPU
        torch.set_num_threads(1)
        cpu_device = torch.device("cpu")
        model.eval()
        metric_logger = utils.MetricLogger(delimiter=" ")
        header = 'Test:'
        coco = get_coco_api_from_dataset(data_loader.dataset)
        iou_types = _get_iou_types(model)
        coco_evaluator = CocoEvaluator(coco, iou_types)
        for images, targets in tqdm(data_loader, desc=header):
            images = [img.to(device) for img in images]
            targets = [
                {key: val.to(device) for key, val in tgt.items()}
                for tgt in targets
            ]
            torch.cuda.synchronize()
            model_time = time.time()
            outputs = model(images, targets)
            # Predictions are moved to the CPU before being handed to pycocotools.
            outputs = [
                {key: val.to(cpu_device) for key, val in out.items()}
                for out in outputs
            ]
            model_time = time.time() - model_time
            res = {}
            for tgt, out in zip(targets, outputs):
                res[tgt["image_id"].item()] = out
            evaluator_time = time.time()
            coco_evaluator.update(res)
            evaluator_time = time.time() - evaluator_time
        # gather the stats from all processes
        metric_logger.synchronize_between_processes()
        print("Averaged stats:", metric_logger)
        coco_evaluator.synchronize_between_processes()
        # accumulate predictions from all images, then report
        coco_evaluator.accumulate()
        coco_evaluator.summarize()
        coco_evaluator.summarize_per_category()
        torch.set_num_threads(saved_threads)
        return coco_evaluator