def test(
    dataloader,
    model,
    save_dir="",
    device="cuda",
    distributed=False,
):
    if distributed:
        # Unwrap the DistributedDataParallel wrapper to get the underlying model.
        model = model.module
    dataset = dataloader.dataset
    device = torch.device(device)
    num_devices = get_world_size()

    # Run inference on this process's shard, then gather results from all GPUs.
    detections = compute_on_dataset(model, dataloader, device)
    synchronize()
    predictions = _accumulate_predictions_from_multiple_gpus(detections)

    # Only the main process runs the dataset-specific evaluation.
    if not is_main_process():
        return
    return dataset.evaluation(predictions, str(save_dir))
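# _accumulate_predictions_from_multiple_gpus() is called above but not defined in
# this section. The sketch below is an assumption about the pattern such a helper
# typically follows (gather each rank's detections, then merge), not the actual
# implementation in this codebase, which may use its own comm utilities rather
# than torch.distributed.all_gather_object. It assumes the per-rank detections
# are a dict keyed by sample index.
def _accumulate_predictions_sketch(detections_per_gpu):
    import torch.distributed as dist

    if not dist.is_available() or not dist.is_initialized():
        return detections_per_gpu
    world_size = dist.get_world_size()
    if world_size == 1:
        return detections_per_gpu
    gathered = [None] * world_size
    # Collect every rank's {sample_index: detection} dict onto all ranks.
    dist.all_gather_object(gathered, detections_per_gpu)
    merged = {}
    for part in gathered:
        merged.update(part)
    return merged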
def test(
    dataloader,
    model,
    save_dir="",
    device="cuda",
    distributed=False,
):
    # Debugging variant of test() above: instead of running inference, it loads
    # cached detections and a locally saved result.pkl and evaluates those.
    if distributed:
        model = model.module
    dataset = dataloader.dataset
    device = torch.device(device)
    num_devices = get_world_size()

    # detections = compute_on_dataset(model, dataloader, device)
    # torch.save(detections, "/home/jty/pcdet/final_predictions_debug.pkl")
    detections = torch.load("/home/jty/pcdet/final_predictions_debug.pkl")

    import pickle
    # with open(
    #         '/home/jty/pcdet/pvrcnn/OpenLidarPerceptron/output/cfgs/kitti_models/pvrcnn_deecamp/default/eval/epoch_24/val/default/result.pkl',
    #         'rb') as input_file:
    #     dt = pickle.load(input_file)
    with open('/home/jty/Downloads/result.pkl', 'rb') as input_file:
        dt = pickle.load(input_file)

    synchronize()
    # predictions = _accumulate_predictions_from_multiple_gpus(dt)
    # predictions = {}
    if not is_main_process():
        return
    return dataset.evaluation(dt, str(save_dir))
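# Hypothetical usage sketch (build_model / build_dataloader are assumed helper
# names, not part of this file): evaluate a trained model on the validation set
# and write metrics under save_dir.
#
#     model = build_model(cfg).to("cuda")
#     val_loader = build_dataloader(cfg, split="val")
#     metrics = test(val_loader, model, save_dir="output/eval", distributed=False)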