import torch

# NOTE: Timer, Evaluation, is_main_process, all_gather and logging_rank are
# project-local helpers; the import paths below are assumptions and may need
# adjusting to this repository's layout.
from utils.timer import Timer
from utils.comm import is_main_process, all_gather
from utils.logger import logging_rank
from evaluation import Evaluation


def _accumulate_predictions_from_multiple_gpus(predictions_per_gpu, logger):
    if is_main_process():
        logger.info("Accumulating ...")
    # Gather every rank's prediction list; each rank receives the full set.
    all_predictions = all_gather(predictions_per_gpu)
    # Only the main process merges and returns; other ranks return None.
    if not is_main_process():
        return
    predictions = []
    for p in all_predictions:
        predictions.extend(p)
    return predictions
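# For reference, a minimal sketch of what an `all_gather` helper like the one
# used above can look like, built on torch.distributed.all_gather_object
# (available in PyTorch >= 1.8). This is an assumption about the helper's
# behavior, not the project's actual implementation, which may serialize
# tensors manually instead.
import torch.distributed as dist


def _all_gather_sketch(data):
    # Single-process (or uninitialized) case: nothing to exchange.
    if not dist.is_available() or not dist.is_initialized():
        return [data]
    world_size = dist.get_world_size()
    if world_size == 1:
        return [data]
    # Exchange an arbitrary picklable object with every other rank.
    gathered = [None] * world_size
    dist.all_gather_object(gathered, data)
    return gathered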
def test(cfg, test_engine, loader, datasets, all_hooks):
    total_timer = Timer()
    total_timer.tic()
    # One running result list per task (e.g. boxes / masks / keypoints / parsing).
    all_results = [[] for _ in range(4)]
    evaluator = Evaluation(cfg)  # renamed from `eval` to avoid shadowing the builtin
    with torch.no_grad():
        num_iters = len(loader)
        loader = iter(loader)
        for i in range(num_iters):
            all_hooks.iter_tic()

            all_hooks.data_tic()
            inputs, targets, idx = next(loader)
            all_hooks.data_toc()

            all_hooks.infer_tic()
            result = test_engine(inputs, targets)
            all_hooks.infer_toc()

            all_hooks.post_tic()
            eval_results = evaluator.post_processing(result, targets, idx, datasets)
            # Append this batch's per-task results to the running lists.
            all_results = [results + eva for results, eva in zip(all_results, eval_results)]
            all_hooks.post_toc()

            all_hooks.iter_toc()
            if is_main_process():
                all_hooks.log_stats(i, 0, num_iters, len(datasets))

    # Gather per-rank results, transpose so results are grouped by task, then
    # flatten each task's per-rank lists into a single list.
    all_results = list(zip(*all_gather(all_results)))
    all_results = [[item for sublist in results for item in sublist]
                   for results in all_results]
    if is_main_process():
        # toc(average=False) returns the elapsed wall-clock time for this tic/toc pair.
        total_time = total_timer.toc(average=False)
        logging_rank('Total inference time: {:.3f}s'.format(total_time))
        evaluator.evaluation(datasets, all_results)
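# A hedged sketch of the hook protocol test() relies on: paired *_tic/*_toc
# calls around data loading, inference and post-processing, plus a periodic
# log_stats. Everything below is illustrative; the project's real hook class
# (its name, internals and log format) is an assumption, only the call
# signatures mirror the call sites in test().
import time
from collections import defaultdict


class SimpleTestHooks:
    def __init__(self):
        self._start = {}
        self.totals = defaultdict(float)

    def _tic(self, key):
        self._start[key] = time.perf_counter()

    def _toc(self, key):
        self.totals[key] += time.perf_counter() - self._start[key]

    def iter_tic(self):  self._tic('iter')
    def iter_toc(self):  self._toc('iter')
    def data_tic(self):  self._tic('data')
    def data_toc(self):  self._toc('data')
    def infer_tic(self): self._tic('infer')
    def infer_toc(self): self._toc('infer')
    def post_tic(self):  self._tic('post')
    def post_toc(self):  self._toc('post')

    def log_stats(self, cur_idx, start_idx, total_num, img_num):
        # Minimal progress line; a real implementation would likely report
        # per-iteration averages and an ETA as well.
        print('[{}/{}] iter {:.3f}s (data {:.3f}s, infer {:.3f}s, post {:.3f}s)'
              .format(cur_idx + 1, total_num, self.totals['iter'],
                      self.totals['data'], self.totals['infer'],
                      self.totals['post']))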