def main():
    """Run single-/multi-view pose evaluation for a tless/ycbv config.

    Parses CLI arguments, selects pretrained coarse/refiner run ids for the
    requested object set, loads the scene dataset and models, runs
    (optionally multi-view) pose predictions, evaluates them, and — on rank
    0 — writes metric summaries and raw results to a fresh save directory.
    """
    # Raise verbosity of all cosypose loggers.
    # BUGFIX: the loop variable was named `logger`, shadowing the
    # module-level logger for the rest of the function — every
    # `logger.info(...)` below went through whichever logger the loop
    # happened to end on (NameError if loggerDict were empty).
    for lgr in [logging.getLogger(name) for name in logging.root.manager.loggerDict]:
        if 'cosypose' in lgr.name:
            lgr.setLevel(logging.DEBUG)
    logger.info("Starting ...")
    init_distributed_mode()

    parser = argparse.ArgumentParser('Evaluation')
    parser.add_argument('--config', default='tless-bop', type=str)
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--job_dir', default='', type=str)
    parser.add_argument('--comment', default='', type=str)
    parser.add_argument('--nviews', dest='n_views', default=1, type=int)
    args = parser.parse_args()

    coarse_run_id = None
    refiner_run_id = None
    n_workers = 8
    n_plotters = 8
    n_frames = None
    scene_id = None
    group_id = None
    n_groups = None
    n_views = args.n_views
    # Multi-view refinement only makes sense with at least two views.
    skip_mv = args.n_views < 2
    skip_predictions = False

    # Pretrained run ids and iteration counts per object set.
    if 'tless' in args.config:
        object_set = 'tless'
        coarse_run_id = 'tless-coarse--10219'
        refiner_run_id = 'tless-refiner--585928'
        n_coarse_iterations = 1
        n_refiner_iterations = 4
    elif 'ycbv' in args.config:
        # ycbv uses a finetuned refiner only (no coarse iterations).
        object_set = 'ycbv'
        refiner_run_id = 'ycbv-refiner-finetune--251020'
        n_coarse_iterations = 0
        n_refiner_iterations = 2
    else:
        raise ValueError(args.config)

    if args.config == 'tless-siso':
        ds_name = 'tless.primesense.test'
        assert n_views == 1
    elif args.config == 'tless-vivo':
        ds_name = 'tless.primesense.test.bop19'
    elif args.config == 'ycbv':
        ds_name = 'ycbv.test.keyframes'
    else:
        raise ValueError(args.config)

    if args.debug:
        # Restrict the run to a tiny subset for quick iteration.
        if 'tless' in args.config:
            scene_id = None
            group_id = 64
            n_groups = 2
        else:
            scene_id = 48
            n_groups = 2
        n_frames = None
        n_workers = 0
        n_plotters = 0

    # Random suffix keeps concurrent runs from clobbering each other.
    n_rand = np.random.randint(int(1e10))
    save_dir = RESULTS_DIR / f'{args.config}-n_views={n_views}-{args.comment}-{n_rand}'
    logger.info(f"SAVE DIR: {save_dir}")
    logger.info(f"Coarse: {coarse_run_id}")
    logger.info(f"Refiner: {refiner_run_id}")

    # Load dataset
    scene_ds = make_scene_dataset(ds_name)
    if scene_id is not None:
        mask = scene_ds.frame_index['scene_id'] == scene_id
        scene_ds.frame_index = scene_ds.frame_index[mask].reset_index(drop=True)
    if n_frames is not None:
        # BUGFIX: previous code re-applied `mask` here, which double-filtered
        # and raised NameError whenever scene_id was None.
        scene_ds.frame_index = scene_ds.frame_index[:n_frames].reset_index(drop=True)

    # Predictions
    predictor, mesh_db = load_models(coarse_run_id, refiner_run_id,
                                     n_workers=n_plotters, object_set=object_set)
    mv_predictor = MultiviewScenePredictor(mesh_db)

    base_pred_kwargs = dict(
        n_coarse_iterations=n_coarse_iterations,
        n_refiner_iterations=n_refiner_iterations,
        skip_mv=skip_mv,
        pose_predictor=predictor,
        mv_predictor=mv_predictor,
    )

    # One prediction configuration per external detector.
    if skip_predictions:
        pred_kwargs = {}
    elif 'tless' in ds_name:
        pix2pose_detections = load_pix2pose_results(all_detections='bop19' in ds_name).cpu()
        pred_kwargs = {
            'pix2pose_detections': dict(
                detections=pix2pose_detections,
                **base_pred_kwargs
            ),
        }
    elif 'ycbv' in ds_name:
        posecnn_detections = load_posecnn_results()
        pred_kwargs = {
            'posecnn_init': dict(
                detections=posecnn_detections,
                use_detections_TCO=posecnn_detections,
                **base_pred_kwargs
            ),
        }
    else:
        raise ValueError(ds_name)

    scene_ds_pred = MultiViewWrapper(scene_ds, n_views=n_views)

    # Optional debug restriction to a single view group / first n groups.
    if group_id is not None:
        mask = scene_ds_pred.frame_index['group_id'] == group_id
        scene_ds_pred.frame_index = scene_ds_pred.frame_index[mask].reset_index(drop=True)
    elif n_groups is not None:
        scene_ds_pred.frame_index = scene_ds_pred.frame_index[:n_groups]

    pred_runner = MultiviewPredictionRunner(
        scene_ds_pred, batch_size=1, n_workers=n_workers,
        cache_data=len(pred_kwargs) > 1)

    all_predictions = dict()
    for pred_prefix, pred_kwargs_n in pred_kwargs.items():
        logger.info(f"Prediction: {pred_prefix}")
        preds = pred_runner.get_predictions(**pred_kwargs_n)
        for preds_name, preds_n in preds.items():
            all_predictions[f'{pred_prefix}/{preds_name}'] = preds_n

    logger.info("Done with predictions")
    torch.distributed.barrier()

    # Evaluation
    predictions_to_evaluate = set()
    if 'ycbv' in ds_name:
        det_key = 'posecnn_init'
        # Also evaluate the raw PoseCNN detections as a baseline.
        all_predictions['posecnn'] = posecnn_detections
        predictions_to_evaluate.add('posecnn')
    elif 'tless' in ds_name:
        det_key = 'pix2pose_detections'
    else:
        raise ValueError(ds_name)
    predictions_to_evaluate.add(f'{det_key}/refiner/iteration={n_refiner_iterations}')

    if args.n_views > 1:
        for k in [
            # 'ba_input',
            # 'ba_output',
            'ba_output+all_cand',
        ]:
            predictions_to_evaluate.add(f'{det_key}/{k}')

    # Deterministic key order for logging / saved results.
    all_predictions = OrderedDict(sorted(all_predictions.items(), key=lambda item: item[0]))

    # Evaluation.
    meters = get_pose_meters(scene_ds)
    mv_group_ids = list(iter(pred_runner.sampler))
    scene_ds_ids = np.concatenate(scene_ds_pred.frame_index.loc[mv_group_ids, 'scene_ds_ids'].values)
    sampler = ListSampler(scene_ds_ids)
    eval_runner = PoseEvaluation(scene_ds, meters, n_workers=n_workers,
                                 cache_data=True, batch_size=1, sampler=sampler)

    eval_metrics, eval_dfs = dict(), dict()
    for preds_k, preds in all_predictions.items():
        if preds_k in predictions_to_evaluate:
            logger.info(f"Evaluation : {preds_k} (N={len(preds)})")
            if len(preds) == 0:
                preds = eval_runner.make_empty_predictions()
            eval_metrics[preds_k], eval_dfs[preds_k] = eval_runner.evaluate(preds)
            preds.cpu()
        else:
            logger.info(f"Skipped: {preds_k} (N={len(preds)})")

    # Collect predictions from all distributed workers.
    all_predictions = gather_predictions(all_predictions)

    # Mapping: full metric key in the summary -> human-readable label.
    metrics_to_print = dict()
    if 'ycbv' in ds_name:
        metrics_to_print.update({
            'posecnn/ADD(-S)_ntop=1_matching=CLASS/AUC/objects/mean': 'PoseCNN/AUC of ADD(-S)',
            f'{det_key}/refiner/iteration={n_refiner_iterations}/ADD(-S)_ntop=1_matching=CLASS/AUC/objects/mean': 'Singleview/AUC of ADD(-S)',
            f'{det_key}/refiner/iteration={n_refiner_iterations}/ADD-S_ntop=1_matching=CLASS/AUC/objects/mean': 'Singleview/AUC of ADD-S',
            f'{det_key}/ba_output+all_cand/ADD(-S)_ntop=1_matching=CLASS/AUC/objects/mean': f'Multiview (n={args.n_views})/AUC of ADD(-S)',
            f'{det_key}/ba_output+all_cand/ADD-S_ntop=1_matching=CLASS/AUC/objects/mean': f'Multiview (n={args.n_views})/AUC of ADD-S',
        })
    elif 'tless' in ds_name:
        metrics_to_print.update({
            f'{det_key}/refiner/iteration={n_refiner_iterations}/ADD-S_ntop=BOP_matching=OVERLAP/AUC/objects/mean': 'Singleview/AUC of ADD-S',
            # f'{det_key}/refiner/iteration={n_refiner_iterations}/ADD-S_ntop=BOP_matching=BOP/0.1d': 'Singleview/ADD-S<0.1d',
            f'{det_key}/refiner/iteration={n_refiner_iterations}/ADD-S_ntop=ALL_matching=BOP/mAP': 'Singleview/mAP@ADD-S<0.1d',
            f'{det_key}/ba_output+all_cand/ADD-S_ntop=BOP_matching=OVERLAP/AUC/objects/mean': f'Multiview (n={args.n_views})/AUC of ADD-S',
            # f'{det_key}/ba_output+all_cand/ADD-S_ntop=BOP_matching=BOP/0.1d': f'Multiview (n={args.n_views})/ADD-S<0.1d',
            # BUGFIX: closing parenthesis was misplaced ("(n=N/mAP@ADD-S<0.1d)"),
            # making this label inconsistent with its siblings.
            f'{det_key}/ba_output+all_cand/ADD-S_ntop=ALL_matching=BOP/mAP': f'Multiview (n={args.n_views})/mAP@ADD-S<0.1d',
        })
    else:
        # Consistency fix: include the offending value like the other raises.
        raise ValueError(ds_name)

    metrics_to_print.update({
        f'{det_key}/ba_input/ADD-S_ntop=BOP_matching=OVERLAP/norm': 'Multiview before BA/ADD-S (m)',
        f'{det_key}/ba_output/ADD-S_ntop=BOP_matching=OVERLAP/norm': 'Multiview after BA/ADD-S (m)',
    })

    # Only rank 0 writes results to disk.
    if get_rank() == 0:
        save_dir.mkdir()
        results = format_results(all_predictions, eval_metrics, eval_dfs,
                                 print_metrics=False)
        (save_dir / 'full_summary.txt').write_text(results.get('summary_txt', ''))

        full_summary = results['summary']
        summary_txt = 'Results:'
        for k, v in metrics_to_print.items():
            if k in full_summary:
                summary_txt += f"\n{v}: {full_summary[k]}"
        logger.info(f"{'-'*80}")
        logger.info(summary_txt)
        logger.info(f"{'-'*80}")

        torch.save(results, save_dir / 'results.pth.tar')
        (save_dir / 'summary.txt').write_text(summary_txt)
        logger.info(f"Saved: {save_dir}")
def make_eval_bundle(args, model_training):
    """Build, per test dataset, the runners needed to evaluate `model_training`.

    Pairs the model currently being trained with a pretrained counterpart
    (coarse <-> refiner, depending on args.train_refiner / args.train_coarse)
    and returns a dict mapping dataset name to
    (pred_runner, pred_kwargs, eval_runner, save_dir).

    NOTE(review): `args` appears to be the training config namespace
    (train_refiner, coarse_run_id_for_test, test_ds_names, n_test_frames,
    n_dataloader_workers, save_dir) — confirm against the caller.
    """
    eval_bundle = dict()
    # Attach the training config to the live model so downstream code can
    # read iteration counts etc. from `model.cfg`.
    model_training.cfg = args

    def load_model(run_id):
        # Load a pretrained pose model from its run directory; None run_id
        # means "no such model" (e.g. no coarse model configured).
        if run_id is None:
            return None
        run_dir = EXP_DIR / run_id
        cfg = yaml.load((run_dir / 'config.yaml').read_text(), Loader=yaml.FullLoader)
        cfg = check_update_config(cfg)
        # Reuse the training renderer / mesh_db so both models share assets.
        model = create_model_pose(
            cfg,
            renderer=model_training.renderer,
            mesh_db=model_training.mesh_db).cuda().eval()
        ckpt = torch.load(run_dir / 'checkpoint.pth.tar')['state_dict']
        model.load_state_dict(ckpt)
        model.eval()
        model.cfg = cfg
        return model

    # The model under training fills one role; the pretrained checkpoint
    # fills the other.
    if args.train_refiner:
        refiner_model = model_training
        coarse_model = load_model(args.coarse_run_id_for_test)
    elif args.train_coarse:
        coarse_model = model_training
        refiner_model = load_model(args.refiner_run_id_for_test)
    else:
        raise ValueError

    predictor = CoarseRefinePosePredictor(coarse_model=coarse_model,
                                          refiner_model=refiner_model)

    # Shared kwargs for every prediction configuration below; multi-view
    # refinement is disabled during training-time evaluation.
    base_pred_kwargs = dict(
        pose_predictor=predictor,
        mv_predictor=None,
        use_gt_detections=False,
        skip_mv=True,
    )

    for ds_name in args.test_ds_names:
        assert ds_name in {'ycbv.test.keyframes', 'tless.primesense.test'}
        scene_ds = make_scene_dataset(ds_name, n_frames=args.n_test_frames)
        logger.info(f'TEST: Loaded {ds_name} with {len(scene_ds)} images.')
        # n_views=1: single-view evaluation only.
        scene_ds_pred = MultiViewWrapper(scene_ds, n_views=1)

        # Predictions
        pred_runner = MultiviewPredictionRunner(
            scene_ds_pred, batch_size=1,
            n_workers=args.n_dataloader_workers, cache_data=False)

        detections = None
        pred_kwargs = dict()

        # External detections per dataset: pix2pose for T-LESS, PoseCNN for
        # YCB-V. `coarse_detections` seed the refiner-only configuration.
        if 'tless' in ds_name:
            detections = load_pix2pose_results(
                all_detections=False, remove_incorrect_poses=False).cpu()
            coarse_detections = load_pix2pose_results(
                all_detections=False, remove_incorrect_poses=True).cpu()
            det_k = 'pix2pose_detections'
            coarse_k = 'pix2pose_coarse'
        elif 'ycbv' in ds_name:
            detections = load_posecnn_results().cpu()
            coarse_detections = detections
            det_k = 'posecnn_detections'
            coarse_k = 'posecnn_coarse'
        else:
            raise ValueError(ds_name)

        # Refiner-only configuration: start from the detector's poses
        # (use_detections_TCO=True) and run a single refiner iteration.
        if refiner_model is not None:
            pred_kwargs.update({
                coarse_k: dict(
                    detections=coarse_detections,
                    use_detections_TCO=True,
                    n_coarse_iterations=0,
                    n_refiner_iterations=1,
                    **base_pred_kwargs,
                )
            })
        # Full pipeline configuration: coarse iterations from the coarse
        # model's own config, then one refiner pass if a refiner exists.
        if coarse_model is not None:
            pred_kwargs.update({
                det_k: dict(
                    detections=detections,
                    use_detections_TCO=False,
                    n_coarse_iterations=coarse_model.cfg.n_iterations,
                    n_refiner_iterations=1 if refiner_model is not None else 0,
                    **base_pred_kwargs,
                )
            })

        # Evaluation
        meters = get_pose_meters(scene_ds)
        # Keep only the prefix of each meter key (everything before '_').
        meters = {k.split('_')[0]: v for k, v in meters.items()}
        # Re-order the eval sampler to match the prediction runner's
        # grouping so predictions and ground truth line up.
        mv_group_ids = list(iter(pred_runner.sampler))
        scene_ds_ids = np.concatenate(
            scene_ds_pred.frame_index.loc[mv_group_ids, 'scene_ds_ids'].values)
        sampler = ListSampler(scene_ds_ids)
        eval_runner = PoseEvaluation(scene_ds, meters, batch_size=1,
                                     cache_data=True,
                                     n_workers=args.n_dataloader_workers,
                                     sampler=sampler)

        save_dir = Path(args.save_dir) / 'eval' / ds_name
        save_dir.mkdir(exist_ok=True, parents=True)
        eval_bundle[ds_name] = (pred_runner, pred_kwargs, eval_runner, save_dir)
    return eval_bundle