import os
import shutil

import numpy as np
import torch

# NOTE: project-specific helpers used below (cfg, build_dataset, build_test_hooks,
# initialize_model_from_cfg, test, save_object, logging_rank, Generalized_RCNN,
# CheckPointer, Optimizer, LearningRateScheduler, make_train_data_loader,
# build_train_hooks, train, Timer, multi_gpu_test_net_on_dataset, evaluation, ...)
# are assumed to be imported from the surrounding project; their import paths are
# not shown in this excerpt.


def test_net(args, ind_range=None):
    """Run inference on all images in a dataset, or over an index range of
    images in a dataset, using a single GPU.
    """
    dataset = build_dataset(cfg.TEST.DATASETS, is_train=False)
    all_hooks = build_test_hooks(args.cfg_file.split('/')[-1],
                                 log_period=int(np.ceil(10 / cfg.TEST.IMS_PER_GPU)))

    if ind_range is not None:
        start_ind, end_ind = ind_range
    else:
        start_ind = 0
        end_ind = len(dataset)

    model = initialize_model_from_cfg()
    all_boxes = test(model, dataset, start_ind, end_ind, all_hooks)

    if ind_range is not None:
        det_name = 'detection_range_%s_%s.pkl' % tuple(ind_range)
    else:
        det_name = 'detections.pkl'
    det_file = os.path.join(cfg.CKPT, 'test', det_name)
    save_object(dict(all_boxes=all_boxes), det_file)
    logging_rank('Wrote detections to: {}'.format(os.path.abspath(det_file)), local_rank=0)
    return all_boxes,
def main():
    if not os.path.isdir(cfg.CKPT):
        mkdir_p(cfg.CKPT)
    if args.cfg_file is not None:
        shutil.copyfile(args.cfg_file, os.path.join(cfg.CKPT, args.cfg_file.split('/')[-1]))
    assert_and_infer_cfg(make_immutable=False)

    # Create model
    model = Generalized_RCNN()
    logging_rank(model, distributed=args.distributed, local_rank=args.local_rank)

    # Create checkpointer
    checkpointer = CheckPointer(cfg.CKPT, weights_path=cfg.TRAIN.WEIGHTS,
                                auto_resume=cfg.TRAIN.AUTO_RESUME, local_rank=args.local_rank)

    # Load model or random-initialization
    model = checkpointer.load_model(model, convert_conv1=cfg.MODEL.CONV1_RGB2BGR)
    if cfg.MODEL.BATCH_NORM == 'freeze':
        model = convert_bn2affine_model(model, merge=not checkpointer.resume)
    elif cfg.MODEL.BATCH_NORM == 'sync':
        model = convert_bn2syncbn_model(model)
    model.to(args.device)

    # Create optimizer
    optimizer = Optimizer(model, cfg.SOLVER, local_rank=args.local_rank).build()
    optimizer = checkpointer.load_optimizer(optimizer)
    logging_rank('The mismatch keys: {}'.format(mismatch_params_filter(sorted(checkpointer.mismatch_keys))),
                 distributed=args.distributed, local_rank=args.local_rank)

    # Create scheduler
    scheduler = LearningRateScheduler(optimizer, cfg.SOLVER, start_iter=0, local_rank=args.local_rank)
    scheduler = checkpointer.load_scheduler(scheduler)

    # Create training dataset and loader
    datasets = build_dataset(cfg.TRAIN.DATASETS, is_train=True, local_rank=args.local_rank)
    train_loader = make_train_data_loader(datasets, is_distributed=args.distributed,
                                          start_iter=scheduler.iteration)

    # Model Distributed
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank,
        )
    else:
        model = torch.nn.DataParallel(model)

    # Build hooks
    all_hooks = build_train_hooks(cfg, optimizer, scheduler, max_iter=cfg.SOLVER.MAX_ITER,
                                  warmup_iter=cfg.SOLVER.WARM_UP_ITERS, ignore_warmup_time=False)

    # Train
    train(model, train_loader, optimizer, scheduler, checkpointer, all_hooks)
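# --- Hedged sketch, not part of the original excerpt ---
# Minimal entry point illustrating how main() above could be launched. The flag names
# below and the WORLD_SIZE-based distributed check are assumptions inferred from the
# attributes used on `args` (cfg_file, local_rank, distributed, device); the real
# script presumably also merges args.cfg_file into the global cfg before main() runs.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='Training launcher (sketch)')
    parser.add_argument('--cfg-file', dest='cfg_file', default=None,
                        help='path to the yaml config (assumed flag name)')
    parser.add_argument('--local_rank', type=int, default=0,
                        help='process rank set by torch.distributed.launch (assumed)')
    args = parser.parse_args()

    # Derived attributes consumed by main(); assumptions for illustration only.
    args.distributed = int(os.environ.get('WORLD_SIZE', '1')) > 1
    args.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend='nccl', init_method='env://')

    main()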
def test_net_on_dataset(args, multi_gpu=False):
    """Run inference on a dataset."""
    dataset = build_dataset(cfg.TEST.DATASETS, is_train=False)
    total_timer = Timer()
    total_timer.tic()
    if multi_gpu:
        num_images = len(dataset)
        all_boxes, all_segms, all_keyps, all_parss, all_pscores, all_uvs = \
            multi_gpu_test_net_on_dataset(args, num_images)
    else:
        # NOTE: test_net() as shown in this excerpt returns only box results; the
        # six-way unpacking here presumably matches the full implementation, which
        # also collects masks, keypoints, parsing, parsing scores, and UV results.
        all_boxes, all_segms, all_keyps, all_parss, all_pscores, all_uvs = test_net(args)
    total_timer.toc(average=False)
    logging_rank('Total inference time: {:.3f}s'.format(total_timer.average_time), local_rank=0)
    return evaluation(dataset, all_boxes, all_segms, all_keyps, all_parss, all_pscores, all_uvs)