import os
import sys

sys.path.append(
    os.path.dirname(
        os.path.abspath(
            os.path.dirname(os.path.abspath(os.path.dirname(__file__))))))

# `import mmcv` and `load_checkpoint` were missing in the original snippet
import mmcv
from mmcv.runner import load_checkpoint

from mmdetection.mmdet.models import build_detector
from mmdetection.mmdet.apis import inference_detector, show_result

cfg = mmcv.Config.fromfile('configs/faster_rcnn_r50_fpn_1x.py')
cfg.model.pretrained = None

# construct the model and load checkpoint
model = build_detector(cfg.model, test_cfg=cfg.test_cfg)
_ = load_checkpoint(
    model,
    'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/faster_rcnn_r50_fpn_1x_20181010-3d1b3351.pth'
)

# test a single image
img = mmcv.imread('test.jpg')
result = inference_detector(model, img, cfg)
show_result(img, result)

# test a list of images
# imgs = ['test1.jpg', 'test2.jpg']
# for i, result in enumerate(inference_detector(model, imgs, cfg, device='cuda:0')):
#     print(i, imgs[i])
#     show_result(imgs[i], result)
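# --- A hedged post-processing sketch, not part of the original script. In
# this mmdetection version, `result` for a box-only detector such as Faster
# R-CNN is a per-class list of (n, 5) arrays in [x1, y1, x2, y2, score]
# format. The helper below flattens it into (bbox, score, class_idx) tuples;
# the 0.3 threshold is an arbitrary illustrative choice.
import numpy as np


def filter_detections(result, score_thr=0.3):
    """Flatten per-class bbox arrays into (bbox, score, class_idx) tuples."""
    detections = []
    for class_idx, bboxes in enumerate(result):
        for bbox in bboxes:
            if bbox[4] >= score_thr:
                detections.append((bbox[:4], float(bbox[4]), class_idx))
    return detections


for bbox, score, class_idx in filter_detections(result):
    print('class {}: score {:.2f}, bbox {}'.format(
        class_idx, score, bbox.astype(np.int32)))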
import torch
import mmcv
from mmcv.runner import load_checkpoint, obj_from_dict, parallel_test
from mmcv.parallel import MMDataParallel

from mmdet import datasets
from mmdet.core import coco_eval, results2json
from mmdet.datasets import build_dataloader
from mmdet.models import build_detector, detectors

# NOTE: the imports above assume the legacy mmdetection v0.x / mmcv 0.2.x
# layout this script targets. `single_test` and `_data_func` are helpers
# defined elsewhere in the original script; a hedged sketch of `parse_args`
# is given after main().


def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        # single-GPU path: build the model, load weights, run sequentially
        model = build_detector(
            cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader, args.show)
    else:
        # multi-GPU path: parallel_test constructs one model per worker
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(detectors, model_args.pop('type'))
        outputs = parallel_test(
            model_type,
            model_args,
            args.checkpoint,
            dataset,
            _data_func,
            range(args.gpus),
            workers_per_gpu=args.proc_per_gpu)

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting to evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_file = args.out + '.json'
                    results2json(dataset, outputs, result_file)
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    # one result set per output key (e.g. per test setting)
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}.json'.format(name)
                        results2json(dataset, outputs_, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
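# A hedged sketch of the parse_args() called above, not from a confirmed
# source: the flag names are inferred from the attributes the script reads
# (args.config, args.checkpoint, args.out, args.eval, args.gpus,
# args.proc_per_gpu, args.show); treat the defaults as assumptions.
import argparse


def parse_args():
    parser = argparse.ArgumentParser(description='MMDet test detector')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--out', help='output result file (.pkl/.pickle)')
    parser.add_argument(
        '--eval',
        type=str,
        nargs='+',
        choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'],
        help='eval types')
    parser.add_argument(
        '--gpus', type=int, default=1, help='number of GPUs to use')
    parser.add_argument(
        '--proc_per_gpu',
        type=int,
        default=1,
        help='worker processes per GPU for parallel_test')
    parser.add_argument(
        '--show', action='store_true', help='visualize results')
    return parser.parse_args()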
import json
import urllib.request
from pathlib import Path

import mmcv
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, load_checkpoint
from sotabencheval.object_detection import COCOEvaluator

from mmdetection.mmdet.apis import init_dist
from mmdetection.mmdet.core import wrap_fp16_model
from mmdetection.mmdet.datasets import build_dataloader, build_dataset
from mmdetection.mmdet.models import build_detector

# NOTE: the imports above assume the vendored `mmdetection` package layout
# used elsewhere in this repo. `single_gpu_test` / `multi_gpu_test` are the
# evaluator-aware variants defined elsewhere in this script; single_gpu_test
# returns (outputs, cache_exists) so a cached sotabench run can short-circuit.


def evaluate_model(model_name, paper_arxiv_id, weights_url, weights_name,
                   paper_results, config):
    print('---')
    print('Now Evaluating %s' % model_name)

    evaluator = COCOEvaluator(
        root='./.data/vision/coco',
        model_name=model_name,
        paper_arxiv_id=paper_arxiv_id,
        paper_results=paper_results)

    out = 'results.pkl'
    launcher = 'none'

    if out is not None and not out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(config)
    cfg.data.test[
        'ann_file'] = './.data/vision/coco/annotations/instances_val2017.json'
    cfg.data.test['img_prefix'] = './.data/vision/coco/val2017/'

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    local_checkpoint, _ = urllib.request.urlretrieve(
        weights_url,
        '%s/.cache/torch/%s' % (str(Path.home()), weights_name))
    print(local_checkpoint)
    # e.g. '/home/ubuntu/GCNet/mask_rcnn_r50_fpn_1x_20181010-069fa190.pth'
    checkpoint = load_checkpoint(model, local_checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    evaluator.reset_time()

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs, cache_exists = single_gpu_test(
            model, data_loader, False, evaluator)
    else:
        model = MMDistributedDataParallel(model.cuda())
        # unreachable with the hardcoded launcher above; the original
        # referenced an undefined `args.tmpdir` and left `cache_exists` unset
        outputs = multi_gpu_test(model, data_loader, tmpdir=None)
        cache_exists = False

    if cache_exists:
        print('Cache exists: %s' % (evaluator.batch_hash))
        evaluator.save()
    else:
        from mmdetection.mmdet.core import results2json

        rank, _ = get_dist_info()
        if out and rank == 0:
            print('\nwriting results to {}'.format(out))
            mmcv.dump(outputs, out)
            eval_types = ['bbox']
            if eval_types:
                print('Starting to evaluate {}'.format(
                    ' and '.join(eval_types)))
                if eval_types == ['proposal_fast']:
                    result_file = out  # unused for the hardcoded ['bbox'] eval
                else:
                    if not isinstance(outputs[0], dict):
                        result_files = results2json(dataset, outputs, out)
                    else:
                        for name in outputs[0]:
                            print('\nEvaluating {}'.format(name))
                            outputs_ = [out[name] for out in outputs]
                            result_file = out + '.{}'.format(name)
                            result_files = results2json(
                                dataset, outputs_, result_file)
                anns = json.load(open(result_files['bbox']))
                evaluator.detections = []
                evaluator.add(anns)
                evaluator.save()
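# A hedged usage sketch, not from the original file, showing how
# evaluate_model() might be invoked. The weight URL and config path are
# illustrative placeholders; only the Mask R-CNN arXiv id is a known value.
evaluate_model(
    model_name='Mask R-CNN (R-50-FPN, 1x)',  # leaderboard display name, placeholder
    paper_arxiv_id='1703.06870',             # Mask R-CNN paper
    weights_url='https://example.com/mask_rcnn_r50_fpn_1x.pth',  # placeholder
    weights_name='mask_rcnn_r50_fpn_1x.pth',
    paper_results=None,                      # or a dict of reported metrics
    config='configs/mask_rcnn_r50_fpn_1x.py')  # placeholder config path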
import torch
from mmcv import Config
from imgaug import augmenters as iaa

from mmdet import __version__
from mmdet.apis import (get_root_logger, init_dist, set_random_seed,
                        train_detector)
from mmdet.datasets import get_dataset
from mmdet.models import build_detector

# NOTE: the imports above assume the legacy mmdetection v1.x API this script
# targets; a hedged sketch of parse_args() is given after main().


def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    if cfg.checkpoint_config is not None:
        # save mmdet version in checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__, config=cfg.text)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)

    # used to exclude unneeded losses/modules from training by freezing their
    # parameters; the backbone and neck are always frozen when any module is
    # listed
    if 'no_train_modules' in cfg.train_cfg:
        no_train_modules = cfg.train_cfg.no_train_modules
        if len(no_train_modules) != 0:
            for param in model.backbone.parameters():
                param.requires_grad = False
            for param in model.neck.parameters():
                param.requires_grad = False
            # check each head independently so several can be frozen at once
            # (the original used 'rpn_haed' and an elif chain, both apparent typos)
            if 'rpn_head' in no_train_modules:
                for param in model.rpn_head.parameters():
                    param.requires_grad = False
            if 'bbox_head' in no_train_modules:
                for param in model.bbox_head.parameters():
                    param.requires_grad = False
            if 'mask_head' in no_train_modules and hasattr(
                    cfg.model, 'mask_head'):
                for param in model.mask_head.parameters():
                    param.requires_grad = False

    train_dataset = get_dataset(cfg.data.train)

    # alternative fixed-pipeline version kept for reference:
    # augmentation = iaa.Sequential([
    #     iaa.Fliplr(0.5),
    #     # iaa.Invert(0.5),
    #     iaa.Affine(
    #         scale={"x": (0.7, 1.0), "y": (0.7, 1.0)},
    #         translate_percent={"x": (-0.05, 0.05), "y": (-0.05, 0.05)},
    #     ),
    #     iaa.Multiply((0.5, 1.0))
    # ], random_order=True)

    # apply a random subset of the listed augmenters to each sample
    augmentation = iaa.SomeOf(
        (0, None),
        [
            iaa.Fliplr(0.5),
            # iaa.Invert(0.5),
            iaa.Affine(
                scale={"x": (0.7, 1.0), "y": (0.7, 1.0)},
                translate_percent={"x": (-0.05, 0.05), "y": (-0.05, 0.05)},
            ),
            iaa.Multiply((0.4, 1.0))
        ])
    # set_augmentation is a project-specific dataset hook, not stock mmdet
    train_dataset.set_augmentation(augmentation)

    train_detector(
        model,
        train_dataset,
        cfg,
        distributed=distributed,
        validate=args.validate,
        logger=logger)
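# As with the test script, parse_args() is not defined here. A minimal sketch
# consistent with the attributes used above (args.config, args.work_dir,
# args.resume_from, args.gpus, args.launcher, args.seed, args.validate)
# follows; treat the flag names and defaults as assumptions.
import argparse


def parse_args():
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work_dir', help='dir to save logs and models')
    parser.add_argument('--resume_from', help='checkpoint file to resume from')
    parser.add_argument(
        '--gpus', type=int, default=1, help='number of GPUs to use')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument(
        '--validate', action='store_true', help='evaluate during training')
    return parser.parse_args()


if __name__ == '__main__':
    main()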