def set_configuration(self, cfg_in):
    cfg = self.get_configuration()
    cfg.merge_config(cfg_in)

    self._net_config = str(cfg.get_value("net_config"))
    self._weight_file = str(cfg.get_value("weight_file"))
    self._class_names = str(cfg.get_value("class_names"))
    self._thresh = float(cfg.get_value("thresh"))
    self._gpu_index = str(cfg.get_value("gpu_index"))

    import mmcv
    from mmdet.models import build_detector
    from mmcv.runner import load_checkpoint

    self._cfg = mmcv.Config.fromfile(self._net_config)
    self._cfg.model.pretrained = None
    self._model = build_detector(self._cfg.model, test_cfg=self._cfg.test_cfg)
    _ = load_checkpoint(self._model, self._weight_file)
def main(args):
    cfg = Config.fromfile(args.config)
    os.environ['CUDA_VISIBLE_DEVICES'] = cfg.GPU

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))
    logger.info('MMDetection Version: {}'.format(__version__))
    logger.info('Config: {}'.format(cfg.text))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    # if cfg.weights2d_path[0] is not None and cfg.load_from is None:
    #     expand_sd = pretrain2d_to_3d(model, cfg.weights2d_path[0])
    #     model.load_state_dict(expand_sd, strict=False)

    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        datasets.append(build_dataset(cfg.data.val))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__,
            config=cfg.text,
            CLASSES=datasets[0].CLASSES)

    validate = cfg.get('evaluation', False)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_detector(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=validate,
        logger=logger)
def main():
    args = parse_args()

    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results) with the argument "--out", "--eval", "--format-only", '
         '"--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    if cfg.model.get('neck'):
        if cfg.model.neck.get('rfp_backbone'):
            if cfg.model.neck.rfp_backbone.get('pretrained'):
                cfg.model.neck.rfp_backbone.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    print(f'loaded checkpoint from {args.checkpoint}')
    if args.fuse_conv_bn:
        model = fuse_module(model)
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
                                  args.show_score_thr)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.options is None else args.options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            dataset.evaluate(outputs, args.eval, **kwargs)
def measure_inference_speed(cfg, checkpoint, max_iter, log_interval,
                            is_fuse_conv_bn):
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # build the dataloader
    samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
    if samples_per_gpu > 1:
        # Replace 'ImageToTensor' with 'DefaultFormatBundle'
        cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        # Because multiple processes will occupy additional CPU resources,
        # FPS statistics will be more unstable when workers_per_gpu is not 0.
        # It is reasonable to set workers_per_gpu to 0.
        workers_per_gpu=0,
        dist=True,
        shuffle=False)

    # build the model and load checkpoint
    cfg.model.train_cfg = None
    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    load_checkpoint(model, checkpoint, map_location='cpu')
    if is_fuse_conv_bn:
        model = fuse_conv_bn(model)

    model = MMDistributedDataParallel(
        model.cuda(),
        device_ids=[torch.cuda.current_device()],
        broadcast_buffers=False)
    model.eval()

    # the first several iterations may be very slow, so skip them
    num_warmup = 5
    pure_inf_time = 0
    fps = 0

    # benchmark with up to `max_iter` images and take the average
    for i, data in enumerate(data_loader):
        torch.cuda.synchronize()
        start_time = time.perf_counter()
        with torch.no_grad():
            model(return_loss=False, rescale=True, **data)
        torch.cuda.synchronize()
        elapsed = time.perf_counter() - start_time

        if i >= num_warmup:
            pure_inf_time += elapsed
            if (i + 1) % log_interval == 0:
                fps = (i + 1 - num_warmup) / pure_inf_time
                print(
                    f'Done image [{i + 1:<3}/ {max_iter}], '
                    f'fps: {fps:.1f} img / s, '
                    f'time per image: {1000 / fps:.1f} ms / img',
                    flush=True)

        if (i + 1) == max_iter:
            fps = (i + 1 - num_warmup) / pure_inf_time
            print(
                f'Overall fps: {fps:.1f} img / s, '
                f'time per image: {1000 / fps:.1f} ms / img',
                flush=True)
            break
    return fps
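# --- Usage sketch (hypothetical, not part of the original scripts) ---
# A minimal example of how measure_inference_speed() might be driven, assuming
# an MMDetection-style config and checkpoint; the paths below are placeholders.
# Note the function wraps the model in MMDistributedDataParallel and builds the
# dataloader with dist=True, so a distributed environment is expected to be
# initialized first (e.g. via init_dist('pytorch', ...)).
#
# import mmcv
#
# cfg = mmcv.Config.fromfile(
#     'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py')  # placeholder
# fps = measure_inference_speed(
#     cfg,
#     'work_dirs/faster_rcnn_r50_fpn_1x_coco/latest.pth',  # placeholder
#     max_iter=2000,      # average over at most 2000 images
#     log_interval=50,    # report running FPS every 50 iterations
#     is_fuse_conv_bn=False)
# print(f'measured speed: {fps:.1f} img/s')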
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    datasets = [build_dataset(cfg.data.train)]
    model = build_detector(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    if cfg.load_from:
        checkpoint = load_checkpoint(model, cfg.load_from, map_location='cpu')
    model.CLASSES = datasets[0].CLASSES

    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__,
            config=cfg.text,
            CLASSES=datasets[0].CLASSES)

    data_loader = build_dataloader(
        datasets[0],
        imgs_per_gpu=cfg.data.imgs_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        num_gpus=cfg.gpus,
        dist=False,
        shuffle=False)

    # put model on gpus
    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()
    model.train()
    if hasattr(model, 'module'):
        model_load = model.module
    # one optimizer over all parameters, one over the aggregation module only
    optimizer_all = obj_from_dict(cfg.optimizer, torch.optim,
                                  dict(params=model_load.parameters()))
    optimizer = obj_from_dict(cfg.optimizer, torch.optim,
                              dict(params=model_load.agg.parameters()))

    check_video = None
    start_epoch = 0
    meta = None
    epoch = start_epoch

    # visdom windows for live loss curves
    vis = visdom.Visdom(env='fuse_c')
    loss_cls_window = vis.line(
        X=torch.zeros((1, )).cpu(),
        Y=torch.zeros((1)).cpu(),
        opts=dict(
            xlabel='minibatches',
            ylabel='Loss of classification',
            title='Loss of classification',
            legend=['Loss of classification']))
    loss_init_window = vis.line(
        X=torch.zeros((1, )).cpu(),
        Y=torch.zeros((1)).cpu(),
        opts=dict(
            xlabel='minibatches',
            ylabel='Loss of init reppoint',
            title='Loss of init reppoint',
            legend=['Loss of init reppoint']))
    loss_refine_window = vis.line(
        X=torch.zeros((1, )).cpu(),
        Y=torch.zeros((1)).cpu(),
        opts=dict(
            xlabel='minibatches',
            ylabel='Loss of refine reppoint',
            title='Loss of refine reppoint',
            legend=['Loss of refine reppoint']))
    loss_total_window = vis.line(
        X=torch.zeros((1, )).cpu(),
        Y=torch.zeros((1)).cpu(),
        opts=dict(
            xlabel='minibatches',
            ylabel='Loss all',
            title='Loss all',
            legend=['Loss all']))
    loss_trans_window = vis.line(
        X=torch.zeros((1, )).cpu(),
        Y=torch.zeros((1)).cpu(),
        opts=dict(
            xlabel='minibatches',
            ylabel='Loss trans',
            title='Loss trans',
            legend=['Loss trans']))

    training_sample = 0
    for e in range(cfg.total_epochs):
        i = 0
        # save a checkpoint at the start of every epoch
        if epoch % 1 == 0:
            if meta is None:
                meta = dict(epoch=epoch + 1, iter=i)
            else:
                meta.update(epoch=epoch + 1, iter=i)
            checkpoint = {
                'meta': meta,
                'state_dict': weights_to_cpu(model.state_dict())
            }
            if optimizer_all is not None:
                checkpoint['optimizer'] = optimizer_all.state_dict()
            if not os.path.exists(cfg.work_dir):
                os.mkdir(cfg.work_dir)
            filename = os.path.join(cfg.work_dir,
                                    'epoch_{}.pth'.format(epoch))
            torch.save(checkpoint, filename)

        for i, data in enumerate(data_loader):
            # if len(data['gt_bboxes'].data[0][0]) == 0:
            #     continue
            optimizer.zero_grad()
            optimizer_all.zero_grad()

            reference_id = (data['img_meta'].data[0][0]['filename'].split('/')
                            [-1]).split('.')[0]
            video_id = data['img_meta'].data[0][0]['filename'].split('/')[-2]
            print('start image:', data['img_meta'].data[0][0]['filename'])
            print('end image:', data['img_meta'].data[-1][-1]['filename'])

            # frames that cross a video boundary are replaced with the first
            # frame of the clip so every sample stays within one video
            for m in range(len(data['img_meta'].data)):
                start_name = data['img_meta'].data[m][0]['filename'].split(
                    '/')[-2]
                for n in range(len(data['img_meta'].data[m])):
                    check_name = data['img_meta'].data[m][n][
                        'filename'].split('/')[-2]
                    if start_name != check_name:
                        print('end of video')
                        data['img_meta'].data[m][n] = data['img_meta'].data[m][0]
                        data['gt_bboxes'].data[m][n] = data['gt_bboxes'].data[m][0]
                        data['gt_labels'].data[m][n] = data['gt_labels'].data[m][0]
                        data['img'].data[m][n] = data['img'].data[m][0]

            losses = model(return_loss=True, **data)
            if isinstance(losses, list):
                loss_all = []
                log = []
                for p in range(len(losses)):
                    loss, log_var = parse_losses(losses[p])
                    loss_all.append(loss)
                    log.append(log_var)
            else:
                losses, log_vars = parse_losses(losses)

            if isinstance(losses, list):
                losses = loss_all[0] + 0.5 * loss_all[1] + 0.5 * loss_all[2] \
                    + 0.5 * loss_all[3]
                losses = losses / 2.5

            losses.backward()
            # train only the aggregation module for the first 10 epochs,
            # then the whole network
            if epoch < 10:
                optimizer.step()
            else:
                optimizer_all.step()

            log_vars = log[0]
            vis.line(
                X=torch.ones(1).cpu() * training_sample,
                Y=log_vars['loss_cls'] * torch.ones(1).cpu(),
                win=loss_cls_window,
                update='append')
            vis.line(
                X=torch.ones(1).cpu() * training_sample,
                Y=log_vars['loss_pts_init'] * torch.ones(1).cpu(),
                win=loss_init_window,
                update='append')
            vis.line(
                X=torch.ones(1).cpu() * training_sample,
                Y=log_vars['loss_pts_refine'] * torch.ones(1).cpu(),
                win=loss_refine_window,
                update='append')
            vis.line(
                X=torch.ones(1).cpu() * training_sample,
                Y=losses.item() * torch.ones(1).cpu(),
                win=loss_total_window,
                update='append')

            print('agg')
            print('epoch:', epoch, 'index:', i, 'video_id:', video_id,
                  'reference_id:', reference_id,
                  'loss_cls:', log_vars['loss_cls'],
                  'loss_init_box:', log_vars['loss_pts_init'],
                  'loss_refine_box:', log_vars['loss_pts_refine'])
            log_vars = log[1]
            print('refer')
            print('epoch:', epoch, 'index:', i, 'video_id:', video_id,
                  'reference_id:', reference_id,
                  'loss_cls:', log_vars['loss_cls'],
                  'loss_init_box:', log_vars['loss_pts_init'],
                  'loss_refine_box:', log_vars['loss_pts_refine'])
            log_vars = log[2]
            print('support')
            print('epoch:', epoch, 'index:', i, 'video_id:', video_id,
                  'reference_id:', reference_id,
                  'loss_cls:', log_vars['loss_cls'],
                  'loss_init_box:', log_vars['loss_pts_init'],
                  'loss_refine_box:', log_vars['loss_pts_refine'])
            training_sample += 1

        epoch += 1
def main():  # noqa: C901
    """Start test."""
    args = parse_args()
    if args.work_dir is not None:
        mmcv.mkdir_or_exist(args.work_dir)
        if args.tmpdir is None:
            args.tmpdir = osp.join(args.work_dir, 'tmp_dir')
            mmcv.mkdir_or_exist(args.tmpdir)
        if args.out is None:
            args.out = osp.join(args.work_dir, 'result.pkl')
        if args.checkpoint is None:
            args.checkpoint = osp.join(args.work_dir, 'latest.pth')
        fps_file = osp.join(args.work_dir, 'fps.pkl')
        mAP_file = osp.join(args.work_dir, 'mAP.pkl')
    else:
        mAP_file, fps_file = None, None
    if args.checkpoint is None:
        raise ValueError('Checkpoint file cannot be empty.')

    if args.config.endswith('.json'):
        load_method = mmcv.load
        mmcv.load = json_to_dict
        cfg = mmcv.Config.fromfile(args.config)
        mmcv.load = load_method
    else:
        cfg = mmcv.Config.fromfile(args.config)
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    if args.dist:
        init_dist('pytorch', **cfg.dist_params)

    # build the dataloader
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=True,
        shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    load_checkpoint(model, args.checkpoint, map_location='cpu')
    model.CLASSES = dataset.CLASSES

    if args.dist:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)
    else:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, fps_file)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting to evaluate {}'.format(' and '.join(eval_types)))
            assert not isinstance(outputs[0], dict)
            result_files = results2json(dataset, outputs, args.out)
            coco_eval(result_files, eval_types, dataset.coco,
                      dump_file=mAP_file)
def main():
    args = parse_args()

    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results) with the argument "--out", "--eval", "--format-only", '
         '"--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    if cfg.model.get('neck'):
        if isinstance(cfg.model.neck, list):
            for neck_cfg in cfg.model.neck:
                if neck_cfg.get('rfp_backbone'):
                    if neck_cfg.rfp_backbone.get('pretrained'):
                        neck_cfg.rfp_backbone.pretrained = None
        elif cfg.model.neck.get('rfp_backbone'):
            if cfg.model.neck.rfp_backbone.get('pretrained'):
                cfg.model.neck.rfp_backbone.pretrained = None

    # in case the test dataset is concatenated
    if isinstance(cfg.data.val, dict):
        cfg.data.val.test_mode = True
    elif isinstance(cfg.data.val, list):
        for ds_cfg in cfg.data.val:
            ds_cfg.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    samples_per_gpu = cfg.data.val.pop('samples_per_gpu', 1)
    if samples_per_gpu > 1:
        # Replace 'ImageToTensor' with 'DefaultFormatBundle'
        cfg.data.val.pipeline = replace_ImageToTensor(cfg.data.val.pipeline)
    dataset = build_dataset(cfg.data.val)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=samples_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        # import pickle
        # outputs = pickle.load(open('kitti.pkl', 'rb'))
        outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
                                  args.show_score_thr)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.eval_options is None else args.eval_options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            eval_kwargs = cfg.get('evaluation', {}).copy()
            # hard-code way to remove EvalHook args
            for key in ['interval', 'tmpdir', 'start', 'gpu_collect']:
                eval_kwargs.pop(key, None)
            eval_kwargs.update(dict(metric=args.eval, **kwargs))
            print(dataset.evaluate(outputs, **eval_kwargs))
pred_img_path = os.path.join(output_dir, pred_img_name)

# Ensure output paths exist.
if not os.path.exists(pred_list_path):
    os.makedirs(pred_list_path)
if not os.path.exists(pred_img_path):
    os.makedirs(pred_img_path)

cfg = mmcv.Config.fromfile(
    '/nfs/project/libo_i/mmdetection/configs/KITTI/cascade_mask_rcnn_x101_64x4d_fpn_1x.py'
)
cfg.model.pretrained = None

# construct the model and load checkpoint
model = build_detector(cfg.model, test_cfg=cfg.test_cfg)
_ = load_checkpoint(
    model,
    '/nfs/project/libo_i/mmdetection/work_dirs/kitti_cascade_mask_rcnn_x101_64x4d_fpn_1x/epoch_24.pth'
)

for ind, result in enumerate(
        inference_detector(
            model,
            list(
                map(lambda item: os.path.join(test_image_path, item),
                    test_image_list)),
            cfg,
            device='cuda:0')):
    text_save_name = '{}.txt'.format(test_image_list[ind][:-4])
    text_save_path = os.path.join(pred_list_path, text_save_name)
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    if args.imgs_per_gpu > 0:
        cfg.data.imgs_per_gpu = args.imgs_per_gpu

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)

    if hasattr(cfg, 'fuse') and cfg.fuse:
        train_dataset = get_dataset(cfg.datasets[0].train)
        if cfg.checkpoint_config is not None:
            # save mmdet version, config file content and class names in
            # checkpoints as meta data
            cfg.checkpoint_config.meta = dict(
                mmdet_version=__version__,
                config=cfg.text,
                CLASSES=train_dataset.CLASSES)
        # add an attribute for visualization convenience
        model.CLASSES = train_dataset.CLASSES

    datasets = list()
    for flow in cfg.workflow:
        mode, epochs = flow
        cur_datasets = list()
        for dataset_cfg in cfg.datasets:
            if hasattr(dataset_cfg, mode):
                cur_datasets.append(get_dataset(getattr(dataset_cfg, mode)))
        datasets.append(ConcatDataset(cur_datasets))

    val_dataset = None
    if cfg.data.train.get('val_every', None):
        val_dataset = list()
        for dataset_cfg in cfg.datasets:
            if hasattr(dataset_cfg, 'val'):
                val_dataset.append(get_dataset(dataset_cfg.val))
        val_dataset = ConcatDataset(val_dataset)

    if hasattr(cfg.model, 'smpl_head') and cfg.model.smpl_head.loss_cfg.get(
            'adversarial_cfg', False):
        train_adv_smpl_detector(
            model,
            datasets,
            cfg,
            distributed=distributed,
            validate=args.validate,
            logger=logger,
            create_dummy=args.create_dummy,
            val_dataset=val_dataset,
            load_pretrain=args.load_pretrain)
    else:
        train_smpl_detector_fuse(
            model,
            datasets,
            cfg,
            distributed=distributed,
            validate=args.validate,
            logger=logger,
            create_dummy=args.create_dummy,
            val_dataset=val_dataset,
            load_pretrain=args.load_pretrain)
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, '{}.log'.format(timestamp))
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # log some basic info
    logger.info('Distributed training: {}'.format(distributed))
    logger.info('MMDetection Version: {}'.format(__version__))
    logger.info('Config:\n{}'.format(cfg.text))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}, deterministic: {}'.format(
            args.seed, args.deterministic))
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed

    model = build_detector(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)

    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        datasets.append(build_dataset(cfg.data.val))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__,
            config=cfg.text,
            CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_detector(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=args.validate,
        timestamp=timestamp)
def test_sparse_rcnn_forward():
    config_path = 'sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py'
    model = _get_detector_cfg(config_path)
    model = _replace_r50_with_r18(model)
    model.backbone.init_cfg = None

    from mmdet.models import build_detector
    detector = build_detector(model)
    detector.init_weights()
    input_shape = (1, 3, 100, 100)
    mm_inputs = _demo_mm_inputs(input_shape, num_items=[5])
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')

    # Test forward train with a non-empty truth batch
    detector.train()
    gt_bboxes = mm_inputs['gt_bboxes']
    gt_bboxes = [item for item in gt_bboxes]
    gt_labels = mm_inputs['gt_labels']
    gt_labels = [item for item in gt_labels]
    losses = detector.forward(
        imgs,
        img_metas,
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels,
        return_loss=True)
    assert isinstance(losses, dict)
    loss, _ = detector._parse_losses(losses)
    assert float(loss.item()) > 0
    detector.forward_dummy(imgs)

    # Test forward train with an empty truth batch
    mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    gt_bboxes = mm_inputs['gt_bboxes']
    gt_bboxes = [item for item in gt_bboxes]
    gt_labels = mm_inputs['gt_labels']
    gt_labels = [item for item in gt_labels]
    losses = detector.forward(
        imgs,
        img_metas,
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels,
        return_loss=True)
    assert isinstance(losses, dict)
    loss, _ = detector._parse_losses(losses)
    assert float(loss.item()) > 0

    # Test forward test
    detector.eval()
    with torch.no_grad():
        img_list = [g[None, :] for g in imgs]
        batch_results = []
        for one_img, one_meta in zip(img_list, img_metas):
            result = detector.forward([one_img], [[one_meta]],
                                      rescale=True,
                                      return_loss=False)
            batch_results.append(result)

    # test empty proposal in roi_head
    with torch.no_grad():
        # test no proposal in the whole batch
        detector.roi_head.simple_test([imgs[0][None, :]],
                                      torch.empty((1, 0, 4)),
                                      torch.empty((1, 100, 4)),
                                      [img_metas[0]], torch.ones((1, 4)))
def test_two_stage_forward(cfg_file):
    models_with_semantic = [
        'htc/htc_r50_fpn_1x_coco.py',
        'panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py',
        'scnet/scnet_r50_fpn_20e_coco.py',
    ]
    if cfg_file in models_with_semantic:
        with_semantic = True
    else:
        with_semantic = False

    model = _get_detector_cfg(cfg_file)
    model = _replace_r50_with_r18(model)
    model.backbone.init_cfg = None

    # Save cost
    if cfg_file in [
            'seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py'  # noqa: E501
    ]:
        model.roi_head.bbox_head.num_classes = 80
        model.roi_head.bbox_head.loss_cls.num_classes = 80
        model.roi_head.mask_head.num_classes = 80
        model.test_cfg.rcnn.score_thr = 0.05
        model.test_cfg.rcnn.max_per_img = 100

    from mmdet.models import build_detector
    detector = build_detector(model)

    input_shape = (1, 3, 128, 128)

    # Test forward train with a non-empty truth batch
    mm_inputs = _demo_mm_inputs(
        input_shape, num_items=[10], with_semantic=with_semantic)
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    losses = detector.forward(imgs, img_metas, return_loss=True, **mm_inputs)
    assert isinstance(losses, dict)
    loss, _ = detector._parse_losses(losses)
    loss.requires_grad_(True)
    assert float(loss.item()) > 0
    loss.backward()

    # Test forward train with an empty truth batch
    mm_inputs = _demo_mm_inputs(
        input_shape, num_items=[0], with_semantic=with_semantic)
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    losses = detector.forward(imgs, img_metas, return_loss=True, **mm_inputs)
    assert isinstance(losses, dict)
    loss, _ = detector._parse_losses(losses)
    loss.requires_grad_(True)
    assert float(loss.item()) > 0
    loss.backward()

    # Test RoI forward train with empty proposals
    if cfg_file in [
            'panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py'  # noqa: E501
    ]:
        mm_inputs.pop('gt_semantic_seg')
    feature = detector.extract_feat(imgs[0][None, :])
    losses = detector.roi_head.forward_train(feature, img_metas,
                                             [torch.empty((0, 5))],
                                             **mm_inputs)
    assert isinstance(losses, dict)

    # Test forward test
    with torch.no_grad():
        img_list = [g[None, :] for g in imgs]
        batch_results = []
        for one_img, one_meta in zip(img_list, img_metas):
            result = detector.forward([one_img], [[one_meta]],
                                      return_loss=False)
            batch_results.append(result)

    cascade_models = [
        'cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
        'htc/htc_r50_fpn_1x_coco.py',
        'scnet/scnet_r50_fpn_20e_coco.py',
    ]
    # test empty proposal in roi_head
    with torch.no_grad():
        # test no proposal in the whole batch
        detector.simple_test(
            imgs[0][None, :], [img_metas[0]], proposals=[torch.empty((0, 4))])

        # test no proposal of aug
        features = detector.extract_feats([imgs[0][None, :]] * 2)
        detector.roi_head.aug_test(features, [torch.empty((0, 4))] * 2,
                                   [[img_metas[0]]] * 2)

        # test rcnn_test_cfg is None
        if cfg_file not in cascade_models:
            feature = detector.extract_feat(imgs[0][None, :])
            bboxes, scores = detector.roi_head.simple_test_bboxes(
                feature, [img_metas[0]], [torch.empty((0, 4))], None)
            assert all([bbox.shape == torch.Size((0, 4)) for bbox in bboxes])
            assert all([
                score.shape == torch.Size(
                    (0, detector.roi_head.bbox_head.fc_cls.out_features))
                for score in scores
            ])

        # test no proposal in some of the images
        x1y1 = torch.randint(1, 100, (10, 2)).float()
        # x2y2 must be greater than x1y1
        x2y2 = x1y1 + torch.randint(1, 100, (10, 2))
        detector.simple_test(
            imgs[0][None, :].repeat(2, 1, 1, 1), [img_metas[0]] * 2,
            proposals=[torch.empty((0, 4)),
                       torch.cat([x1y1, x2y2], dim=-1)])

        # test no proposal of aug
        detector.roi_head.aug_test(
            features, [torch.cat([x1y1, x2y2], dim=-1),
                       torch.empty((0, 4))], [[img_metas[0]]] * 2)

        # test rcnn_test_cfg is None
        if cfg_file not in cascade_models:
            feature = detector.extract_feat(imgs[0][None, :].repeat(
                2, 1, 1, 1))
            bboxes, scores = detector.roi_head.simple_test_bboxes(
                feature, [img_metas[0]] * 2,
                [torch.empty((0, 4)),
                 torch.cat([x1y1, x2y2], dim=-1)], None)
            assert bboxes[0].shape == torch.Size((0, 4))
            assert scores[0].shape == torch.Size(
                (0, detector.roi_head.bbox_head.fc_cls.out_features))
def test_two_stage_forward(cfg_file):
    models_with_semantic = [
        'htc/htc_r50_fpn_1x_coco.py',
        'scnet/scnet_r50_fpn_20e_coco.py',
    ]
    if cfg_file in models_with_semantic:
        with_semantic = True
    else:
        with_semantic = False

    model = _get_detector_cfg(cfg_file)
    model['pretrained'] = None

    # Save cost
    if cfg_file in [
            'seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py'  # noqa: E501
    ]:
        model.roi_head.bbox_head.num_classes = 80
        model.roi_head.bbox_head.loss_cls.num_classes = 80
        model.roi_head.mask_head.num_classes = 80
        model.test_cfg.rcnn.score_thr = 0.05
        model.test_cfg.rcnn.max_per_img = 100

    from mmdet.models import build_detector
    detector = build_detector(model)

    input_shape = (1, 3, 256, 256)

    # Test forward train with a non-empty truth batch
    mm_inputs = _demo_mm_inputs(
        input_shape, num_items=[10], with_semantic=with_semantic)
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    losses = detector.forward(imgs, img_metas, return_loss=True, **mm_inputs)
    assert isinstance(losses, dict)
    loss, _ = detector._parse_losses(losses)
    loss.requires_grad_(True)
    assert float(loss.item()) > 0
    loss.backward()

    # Test forward train with an empty truth batch
    mm_inputs = _demo_mm_inputs(
        input_shape, num_items=[0], with_semantic=with_semantic)
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    losses = detector.forward(imgs, img_metas, return_loss=True, **mm_inputs)
    assert isinstance(losses, dict)
    loss, _ = detector._parse_losses(losses)
    loss.requires_grad_(True)
    assert float(loss.item()) > 0
    loss.backward()

    # Test forward test
    with torch.no_grad():
        img_list = [g[None, :] for g in imgs]
        batch_results = []
        for one_img, one_meta in zip(img_list, img_metas):
            result = detector.forward([one_img], [[one_meta]],
                                      return_loss=False)
            batch_results.append(result)
def main():
    args = parse_args()

    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out", "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    while not osp.isfile(args.checkpoint):
        print('Waiting for {} to exist...'.format(args.checkpoint))
        time.sleep(60)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting to evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    if dataset.ann_file == 'data/coco/annotations/image_info_test-dev2017.json':
                        result_files = results2json_segm(
                            dataset, outputs, args.out, dump=True)
                    else:
                        result_files = results2json_segm(
                            dataset, outputs, args.out, dump=False)
                    if 'lvis' in dataset.ann_file:
                        # an ugly fix to make it compatible with coco eval
                        from lvis import LVISEval
                        lvisEval = LVISEval(cfg.data.test.ann_file,
                                            result_files, 'segm')
                        lvisEval.run()
                        lvisEval.print_results()
                        # fix lvis api eval iou_thr error:
                        # should be 0.9 but was 0.8999
                        lvisEval.params.iou_thrs[8] = 0.9
                        for iou in [0.5, 0.6, 0.7, 0.8, 0.9]:
                            print('AP at iou {}: {}'.format(
                                iou, lvisEval._summarize('ap', iou_thr=iou)))
                    else:
                        coco_eval(result_files, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}'.format(name)
                        result_files = results2json(
                            dataset, outputs_, result_file, dump=False)
                        coco_eval(result_files, eval_types, dataset.coco)

    # eval on lvis-77 (the COCO-fied LVIS validation set)
    cfg.data.test.ann_file = 'data/lvis/lvis_v0.5_val_cocofied.json'
    cfg.data.test.img_prefix = 'data/lvis/val2017/'
    cfg.data.test.test_mode = True
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=False,
        shuffle=False)
    outputs = single_gpu_test(model, data_loader)

    eval_types = ['segm']
    if eval_types:
        print('Starting to evaluate {}'.format(' and '.join(eval_types)))
        if eval_types == ['proposal_fast']:
            result_file = 'xxx'
            coco_eval(result_file, eval_types, dataset.coco)
        else:
            if not isinstance(outputs[0], dict):
                result_files = results2json_segm(
                    dataset, outputs, 'xxx', dump=False)
                from lvis import LVISEval
                lvisEval = LVISEval('data/lvis/lvis_v0.5_val_cocofied.json',
                                    result_files, 'segm')
                lvisEval.run()
                lvisEval.print_results()
                # fix lvis api eval iou_thr error:
                # should be 0.9 but was 0.8999
                lvisEval.params.iou_thrs[8] = 0.9
                for iou in [0.5, 0.6, 0.7, 0.8, 0.9]:
                    print('AP at iou {}: {}'.format(
                        iou, lvisEval._summarize('ap', iou_thr=iou)))
            else:
                for name in outputs[0]:
                    print('\nEvaluating {}'.format(name))
                    outputs_ = [out[name] for out in outputs]
                    result_file = 'xxx' + '.{}'.format(name)
                    result_files = results2json(
                        dataset, outputs_, result_file, dump=False)
                    coco_eval(result_files, eval_types, dataset.coco)

    # Save predictions in the COCO json format
    if args.json_out and rank == 0:
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.json_out)
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)
def set_configuration(self, cfg_in):
    cfg = self.get_configuration()
    cfg.merge_config(cfg_in)

    self._config_file = str(cfg.get_value("config_file"))
    self._seed_weights = str(cfg.get_value("seed_weights"))
    self._train_directory = str(cfg.get_value("train_directory"))
    self._output_directory = str(cfg.get_value("output_directory"))
    self._gpu_count = int(cfg.get_value("gpu_count"))
    self._integer_labels = strtobool(cfg.get_value("integer_labels"))
    self._launcher = str(cfg.get_value("launcher"))
    self._validate = strtobool(cfg.get_value("validate"))
    self._training_data = []

    from mmcv import Config
    self._cfg = Config.fromfile(self._config_file)

    if self._cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    if self._train_directory is not None:
        self._cfg.work_dir = self._train_directory
        self._groundtruth_store = os.path.join(
            self._train_directory, self._tmp_annotation_file)
        if not os.path.exists(self._train_directory):
            os.mkdir(self._train_directory)
    else:
        self._groundtruth_store = self._tmp_annotation_file
    if self._seed_weights is not None:
        self._cfg.resume_from = self._seed_weights
    if self._gpu_count > 0:
        self._cfg.gpus = self._gpu_count
    else:
        self._cfg.gpus = torch.cuda.device_count()

    if self._cfg.checkpoint_config is not None:
        from mmdet import __version__
        self._cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__, config=self._cfg.text)

    if self._launcher == 'none':
        self._distributed = False
    else:
        self._distributed = True
        from mmdet.apis import init_dist
        init_dist(self._launcher, **self._cfg.dist_params)

    from mmdet.apis import get_root_logger
    self._logger = get_root_logger(self._cfg.log_level)
    self._logger.info('Distributed training: {}'.format(self._distributed))

    if self._random_seed != "none":
        self._logger.info('Set random seed to {}'.format(self._random_seed))
        from mmdet.apis import set_random_seed
        set_random_seed(int(self._random_seed))

    from mmdet.models import build_detector
    self._model = build_detector(
        self._cfg.model,
        train_cfg=self._cfg.train_cfg,
        test_cfg=self._cfg.test_cfg)
def test_config_build_detector():
    """Test that all detection models defined in the configs can be
    initialized."""
    from xdoctest.utils import import_module_from_path
    from mmdet.models import build_detector

    config_dpath = _get_config_directory()
    print('Found config_dpath = {!r}'.format(config_dpath))

    # import glob
    # config_fpaths = list(glob.glob(join(config_dpath, '**', '*.py')))
    # config_names = [relpath(p, config_dpath) for p in config_fpaths]

    # Only tests a representative subset of configurations
    config_names = [
        # 'dcn/faster_rcnn_dconv_c3-c5_r50_fpn_1x.py',
        # 'dcn/cascade_mask_rcnn_dconv_c3-c5_r50_fpn_1x.py',
        # 'dcn/faster_rcnn_dpool_r50_fpn_1x.py',
        'dcn/mask_rcnn_dconv_c3-c5_r50_fpn_1x.py',
        # 'dcn/faster_rcnn_dconv_c3-c5_x101_32x4d_fpn_1x.py',
        # 'dcn/cascade_rcnn_dconv_c3-c5_r50_fpn_1x.py',
        # 'dcn/faster_rcnn_mdpool_r50_fpn_1x.py',
        # 'dcn/faster_rcnn_mdconv_c3-c5_group4_r50_fpn_1x.py',
        # 'dcn/faster_rcnn_mdconv_c3-c5_r50_fpn_1x.py',
        # ---
        # 'htc/htc_x101_32x4d_fpn_20e_16gpu.py',
        'htc/htc_without_semantic_r50_fpn_1x.py',
        # 'htc/htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e.py',
        # 'htc/htc_x101_64x4d_fpn_20e_16gpu.py',
        # 'htc/htc_r50_fpn_1x.py',
        # 'htc/htc_r101_fpn_20e.py',
        # 'htc/htc_r50_fpn_20e.py',
        # ---
        'cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py',
        # 'cityscapes/faster_rcnn_r50_fpn_1x_cityscapes.py',
        # ---
        # 'scratch/scratch_faster_rcnn_r50_fpn_gn_6x.py',
        # 'scratch/scratch_mask_rcnn_r50_fpn_gn_6x.py',
        # ---
        # 'grid_rcnn/grid_rcnn_gn_head_x101_32x4d_fpn_2x.py',
        'grid_rcnn/grid_rcnn_gn_head_r50_fpn_2x.py',
        # ---
        'double_heads/dh_faster_rcnn_r50_fpn_1x.py',
        # ---
        'empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x.py',
        # 'empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x.py',
        # 'empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x.py',
        # 'empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x.py',
        # ---
        # 'ms_rcnn/ms_rcnn_r101_caffe_fpn_1x.py',
        # 'ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x.py',
        # 'ms_rcnn/ms_rcnn_r50_caffe_fpn_1x.py',
        # ---
        # 'guided_anchoring/ga_faster_x101_32x4d_fpn_1x.py',
        # 'guided_anchoring/ga_rpn_x101_32x4d_fpn_1x.py',
        # 'guided_anchoring/ga_retinanet_r50_caffe_fpn_1x.py',
        # 'guided_anchoring/ga_fast_r50_caffe_fpn_1x.py',
        # 'guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x.py',
        # 'guided_anchoring/ga_rpn_r101_caffe_rpn_1x.py',
        # 'guided_anchoring/ga_faster_r50_caffe_fpn_1x.py',
        'guided_anchoring/ga_rpn_r50_caffe_fpn_1x.py',
        # ---
        'foveabox/fovea_r50_fpn_4gpu_1x.py',
        # 'foveabox/fovea_align_gn_ms_r101_fpn_4gpu_2x.py',
        # 'foveabox/fovea_align_gn_r50_fpn_4gpu_2x.py',
        # 'foveabox/fovea_align_gn_r101_fpn_4gpu_2x.py',
        'foveabox/fovea_align_gn_ms_r50_fpn_4gpu_2x.py',
        # ---
        # 'hrnet/cascade_rcnn_hrnetv2p_w32_20e.py',
        # 'hrnet/mask_rcnn_hrnetv2p_w32_1x.py',
        # 'hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e.py',
        # 'hrnet/htc_hrnetv2p_w32_20e.py',
        # 'hrnet/faster_rcnn_hrnetv2p_w18_1x.py',
        # 'hrnet/mask_rcnn_hrnetv2p_w18_1x.py',
        # 'hrnet/faster_rcnn_hrnetv2p_w32_1x.py',
        # 'hrnet/faster_rcnn_hrnetv2p_w40_1x.py',
        'hrnet/fcos_hrnetv2p_w32_gn_1x_4gpu.py',
        # ---
        # 'gn+ws/faster_rcnn_r50_fpn_gn_ws_1x.py',
        # 'gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws_2x.py',
        'gn+ws/mask_rcnn_r50_fpn_gn_ws_2x.py',
        # 'gn+ws/mask_rcnn_r50_fpn_gn_ws_20_23_24e.py',
        # ---
        # 'wider_face/ssd300_wider_face.py',
        # ---
        'pascal_voc/ssd300_voc.py',
        'pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py',
        'pascal_voc/ssd512_voc.py',
        # ---
        # 'gcnet/mask_rcnn_r4_gcb_c3-c5_r50_fpn_syncbn_1x.py',
        # 'gcnet/mask_rcnn_r16_gcb_c3-c5_r50_fpn_syncbn_1x.py',
        # 'gcnet/mask_rcnn_r4_gcb_c3-c5_r50_fpn_1x.py',
        # 'gcnet/mask_rcnn_r16_gcb_c3-c5_r50_fpn_1x.py',
        'gcnet/mask_rcnn_r50_fpn_sbn_1x.py',
        # ---
        'gn/mask_rcnn_r50_fpn_gn_contrib_2x.py',
        # 'gn/mask_rcnn_r50_fpn_gn_2x.py',
        # 'gn/mask_rcnn_r101_fpn_gn_2x.py',
        # ---
        # 'reppoints/reppoints_moment_x101_dcn_fpn_2x.py',
        'reppoints/reppoints_moment_r50_fpn_2x.py',
        # 'reppoints/reppoints_moment_x101_dcn_fpn_2x_mt.py',
        'reppoints/reppoints_partial_minmax_r50_fpn_1x.py',
        'reppoints/bbox_r50_grid_center_fpn_1x.py',
        # 'reppoints/reppoints_moment_r101_dcn_fpn_2x.py',
        # 'reppoints/reppoints_moment_r101_fpn_2x_mt.py',
        # 'reppoints/reppoints_moment_r50_fpn_2x_mt.py',
        'reppoints/reppoints_minmax_r50_fpn_1x.py',
        # 'reppoints/reppoints_moment_r50_fpn_1x.py',
        # 'reppoints/reppoints_moment_r101_fpn_2x.py',
        # 'reppoints/reppoints_moment_r101_dcn_fpn_2x_mt.py',
        'reppoints/bbox_r50_grid_fpn_1x.py',
        # ---
        # 'fcos/fcos_mstrain_640_800_x101_64x4d_fpn_gn_2x.py',
        # 'fcos/fcos_mstrain_640_800_r101_caffe_fpn_gn_2x_4gpu.py',
        'fcos/fcos_r50_caffe_fpn_gn_1x_4gpu.py',
        # ---
        'albu_example/mask_rcnn_r50_fpn_1x.py',
        # ---
        'libra_rcnn/libra_faster_rcnn_r50_fpn_1x.py',
        # 'libra_rcnn/libra_retinanet_r50_fpn_1x.py',
        # 'libra_rcnn/libra_faster_rcnn_r101_fpn_1x.py',
        # 'libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x.py',
        # 'libra_rcnn/libra_fast_rcnn_r50_fpn_1x.py',
        # ---
        # 'ghm/retinanet_ghm_r50_fpn_1x.py',
        # ---
        # 'fp16/retinanet_r50_fpn_fp16_1x.py',
        'fp16/mask_rcnn_r50_fpn_fp16_1x.py',
        'fp16/faster_rcnn_r50_fpn_fp16_1x.py'
    ]

    print('Using {} config files'.format(len(config_names)))

    for config_fname in config_names:
        config_fpath = join(config_dpath, config_fname)
        config_mod = import_module_from_path(config_fpath)

        config_mod.model
        config_mod.train_cfg
        config_mod.test_cfg
        print('Building detector, config_fpath = {!r}'.format(config_fpath))

        # Remove pretrained keys to allow for testing in an offline
        # environment
        if 'pretrained' in config_mod.model:
            config_mod.model['pretrained'] = None

        detector = build_detector(
            config_mod.model,
            train_cfg=config_mod.train_cfg,
            test_cfg=config_mod.test_cfg)
        assert detector is not None
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
        # re-set gpu_ids with distributed training mode
        _, world_size = get_dist_info()
        cfg.gpu_ids = range(world_size)

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # dump config
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info
    meta['config'] = cfg.pretty_text
    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')

    # set random seeds
    if args.seed is not None:
        logger.info(f'Set random seed to {args.seed}, '
                    f'deterministic: {args.deterministic}')
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed
    meta['exp_name'] = osp.basename(args.config)

    model = build_detector(
        cfg.model,
        train_cfg=cfg.get('train_cfg'),
        test_cfg=cfg.get('test_cfg'))

    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__ + get_git_hash()[:7],
            CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_detector(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=(not args.no_validate),
        timestamp=timestamp,
        meta=meta)
def main():
    args = parse_args()
    args.eval = ['bbox']  # NOTE v-qiaofl added.

    assert args.out or args.eval or args.format_only or args.show, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results) with the argument "--out", "--eval", "--format_only" '
         'or "--show"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    args.checkpoint = '/data/liqiaofei/projects/od/work_dirs2/reppoints_moment_r50_fpn_1x/epoch_12.pth'
    args.config = '/home/liqiaofei/mmdetection-v1.1.0/configs/reppoints/reppoints_moment_r50_fpn_1x.py'

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print('\nwriting results to {}'.format(args.out))
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.options is None else args.options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            dataset.evaluate(outputs, args.eval, **kwargs)
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    # Actually it doesn't matter.
    cfg.train_cfg.rcnn.sampler.add_gt_as_proposals = False
    if args.ckpt:
        cfg.resume_from = args.ckpt
    if args.imgs_per_gpu > 0:
        cfg.data.imgs_per_gpu = args.imgs_per_gpu
    if args.nms_thr:
        cfg.test_cfg.rcnn.nms.iou_thr = args.nms_thr
    FOCAL_LENGTH = cfg.get('FOCAL_LENGTH', 1000)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    # train_dataset = get_dataset(cfg.datasets[0].train)
    train_dataset = get_dataset(cfg.datasets[1].train)
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__,
            config=cfg.text,
            CLASSES=train_dataset.CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = train_dataset.CLASSES

    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()
    # build runner
    optimizer = build_optimizer(model, cfg.optimizer)
    runner = Runner(model, lambda x: x, optimizer, cfg.work_dir,
                    cfg.log_level)
    runner.resume(cfg.resume_from)
    model = runner.model
    # ONLY FOR DEBUG
    # print('remove DDP for debug!')
    # model = model._modules['module']
    model.eval()

    dataset_cfg = eval_dataset_mapper[args.dataset]
    dataset_cfg.update(cfg.common_val_cfg)
    dataset_cfg.pop('max_samples')
    dataset = get_dataset(dataset_cfg)
    # dataset.debugging = True
    shuffle = False if args.dataset in stable_list else True
    data_loader = build_dataloader_fuse(
        dataset,
        1,
        0,
        cfg.gpus,
        dist=False,
        shuffle=shuffle,
        drop_last=False)

    dump_dir = os.path.join(cfg.work_dir, f'eval_{args.dataset}')
    os.makedirs(dump_dir, exist_ok=True)
    if args.viz_dir:
        os.makedirs(args.viz_dir, exist_ok=True)
    eval_handler = eval_handler_mapper[args.dataset](
        writer=tqdm.write,
        viz_dir=args.viz_dir,
        FOCAL_LENGTH=FOCAL_LENGTH,
        work_dir=cfg.work_dir)  # type: EvalHandler

    with torch.no_grad():
        for i, data_batch in enumerate(tqdm(data_loader)):
            file_name = data_batch['img_meta'].data[0][0]['file_name']
            try:
                bbox_results, pred_results = model(
                    **data_batch,
                    return_loss=False,
                    use_gt_bboxes=args.use_gt)
                pred_results['bboxes'] = bbox_results
                if args.paper_dir:
                    os.makedirs(args.paper_dir, exist_ok=True)
                    img = denormalize(data_batch['img'].data[0][0].numpy())
                    verts = pred_results['pred_vertices'] + pred_results[
                        'pred_translation']
                    dump_folder = osp.join(args.paper_dir, file_name)
                    os.makedirs(dump_folder, exist_ok=True)
                    plt.imsave(osp.join(dump_folder, 'img.png'), img)
                    for obj_i, vert in enumerate(verts):
                        nr.save_obj(
                            osp.join(dump_folder, f'{obj_i}.obj'), vert,
                            torch.tensor(smpl.faces.astype(np.int64)))
                save_pack = eval_handler(
                    data_batch, pred_results, use_gt=args.use_gt)
                save_pack.update({'bbox_results': pred_results['bboxes']})
                if args.dump_pkl:
                    with open(
                            osp.join(dump_dir,
                                     f"{save_pack['file_name']}.pkl"),
                            'wb') as f:
                        pickle.dump(save_pack, f)
            except Exception as e:
                tqdm.write(f'Fail on {file_name}')
                tqdm.write(str(e))

    eval_handler.finalize()
def main():
    args = parse_args()
    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    # cfg.data.test.test_mode = False

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    ## uncomment to only eval on first 100 imgs
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    print('load model from {}'.format(cfg.load_from))
    checkpoint = load_checkpoint(model, cfg.load_from, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    # merge the per-shard result pickles produced by a previous sharded run.
    # Earlier experiments loaded shards from other locations/prefixes, e.g.
    # '../liyu_mmdet/setN.pkl', './setN.pkl', or four shards with
    # prefix 'mrcnnr50_14.3_clshead'.
    # prefix = 'mrcnnr50_ag_coco_clshead'
    # prefix = 'mrcnnr50_ag_3fc_ft_cocolongtail_cat400_epoch_2'
    prefix = 'mrcnn_r50_ag_cocolt'
    print(prefix)
    set0 = mmcv.load('./{}_set0.pkl'.format(prefix))
    set1 = mmcv.load('./{}_set1.pkl'.format(prefix))
    set2 = mmcv.load('./{}_set2.pkl'.format(prefix))
    set3 = mmcv.load('./{}_set3.pkl'.format(prefix))
    set4 = mmcv.load('./{}_set4.pkl'.format(prefix))
    set5 = mmcv.load('./{}_set5.pkl'.format(prefix))
    set6 = mmcv.load('./{}_set6.pkl'.format(prefix))
    set7 = mmcv.load('./{}_set7.pkl'.format(prefix))
    set_combine = (set0 + set1 + set2 + set3 +
                   set4 + set5 + set6 + set7)
    # set_liyu = mmcv.load('../mmdet_ensemble/results319.pkl')
    # mmcv.dump(set_combine, args.out)
    print('results merged, start eval')
    # for LVIS evaluation instead of COCO:
    # result_files = results2json(dataset, set_combine, args.out, dump=False)
    # lvis_eval(result_files, args.eval, dataset.lvis)
    result_files = results2json(dataset, set_combine, args.out, dump=False)
    coco_eval(result_files, args.eval, dataset.coco)
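# The merge step above hard-codes eight shard variables. A minimal hedged
# sketch of the same idea written generically; the helper name, shard_dir
# argument and glob pattern are assumptions, not part of the original script.
import glob

import mmcv


def merge_result_shards(prefix, shard_dir='.'):
    # each shard pickle holds a list of per-image results, so concatenating
    # the lists in shard order reproduces the full-dataset ordering;
    # lexicographic sorting is fine for single-digit shard indices
    paths = sorted(glob.glob('{}/{}_set*.pkl'.format(shard_dir, prefix)))
    merged = []
    for path in paths:
        merged += mmcv.load(path)
    return merged

# usage sketch: set_combine = merge_result_shards('mrcnn_r50_ag_cocolt')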
def load_model():
    model = build_detector(cfg.model, test_cfg=cfg.test_cfg)
    _ = load_checkpoint(model, model_cfgs[0][1])  # 7 it/s
    return model
def main():
    args = parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpuid
    img_dir = args.img_dir
    out_dir = args.out_dir
    batch_size = args.batch_size

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    if args.img_dir != '':
        file_list = common.load_filepaths(args.img_dir,
                                          suffix=('.jpg', '.png', '.jpeg'),
                                          recursive=True)
    elif args.img_list != '':
        file_list = parse_testfile(args.img_list)
    else:
        raise ValueError('Both img_dir and img_list are empty.')
    dataset = FilesDataset(file_list, cfg.test_pipeline)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=batch_size,
                                   workers_per_gpu=batch_size,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    model = reweight_cls(model, args.tau).cuda()
    model = MMDataParallel(model, device_ids=[0])
    model.eval()

    count = 0
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            # bbox_results, segm_results
            results = model(return_loss=False, rescale=True, **data)
        # batch
        # for result in results:
        #     file_path = file_list[count]
        #     save_name = file_path.replace('/home/songbai.xb/workspace/projects/TAO/data/TAO/frames/val/', '')
        #     save_path = os.path.join(out_dir, save_name)
        #     common.makedirs(os.path.dirname(save_path))
        #     save_in_tao_format(result, save_path)
        #     count += 1
        file_path = file_list[i]
        save_name = file_path.replace(
            '/home/songbai.xb/workspace/projects/TAO/data/TAO/frames/val/',
            '')
        save_name = save_name.replace('.jpg', '.pkl').replace('.jpeg', '.pkl')
        save_path = os.path.join(out_dir, save_name)
        common.makedirs(os.path.dirname(save_path))
        save_in_tao_format(results[0], save_path)
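# The save-path logic above strips a hard-coded TAO frames root and swaps the
# image extension for '.pkl'. A hedged, self-contained version of that
# mapping; `frames_root` is an assumed parameter replacing the original
# hard-coded path, and os.path.splitext covers .jpg/.jpeg/.png uniformly.
import os


def to_output_path(file_path, frames_root, out_dir):
    rel = os.path.relpath(file_path, frames_root)   # e.g. 'video_x/0001.jpg'
    rel = os.path.splitext(rel)[0] + '.pkl'         # any extension -> .pkl
    return os.path.join(out_dir, rel)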
def main():
    args = parse_args()
    assert args.out or args.show, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting to evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json(dataset, outputs, args.out)
                    coco_eval(result_files, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}'.format(name)
                        result_files = results2json(dataset, outputs_,
                                                    result_file)
                        coco_eval(result_files, eval_types, dataset.coco)
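# Several scripts in this collection repeat the same backward-compatibility
# check for class names. A small hedged helper capturing it; the function
# name is an assumption, not an mmdet API.
def resolve_classes(checkpoint, dataset):
    # old checkpoints lack 'CLASSES' in their meta, so fall back to the
    # dataset's own class names
    meta = checkpoint.get('meta', {}) or {}
    return meta['CLASSES'] if 'CLASSES' in meta else dataset.CLASSES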
def main():
    args = parse_args()
    # assert args.show or args.json_out, \
    #     ('Please specify at least one operation (save or show the results) '
    #      'with the argument "--out" or "--show" or "--json_out"')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)

    checkpoint_file = args.checkpoint
    if not checkpoint_file:

        def _epoch_num(name):
            return int(
                re.findall('epoch_[0-9]*.pth',
                           name)[0].replace('epoch_', '').replace('.pth', ''))

        pths = sorted(glob.glob(os.path.join(cfg.work_dir, 'epoch_*.pth')),
                      key=_epoch_num)
        if len(pths) > 0:
            print("Found {}, using it as the checkpoint by default.".format(
                pths[-1]))
            checkpoint_file = pths[-1]
    if not checkpoint_file:
        raise ValueError("Checkpoint not found; check that work_dir is "
                         "not empty.")

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=args.shuffle)  # TODO: hack shuffle True

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, checkpoint_file, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    num_evals = args.num_evals
    if num_evals < 0:
        num_evals = len(data_loader)
    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, num_evals, args.show)

    rank, _ = get_dist_info()
    if rank == 0:
        gt_bboxes, gt_labels, gt_ignore, dataset_name = get_pascal_gts(
            dataset, num_evals)
        print('\nStarting to evaluate {}'.format(dataset_name))
        eval_map(outputs,
                 gt_bboxes,
                 gt_labels,
                 gt_ignore,
                 scale_ranges=None,
                 iou_thr=0.5,
                 dataset=dataset_name,
                 print_summary=True)

        # Always output to pkl for analysing.
        if args.out is None:
            args.out = osp.join(
                cfg.work_dir,
                args.config.split('/')[-1].replace('.py', '_results.pkl'))
        with open(args.out, 'wb') as f:
            pickle.dump(outputs, f, pickle.HIGHEST_PROTOCOL)

    # Save predictions in the COCO json format
    if args.json_out and rank == 0:
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.json_out)
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)
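# The checkpoint autodiscovery above sorts 'epoch_*.pth' files by the parsed
# epoch number rather than lexicographically (which would rank epoch_9 above
# epoch_10). A hedged standalone version; the helper name is an assumption.
import glob
import os
import re


def latest_epoch_checkpoint(work_dir):
    def _epoch_num(name):
        # 'epoch_24.pth' -> 24
        return int(re.findall(r'epoch_([0-9]+)\.pth', name)[0])

    pths = sorted(glob.glob(os.path.join(work_dir, 'epoch_*.pth')),
                  key=_epoch_num)
    return pths[-1] if pths else None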
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = build_detector(cfg.model,
                               train_cfg=None,
                               test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader, args.show)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(detectors, model_args.pop('type'))
        outputs = parallel_test(model_type,
                                model_args,
                                args.checkpoint,
                                dataset,
                                _data_func,
                                range(args.gpus),
                                workers_per_gpu=args.proc_per_gpu)

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting to evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_file = args.out + '.json'
                    results2json(dataset, outputs, result_file)
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}.json'.format(name)
                        results2json(dataset, outputs_, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # build the dataloader
    samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
    if samples_per_gpu > 1:
        # Replace 'ImageToTensor' with 'DefaultFormatBundle'
        cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=False,
        shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)

    model = MMDataParallel(model, device_ids=[0])

    model.eval()

    # the first several iterations may be very slow so skip them
    num_warmup = 5
    pure_inf_time = 0

    # benchmark with 2000 images and take the average
    for i, data in enumerate(data_loader):

        torch.cuda.synchronize()
        start_time = time.perf_counter()

        with torch.no_grad():
            model(return_loss=False, rescale=True, **data)

        torch.cuda.synchronize()
        elapsed = time.perf_counter() - start_time

        if i >= num_warmup:
            pure_inf_time += elapsed
            if (i + 1) % args.log_interval == 0:
                fps = (i + 1 - num_warmup) / pure_inf_time
                print(f'Done image [{i + 1:<3}/ 2000], fps: {fps:.1f} img / s')

        if (i + 1) == 2000:
            pure_inf_time += elapsed
            fps = (i + 1 - num_warmup) / pure_inf_time
            spf = 1 / fps
            print(f'Overall fps: {fps:.1f} img / s')
            print(f'Overall spf: {spf:.4f} s / img')
            break
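# The benchmark above excludes the first num_warmup iterations from the
# accumulated time, so the reported rate is
# fps = (i + 1 - num_warmup) / pure_inf_time. A quick arithmetic check with
# assumed numbers (not measurements from the script):
num_warmup, total_images, timed_seconds = 5, 2000, 100.0
fps = (total_images - num_warmup) / timed_seconds   # 19.95 img / s
spf = 1 / fps                                       # ~0.0501 s / img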
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    if args.options is not None:
        cfg.merge_from_dict(args.options)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)

    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info

    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')

    # set random seeds
    if args.seed is not None:
        logger.info(f'Set random seed to {args.seed}, '
                    f'deterministic: {args.deterministic}')
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    from mmcv.runner import get_dist_info
    rank, world_size = get_dist_info()
    if rank == 0:
        print(model)
        print('Model has {:.2f}M parameters.'.format(
            sum(x.numel() for x in model.parameters()) / 1e6))
        if hasattr(model, 'backbone'):
            print('Backbone has {:.2f}M parameters.'.format(
                sum(x.numel() for x in model.backbone.parameters()) / 1e6))
        if hasattr(model, 'neck'):
            print('Neck has {:.2f}M parameters.'.format(
                sum(x.numel() for x in model.neck.parameters()) / 1e6))
        if hasattr(model, 'roi_head'):
            print('Bbox head has {:.2f}M parameters.'.format(
                sum(x.numel()
                    for x in model.roi_head.bbox_head.parameters()) / 1e6))
        if hasattr(model, 'bbox_head'):
            print('Bbox head has {:.2f}M parameters.'.format(
                sum(x.numel() for x in model.bbox_head.parameters()) / 1e6))

    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.pretty_text,
                                          CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_detector(model,
                   datasets,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   timestamp=timestamp,
                   meta=meta)
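# Both training entry points apply the linear scaling rule
# (https://arxiv.org/abs/1706.02677): learning rates in the configs assume an
# 8-GPU setup, so the LR is rescaled by the actual GPU count over 8. A worked
# check with assumed values:
base_lr = 0.02                            # e.g. a common 8-GPU default
gpu_ids = range(4)
scaled_lr = base_lr * len(gpu_ids) / 8    # 0.02 * 4 / 8 = 0.01
assert abs(scaled_lr - 0.01) < 1e-12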
# imports needed to run this snippet; Config, MMDataParallel and
# single_gpu_test were not imported in the original fragment, and the
# mmcv/mmdet v2-style import paths below are an assumption
from mmcv import Config
from mmcv.parallel import MMDataParallel
from mmcv.runner import load_checkpoint

from mmdet.apis import single_gpu_test
from mmdet.core.evaluation import reval_map
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.models import build_detector

cfg = Config.fromfile('../configs/r3det/r3det_r50_fpn_2x.py')

dataset = build_dataset(cfg.data.val)
data_loader = build_dataloader(dataset,
                               samples_per_gpu=1,
                               workers_per_gpu=0,
                               dist=False,
                               shuffle=False)

model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
checkpoint = load_checkpoint(
    model,
    "../work_dirs/r3det_r50_fpn_2x_20200616/epoch_24.pth",
    map_location='cpu')
if 'CLASSES' in checkpoint['meta']:
    model.CLASSES = checkpoint['meta']['CLASSES']
else:
    model.CLASSES = dataset.CLASSES

model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader)
dataset.evaluate(outputs)
def main():
    args = parse_args()

    assert args.out or args.eval or args.format_only or args.show, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results) with the argument "--out", "--eval", "--format_only" '
         'or "--show"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # load anchors
    if isinstance(cfg.model, dict) and \
            cfg.model.get('type', 'FasterRCNN') == 'MyFasterRCNN':
        anchors = dict()
        with open(os.path.join(cfg.work_dir, 'anchors.json'), 'r') as f:
            anchors = json.load(f)
        print("loaded anchors: {}\n".format(anchors))
        cfg.model['anchors'] = anchors

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_module(model)
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES
    my_config.set('classes', model.CLASSES)

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print('\nwriting results to {}'.format(args.out))
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.options is None else args.options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            dataset.evaluate(outputs, args.eval, **kwargs)
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # update gpu num
    if dist.is_initialized():
        cfg.gpus = dist.get_world_size()

    cfg.data.imgs_per_gpu = int(cfg.data.imgs_per_gpu * args.scale_bs)
    cfg.data.workers_per_gpu = int(cfg.data.workers_per_gpu * args.scale_bs)

    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer['lr'] = cfg.optimizer[
            'lr'] * cfg.gpus / 8 * cfg.data.imgs_per_gpu / 2

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))
    # log cfg
    logger.info('training config: {}\n'.format(pprint.pformat(cfg._cfg_dict)))
    # log git hash
    logger.info('git hash: {}'.format(get_git_hash()))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        datasets.append(build_dataset(cfg.data.val))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text,
                                          CLASSES=datasets[0].CLASSES,
                                          git_hash=get_git_hash())

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    try:
        logger.info(summary(model, cfg))
    except RuntimeError:
        logger.info('RuntimeError during summary')
        logger.info(str(model))
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_detector(model,
                   datasets,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
    logger.info('git hash: {}'.format(get_git_hash()))
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)

    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, '{}.log'.format(timestamp))
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([('{}: {}'.format(k, v))
                          for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info

    # log some basic info
    logger.info('Distributed training: {}'.format(distributed))
    logger.info('Config:\n{}'.format(cfg.text))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}, deterministic: {}'.format(
            args.seed, args.deterministic))
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text,
                                          CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_detector(model,
                   datasets,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   timestamp=timestamp,
                   meta=meta)
def set_configuration( self, cfg_in ):
    cfg = self.get_configuration()
    cfg.merge_config( cfg_in )

    self._config_file = str( cfg.get_value( "config_file" ) )
    self._seed_weights = str( cfg.get_value( "seed_weights" ) )
    self._train_directory = str( cfg.get_value( "train_directory" ) )
    self._output_directory = str( cfg.get_value( "output_directory" ) )
    self._gpu_count = int( cfg.get_value( "gpu_count" ) )
    self._integer_labels = strtobool( cfg.get_value( "integer_labels" ) )
    self._launcher = str( cfg.get_value( "launcher" ) )
    self._validate = strtobool( cfg.get_value( "validate" ) )

    self._training_data = []

    from mmcv import Config
    self._cfg = Config.fromfile( self._config_file )

    if self._cfg.get( 'cudnn_benchmark', False ):
        torch.backends.cudnn.benchmark = True

    if self._train_directory is not None:
        self._cfg.work_dir = self._train_directory
        self._groundtruth_store = os.path.join(
            self._train_directory, self._tmp_annotation_file )
        if not os.path.exists( self._train_directory ):
            os.mkdir( self._train_directory )
    else:
        self._groundtruth_store = self._tmp_annotation_file

    if self._seed_weights is not None:
        self._cfg.resume_from = self._seed_weights

    if self._gpu_count > 0:
        self._cfg.gpus = self._gpu_count
    else:
        self._cfg.gpus = torch.cuda.device_count()

    if self._cfg.checkpoint_config is not None:
        from mmdet import __version__
        self._cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__, config=self._cfg.text )

    if self._launcher == 'none':
        self._distributed = False
    else:
        self._distributed = True
        from mmdet.apis import init_dist
        init_dist( self._launcher, **self._cfg.dist_params )

    from mmdet.apis import get_root_logger
    self._logger = get_root_logger( self._cfg.log_level )
    self._logger.info( 'Distributed training: {}'.format( self._distributed ) )

    if self._random_seed != "none":
        self._logger.info( 'Set random seed to {}'.format( self._random_seed ) )
        from mmdet.apis import set_random_seed
        set_random_seed( int( self._random_seed ) )

    from mmdet.models import build_detector
    self._model = build_detector(
        self._cfg.model, train_cfg=self._cfg.train_cfg,
        test_cfg=self._cfg.test_cfg )
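# set_configuration() parses boolean options with distutils.util.strtobool,
# which maps 'y'/'yes'/'true'/'on'/'1' to 1 and 'n'/'no'/'false'/'off'/'0'
# to 0 (ints, not bools) and raises ValueError on anything else. A quick
# illustration:
from distutils.util import strtobool

assert strtobool('true') == 1
assert strtobool('NO') == 0
assert bool(strtobool('1')) is True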