def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    if cfg.checkpoint_config is not None:
        # save mmdet version in checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text)
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))
    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)
    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    # get datasets
    train_dataset = get_dataset(cfg.data.train)
    val_dataset = get_dataset(cfg.data.val)
    print('len of dataset: {}.'.format(len(train_dataset)))
    # train
    train_detector(model,
                   [train_dataset, val_dataset],
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    if cfg.checkpoint_config is not None:
        # save mmdet version in checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text)
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))
    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)
    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    train_dataset = get_dataset(cfg.data.train)
    # an optional second training set may be configured under cfg.data2
    if hasattr(cfg, 'data2') and hasattr(cfg.data2, 'train'):
        train_dataset2 = get_dataset(cfg.data2.train)
    else:
        train_dataset2 = None
    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger,
                   train_dataset2=train_dataset2)
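The `cfg.data2` branch above implies the config may carry a second, optional dataset section. A sketch of what such a section could look like; everything below (the dataset type, paths, and scale) is an assumption, since the source only shows that `cfg.data2.train` must exist:

# hypothetical second-dataset section consumed by the hasattr check above
data2 = dict(
    train=dict(
        type='CocoDataset',                            # assumed dataset type
        ann_file='data/extra/annotations/train.json',  # hypothetical path
        img_prefix='data/extra/train/',                # hypothetical path
        img_scale=(1333, 800)))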
def _non_dist_train(model, dataset, cfg, validate=False):
    # prepare data loaders
    data_loaders = [
        build_dataloader(dataset,
                         cfg.data.imgs_per_gpu,
                         cfg.data.workers_per_gpu,
                         cfg.gpus,
                         dist=False)
    ]
    if validate:
        val_dataset = get_dataset(cfg.data.val)
        data_loaders += [
            build_dataloader(val_dataset,
                             cfg.data.imgs_per_gpu,
                             cfg.data.workers_per_gpu,
                             cfg.gpus,
                             dist=False)
        ]
        cfg.workflow += [('val', 1)]
    # put model on gpus
    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()
    # build runner
    optimizer = build_optimizer(model, cfg.optimizer)
    runner = Runner(model, batch_processor, optimizer, cfg.work_dir,
                    cfg.log_level)
    runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config)
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
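In mmcv's `Runner`, each entry of `cfg.workflow` pairs a mode with a number of epochs, and the loaders in `data_loaders` correspond positionally to those entries. The append above therefore turns a train-only workflow into alternating train and val epochs; a minimal sketch of the config value:

# typical config default: train only
workflow = [('train', 1)]
# after `cfg.workflow += [('val', 1)]` in _non_dist_train:
# [('train', 1), ('val', 1)]  -> one train epoch, then one val epoch, repeating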
def save_det_result(config_file,
                    out_dir,
                    checkpoint_file=None,
                    img_dir=None,
                    score_thr=0.2):
    """Visualize detection results and save them to disk."""
    cfg = Config.fromfile(config_file)
    data_test = cfg.data.test
    dataset = get_dataset(data_test)
    classnames = [dataset.CLASSES]
    # use checkpoint path in cfg
    if not checkpoint_file:
        checkpoint_file = osp.join(cfg.work_dir, 'latest.pth')
    # use testset in cfg
    if not img_dir:
        img_dir = data_test.img_prefix
    model = init_detector(config_file, checkpoint_file, device='cuda:0')
    img_list = os.listdir(img_dir)
    for img_name in img_list:
        img_path = osp.join(img_dir, img_name)
        img_out_path = osp.join(out_dir, img_name)
        result = inference_detector(model, img_path)
        img = draw_poly_detections(img_path,
                                   result,
                                   classnames,
                                   scale=1.0,
                                   threshold=score_thr,
                                   colormap=[(212, 188, 0)])
        print(img_out_path)
        cv2.imwrite(img_out_path, img)
def main():
    args = parse_args()
    # build the model and load checkpoint
    model = init_detector(args.config, args.checkpoint, device='cuda:0')
    num2class_file = ('/cos_person/275/1745/object_detection/'
                      'class-descriptions-boxable.csv')
    num2class_csv = pd.read_csv(num2class_file)
    classes = []
    for cat in num2class_csv['Id']:
        classes.append(cat)
    model.eval()
    output_csv = pd.read_csv(sample_submit_path_name)
    # ensure all is predicted
    output_csv.loc[:, 'PredictionString'] = np.zeros([len(output_csv)])
    cfg = mmcv.Config.fromfile(args.config)
    dataset = get_dataset(cfg.data.test)
    for i, result in enumerate(inference_detector(model, dataset.image_list)):
        string_output = general_processing.object_result_to_string(
            None, result, classes)
        ID = os.path.splitext(os.path.basename(dataset.image_list[i]))[0]
        output_csv.loc[output_csv['ImageId'] == ID,
                       'PredictionString'] = string_output
    output_csv.to_csv(output_file_path_name, index=False)
def main():
    # parse command-line arguments
    args = parse_args()
    # load the config file
    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    # if no work dir is given on the command line, keep the config default
    # (e.g. work_dir = './work_dirs/cascade_rcnn_r50_fpn_1x'); otherwise override it
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    # if resuming from a checkpoint, update cfg; otherwise keep the default
    # resume_from = None
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    # number of GPUs comes from the command line
    cfg.gpus = args.gpus
    if cfg.checkpoint_config is not None:
        # save mmdet version in checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text)
    # init distributed env first, since logger depends on the dist info.
    # without a launcher, distributed is False
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))
    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)
    # the core calls: build_detector, get_dataset, train_detector
    # build_detector() instantiates the network from the model, train and test
    # sections of the config and returns the model object
    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    # register the dataset from the 'train' dict inside cfg.data; returns the
    # dataset object holding the data and all of its annotation labels
    train_dataset = get_dataset(cfg.data.train)
    # start training
    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
def _non_dist_train(model, train_dataset, cfg, validate=False):
    # data loader factory; `balanced` is an optional per-dataset config flag
    build_fn = lambda dataset, cfg_dict: build_dataloader(
        dataset,
        cfg.data.imgs_per_gpu,
        cfg.data.workers_per_gpu,
        cfg.gpus,
        dist=False,
        balanced=cfg_dict.get('balanced', False))
    datasets = [train_dataset]
    data_loaders = [build_fn(train_dataset, cfg.data.train)]
    if validate:
        val_dataset = get_dataset(cfg.data.val)
        datasets.append(val_dataset)
        data_loaders.append(build_fn(val_dataset, cfg.data.val))
    # put model on gpus
    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()
    # build runner
    runner = Runner(model, batch_processor, cfg.optimizer, cfg.work_dir,
                    cfg.log_level)
    runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config)
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    # append a timestamp to the default work dir so runs do not overwrite each other
    cfg.work_dir = cfg.work_dir + '_' + time.strftime('Time_%m%d_%H%M%S',
                                                      time.localtime())
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))
    # log cfg
    logger.info('training config: {}\n'.format(pprint.pformat(cfg._cfg_dict)))
    # log git hash
    logger.info('git hash: {}'.format(get_git_hash()))
    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)
    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    train_dataset = get_dataset(cfg.data.train)
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text,
                                          classes=train_dataset.CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = train_dataset.CLASSES
    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    if cfg.checkpoint_config is not None:
        # save mmdet version in checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text)
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))
    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)
    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    # load pretrained detector weights, then attach the flow network
    # (optionally the detector could be frozen so only the flow head trains)
    args.resume_from = ('/home/ubuntu/code/fengda/MaskTrackRCNN/'
                        'pretrained_models/epoch_12.pth')
    load_checkpoint(model, args.resume_from)
    model.load_flow()
    # get dataset
    train_dataset = get_dataset(cfg.data.train)
    print('len of dataset: {}.'.format(len(train_dataset)))
    # train
    train_flownet(model,
                  train_dataset,
                  cfg,
                  distributed=distributed,
                  validate=args.validate,
                  logger=logger)
def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))
    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)
    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    train_dataset = get_dataset(cfg.data.train)
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text,
                                          CLASSES=train_dataset.CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = train_dataset.CLASSES
    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
    # run one evaluation pass after training finishes
    import os.path as osp
    checkpoint = osp.join(cfg.work_dir, 'latest.pth')
    out = osp.join(cfg.work_dir, 'val_cropped_dets.pkl')
    _do_dota_eval(args.config, checkpoint, out)
def main():
    import os
    os.environ["CUDA_VISIBLE_DEVICES"] = "3"
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        # create a single-process group so distributed-only ops still work
        import torch.distributed as dist
        dist.init_process_group('gloo',
                                init_method='file:///tmp/somefile',
                                rank=0,
                                world_size=1)
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))
    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)
    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    train_dataset = get_dataset(cfg.data.train)
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text,
                                          CLASSES=train_dataset.CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = train_dataset.CLASSES
    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
def __init__(self, config_file, checkpoint_file):
    # init RoITransformer
    self.config_file = config_file
    self.checkpoint_file = checkpoint_file
    self.cfg = Config.fromfile(self.config_file)
    self.data_test = self.cfg.data['test']
    self.dataset = get_dataset(self.data_test)
    self.classnames = self.dataset.CLASSES
    self.model = init_detector(config_file, checkpoint_file, device='cuda:0')
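A minimal usage sketch for this wrapper; the enclosing class name `RoITransformer` and the file paths are assumptions (only the `init RoITransformer` comment hints at the class name), while `inference_detector` is the same mmdet call used elsewhere in this section:

# hypothetical instantiation of the wrapper class defined above
detector = RoITransformer(
    'configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota.py',  # hypothetical config path
    'work_dirs/roi_transformer/latest.pth')                  # hypothetical checkpoint path
result = inference_detector(detector.model, 'demo/sample.png')  # hypothetical image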
def main():
    args = parse_args()
    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')
    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    # evaluate on the val split instead of the test split
    cfg.data.val.test_mode = True
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = get_dataset(cfg.data.val)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)
    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints; this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES
    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show, args.log_dir)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)
    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        from cvtools.evaluation.eval_dota import EvalDOTADets
        eval_data_dets = EvalDOTADets(args.out, cfg.data.val.ann_file)
        eval_data_dets.eval(dataset='hrsc2016_L2')
def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))
    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)
    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    # freeze backbone and neck parameters; only the heads are trained
    for key, val in model.named_parameters():
        if key.find("backbone") >= 0 or key.find("neck") >= 0:
            val.requires_grad = False
    train_dataset = get_dataset(cfg.data.train)
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text,
                                          classes=train_dataset.CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = train_dataset.CLASSES
    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
def main():
    # ipdb.set_trace()
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark: speeds things up when the input image size is fixed
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        # work dir for training artifacts; if not given on the command line,
        # a directory named after the py config file is generated automatically
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        # checkpoint to resume training from
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    # saving mmdet version meta data in checkpoints is disabled here:
    # if cfg.checkpoint_config is not None:
    #     cfg.checkpoint_config.meta = dict(
    #         mmdet_version=__version__, config=cfg.text)
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))
    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)
    # the model is built the same way as for inference
    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    # ipdb.set_trace()
    # note that cfg.data.train is what gets passed in
    train_dataset = get_dataset(cfg.data.train)
    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
def main():
    args = parse_args()  # parse command-line arguments
    cfg = Config.fromfile(args.config)  # load the config file
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir  # checkpoint save path
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from  # checkpoint to resume from
    cfg.gpus = args.gpus  # number of gpus
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        # no launcher means no distributed training
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))
    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)
    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    # get_dataset takes the train config dict and returns an instance of the
    # configured dataset class
    train_dataset = get_dataset(cfg.data.train)
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text,
                                          CLASSES=train_dataset.CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = train_dataset.CLASSES  # a tuple of class names
    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
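For reference, the `cfg.data.train` dict handed to `get_dataset` names a registered dataset type plus its constructor arguments. A minimal sketch of such a section in an mmdet-1.x style config (the paths, scale and flip ratio are placeholder assumptions):

# hypothetical config fragment; get_dataset(cfg.data.train) instantiates it
data = dict(
    imgs_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type='CocoDataset',  # name of a registered dataset class
        ann_file='data/coco/annotations/instances_train2017.json',
        img_prefix='data/coco/train2017/',
        img_scale=(1333, 800),
        flip_ratio=0.5))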
def main():
    args = parse_args()
    fb_cfg = mmcv_config.fromfile(args.fb_cfg)
    _space = fb_cfg.search_space
    model_cfg = mmcv_config.fromfile(args.model_cfg)
    # set cudnn_benchmark
    if model_cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        model_cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        model_cfg.resume_from = args.resume_from
    model_cfg.gpus = args.gpus
    if model_cfg.checkpoint_config is not None:
        # save mmdet version in checkpoints as meta data
        model_cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                                config=model_cfg.text)
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **model_cfg.dist_params)
    # init logger before other steps
    logger = get_root_logger(model_cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))
    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)
    # build the detector from the architecture search space and theta weights
    model = detection(mmcv_config(model_cfg['model_cfg']),
                      mmcv_config(model_cfg['train_cfg']),
                      mmcv_config(model_cfg['test_cfg']),
                      _space,
                      args.theta_txt)
    print(model)
    train_dataset = get_dataset(model_cfg.data.train)
    train_detector(model,
                   train_dataset,
                   model_cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
def main():
    args = parse_args()
    os.makedirs(args.output, exist_ok=True)
    cfg = Config.fromfile(args.config)
    dataset = get_dataset(cfg.data.train)
    # visualize 500 randomly sampled training examples with masks and boxes
    for i in tqdm(np.random.randint(0, len(dataset), 500)):
        data = dataset[i]
        img = data['img'].data.numpy().transpose(1, 2, 0)
        masks = data['gt_masks'].data.transpose(1, 2, 0).astype(bool)
        bboxes = data['gt_bboxes'].data.numpy()
        img = mmcv.imdenormalize(img,
                                 mean=cfg.img_norm_cfg.mean,
                                 std=cfg.img_norm_cfg.std,
                                 to_bgr=False)
        img = draw_masks(img, masks).astype(np.uint8)
        draw_bounding_boxes_on_image_array(img,
                                           bboxes,
                                           use_normalized_coordinates=False,
                                           thickness=5)
        cv2.imwrite(
            osp.join(args.output, f'{i}_{np.random.randint(0, 10000)}.jpg'),
            img[..., ::-1])
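The `imdenormalize` call above inverts the normalization applied by the data pipeline. A quick self-contained round-trip check (the mean/std values are the usual ImageNet statistics, assumed here for illustration):

import numpy as np
import mmcv

mean = np.array([123.675, 116.28, 103.53])
std = np.array([58.395, 57.12, 57.375])
img = np.random.randint(0, 256, (32, 32, 3)).astype(np.float32)
norm = mmcv.imnormalize(img, mean, std, to_rgb=False)
restored = mmcv.imdenormalize(norm, mean, std, to_bgr=False)
assert np.allclose(img, restored, atol=1e-3)  # denormalize undoes normalize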
def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    if cfg.checkpoint_config is not None:
        # save mmdet version in checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text)
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))
    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)
    # Registration happens first: the BACKBONES, NECKS, ROI_EXTRACTORS, HEADS
    # and DETECTORS registries are created, then classes are registered, e.g.
    # BACKBONES.register_module(SSDVGG), HEADS.register_module(AnchorHead),
    # HEADS.register_module(SSDHead), DETECTORS.register_module(SingleStageDetector).
    # Finally build_detector() is roughly equivalent to SingleStageDetector(**args).
    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    train_dataset = get_dataset(cfg.data.train)
    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
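The registration comment above can be made concrete with a simplified, self-contained sketch of the registry pattern; this is not mmdet's actual implementation, but the names mirror the real ones:

class Registry:
    """Minimal stand-in for mmdet's registry: maps class names to classes."""

    def __init__(self, name):
        self.name = name
        self.module_dict = {}

    def register_module(self, cls):
        # used as a decorator: @DETECTORS.register_module
        self.module_dict[cls.__name__] = cls
        return cls


DETECTORS = Registry('detector')


@DETECTORS.register_module
class SingleStageDetector:
    def __init__(self, backbone, train_cfg=None, test_cfg=None):
        self.backbone = backbone


def build_detector(cfg, train_cfg=None, test_cfg=None):
    # look up the registered type by name and instantiate it with the
    # remaining config entries as keyword arguments
    args = dict(cfg)
    detector_cls = DETECTORS.module_dict[args.pop('type')]
    return detector_cls(train_cfg=train_cfg, test_cfg=test_cfg, **args)


model = build_detector(dict(type='SingleStageDetector', backbone='vgg16'))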
def reset(self, index=0):
    # Build a data loader for a single video.
    if self.is_train:
        # randomly select a video from the train list
        video_name = random.sample(self.videos, 1)[0]
    else:
        # select a video from the validation list
        video_name = self.videos[index]
    # get the data loader of the selected video
    self.cfg_test = modify_cfg(self.cfg, video_name)
    # TODO: revisit how the dataset is constructed
    # self.dataset = obj_from_dict(self.cfg_test, datasets, dict(test_mode=True))
    self.dataset = get_dataset(self.cfg_test)
    print('video name: {}.\tlen of dataset: {}.'.format(
        video_name, len(self.dataset)))
    self.data_loader = get_dataloader(self.dataset)
    if self.is_train:
        # randomly choose a start frame, leaving at least 10 frames to run
        if len(self.data_loader) <= 10:
            self.start_frame = 0
        else:
            self.start_frame = np.random.randint(
                min(len(self.data_loader) - 10, 10))
    else:
        # start from the first frame
        self.start_frame = 0
    self.idx_frame = 0
    self.data_current_full = get_VIS_data(self.data_loader,
                                          self.start_frame + self.idx_frame,
                                          dataset=self.dataset)
    self.data_last_full = self.data_current_full
    self.feat_last_full = self.get_self_feat(self.data_current_full['img'])
    self._state_reset()
    self.rewards = []
    self.done = False
    return self.state
def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    cfg.gpus = args.gpus
    if cfg.checkpoint_config is not None:
        # save mmdet version in checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text)
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))
    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)
    # optional CLI overrides for the auxiliary loss weights are disabled:
    # if args.aux_cls_weight is not None:
    #     cfg.train_cfg.aux.cls_weight = args.aux_cls_weight
    # if args.aux_reg_weight is not None:
    #     cfg.train_cfg.aux.reg_weight = args.aux_reg_weight
    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    train_dataset = get_dataset(cfg.data.train)
    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
def main():
    args = parse_args()
    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')
    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = get_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=24,
                                   workers_per_gpu=8,
                                   dist=False)
    # build the model; note that checkpoint loading is disabled below, so the
    # model runs with freshly initialized weights
    model = build_detector(cfg.model, train_cfg=None,
                           test_cfg=cfg.test_cfg).cuda()
    # checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # if 'CLASSES' in checkpoint['meta']:
    #     model.CLASSES = checkpoint['meta']['CLASSES']
    # else:
    #     model.CLASSES = dataset.CLASSES
    model = MMDataParallel(model, device_ids=[0])
    outputs = single_gpu_test(model, data_loader, args.show)
def main(cfg_file, test_num=1):
    """Visualize ground-truth boxes against boxes recovered from the encoded
    training targets, for a few validation batches.

    Args:
        cfg_file: path to the config file.
        test_num: index of the last batch to visualize.
    """
    cfg = mmcv.Config.fromfile(cfg_file)
    dataset = get_dataset(cfg.data.val)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=1,
                                   num_gpus=1,
                                   dist=True,
                                   shuffle=False)
    for i, data in enumerate(data_loader):
        imgs = tensor2imgs(data['img'].data[0], **cfg.img_norm_cfg)
        gt_boxes = data['gt_bboxes'].data[0]
        gt_labels = data['gt_labels'].data[0]
        inp_shapes = [
            meta['pad_shape'][:2] for meta in data['img_meta'].data[0]
        ]
        # encode ground truth into network targets, then decode back to boxes
        outs = gt2out(gt_boxes,
                      gt_labels,
                      inp_shapes,
                      stride=4,
                      categories=len(dataset.CLASSES))
        bboxes, labels = out2box(outs, data['img_meta'].data[0],
                                 len(dataset.CLASSES))
        vis_bbox(imgs[0],
                 gt_boxes[0].cpu().numpy(),
                 gt_labels[0].cpu().numpy(),
                 show=True,
                 show_str='ground truth')
        print('num detected box:', bboxes.shape[0])
        vis_bbox(imgs[0],
                 bboxes,
                 labels,
                 show=True,
                 show_str='transformed boxes',
                 color='green')
        if i >= test_num:
            break
def main():
    args = parse_args()
    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')
    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # load previously dumped results and evaluate them against the test set
    dataset = get_dataset(cfg.data.test)
    outputs = mmcv.load(args.out)
    eval_types = args.eval
    if eval_types:
        print('Starting evaluate {}'.format(' and '.join(eval_types)))
        if eval_types == ['proposal_fast']:
            result_file = args.out
            coco_eval(result_file, eval_types, dataset.coco)
        else:
            if not isinstance(outputs[0], dict):
                result_files = results2json(dataset, outputs, args.out)
                coco_eval(result_files, eval_types, dataset.coco)
            else:
                for name in outputs[0]:
                    print('\nEvaluating {}'.format(name))
                    outputs_ = [out[name] for out in outputs]
                    result_file = args.out + '.{}'.format(name)
                    result_files = results2json(dataset, outputs_,
                                                result_file)
                    coco_eval(result_files, eval_types, dataset.coco)
def _non_dist_train(model, dataset, cfg, validate=False):
    # prepare data loaders
    data_loaders = [
        build_dataloader(dataset,
                         cfg.data.imgs_per_gpu,
                         cfg.data.workers_per_gpu,
                         cfg.gpus,
                         dist=False)
    ]
    # put model on gpus
    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()
    # build runner
    optimizer = build_optimizer(model, cfg.optimizer)
    runner = Runner(model, batch_processor, optimizer, cfg.work_dir,
                    cfg.log_level)
    runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config)
    # register eval hooks
    if validate:
        # support batch_size > 1 in validation
        val_samples_per_gpu = cfg.data.val.pop('samples_per_gpu', 1)
        val_dataset = get_dataset(cfg.data.val)
        val_dataloader = build_dataloader(
            val_dataset,
            imgs_per_gpu=val_samples_per_gpu,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=False,
            shuffle=False)
        # the hook's kwargs come from the `evaluation` section of the config,
        # not from the dataset config
        eval_cfg = cfg.get('evaluation', {})
        runner.register_hook(EvalHook(val_dataloader, **eval_cfg))
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
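With `eval_cfg` wired in above, the hook's keyword arguments come from an `evaluation` section of the config. A minimal sketch of that section (the interval value is an assumption):

# hypothetical config fragment read by cfg.get('evaluation', {})
evaluation = dict(interval=1)  # validate after every training epoch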
def main():
    parser = argparse.ArgumentParser(description='MMDet test detector')
    parser.add_argument('config', help='test config file path')
    args = parser.parse_args()
    cfg = mmcv.Config.fromfile(args.config)
    dataset = get_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=False,
                                   shuffle=False)
    coco = dataset.coco
    assert isinstance(coco, COCO)
    cocoGt = coco
    # plot the ground-truth annotations
    imgIds = cocoGt.getImgIds(list(x + 1 for x in range(165)))
    imageFile = ('/media/wl/000675B10007A33A/DatasetRepo/wider_face_split/'
                 'WIDER_train/images/')
    plt.figure()
    for i in range(len(imgIds)):
        imgId = imgIds[i]
        Img_gt = cocoGt.loadImgs(imgId)[0]
        imageUrl = imageFile + Img_gt['file_name']
        # show the GT labels
        annId_gt = cocoGt.getAnnIds(Img_gt['id'])
        imgAnn_gt = cocoGt.loadAnns(ids=annId_gt)
        print(imgAnn_gt)
        I = io.imread(imageUrl)
        plt.imshow(I)
        cocoGt.showAnns(imgAnn_gt)
        plt.title('GT')
        plt.show()
def main():
    args = parse_args()
    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')
    global work_dirs
    work_dirs = os.path.dirname(args.checkpoint)
    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        # create a single-process group so distributed-only ops still work
        import torch.distributed as dist
        dist.init_process_group('gloo',
                                init_method='file:///tmp/somefile',
                                rank=0,
                                world_size=1)
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = get_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)
    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints; this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES
    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show, args.log_dir)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)
    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        # convert raw outputs to oriented-box results; other converters
        # (mixed, multi-scale, mask, hbb, mask-score, assembled masks) exist
        # as alternatives
        outputs = tran2obb_results(outputs)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_file = args.out + '.json'
                    results2json(dataset, outputs, result_file)
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}.json'.format(name)
                        results2json(dataset, outputs_, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
def evaluate_model(model_name, paper_arxiv_id, weights_url, weights_name,
                   paper_results, config):
    evaluator = COCOEvaluator(root='./.data/vision/coco',
                              model_name=model_name,
                              paper_arxiv_id=paper_arxiv_id,
                              paper_results=paper_results)
    out = 'results.pkl'
    launcher = 'none'
    if out is not None and not out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')
    cfg = mmcv.Config.fromfile(config)
    cfg.data.test[
        'ann_file'] = './.data/vision/coco/annotations/instances_val2017.json'
    cfg.data.test['img_prefix'] = './.data/vision/coco/val2017/'
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    # init distributed env first, since logger depends on the dist info.
    if launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(launcher, **cfg.dist_params)
    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = get_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)
    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    local_checkpoint, _ = urllib.request.urlretrieve(weights_url,
                                                     weights_name)
    checkpoint = load_checkpoint(model, local_checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints; this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES
    evaluator.reset_time()
    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs, cache_exists = single_gpu_test(model, data_loader, False,
                                                evaluator)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, '')
        # multi_gpu_test does not report cache status
        cache_exists = False
    if cache_exists:
        print('Cache exists: %s' % (evaluator.batch_hash))
        evaluator.save()
    else:
        from mmdet.core import results2json
        rank, _ = get_dist_info()
        if out and rank == 0:
            print('\nwriting results to {}'.format(out))
            mmcv.dump(outputs, out)
            eval_types = ['bbox']
            if eval_types:
                print('Starting evaluate {}'.format(' and '.join(eval_types)))
                if eval_types == ['proposal_fast']:
                    result_file = out
                else:
                    if not isinstance(outputs[0], dict):
                        result_files = results2json(dataset, outputs, out)
                    else:
                        for name in outputs[0]:
                            print('\nEvaluating {}'.format(name))
                            outputs_ = [out[name] for out in outputs]
                            result_file = out + '.{}'.format(name)
                            result_files = results2json(
                                dataset, outputs_, result_file)
                anns = json.load(open(result_files['bbox']))
                evaluator.detections = []
                evaluator.add(anns)
                evaluator.save()
def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    pathlib.Path(cfg.work_dir).mkdir(parents=True, exist_ok=True)
    cfg.gpus = args.gpus
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # init logger before other steps
    logger = get_root_logger(cfg.work_dir)
    logger.info('Distributed training: {}'.format(distributed))
    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)
    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    if distributed:
        model = MMDistributedDataParallel(model).cuda()
    else:
        model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()
    train_dataset = get_dataset(cfg.data.train)
    optimizer = build_optimizer(model, cfg.optimizer)
    train_loader = build_dataloader(train_dataset,
                                    cfg.data.imgs_per_gpu,
                                    cfg.data.workers_per_gpu,
                                    dist=distributed)
    start_epoch = it = 0
    last_epoch = -1
    lr_scheduler, lr_warmup_scheduler = build_scheduler(
        optimizer,
        total_iters_each_epoch=len(train_loader),
        total_epochs=cfg.total_epochs,
        last_epoch=last_epoch,
        optim_cfg=cfg.optimizer,
        lr_cfg=cfg.lr_config)
    # -----------------------start training---------------------------
    logger.info('**********************Start training**********************')
    train_model(model,
                optimizer,
                train_loader,
                lr_scheduler=lr_scheduler,
                optim_cfg=cfg.optimizer,
                start_epoch=start_epoch,
                total_epochs=cfg.total_epochs,
                start_iter=it,
                rank=args.local_rank,
                logger=logger,
                ckpt_save_dir=cfg.work_dir,
                lr_warmup_scheduler=lr_warmup_scheduler,
                ckpt_save_interval=cfg.checkpoint_config.interval,
                max_ckpt_save_num=args.max_ckpt_save_num,
                log_interval=cfg.log_config.interval)
    logger.info('**********************End training**********************')
def main():
    args = parse_args()
    print(args)
    cfg = Config.fromfile(args.config)
    if args.balanced:
        cfg.dataset_type = 'BalancedCustomDataset'
        cfg.data.train.type = 'BalancedCustomDataset'
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    # set up the work dir, num_classes and train annotation file for the
    # selected slice of class indices
    sub_dir = '{}-{}'.format(args.start_index, args.end_index)
    cfg.work_dir = os.path.join(cfg.work_dir, sub_dir)
    cfg.data.train.ann_file = cfg.data.train.ann_file + '_' + sub_dir + '.pkl'
    assert isinstance(cfg.model.bbox_head, (list, tuple, dict))
    if isinstance(cfg.model.bbox_head, (list, tuple)):
        for i in range(len(cfg.model.bbox_head)):
            cfg.model.bbox_head[
                i].num_classes = args.end_index - args.start_index + 1
    else:
        cfg.model.bbox_head.num_classes = args.end_index - args.start_index + 1
    latest_ckp = os.path.join(cfg.work_dir, 'latest.pth')
    if os.path.exists(latest_ckp):
        cfg.load_from = latest_ckp
    else:
        print('not found: ', latest_ckp)
    print(cfg)
    # patch done
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))
    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)
    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    train_dataset = get_dataset(cfg.data.train)
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text,
                                          CLASSES=train_dataset.CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = train_dataset.CLASSES
    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)