# NOTE: init_dist, get_root_logger, set_random_seed, build_model,
# wrap_fp16_model, build_dataset and test_network are project helpers
# assumed to be imported earlier in the file.

# point every split the config defines at the data dir from the CLI
if 'train' in cfg.data:
    cfg.data.train.data_dir = args.data_dir
if 'val' in cfg.data:
    cfg.data.val.data_dir = args.data_dir
if 'test' in cfg.data:
    cfg.data.test.data_dir = args.data_dir

# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
    distributed = False
else:
    distributed = True
    init_dist(args.launcher, **cfg.dist_params)

# init logger before other steps
logger = get_root_logger(log_level=cfg.log_level)

# set random seeds
if args.seed is not None:
    logger.info('Set random seed to {}'.format(args.seed))
    set_random_seed(args.seed)

model = build_model(cfg.model)

# optionally wrap the model for fp16 inference
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
    wrap_fp16_model(model)

test_dataset = build_dataset(cfg.data.val)
test_network(model, test_dataset, cfg=cfg,
             distributed=distributed)  # closing arg assumed; the original call was truncated here
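# --------------------------------------------------------------------------
# Hypothetical sketch (not in the original file): set_random_seed() above is
# a project helper; in mmcv-style codebases it usually seeds Python, NumPy
# and PyTorch in one call, roughly like this.
# --------------------------------------------------------------------------
import random

import numpy as np
import torch


def set_random_seed(seed):
    """Seed the Python, NumPy and PyTorch RNGs for reproducible runs."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)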
            dict(type='GroupScale', scales=[(171, 128)]),
            dict(type='GroupCenterCrop', out_size=112),
            dict(type='GroupToTensor',
                 switch_rgb_channels=True,
                 div255=True,
                 mean=(0.485, 0.456, 0.406),
                 std=(0.229, 0.224, 0.225))
        ])),
    )
    return Dict(data)


if __name__ == '__main__':
    logger = get_root_logger(log_level='INFO')
    args = parse_args()

    # merge the backbone config from the file with a data config
    # derived from the CLI arguments
    _cfg = Config.fromfile(args.cfg)
    cfg = dict(
        model=prepare_model_config(_cfg['model']['backbone']),
        data=prepare_data_config(args.dataset_name, args.data_dir))
    cfg = Dict(cfg)
    cfg.gpus = args.gpus
    cfg.data.videos_per_gpu = args.batchsize
    cfg.work_dir = args.work_dir

    # fall back to the work dir when no explicit checkpoint is given
    if args.checkpoint is None:
        load_from = cfg.work_dir
    else:
        load_from = args.checkpoint
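# --------------------------------------------------------------------------
# Hypothetical sketch (not in the original file): the script reads args.cfg,
# args.dataset_name, args.data_dir, args.gpus, args.batchsize, args.work_dir,
# args.checkpoint, args.launcher and args.seed, so a matching parse_args()
# would look roughly like this. Flag names, defaults and help strings are
# assumptions inferred from those accesses; in the real file this would sit
# above the __main__ guard.
# --------------------------------------------------------------------------
import argparse


def parse_args():
    parser = argparse.ArgumentParser(description='Test a video model')
    parser.add_argument('--cfg', required=True, help='model config file')
    parser.add_argument('--dataset_name', required=True, help='dataset name')
    parser.add_argument('--data_dir', required=True, help='dataset root dir')
    parser.add_argument('--gpus', type=int, default=1, help='number of GPUs')
    parser.add_argument('--batchsize', type=int, default=8,
                        help='videos per GPU')
    parser.add_argument('--work_dir', default='./work_dir',
                        help='dir for logs and checkpoints')
    parser.add_argument('--checkpoint', default=None,
                        help='checkpoint to load; defaults to work_dir')
    parser.add_argument('--launcher', default='none',
                        choices=['none', 'pytorch', 'slurm', 'mpi'],
                        help='job launcher for distributed testing')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    return parser.parse_args()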