# Imports assumed from mmedit's tools/train.py layout.
import copy
import os.path as osp
import time

import mmcv
import torch
from mmcv import Config
from mmcv.runner import init_dist

from mmedit import __version__
from mmedit.apis import set_random_seed, train_model
from mmedit.datasets import build_dataset
from mmedit.models import build_model
from mmedit.utils import collect_env, get_root_logger


def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)

    # log some basic info
    logger.info('Distributed training: {}'.format(distributed))
    logger.info('mmedit Version: {}'.format(__version__))
    logger.info('Config:\n{}'.format(cfg.text))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}, deterministic: {}'.format(
            args.seed, args.deterministic))
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed

    model = build_model(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)

    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save version and config file content in checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmedit_version=__version__,
            config=cfg.text,
        )

    # meta information
    meta = dict()
    if cfg.get('exp_name', None) is None:
        cfg['exp_name'] = osp.splitext(osp.basename(cfg.work_dir))[0]
    meta['exp_name'] = cfg.exp_name
    meta['mmedit Version'] = __version__
    meta['seed'] = args.seed
    meta['env_info'] = env_info

    train_model(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=(not args.no_validate),
        timestamp=timestamp,
        meta=meta)
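# `parse_args()` is not shown in this excerpt. Below is a minimal sketch,
# assuming an argparse CLI whose flags match the attributes main() reads
# (config, work_dir, resume_from, gpus, seed, deterministic, launcher,
# autoscale_lr, no_validate). Flag names, defaults and help texts are
# assumptions, not necessarily the upstream CLI.
import argparse


def parse_args():
    parser = argparse.ArgumentParser(description='Train a model')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument(
        '--resume-from', help='the checkpoint file to resume from')
    parser.add_argument(
        '--no-validate',
        action='store_true',
        help='skip validation during training')
    parser.add_argument(
        '--gpus',
        type=int,
        default=1,
        help='number of gpus to use (non-distributed training only)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument(
        '--deterministic',
        action='store_true',
        help='set deterministic options for the CUDNN backend')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument(
        '--autoscale-lr',
        action='store_true',
        help='scale lr linearly with the number of gpus')
    return parser.parse_args()


if __name__ == '__main__':
    main()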
# Imports assumed from mmedit's tools/test.py layout.
import mmcv
import torch
from mmcv.parallel import MMDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint

from mmedit.apis import multi_gpu_test, set_random_seed, single_gpu_test
from mmedit.core.distributed_wrapper import DistributedDataParallelWrapper
from mmedit.datasets import build_dataloader, build_dataset
from mmedit.models import build_model


def main():
    args = parse_args()

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    rank, _ = get_dist_info()

    # set random seeds
    if args.seed is not None:
        if rank == 0:
            print('set random seed to', args.seed)
        set_random_seed(args.seed, deterministic=args.deterministic)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)

    loader_cfg = {
        **dict((k, cfg.data[k]) for k in ['workers_per_gpu']
               if k in cfg.data),
        **dict(
            samples_per_gpu=1,
            drop_last=False,
            shuffle=False,
            dist=distributed),
        **cfg.data.get('test_dataloader', {})
    }

    data_loader = build_dataloader(dataset, **loader_cfg)

    # build the model and load checkpoint
    model = build_model(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)

    args.save_image = args.save_path is not None
    empty_cache = cfg.get('empty_cache', False)
    if not distributed:
        _ = load_checkpoint(model, args.checkpoint, map_location='cpu')
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(
            model,
            data_loader,
            save_path=args.save_path,
            save_image=args.save_image)
    else:
        find_unused_parameters = cfg.get('find_unused_parameters', False)
        model = DistributedDataParallelWrapper(
            model,
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False,
            find_unused_parameters=find_unused_parameters)

        device_id = torch.cuda.current_device()
        _ = load_checkpoint(
            model,
            args.checkpoint,
            map_location=lambda storage, loc: storage.cuda(device_id))
        outputs = multi_gpu_test(
            model,
            data_loader,
            args.tmpdir,
            args.gpu_collect,
            save_path=args.save_path,
            save_image=args.save_image,
            empty_cache=empty_cache)

    if rank == 0:
        print('')
        # print metrics
        stats = dataset.evaluate(outputs)
        for stat in stats:
            print('Eval-{}: {}'.format(stat, stats[stat]))

        # save result pickle
        if args.out:
            print('writing results to {}'.format(args.out))
            mmcv.dump(outputs, args.out)
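# As with the training script, `parse_args()` is omitted from this excerpt.
# A minimal sketch, assuming flags matching the attributes main() reads
# (config, checkpoint, seed, deterministic, save_path, out, tmpdir,
# gpu_collect, launcher); names and defaults are assumptions.
import argparse


def parse_args():
    parser = argparse.ArgumentParser(description='Test (and eval) a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument(
        '--deterministic',
        action='store_true',
        help='set deterministic options for the CUDNN backend')
    parser.add_argument('--out', help='output result pickle file')
    parser.add_argument(
        '--gpu-collect',
        action='store_true',
        help='use gpu to collect results across ranks')
    parser.add_argument(
        '--save-path',
        default=None,
        help='path to store images; if given, images will be saved')
    parser.add_argument('--tmpdir', help='tmp dir for cpu result collection')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    return parser.parse_args()


if __name__ == '__main__':
    main()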
# Imports assumed from mmedit's tools/test.py layout; this script evaluates
# every .pth checkpoint in a directory with the same config.
import os

import mmcv
import torch
from mmcv.runner import get_dist_info, init_dist, load_checkpoint

from mmedit.apis import multi_gpu_test, set_random_seed
from mmedit.core.distributed_wrapper import DistributedDataParallelWrapper
from mmedit.datasets import build_dataloader, build_dataset
from mmedit.models import build_model


def main():
    args = parse_args()

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None

    # Init the distributed env once, before the checkpoint loop: init_dist()
    # initializes the default process group and must not be called again on
    # later iterations.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    rank, _ = get_dist_info()

    # set random seeds
    if args.seed is not None:
        if rank == 0:
            print('set random seed to', args.seed)
        set_random_seed(args.seed, deterministic=args.deterministic)

    checkpoint_list = os.listdir(args.checkpoint_dir)
    print(checkpoint_list)
    for checkpoint in checkpoint_list:
        if '.pth' not in checkpoint:
            continue

        # build the dataloader
        # TODO: support multiple images per gpu (only minor changes needed)
        dataset = build_dataset(cfg.data.test)
        data_loader = build_dataloader(
            dataset,
            samples_per_gpu=1,
            workers_per_gpu=cfg.data.get('val_workers_per_gpu',
                                         cfg.data.workers_per_gpu),
            dist=distributed,
            shuffle=False)

        # build the model and load checkpoint
        model = build_model(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)

        args.save_image = args.save_path is not None

        # distributed test
        find_unused_parameters = cfg.get('find_unused_parameters', False)
        model = DistributedDataParallelWrapper(
            model,
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False,
            find_unused_parameters=find_unused_parameters)

        device_id = torch.cuda.current_device()
        _ = load_checkpoint(
            model,
            os.path.join(args.checkpoint_dir, checkpoint),
            map_location=lambda storage, loc: storage.cuda(device_id))

        outputs = multi_gpu_test(
            model,
            data_loader,
            args.tmpdir,
            args.gpu_collect,
            save_path=args.save_path,
            save_image=args.save_image)

        if rank == 0:
            # print metrics and append them to a result file per checkpoint
            stats = dataset.evaluate(outputs)
            with open(
                    os.path.join(args.checkpoint_dir, 'eval_result_new.txt'),
                    'a') as write_file:
                for stat in stats:
                    print('{}: Eval-{}: {}'.format(checkpoint, stat,
                                                   stats[stat]))
                    write_file.write('{}: Eval-{}: {} '.format(
                        checkpoint, stat, stats[stat]))
                write_file.write('\n')

            # save result pickle
            if args.out:
                print('writing results to {}'.format(args.out))
                mmcv.dump(outputs, args.out)
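# Sketch of `parse_args()` for the checkpoint-sweep script above: it takes a
# checkpoint directory instead of a single checkpoint file, and the remaining
# flags mirror the test script's. All names and defaults are assumptions.
# Note the loop always wraps the model in DistributedDataParallelWrapper, so
# this script is expected to run under a distributed launcher.
import argparse


def parse_args():
    parser = argparse.ArgumentParser(
        description='Evaluate all checkpoints in a directory')
    parser.add_argument('config', help='test config file path')
    parser.add_argument(
        'checkpoint_dir', help='directory containing .pth checkpoints')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument(
        '--deterministic',
        action='store_true',
        help='set deterministic options for the CUDNN backend')
    parser.add_argument('--out', help='output result pickle file')
    parser.add_argument(
        '--gpu-collect',
        action='store_true',
        help='use gpu to collect results across ranks')
    parser.add_argument(
        '--save-path',
        default=None,
        help='path to store images; if given, images will be saved')
    parser.add_argument('--tmpdir', help='tmp dir for cpu result collection')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    return parser.parse_args()


if __name__ == '__main__':
    main()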