import os
import os.path as osp
from datetime import datetime

import mmcv
import mmedit
from mmcv.runner import init_dist
from mmedit.apis import train_model
from mmedit.datasets import build_dataset
from mmedit.models import build_model


def main():
    # `args`, `train_annotation`, `change_config` and `check_params` are
    # defined elsewhere in this project.
    print('settings:\n', args)

    # Annotate the training data the first time this script is run.
    if args.annotate:
        train_annotation()

    # Load and modify the base config.
    config_path = 'configs/restorers/srresnet_srgan/msrresnet_x4c64b16_g1_1000k_div2k.py'
    cfg = change_config(config_path)
    check_params(cfg)

    # Initialize distributed training (only needs to run once);
    # comment this block out if it has already been initialized.
    os.environ['RANK'] = '0'
    os.environ['WORLD_SIZE'] = '1'
    os.environ['MASTER_ADDR'] = '127.0.0.1'
    os.environ['MASTER_PORT'] = '29500'  # '50297'
    init_dist('pytorch', **cfg.dist_params)

    # Build the dataset.
    datasets = [build_dataset(cfg.data.train)]

    # Build the MSRResNet model.
    model = build_model(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)

    # Create the work_dir.
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))

    # Meta information.
    meta = dict()
    # if cfg.get('exp_name', None) is None:
    #     cfg['exp_name'] = osp.splitext(osp.basename(cfg.work_dir))[0]
    meta['exp_name'] = '_'.join([
        'bs' + str(args.bs),
        'iter' + str(args.iter),
        'block' + str(args.num_blocks),
        args.loss,
    ])
    meta['mmedit Version'] = mmedit.__version__
    meta['seed'] = 0
    meta['start_time'] = datetime.now().strftime('%d/%m/%Y %H:%M:%S')

    # Train the model.
    train_model(model, datasets, cfg, distributed=True, validate=True, meta=meta)
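
# ---------------------------------------------------------------------------
# `change_config`, `check_params`, `train_annotation` and the global `args`
# are referenced above but not defined in this file. Below is a minimal,
# hypothetical sketch of `change_config`, assuming `args` carries the CLI
# options used above (`bs`, `iter`, `num_blocks`, `loss`). Exact config keys
# vary across mmediting versions, so treat this as an illustration, not the
# author's actual helper.
def change_config(config_path):
    from mmcv import Config
    cfg = Config.fromfile(config_path)
    # Override training hyper-parameters from the (assumed) CLI arguments.
    cfg.data.samples_per_gpu = args.bs  # may be data.train_dataloader.samples_per_gpu in newer versions
    cfg.total_iters = args.iter
    cfg.model.generator.num_blocks = args.num_blocks
    # `args.loss` could, e.g., switch cfg.model.pixel_loss['type'].
    cfg.work_dir = f'./work_dirs/bs{args.bs}_iter{args.iter}_block{args.num_blocks}_{args.loss}'
    return cfg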
# ---------------------------------------------------------------------------
# Reference training entry point (standard mmediting-style tools/train.py).
import argparse
import copy
import os
import os.path as osp
import time

import mmcv
import torch
from mmcv import Config
from mmcv.runner import init_dist

from mmedit import __version__
from mmedit.apis import set_random_seed, train_model
from mmedit.datasets import build_dataset
from mmedit.models import build_model
from mmedit.utils import collect_env, get_root_logger


def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)

    # Set cudnn_benchmark.
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # Update the config according to CLI args.
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    if args.autoscale_lr:
        # Apply the linear scaling rule (https://arxiv.org/abs/1706.02677).
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8

    # Init the distributed env first, since the logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # Create the work_dir.
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))

    # Init the logger before other steps.
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # Log environment info.
    env_info_dict = collect_env()
    env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' + dash_line)

    # Log some basic info.
    logger.info('Distributed training: {}'.format(distributed))
    logger.info('mmedit Version: {}'.format(__version__))
    logger.info('Config:\n{}'.format(cfg.text))

    # Set random seeds.
    if args.seed is not None:
        logger.info('Set random seed to {}, deterministic: {}'.format(
            args.seed, args.deterministic))
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed

    model = build_model(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)

    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))

    if cfg.checkpoint_config is not None:
        # Save the mmedit version and config file content in checkpoints
        # as meta data.
        cfg.checkpoint_config.meta = dict(
            mmedit_version=__version__,
            config=cfg.text,
        )

    # Meta information.
    meta = dict()
    if cfg.get('exp_name', None) is None:
        cfg['exp_name'] = osp.splitext(osp.basename(cfg.work_dir))[0]
    meta['exp_name'] = cfg.exp_name
    meta['mmedit Version'] = __version__
    meta['seed'] = args.seed
    meta['env_info'] = env_info

    # Train the model.
    train_model(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=(not args.no_validate),
        timestamp=timestamp,
        meta=meta)
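
# ---------------------------------------------------------------------------
# `parse_args` is referenced above but not shown. The sketch below is
# reconstructed from the flags `main()` actually reads (config, work_dir,
# resume_from, no_validate, gpus, seed, deterministic, launcher,
# autoscale_lr); help strings and defaults are assumptions.
def parse_args():
    parser = argparse.ArgumentParser(description='Train an editor')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument(
        '--resume-from', help='the checkpoint file to resume from')
    parser.add_argument(
        '--no-validate', action='store_true',
        help='whether not to evaluate the checkpoint during training')
    parser.add_argument(
        '--gpus', type=int, default=1,
        help='number of gpus to use (only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument(
        '--deterministic', action='store_true',
        help='whether to set deterministic options for the CUDNN backend')
    parser.add_argument(
        '--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument(
        '--autoscale-lr', action='store_true',
        help='automatically scale lr with the number of gpus')
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args


# Example launch (single GPU, assuming this file lives at tools/train.py):
#   python tools/train.py configs/restorers/srresnet_srgan/msrresnet_x4c64b16_g1_1000k_div2k.py --seed 0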