def benchmark_eval(args):
    cfg = setup(args)
    model = build_model(cfg)
    model.eval()
    logger.info("Model:\n{}".format(model))
    DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)

    cfg.defrost()
    cfg.DATALOADER.NUM_WORKERS = 0
    data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
    dummy_data = list(itertools.islice(data_loader, 100))

    def f():
        while True:
            yield from DatasetFromList(dummy_data, copy=False)

    for _ in range(5):  # warmup
        model(dummy_data[0])

    max_iter = 400
    timer = Timer()
    with tqdm.tqdm(total=max_iter) as pbar:
        for idx, d in enumerate(f()):
            if idx == max_iter:
                break
            model(d)
            pbar.update()
    logger.info("{} iters in {} seconds.".format(max_iter, timer.seconds()))
def main(args):
    config.merge_from_list(args.opts)
    cfg, logger = default_setup(config, args)
    if args.debug:
        batches = int(cfg.SOLVER.IMS_PER_BATCH / 8 * args.num_gpus)
        if cfg.SOLVER.IMS_PER_BATCH != batches:
            cfg.SOLVER.IMS_PER_BATCH = batches
            logger.warning("SOLVER.IMS_PER_BATCH is changed to {}".format(batches))

    if "MODEL.WEIGHTS" in args.opts:
        if cfg.MODEL.WEIGHTS.endswith(".pth") and not PathManager.exists(cfg.MODEL.WEIGHTS):
            ckpt_name = cfg.MODEL.WEIGHTS.split("/")[-1]
            model_prefix = cfg.OUTPUT_DIR.split("cvpods_playground")[1][1:]
            remote_file_path = os.path.join(cfg.OSS.DUMP_PREFIX, model_prefix, ckpt_name)
            logger.warning(
                f"The specified ckpt file ({cfg.MODEL.WEIGHTS}) was not found locally,"
                f" try to load the corresponding dump file on OSS ({remote_file_path})."
            )
            cfg.MODEL.WEIGHTS = remote_file_path
        valid_files = [cfg.MODEL.WEIGHTS]
    else:
        # * means all; if a specific format is needed, use e.g. *.csv
        list_of_files = glob.glob(os.path.join(cfg.OUTPUT_DIR, '*.pth'))
        assert list_of_files, "No checkpoint file found in {}.".format(cfg.OUTPUT_DIR)
        list_of_files.sort(key=os.path.getctime)
        latest_file = list_of_files[-1]
        if not args.end_iter:
            valid_files = [latest_file]
        else:
            files = [f for f in list_of_files if str(f) <= str(latest_file)]
            valid_files = []
            for f in files:
                try:
                    model_iter = int(re.split(r'(model_|\.pth)', f)[-3])
                except Exception:
                    logger.warning("remove {}".format(f))
                    continue
                if args.start_iter <= model_iter <= args.end_iter:
                    valid_files.append(f)
            assert valid_files, "No .pth files satisfy your requirement"

    for current_file in valid_files:
        cfg.MODEL.WEIGHTS = current_file
        model = build_model(cfg)

        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        res = Trainer.test(cfg, model)

        if comm.is_main_process():
            verify_results(cfg, res)
        if cfg.TEST.AUG.ENABLED:
            res.update(Trainer.test_with_TTA(cfg, model))
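
# A standalone illustration (not part of main) of how the loop above recovers the
# iteration number from a checkpoint filename; "output/model_0049999.pth" is only a
# hypothetical example following the usual "model_<iter>.pth" naming pattern.
# re.split keeps the captured delimiters, so index -3 is the digit run between
# "model_" and ".pth".
import re

assert re.split(r'(model_|\.pth)', "output/model_0049999.pth")[-3] == "0049999"
assert int(re.split(r'(model_|\.pth)', "output/model_0049999.pth")[-3]) == 49999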
def stage_main(args, cfg, build):
    cfg.merge_from_list(args.opts)
    cfg, logger = default_setup(cfg, args)
    model_build_func = build
    """
    If you'd like to do anything fancier than the standard training logic,
    consider writing your own training loop or subclassing the trainer.
    """
    trainer = Trainer(cfg, model_build_func)
    trainer.resume_or_load(resume=args.resume)

    if args.eval_only:
        DetectionCheckpointer(trainer.model, save_dir=cfg.OUTPUT_DIR,
                              resume=args.resume).resume_or_load(
                                  cfg.MODEL.WEIGHTS, resume=args.resume)
        res = Trainer.test(cfg, trainer.model)
        if comm.is_main_process():
            verify_results(cfg, res)
        if cfg.TEST.AUG.ENABLED:
            res.update(Trainer.test_with_TTA(cfg, trainer.model))
        return res

    # Check whether the workspace has enough storage space;
    # assume that a single dumped model is about 700 MB.
    file_sys = os.statvfs(cfg.OUTPUT_DIR)
    free_space_Gb = (file_sys.f_bfree * file_sys.f_frsize) / 2**30
    eval_space_Gb = (cfg.SOLVER.LR_SCHEDULER.MAX_ITER
                     // cfg.SOLVER.CHECKPOINT_PERIOD) * 700 / 2**10
    if eval_space_Gb > free_space_Gb:
        logger.warning(f"{Fore.RED}Remaining space({free_space_Gb}GB) "
                       f"is less than ({eval_space_Gb}GB){Style.RESET_ALL}")

    if cfg.TEST.AUG.ENABLED:
        trainer.register_hooks([
            hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))
        ])

    trainer.train()

    if comm.is_main_process() and cfg.MODEL.AS_PRETRAIN:
        # convert last ckpt to pretrain format
        convert_to_pretrained_model(
            input=os.path.join(cfg.OUTPUT_DIR, "model_final.pth"),
            save_path=os.path.join(cfg.OUTPUT_DIR, "model_final_pretrain_weight.pkl"))
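
# A rough sanity check of the storage estimate in stage_main above (a standalone
# sketch with assumed numbers, not taken from any shipped config): with
# MAX_ITER=90000 and CHECKPOINT_PERIOD=5000, 18 checkpoints at ~700 MB each need
# about 12.3 GB of free space.
def _estimate_checkpoint_space_gb(max_iter, checkpoint_period, ckpt_mb=700):
    """Approximate disk space (GB) consumed by periodic checkpoints."""
    num_ckpts = max_iter // checkpoint_period
    return num_ckpts * ckpt_mb / 2**10  # MB -> GB


assert abs(_estimate_checkpoint_space_gb(90000, 5000) - 12.30) < 0.01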
def main(args):
    config.merge_from_list(args.opts)
    cfg = setup(args)

    model = build_model(cfg)
    logger.info("Model:\n{}".format(model))
    if args.eval_only:
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        return do_test(cfg, model)

    distributed = comm.get_world_size() > 1
    if distributed:
        model = DistributedDataParallel(model,
                                        device_ids=[comm.get_local_rank()],
                                        broadcast_buffers=False)

    do_train(cfg, model)
    return do_test(cfg, model)
class DefaultTrainer(SimpleTrainer):
    """
    A trainer with default training logic. Compared to `SimpleTrainer`, it
    contains the following logic in addition:

    1. Create model, optimizer, scheduler, dataloader from the given config.
    2. Load a checkpoint or `cfg.MODEL.WEIGHTS`, if it exists.
    3. Register a few common hooks.

    It is created to simplify the **standard model training workflow** and reduce code boilerplate
    for users who only need the standard training workflow, with standard features.
    It means this class makes *many assumptions* about your training logic that
    may easily become invalid in a new research. In fact, any assumptions beyond those made in the
    :class:`SimpleTrainer` are too much for research.

    The code of this class has been annotated about restrictive assumptions it makes.
    When they do not work for you, you're encouraged to:

    1. Overwrite methods of this class, OR:
    2. Use :class:`SimpleTrainer`, which only does minimal SGD training and
       nothing else. You can then add your own hooks if needed. OR:
    3. Write your own training loop similar to `tools/plain_train_net.py`.

    Also note that the behavior of this class, like other functions/classes in
    this file, is not stable, since it is meant to represent the "common default behavior".
    It is only guaranteed to work well with the standard models and training workflow in cvpods.
    To obtain more stable behavior, write your own training logic with other public APIs.

    Attributes:
        scheduler:
        checkpointer (DetectionCheckpointer):
        cfg (BaseConfig):

    Examples:

    .. code-block:: python

        trainer = DefaultTrainer(cfg)
        trainer.resume_or_load()  # load last checkpoint or MODEL.WEIGHTS
        trainer.train()
    """

    def __init__(self, cfg, model_build_func):
        """
        Args:
            cfg (BaseConfig):
        """
        logger = logging.getLogger("cvpods")
        if not logger.isEnabledFor(logging.INFO):  # setup_logger is not called for d2
            setup_logger()

        self.start_iter = 0
        data_loader = self.build_train_loader(cfg)
        epoch_iters = adjust_epoch_and_iter(cfg, data_loader)
        self.max_iter = cfg.SOLVER.LR_SCHEDULER.MAX_ITER
        self.max_epoch = cfg.SOLVER.LR_SCHEDULER.MAX_EPOCH

        model = model_build_func(cfg)
        model = maybe_convert_module(model)
        logger.info(f"Model structure: {model}")

        # Assume these objects must be constructed in this order.
        optimizer = self.build_optimizer(cfg, model)

        # For training, wrap with DDP. But don't need this for inference.
        if comm.get_world_size() > 1:
            model = DistributedDataParallel(model,
                                            device_ids=[comm.get_local_rank()],
                                            broadcast_buffers=False,
                                            find_unused_parameters=True)
        # TODO: @wangfeng02, `batch_subdivisions`
        super().__init__(model, data_loader, optimizer, cfg.SOLVER.BATCH_SUBDIVISIONS)

        if not cfg.SOLVER.LR_SCHEDULER.get("EPOCH_WISE", False):
            epoch_iters = -1

        self.scheduler = self.build_lr_scheduler(cfg, optimizer, epoch_iters=epoch_iters)
        # Assume no other objects need to be checkpointed.
        # We can later make it checkpoint the stateful hooks
        self.checkpointer = DetectionCheckpointer(
            # Assume you want to save checkpoints together with logs/statistics
            model,
            cfg.OUTPUT_DIR,
            optimizer=optimizer,
            scheduler=self.scheduler,
        )
        self.cfg = cfg
        self.register_hooks(self.build_hooks())

    def resume_or_load(self, resume=True):
        """
        If `resume==True`, and last checkpoint exists, resume from it.

        Otherwise, load a model specified by the config.

        Args:
            resume (bool): whether to do resume or not
        """
        self.checkpointer.resume = resume
        # The checkpoint stores the training iteration that just finished, thus we start
        # at the next iteration (or iter zero if there's no checkpoint).
        self.start_iter = (self.checkpointer.resume_or_load(
            self.cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1)

    def build_hooks(self):
        """
        Build a list of default hooks, including timing, evaluation,
        checkpointing, lr scheduling, precise BN, writing events.

        Returns:
            list[HookBase]:
        """
        cfg = self.cfg
        cfg.DATALOADER.NUM_WORKERS = 0  # save some memory and time for PreciseBN

        ret = [
            hooks.IterationTimer(),
            hooks.LRScheduler(self.optimizer, self.scheduler),
            hooks.PreciseBN(
                # Run at the same freq as (but before) evaluation.
                cfg.TEST.EVAL_PERIOD,
                self.model,
                # Build a new data loader to not affect training
                self.build_train_loader(cfg),
                cfg.TEST.PRECISE_BN.NUM_ITER,
            ) if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model) else None,
        ]

        # Do PreciseBN before checkpointer, because it updates the model and needs to
        # be saved by checkpointer.
        # This is not always the best: if checkpointing has a different frequency,
        # some checkpoints may have more precise statistics than others.
        if comm.is_main_process():
            ret.append(
                hooks.PeriodicCheckpointer(self.checkpointer,
                                           cfg.SOLVER.CHECKPOINT_PERIOD))

        def test_and_save_results():
            self._last_eval_results = self.test(self.cfg, self.model)
            return self._last_eval_results

        # Do evaluation after checkpointer, because then if it fails,
        # we can use the saved checkpoint to debug.
        ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results))

        if comm.is_main_process():
            # run writers in the end, so that evaluation metrics are written
            ret.append(
                hooks.PeriodicWriter(self.build_writers(),
                                     period=self.cfg.GLOBAL.LOG_INTERVAL))
        # Put `PeriodicDumpLog` after writers so that it can dump all the files,
        # including the files generated by writers
        return ret

    def build_writers(self):
        """
        Build a list of writers to be used. By default it contains
        writers that write metrics to the screen,
        a json file, and a tensorboard event file respectively.
        If you'd like a different list of writers, you can overwrite it in
        your trainer.

        Returns:
            list[EventWriter]: a list of :class:`EventWriter` objects.

        It is now implemented by:

        .. code-block:: python

            return [
                CommonMetricPrinter(self.max_iter),
                JSONWriter(os.path.join(self.cfg.OUTPUT_DIR, "metrics.json")),
                TensorboardXWriter(self.cfg.OUTPUT_DIR),
            ]
        """
        # Assume the default print/log frequency.
        return [
            # It may not always print what you want to see, since it prints "common" metrics only.
            CommonMetricPrinter(self.max_iter),
            JSONWriter(os.path.join(self.cfg.OUTPUT_DIR, "metrics.json")),
            TensorboardXWriter(self.cfg.OUTPUT_DIR),
        ]

    def train(self):
        """
        Run training.

        Returns:
            OrderedDict of results, if evaluation is enabled. Otherwise None.
        """
        super().train(self.start_iter, self.max_iter, self.max_epoch)
        if len(self.cfg.TEST.EXPECTED_RESULTS) and comm.is_main_process():
            assert hasattr(self, "_last_eval_results"), \
                "No evaluation results obtained during training!"
            verify_results(self.cfg, self._last_eval_results)
            return self._last_eval_results

    @classmethod
    def build_optimizer(cls, cfg, model):
        """
        Returns:
            torch.optim.Optimizer:
        """
        return build_optimizer(cfg, model)

    @classmethod
    def build_lr_scheduler(cls, cfg, optimizer, **kwargs):
        """
        It now calls :func:`cvpods.solver.build_lr_scheduler`.
        Overwrite it if you'd like a different scheduler.
        """
        return build_lr_scheduler(cfg, optimizer, **kwargs)

    @classmethod
    def build_train_loader(cls, cfg):
        """
        Returns:
            iterable

        It now calls :func:`cvpods.data.build_detection_train_loader`.
        Overwrite it if you'd like a different data loader.
        """
        return build_detection_train_loader(cfg)

    @classmethod
    def build_test_loader(cls, cfg):
        """
        Returns:
            iterable

        It now calls :func:`cvpods.data.build_detection_test_loader`.
        Overwrite it if you'd like a different data loader.
        """
        return build_detection_test_loader(cfg)

    @classmethod
    def build_evaluator(cls, cfg, dataset_name):
        """
        Returns:
            DatasetEvaluator or None

        It is not implemented by default.
        """
        raise NotImplementedError(
            "Please either implement `build_evaluator()` in subclasses, or pass "
            "your evaluator as arguments to `DefaultTrainer.test()`.")

    @classmethod
    def test(cls, cfg, model, evaluators=None, output_folder=None):
        """
        Args:
            cfg (BaseConfig):
            model (nn.Module):
            evaluators (list[DatasetEvaluator] or None): if None, will call
                :meth:`build_evaluator`. Otherwise, must have the same length as
                `cfg.DATASETS.TEST`.

        Returns:
            dict: a dict of result metrics
        """
        logger = logging.getLogger(__name__)
        if isinstance(evaluators, DatasetEvaluator):
            evaluators = [evaluators]
        if evaluators is not None:
            assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format(
                len(cfg.DATASETS.TEST), len(evaluators))

        results = OrderedDict()
        for idx, dataset_name in enumerate(cfg.DATASETS.TEST):
            data_loader = cls.build_test_loader(cfg)
            # When evaluators are passed in as arguments,
            # implicitly assume that evaluators can be created before data_loader.
            if evaluators is not None:
                evaluator = evaluators[idx]
            else:
                try:
                    evaluator = cls.build_evaluator(
                        cfg, dataset_name, data_loader.dataset,
                        output_folder=output_folder)
                except NotImplementedError:
                    logger.warn(
                        "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, "
                        "or implement its `build_evaluator` method.")
                    results[dataset_name] = {}
                    continue
            results_i = inference_on_dataset(model, data_loader, evaluator)
            results[dataset_name] = results_i
            if comm.is_main_process():
                assert isinstance(results_i, dict), \
                    "Evaluator must return a dict on the main process. Got {} instead.".format(
                        results_i)
                logger.info("Evaluation results for {} in csv format:".format(dataset_name))
                print_csv_format(results_i)

        if len(results) == 1:
            results = list(results.values())[0]
        return results
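
# A minimal sketch of option 1 from the DefaultTrainer docstring ("overwrite methods
# of this class"): subclass it and implement `build_evaluator`, which `test()` falls
# back to when no evaluators are passed. The signature matches the call site inside
# `test()` above; `MyDatasetEvaluator` is only a placeholder for whatever
# DatasetEvaluator your project actually provides.
class MyTrainer(DefaultTrainer):

    @classmethod
    def build_evaluator(cls, cfg, dataset_name, dataset, output_folder=None):
        # Hypothetical: construct and return the project's evaluator for this dataset.
        return MyDatasetEvaluator(dataset_name,
                                  output_dir=output_folder or cfg.OUTPUT_DIR)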
def __init__(self, cfg, model_build_func):
    """
    Args:
        cfg (BaseConfig):
    """
    logger = logging.getLogger("cvpods")
    if not logger.isEnabledFor(logging.INFO):  # setup_logger is not called for d2
        setup_logger()

    self.start_iter = 0
    data_loader = self.build_train_loader(cfg)
    maybe_adjust_epoch_and_iter(cfg, data_loader)
    self.max_iter = cfg.SOLVER.LR_SCHEDULER.MAX_ITER
    self.max_epoch = cfg.SOLVER.LR_SCHEDULER.MAX_EPOCH

    model = model_build_func(cfg)
    model = maybe_convert_module(model)
    logger.info(f"Model structure: {model}")

    # Assume these objects must be constructed in this order.
    optimizer = self.build_optimizer(cfg, model)

    # For training, wrap with DDP. But don't need this for inference.
    if comm.get_world_size() > 1:
        if cfg.TRAINER.FP16.ENABLED:
            if cfg.TRAINER.FP16.TYPE == "APEX":
                model, optimizer = amp.initialize(
                    model, optimizer, opt_level=cfg.TRAINER.FP16.OPTS.OPT_LEVEL)
        model = DistributedDataParallel(model,
                                        device_ids=[comm.get_local_rank()],
                                        broadcast_buffers=False,
                                        find_unused_parameters=True)
    # TODO: @wangfeng02, `batch_subdivisions`
    super().__init__(model, data_loader, optimizer, cfg.SOLVER.BATCH_SUBDIVISIONS)

    if not cfg.SOLVER.LR_SCHEDULER.get("EPOCH_WISE", False):
        epoch_iters = -1
    else:
        epoch_iters = cfg.SOLVER.LR_SCHEDULER.get("EPOCH_ITERS")
        logger.warning(f"Setup LR Scheduler in EPOCH mode: {epoch_iters}")

    self.scheduler = self.build_lr_scheduler(cfg, optimizer, epoch_iters=epoch_iters)
    # Assume no other objects need to be checkpointed.
    # We can later make it checkpoint the stateful hooks
    optional = {}
    if cfg.TRAINER.FP16.ENABLED:
        optional["amp"] = amp
    self.checkpointer = DetectionCheckpointer(
        # Assume you want to save checkpoints together with logs/statistics
        model,
        cfg.OUTPUT_DIR,
        optimizer=optimizer,
        scheduler=self.scheduler,
        **optional,
    )
    self.cfg = cfg
    self.register_hooks(self.build_hooks())
def __init__(self, cfg, model_build_func):
    """
    Args:
        cfg (BaseConfig):
    """
    logger = logging.getLogger("cvpods")
    if not logger.isEnabledFor(logging.INFO):  # setup_logger is not called for d2
        setup_logger()

    # For simulating large batch training
    images_per_batch = cfg.SOLVER.IMS_PER_BATCH
    batch_subdivisions = cfg.SOLVER.BATCH_SUBDIVISIONS

    assert (
        batch_subdivisions > 0
    ), "cfg.SOLVER.BATCH_SUBDIVISIONS ({}) must be greater than or equal to 1.".format(
        batch_subdivisions)

    if batch_subdivisions > 1:
        # if batch_subdivisions is equal to 1, the following check is redundant
        assert (
            images_per_batch % batch_subdivisions == 0
        ), "SOLVER.IMS_PER_BATCH ({}) must be divisible by " \
            "cfg.SOLVER.BATCH_SUBDIVISIONS ({}).".format(images_per_batch, batch_subdivisions)
    images_per_mini_batch = images_per_batch // batch_subdivisions

    num_workers = comm.get_world_size()
    assert (
        images_per_mini_batch % num_workers == 0
    ), "images per mini batch ({}, calculated from cfg.SOLVER.IMS_PER_BATCH // " \
        "cfg.SOLVER.BATCH_SUBDIVISIONS) must be divisible by the number of workers " \
        "({}).".format(images_per_mini_batch, num_workers)

    assert (
        images_per_mini_batch >= num_workers
    ), "images per mini batch ({}, calculated from cfg.SOLVER.IMS_PER_BATCH // " \
        "cfg.SOLVER.BATCH_SUBDIVISIONS) must be no smaller than the number of workers " \
        "({}).".format(images_per_mini_batch, num_workers)

    self.batch_subdivisions = batch_subdivisions

    data_loader = self.build_train_loader(cfg)

    self.start_iter = 0
    self.max_iter = cfg.SOLVER.LR_SCHEDULER.MAX_ITER
    self.max_epoch = cfg.SOLVER.LR_SCHEDULER.MAX_EPOCH
    self.cfg = cfg

    if self.max_epoch is not None:
        epoch_iter = len(data_loader.dataset) // (
            comm.get_world_size() * data_loader.batch_size
            * cfg.SOLVER.BATCH_SUBDIVISIONS) + 1
        cfg.SOLVER.LR_SCHEDULER.MAX_ITER = self.max_iter = self.max_epoch * epoch_iter
        cfg.SOLVER.LR_SCHEDULER.STEPS = list(
            (x * epoch_iter for x in cfg.SOLVER.LR_SCHEDULER.STEPS))
        cfg.SOLVER.LR_SCHEDULER.WARMUP_ITERS = int(
            cfg.SOLVER.LR_SCHEDULER.WARMUP_ITERS * epoch_iter)
        cfg.SOLVER.CHECKPOINT_PERIOD = epoch_iter * cfg.SOLVER.CHECKPOINT_PERIOD
        cfg.TEST.EVAL_PERIOD = epoch_iter * cfg.TEST.EVAL_PERIOD
    else:
        epoch_iter = -1

    model = model_build_func(cfg)
    logger.info(f"Model structure: {model}")

    # Assume these objects must be constructed in this order.
    optimizer = self.build_optimizer(cfg, model)

    # For training, wrap with DDP. But don't need this for inference.
    if comm.get_world_size() > 1:
        model = DistributedDataParallel(model,
                                        device_ids=[comm.get_local_rank()],
                                        broadcast_buffers=False,
                                        find_unused_parameters=True)

    super().__init__(model, data_loader, optimizer)

    self.scheduler = self.build_lr_scheduler(cfg, optimizer, epoch_iters=epoch_iter)
    # Assume no other objects need to be checkpointed.
    # We can later make it checkpoint the stateful hooks
    self.checkpointer = DetectionCheckpointer(
        # Assume you want to save checkpoints together with logs/statistics
        model,
        cfg.OUTPUT_DIR,
        optimizer=optimizer,
        scheduler=self.scheduler,
    )
    self.register_hooks(self.build_hooks())
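
# A standalone sanity check of the epoch-to-iteration conversion in __init__ above,
# with hypothetical numbers (not from any shipped config): 118_000 images, world
# size 8, per-GPU batch size 2, BATCH_SUBDIVISIONS 1.
def _epoch_iter(num_images, world_size, batch_size, batch_subdivisions):
    return num_images // (world_size * batch_size * batch_subdivisions) + 1


assert _epoch_iter(118_000, 8, 2, 1) == 7376          # iterations per epoch
assert 12 * _epoch_iter(118_000, 8, 2, 1) == 88_512   # MAX_EPOCH=12 -> MAX_ITER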
def do_train(cfg, model, resume=False):
    model.train()
    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)

    checkpointer = DetectionCheckpointer(model,
                                         cfg.OUTPUT_DIR,
                                         optimizer=optimizer,
                                         scheduler=scheduler)
    start_iter = (checkpointer.resume_or_load(
        cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1)
    max_iter = cfg.SOLVER.MAX_ITER

    periodic_checkpointer = PeriodicCheckpointer(checkpointer,
                                                 cfg.SOLVER.CHECKPOINT_PERIOD,
                                                 max_iter=max_iter)

    writers = ([
        CommonMetricPrinter(max_iter),
        JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
        TensorboardXWriter(cfg.OUTPUT_DIR),
    ] if comm.is_main_process() else [])

    # compared to "train_net.py", we do not support accurate timing and
    # precise BN here, because they are not trivial to implement
    data_loader = build_detection_train_loader(cfg)
    logger.info("Starting training from iteration {}".format(start_iter))
    with EventStorage(start_iter) as storage:
        for data, iteration in zip(data_loader, range(start_iter, max_iter)):
            iteration = iteration + 1
            storage.step()

            loss_dict = model(data)
            losses = sum(loss for loss in loss_dict.values())
            assert torch.isfinite(losses).all(), loss_dict

            loss_dict_reduced = {
                k: v.item() for k, v in comm.reduce_dict(loss_dict).items()
            }
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            if comm.is_main_process():
                storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)

            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
            storage.put_scalar("lr",
                               optimizer.param_groups[0]["lr"],
                               smoothing_hint=False)
            scheduler.step()

            if (cfg.TEST.EVAL_PERIOD > 0
                    and iteration % cfg.TEST.EVAL_PERIOD == 0
                    and iteration != max_iter):
                do_test(cfg, model)
                # Compared to "train_net.py", the test results are not dumped to EventStorage
                comm.synchronize()

            if iteration - start_iter > 5 and (iteration % 20 == 0
                                               or iteration == max_iter):
                for writer in writers:
                    writer.write()
            periodic_checkpointer.step(iteration)
help="output directory for the converted caffe2 model") parser.add_argument( "opts", help="Modify config options using the command-line", default=None, nargs=argparse.REMAINDER, ) args = parser.parse_args() logger = setup_logger() logger.info("Command line arguments: " + str(args)) cfg = setup_cfg(args) # create a torch model torch_model = build_model(cfg) DetectionCheckpointer(torch_model).resume_or_load(cfg.MODEL.WEIGHTS) # get a sample data data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0]) first_batch = next(iter(data_loader)) # convert and save caffe2 model caffe2_model = export_caffe2_model(cfg, torch_model, first_batch) caffe2_model.save_protobuf(args.output) # draw the caffe2 graph caffe2_model.save_graph(os.path.join(args.output, "model.svg"), inputs=first_batch) # run evaluation with the converted model if args.run_eval: dataset = cfg.DATASETS.TEST[0]