def benchmark_train(args):
    cfg = setup(args)
    model = build_model(cfg)
    logger.info("Model:\n{}".format(model))
    if comm.get_world_size() > 1:
        model = DistributedDataParallel(
            model, device_ids=[comm.get_local_rank()], broadcast_buffers=False
        )
    optimizer = build_optimizer(cfg, model)
    checkpointer = DefaultCheckpointer(model, optimizer=optimizer)
    checkpointer.load(cfg.MODEL.WEIGHTS)

    # Measure pure training speed: disable dataloader workers and replay a
    # small cached set of batches so data loading is excluded from the timing.
    cfg.defrost()
    cfg.DATALOADER.NUM_WORKERS = 0
    data_loader = build_train_loader(cfg)
    dummy_data = list(itertools.islice(data_loader, 100))

    def f():
        # Loop forever over the 100 cached batches.
        while True:
            yield from DatasetFromList(dummy_data, copy=False)

    max_iter = 400
    trainer = SimpleTrainer(model, f(), optimizer)
    trainer.register_hooks(
        [hooks.IterationTimer(), hooks.PeriodicWriter([CommonMetricPrinter(max_iter)])]
    )
    trainer.train(1, max_iter)
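# Usage sketch (not part of the original source): benchmark_train is typically
# launched through a multi-process launcher so that comm.get_world_size()
# reflects the number of GPUs. The parser/launcher names below
# (default_argument_parser, launch, and the args fields) are assumptions
# borrowed from detectron2-style tooling; substitute this project's own
# entry-point helpers as appropriate.
if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    launch(
        benchmark_train,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )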
def __init__(self, cfg):
    # Work on a copy so the caller's config is not mutated.
    self.cfg = deepcopy(cfg)
    if self.cfg.MODEL.DEVICE.startswith("cuda:"):
        # Pin the process to the requested GPU, then fall back to the
        # generic "cuda" device string.
        torch.cuda.set_device(self.cfg.MODEL.DEVICE)
        self.cfg.MODEL.DEVICE = "cuda"

    self.model = cfg.build_model(self.cfg)
    self.model.eval()

    checkpointer = DefaultCheckpointer(self.model)
    checkpointer.load(cfg.MODEL.WEIGHTS)

    # Test-time preprocessing pipeline and expected channel order.
    self.transform_gen = build_transform_gens(cfg.INPUT.AUG.TEST_PIPELINES)
    self.input_format = cfg.INPUT.FORMAT
    assert self.input_format in ["RGB", "BGR"], self.input_format
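# Usage sketch (an assumption, not shown above): this __init__ follows the
# detectron2-style predictor pattern, so the enclosing class (referred to as
# DefaultPredictor here for illustration) is expected to be called on a single
# HxWxC uint8 image whose channel order matches cfg.INPUT.FORMAT. The cv2
# loading below is illustrative only.
import cv2

predictor = DefaultPredictor(cfg)   # cfg built elsewhere; MODEL.WEIGHTS points at a checkpoint
image = cv2.imread("demo.jpg")      # BGR ndarray, matching cfg.INPUT.FORMAT == "BGR"
outputs = predictor(image)          # assumes a __call__ that runs inference on one image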