def do_train(cfg):
    """Run the full training loop described by a lazy config.

    Args:
        cfg: a lazy config object providing ``model``, ``optimizer``,
            ``dataloader.train``, ``lr_multiplier`` and a ``train`` namespace
            (device, output_dir, init_checkpoint, amp.enabled, ddp,
            checkpointer, eval_period, log_period, max_iter).
    """
    model = instantiate(cfg.model)
    logger = logging.getLogger("detectron2")
    logger.info("Model:\n{}".format(model))
    model.to(cfg.train.device)

    # The optimizer config needs a reference to the built model to create
    # its parameter groups.
    cfg.optimizer.params.model = model
    optim = instantiate(cfg.optimizer)

    train_loader = instantiate(cfg.dataloader.train)

    # Wrap in DDP (a no-op for single-process runs).
    model = create_ddp_model(model, **cfg.train.ddp)
    trainer = (AMPTrainer if cfg.train.amp.enabled else SimpleTrainer)(
        model, train_loader, optim)
    # Registering the trainer lets the checkpoint also store the iteration counter.
    checkpointer = DetectionCheckpointer(
        model,
        cfg.train.output_dir,
        optimizer=optim,
        trainer=trainer,
    )
    trainer.register_hooks([
        hooks.IterationTimer(),
        hooks.LRScheduler(scheduler=instantiate(cfg.lr_multiplier)),
        # Only the main process writes checkpoints/logs; None entries are
        # ignored by register_hooks.
        hooks.PeriodicCheckpointer(checkpointer, **cfg.train.checkpointer)
        if comm.is_main_process() else None,
        hooks.EvalHook(cfg.train.eval_period, lambda: do_test(cfg, model)),
        hooks.PeriodicWriter(
            default_writers(cfg.train.output_dir, cfg.train.max_iter),
            period=cfg.train.log_period,
        ) if comm.is_main_process() else None,
    ])

    checkpointer.resume_or_load(cfg.train.init_checkpoint, resume=True)
    # BUG FIX: the original always used start_iter = 0 even though it resumed
    # from a checkpoint above, which would re-run already-finished iterations.
    # The checkpoint stores the iteration that just finished, so continue at
    # the next one when a checkpoint was actually found.
    if checkpointer.has_checkpoint():
        start_iter = trainer.iter + 1
    else:
        start_iter = 0
    trainer.train(start_iter, cfg.train.max_iter)
def do_flop(cfg):
    """Measure per-operator and total GFlops of the configured model over
    ``args.num_inputs`` samples from the test dataloader."""
    if isinstance(cfg, CfgNode):
        data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
        model = build_model(cfg)
        DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
    else:
        data_loader = instantiate(cfg.dataloader.test)
        model = instantiate(cfg.model)
        model.to(cfg.train.device)
        DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
    model.eval()

    counts = Counter()
    total_flops = []
    for sample_idx, inputs in zip(tqdm.trange(args.num_inputs), data_loader):  # noqa
        analysis = FlopCountAnalysis(model, inputs)
        if sample_idx > 0:
            # Warnings are informative once; silence them for later samples.
            analysis.unsupported_ops_warnings(False).uncalled_modules_warnings(False)
        counts += analysis.by_operator()
        total_flops.append(analysis.total())

    # `analysis` holds the last sample only; the table is per-sample anyway.
    logger.info(
        "Flops table computed from only one input sample:\n" + flop_count_table(analysis)
    )
    logger.info(
        "Average GFlops for each type of operators:\n"
        + str([(k, v / (sample_idx + 1) / 1e9) for k, v in counts.items()])
    )
    logger.info(
        "Total GFlops: {:.1f}±{:.1f}".format(
            np.mean(total_flops) / 1e9, np.std(total_flops) / 1e9
        )
    )
def do_test(cfg, model):
    """Run inference with the configured evaluator, print the results, and
    return them; return None when no evaluator is configured."""
    if "evaluator" not in cfg.dataloader:
        return None
    results = inference_on_dataset(
        model,
        instantiate(cfg.dataloader.test),
        instantiate(cfg.dataloader.evaluator),
    )
    print_csv_format(results)
    return results
def do_train(args, cfg):
    """
    Args:
        args: command-line arguments; only ``args.resume`` is read here.
        cfg: an object with the following attributes:
            model: instantiate to a module
            dataloader.{train,test}: instantiate to dataloaders
            dataloader.evaluator: instantiate to evaluator for test set
            optimizer: instantiate to an optimizer
            lr_multiplier: instantiate to a fvcore scheduler
            train: other misc config defined in `configs/common/train.py`, including:
                output_dir (str)
                init_checkpoint (str)
                amp.enabled (bool)
                max_iter (int)
                eval_period, log_period (int)
                device (str)
                checkpointer (dict)
                ddp (dict)
    """
    model = instantiate(cfg.model)
    logger = logging.getLogger("detectron2")
    logger.info("Model:\n{}".format(model))
    model.to(cfg.train.device)

    # The optimizer config needs the built model to create parameter groups.
    cfg.optimizer.params.model = model
    optim = instantiate(cfg.optimizer)

    train_loader = instantiate(cfg.dataloader.train)

    # Wrap in DistributedDataParallel (no-op for single-process runs).
    model = create_ddp_model(model, **cfg.train.ddp)
    trainer = (AMPTrainer if cfg.train.amp.enabled else SimpleTrainer)(
        model, train_loader, optim)
    # The trainer is registered so its iteration counter is checkpointed too.
    checkpointer = DetectionCheckpointer(
        model,
        cfg.train.output_dir,
        optimizer=optim,
        trainer=trainer,
    )
    trainer.register_hooks([
        hooks.IterationTimer(),
        hooks.LRScheduler(scheduler=instantiate(cfg.lr_multiplier)),
        # Only the main process writes checkpoints/logs; the None entries
        # produced on other ranks are ignored by register_hooks.
        hooks.PeriodicCheckpointer(checkpointer, **cfg.train.checkpointer)
        if comm.is_main_process() else None,
        hooks.EvalHook(cfg.train.eval_period, lambda: do_test(cfg, model)),
        hooks.PeriodicWriter(
            default_writers(cfg.train.output_dir, cfg.train.max_iter),
            period=cfg.train.log_period,
        ) if comm.is_main_process() else None,
    ])

    checkpointer.resume_or_load(cfg.train.init_checkpoint, resume=args.resume)
    if args.resume and checkpointer.has_checkpoint():
        # The checkpoint stores the training iteration that just finished, thus we start
        # at the next iteration
        start_iter = trainer.iter + 1
    else:
        start_iter = 0
    trainer.train(start_iter, cfg.train.max_iter)
def test_instantiate_other_obj(self):
    """Objects that are not lazy configs pass through instantiate unchanged."""
    self.assertEqual(instantiate(5), 5)
    values = [3, 4, 5]
    self.assertEqual(instantiate(values), values)
    obj = TestClass(1)
    self.assertIs(instantiate(obj), obj)
    mapping = {"xx": "yy"}
    self.assertIs(instantiate(mapping), mapping)
def create_data_benchmark(cfg, args):
    """Build a DataLoaderBenchmark from either a lazy (.py) or yaml config."""
    if not args.config_file.endswith(".py"):
        kwargs = build_detection_train_loader.from_config(cfg)
        # The benchmark loader does not accept this argument.
        kwargs.pop("aspect_ratio_grouping", None)
        kwargs["_target_"] = DataLoaderBenchmark
        return instantiate(kwargs)
    dl_cfg = cfg.dataloader.train
    # Re-target the existing train-loader config at the benchmark class.
    dl_cfg._target_ = DataLoaderBenchmark
    return instantiate(dl_cfg)
def test_interpolation(self):
    """``${int_arg}`` interpolation tracks later assignments to int_arg."""
    cfg = L(TestClass)(int_arg=3, extra_arg="${int_arg}")
    cfg.int_arg = 4
    self.assertEqual(instantiate(cfg).extra_arg, 4)

    # Interpolation must survive a save/load round trip.
    cfg = reload_lazy_config(cfg)
    cfg.int_arg = 5
    self.assertEqual(instantiate(cfg).extra_arg, 5)
def test_process_unmatched_prev_idx(self):
    """Unmatched previous instances should be carried over with their IDs."""
    tracker_cfg = {
        "_target_": "detectron2.tracking.iou_weighted_hungarian_bbox_iou_tracker.IOUWeightedHungarianBBoxIOUTracker",  # noqa
        "video_height": self._img_size[0],
        "video_width": self._img_size[1],
        "max_num_instances": self._max_num_instances,
        "max_lost_frame_count": self._max_lost_frame_count,
        "min_box_rel_dim": self._min_box_rel_dim,
        "min_instance_period": self._min_instance_period,
        "track_iou_threshold": self._track_iou_threshold,
    }
    tracker = instantiate(tracker_cfg)

    previous = tracker._initialize_extra_fields(self._prev_instances)
    previous.ID_period = [3, 3]
    tracker._prev_instances = previous

    current = tracker._initialize_extra_fields(self._curr_instances)
    current = tracker._process_matched_idx(current, np.array([0]), np.array([1]))
    current = tracker._process_unmatched_idx(current, np.array([0]))
    current = tracker._process_unmatched_prev_idx(current, np.array([1]))
    self.assertTrue(current.ID[2] == 0)
def get(config_path, trained: bool = False, device: Optional[str] = None):
    """
    Build a model from a config file under Detectron2's official ``configs/``.

    Args:
        config_path (str): config file name relative to detectron2's "configs/"
            directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
        trained (bool): see :func:`get_config`.
        device (str or None): overwrite the device in config, if given.

    Returns:
        nn.Module: a detectron2 model. Will be in training mode.

    Example:
    ::
        from detectron2 import model_zoo
        model = model_zoo.get("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml", trained=True)
    """
    cfg = get_config(config_path, trained)
    # Without CUDA, fall back to CPU unless the caller chose a device.
    if device is None and not torch.cuda.is_available():
        device = "cpu"

    if isinstance(cfg, CfgNode):
        if device is not None:
            cfg.MODEL.DEVICE = device
        model = build_model(cfg)
        DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
        return model

    model = instantiate(cfg.model)
    if device is not None:
        model = model.to(device)
    if "train" in cfg and "init_checkpoint" in cfg.train:
        DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
    return model
def do_parameter(cfg):
    """Log a parameter-count table (depth 5) for the configured model."""
    model = build_model(cfg) if isinstance(cfg, CfgNode) else instantiate(cfg.model)
    logger.info("Parameter Count:\n" + parameter_count_table(model, max_depth=5))
def _get_kwargs(self):
    """Return instantiated keyword arguments for build_detection_train_loader."""
    cfg = model_zoo.get_config("common/data/coco.py").dataloader.train
    cfg.dataset.names = "coco_2017_val_100"
    # Drop the target so the remaining entries can be passed as plain kwargs.
    cfg.pop("_target_")
    return {key: instantiate(value) for key, value in cfg.items()}
def test_instantiate_lst(self):
    """A list argument is recursively instantiated element by element."""
    nested = [1, 2, L(TestClass)(int_arg=1)]
    result = instantiate(L(TestClass)(int_arg=nested)).int_arg
    self.assertEqual(result[:2], [1, 2])
    self.assertIsInstance(result[2], TestClass)
    self.assertEqual(result[2].int_arg, 1)
def test_instantiate_dataclass_as_subconfig(self):
    """A dataclass value nested inside a lazy config survives instantiation."""
    cfg = L(TestClass)(int_arg=1, extra_arg=ShapeSpec(channels=1, width=3))
    # Check both the in-memory config and a serialized round trip.
    for candidate in (cfg, reload_lazy_config(cfg)):
        obj = instantiate(candidate)
        self.assertIsInstance(obj.extra_arg, ShapeSpec)
        self.assertEqual(obj.extra_arg.channels, 1)
        self.assertEqual(obj.extra_arg.height, None)
def test_instantiate_dataclass(self):
    """A lazy config targeting a dataclass instantiates to that dataclass."""
    cfg = L(ShapeSpec)(channels=1, width=3)
    # Check both the in-memory config and a serialized round trip.
    for candidate in (cfg, reload_lazy_config(cfg)):
        obj = instantiate(candidate)
        self.assertIsInstance(obj, ShapeSpec)
        self.assertEqual(obj.channels, 1)
        self.assertEqual(obj.height, None)
def test_basic_construct(self):
    """Nested configs instantiate recursively and honor relative interpolation."""
    objconf = L(TestClass)(
        int_arg=3,
        list_arg=[10],
        dict_arg={},
        extra_arg=L(TestClass)(int_arg=4, list_arg="${..list_arg}"),
    )

    first = instantiate(objconf)
    self.assertIsInstance(first, TestClass)
    self.assertEqual(first.int_arg, 3)
    self.assertEqual(first.extra_arg.int_arg, 4)
    # "${..list_arg}" resolves to the parent's list_arg.
    self.assertEqual(first.extra_arg.list_arg, first.list_arg)

    # Overriding the interpolated field breaks the link.
    objconf.extra_arg.list_arg = [5]
    second = instantiate(objconf)
    self.assertIsInstance(second, TestClass)
    self.assertEqual(second.extra_arg.list_arg, [5])
def test_basic_construct(self):
    """Nested configs instantiate recursively, before and after serialization."""
    cfg = L(TestClass)(
        int_arg=3,
        list_arg=[10],
        dict_arg={},
        extra_arg=L(TestClass)(int_arg=4, list_arg="${..list_arg}"),
    )

    for variant in (cfg, reload_lazy_config(cfg)):
        obj = instantiate(variant)
        self.assertIsInstance(obj, TestClass)
        self.assertEqual(obj.int_arg, 3)
        self.assertEqual(obj.extra_arg.int_arg, 4)
        self.assertEqual(obj.extra_arg.list_arg, obj.list_arg)

        # Test interpolation: overriding the field breaks the link.
        variant.extra_arg.list_arg = [5]
        obj = instantiate(variant)
        self.assertIsInstance(obj, TestClass)
        self.assertEqual(obj.extra_arg.list_arg, [5])
def get_model_no_weights(config_path):
    """
    Like model_zoo.get, but do not load any weights (even pretrained)
    """
    cfg = model_zoo.get_config(config_path)
    if not isinstance(cfg, CfgNode):
        return instantiate(cfg.model)
    if not torch.cuda.is_available():
        cfg.MODEL.DEVICE = "cpu"
    return build_model(cfg)
def main(args):
    """Entry point: load/override the lazy config, then train or evaluate."""
    cfg = LazyConfig.load(args.config_file)
    cfg = LazyConfig.apply_overrides(cfg, args.opts)
    default_setup(cfg, args)

    if not args.eval_only:
        do_train(args, cfg)
        return

    model = instantiate(cfg.model)
    model = create_ddp_model(model)
    DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
    print(do_test(cfg, model))
def test_instantiate_namedtuple(self):
    """A namedtuple value survives YAML serialization and instantiation."""
    cfg = L(TestClass)(int_arg=ShapeSpec(channels=1, width=3))
    # Round-trip through a YAML file to exercise serialization.
    with tempfile.TemporaryDirectory() as tmpdir:
        path = os.path.join(tmpdir, "d2_test.yaml")
        OmegaConf.save(cfg, path)
        with open(path) as fh:
            # unsafe_load is acceptable here: the file was just written by us.
            cfg = yaml.unsafe_load(fh)

    obj = instantiate(cfg)
    self.assertIsInstance(obj.int_arg, ShapeSpec)
    self.assertEqual(obj.int_arg.channels, 1)
def test_init(self):
    """Constructing the tracker from a config stores the video height."""
    tracker = instantiate(
        {
            "_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker",
            "video_height": self._img_size[0],
            "video_width": self._img_size[1],
            "max_num_instances": self._max_num_instances,
            "max_lost_frame_count": self._max_lost_frame_count,
            "min_box_rel_dim": self._min_box_rel_dim,
            "min_instance_period": self._min_instance_period,
            "track_iou_threshold": self._track_iou_threshold,
        }
    )
    self.assertTrue(tracker._video_height == self._img_size[0])
def do_activation(cfg):
    """Count activations (in millions) of the configured model over
    ``args.num_inputs`` samples from the test dataloader, logging the
    per-operator average and the total mean±std."""
    if isinstance(cfg, CfgNode):
        data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
        model = build_model(cfg)
        DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
    else:
        data_loader = instantiate(cfg.dataloader.test)
        model = instantiate(cfg.model)
        model.to(cfg.train.device)
        DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
    model.eval()

    counts = Counter()
    total_activations = []
    for idx, data in zip(tqdm.trange(args.num_inputs), data_loader):  # noqa
        count = activation_count_operators(model, data)
        counts += count
        total_activations.append(sum(count.values()))
    # BUG FIX: average over the number of processed samples (idx + 1), not the
    # last loop index (idx). The old `v / idx` divided by n - 1 (and would
    # divide by zero for a single sample); do_flop already uses (idx + 1).
    logger.info(
        "(Million) Activations for Each Type of Operators:\n"
        + str([(k, v / (idx + 1)) for k, v in counts.items()])
    )
    logger.info(
        "Total (Million) Activations: {}±{}".format(
            np.mean(total_activations), np.std(total_activations)
        )
    )
def benchmark_eval(args):
    """Benchmark inference speed: 100 cached inputs, 5 warmup calls, 300 timed
    iterations, all with dataloader workers disabled."""
    cfg = setup(args)
    if args.config_file.endswith(".yaml"):
        model = build_model(cfg)
        DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
        cfg.defrost()
        cfg.DATALOADER.NUM_WORKERS = 0
        data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
    else:
        model = instantiate(cfg.model)
        model.to(cfg.train.device)
        DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
        cfg.dataloader.num_workers = 0
        data_loader = instantiate(cfg.dataloader.test)
    model.eval()
    logger.info("Model:\n{}".format(model))

    # Cache 100 inputs so data loading is excluded from the measurement.
    dummy_data = DatasetFromList(list(itertools.islice(data_loader, 100)), copy=False)

    def endless_inputs():
        while True:
            yield from dummy_data

    for warmup_idx in range(5):  # warmup
        model(dummy_data[warmup_idx])

    max_iter = 300
    timer = Timer()
    with tqdm.tqdm(total=max_iter) as pbar:
        for step, inputs in enumerate(endless_inputs()):
            if step == max_iter:
                break
            model(inputs)
            pbar.update()
    logger.info("{} iters in {} seconds.".format(max_iter, timer.seconds()))
def test_initialize_extra_fields(self):
    """_initialize_extra_fields attaches ID, ID_period and lost_frame_count."""
    tracker_cfg = {
        "_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker",
        "video_height": self._img_size[0],
        "video_width": self._img_size[1],
        "max_num_instances": self._max_num_instances,
        "max_lost_frame_count": self._max_lost_frame_count,
        "min_box_rel_dim": self._min_box_rel_dim,
        "min_instance_period": self._min_instance_period,
        "track_iou_threshold": self._track_iou_threshold,
    }
    tracker = instantiate(tracker_cfg)
    instances = tracker._initialize_extra_fields(self._curr_instances)
    for field in ("ID", "ID_period", "lost_frame_count"):
        self.assertTrue(instances.has(field))
def test_update(self):
    """After two updates, IDs from the first frame are matched onto the second."""
    tracker = instantiate(
        {
            "_target_": "detectron2.tracking.iou_weighted_hungarian_bbox_iou_tracker.IOUWeightedHungarianBBoxIOUTracker",  # noqa
            "video_height": self._img_size[0],
            "video_width": self._img_size[1],
            "max_num_instances": self._max_num_instances,
            "max_lost_frame_count": self._max_lost_frame_count,
            "min_box_rel_dim": self._min_box_rel_dim,
            "min_instance_period": self._min_instance_period,
            "track_iou_threshold": self._track_iou_threshold,
        }
    )
    _ = tracker.update(self._prev_instances)
    current = tracker.update(self._curr_instances)
    self.assertTrue(current.ID[0] == 1)
    self.assertTrue(current.ID[1] == 0)
def test_assign_new_id(self):
    """_assign_new_id hands out fresh IDs continuing after the initial ones."""
    tracker_cfg = {
        "_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker",
        "video_height": self._img_size[0],
        "video_width": self._img_size[1],
        "max_num_instances": self._max_num_instances,
        "max_lost_frame_count": self._max_lost_frame_count,
        "min_box_rel_dim": self._min_box_rel_dim,
        "min_instance_period": self._min_instance_period,
        "track_iou_threshold": self._track_iou_threshold,
    }
    tracker = instantiate(tracker_cfg)
    # Work on a copy so the shared fixture is not mutated.
    instances = tracker._initialize_extra_fields(deepcopy(self._curr_instances))
    instances = tracker._assign_new_id(instances)
    self.assertTrue(len(instances.ID) == 2)
    self.assertTrue(instances.ID[0] == 2)
    self.assertTrue(instances.ID[1] == 3)
def test_update(self):
    """First update assigns fresh IDs; the second matches them across frames."""
    tracker = instantiate(
        {
            "_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker",
            "video_height": self._img_size[0],
            "video_width": self._img_size[1],
            "max_num_instances": self._max_num_instances,
            "max_lost_frame_count": self._max_lost_frame_count,
            "min_box_rel_dim": self._min_box_rel_dim,
            "min_instance_period": self._min_instance_period,
            "track_iou_threshold": self._track_iou_threshold,
        }
    )
    first = tracker.update(self._prev_instances)
    self.assertTrue(len(first.ID) == 2)
    self.assertTrue(first.ID[0] == 0)
    self.assertTrue(first.ID[1] == 1)

    second = tracker.update(self._curr_instances)
    self.assertTrue(len(second.ID) == 2)
    self.assertTrue(second.ID[0] == 1)
    self.assertTrue(second.ID[1] == 0)
def test_assign_cost_matrix_values(self):
    """Matched pairs write -IoU into the cost matrix; other cells stay inf."""
    tracker_cfg = {
        "_target_": "detectron2.tracking.iou_weighted_hungarian_bbox_iou_tracker.IOUWeightedHungarianBBoxIOUTracker",  # noqa
        "video_height": self._img_size[0],
        "video_width": self._img_size[1],
        "max_num_instances": self._max_num_instances,
        "max_lost_frame_count": self._max_lost_frame_count,
        "min_box_rel_dim": self._min_box_rel_dim,
        "min_instance_period": self._min_instance_period,
        "track_iou_threshold": self._track_iou_threshold,
    }
    tracker = instantiate(tracker_cfg)

    bbox_pairs = [
        {"idx": 0, "prev_idx": 1, "IoU": 0.6},
        {"idx": 1, "prev_idx": 0, "IoU": 0.8},
    ]
    cost_matrix = np.full((2, 2), np.inf)
    expected = copy.deepcopy(cost_matrix)
    expected[0, 1] = -0.6
    expected[1, 0] = -0.8

    cost_matrix = tracker.assign_cost_matrix_values(cost_matrix, bbox_pairs)
    self.assertTrue(np.allclose(cost_matrix, expected))
def test_instantiate_dataclass(self):
    """A lazy config targeting a dataclass fills its fields on instantiation."""
    obj = instantiate(L(TestDataClass)(x=1, y="s"))
    self.assertEqual(obj.x, 1)
    self.assertEqual(obj.y, "s")
        # NOTE(review): this chunk continues an __init__ whose beginning is
        # outside this view; `channels` and `self.weight` are presumably
        # defined there — confirm against the full class.
        self.bias = nn.Parameter(torch.zeros(channels))
        # Cursor into the contained layers; forward() advances it round-robin.
        self._pos = 0

    def forward(self, x):
        # Apply the layer at the current cursor, then advance the cursor,
        # wrapping around after the last contained layer.
        ret = self[self._pos](x)
        self._pos = (self._pos + 1) % len(self)

        # Shared affine transform applied after whichever layer ran;
        # reshaped to (1, C, 1, 1) to broadcast over NCHW activations.
        w = self.weight.reshape(1, -1, 1, 1)
        b = self.bias.reshape(1, -1, 1, 1)
        return ret * w + b


if __name__ == "__main__":
    # Script entry: re-estimate BN statistics (PreciseBN) for a RetinaNet
    # checkpoint using the cycling norm list above, then run evaluation.
    checkpoint = sys.argv[1]
    cfg = LazyConfig.load_rel("./configs/retinanet_SyncBNhead.py")
    model = cfg.model
    # Replace the head's norm factory with the cycling list; one entry per
    # head input feature (presumably one norm per feature level — confirm).
    model.head.norm = lambda c: CycleBatchNormList(len(model.head_in_features), c)
    model = instantiate(model)
    model.cuda()
    DetectionCheckpointer(model).load(checkpoint)

    cfg.dataloader.train.total_batch_size = 8
    logger.info("Running PreciseBN ...")
    # EventStorage is required by detectron2 models during forward passes.
    with EventStorage(), torch.no_grad():
        update_bn_stats(model, instantiate(cfg.dataloader.train), 500)

    logger.info("Running evaluation ...")
    inference_on_dataset(
        model, instantiate(cfg.dataloader.test), instantiate(cfg.dataloader.evaluator)
    )
def do_structure(cfg):
    """Log the string representation (structure) of the configured model."""
    model = build_model(cfg) if isinstance(cfg, CfgNode) else instantiate(cfg.model)
    logger.info("Model Structure:\n" + str(model))