def test_pit_model(self):
    cfg = CN()
    cfg.MODEL = CN()
    add_pit_backbone_config(cfg)
    build_model = BACKBONE_REGISTRY.get("pit_d2go_model_wrapper")
    pit_models = {
        "pit_ti_ours": 160,
        "pit_ti": 224,
        "pit_s_ours_v1": 256,
        "pit_s": 224,
    }
    pit_model_weights = {
        "pit_ti_ours": "manifold://mobile_vision_workflows/tree/workflows/kyungminkim/20210515/deit_[model]pit_scalable_distilled_[bs]128_[mcfg]pit_ti_ours_.HImkjNCpJI/checkpoint_best.pth",
        "pit_ti": "manifold://mobile_vision_workflows/tree/workflows/kyungminkim/20210515/deit_[model]pit_scalable_distilled_[bs]128_[mcfg]pit_ti_.QJeFNUfYOD/checkpoint_best.pth",
        "pit_s_ours_v1": "manifold://mobile_vision_workflows/tree/workflows/kyungminkim/20210515/deit_[model]pit_scalable_distilled_[bs]64_[mcfg]pit_s_ours_v1_.LXdwyBDaNY/checkpoint_best.pth",
        "pit_s": "manifold://mobile_vision_workflows/tree/workflows/kyungminkim/20210515/deit_[model]pit_scalable_distilled_[bs]128_[mcfg]pit_s_.zReQLPOuJe/checkpoint_best.pth",
    }
    for model_name, org_size in pit_models.items():
        print("model_name", model_name)
        cfg.MODEL.PIT.MODEL_CONFIG = (
            f"manifold://mobile_vision_workflows/tree/workflows/wbc/deit/model_cfgs/{model_name}.json"
        )
        cfg.MODEL.PIT.WEIGHTS = pit_model_weights[model_name]
        cfg.MODEL.PIT.DILATED = True
        model = build_model(cfg, None)
        model.eval()
        # check that the model handles inputs whose size differs from the
        # original training resolution
        for input_size_h in [org_size, 192, 224, 256, 320]:
            for input_size_w in [org_size, 192, 224, 256, 320]:
                x = torch.rand(1, 3, input_size_h, input_size_w)
                y = model(x)
                print(f"x.shape: {x.shape}, y.shape: {y.shape}")
@classmethod
def cast_from_other_class(cls, other_cfg):
    """Cast an instance of other CfgNode to D2Go's CfgNode (or its subclass)"""
    new_cfg = CfgNode(other_cfg)
    # copy all fields inside __dict__, this will preserve fields like __deprecated_keys__
    for k, v in other_cfg.__dict__.items():
        new_cfg.__dict__[k] = v
    return new_cfg
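# Hypothetical usage sketch (not from the original source; it mirrors the
# `get_default_cfg` functions later in this section): upgrade a detectron2
# config to a D2Go CfgNode while preserving bookkeeping fields such as
# __deprecated_keys__.
from detectron2.config import get_cfg as get_d2_cfg

d2_cfg = get_d2_cfg()
d2go_cfg = CfgNode.cast_from_other_class(d2_cfg)
assert isinstance(d2go_cfg, CfgNode)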
def test_diff_cfg_no_new_allowed(self):
    """Check that if new_allowed is False, new keys cause a KeyError"""
    # create base config
    cfg1 = CfgNode()
    cfg1.A = CfgNode()
    cfg1.A.set_new_allowed(False)
    cfg1.A.Y = 2

    # case 2: new_allowed not set, new config has new keys
    cfg2 = cfg1.clone()
    cfg2.A.X = 2
    self.assertRaises(KeyError, get_diff_cfg, cfg1, cfg2)
def _add_rcnn_default_config(_C):
    _C.EXPORT_CAFFE2 = CfgNode()
    _C.EXPORT_CAFFE2.USE_HEATMAP_MAX_KEYPOINT = False

    # Options about how to export the model
    _C.RCNN_EXPORT = CfgNode()
    # whether or not to include the postprocess (GeneralizedRCNN._postprocess)
    # step inside the exported model
    _C.RCNN_EXPORT.INCLUDE_POSTPROCESS = False

    _C.RCNN_PREPARE_FOR_EXPORT = "default_rcnn_prepare_for_export"
    _C.RCNN_PREPARE_FOR_QUANT = "default_rcnn_prepare_for_quant"
    _C.RCNN_PREPARE_FOR_QUANT_CONVERT = "default_rcnn_prepare_for_quant_convert"
def test_modeling_hook_cfg(self):
    """Create model with modeling hook using build_model"""
    cfg = CfgNode()
    cfg.MODEL = CfgNode()
    cfg.MODEL.DEVICE = "cpu"
    cfg.MODEL.META_ARCHITECTURE = "TestArch"
    cfg.MODEL.MODELING_HOOKS = ["PlusOneHook", "TimesTwoHook"]
    model = build_model(cfg)
    self.assertEqual(model(2), 10)
    self.assertTrue(hasattr(model, "_modeling_hooks"))
    self.assertTrue(hasattr(model, "unapply_modeling_hooks"))
    orig_model = model.unapply_modeling_hooks()
    self.assertIsInstance(orig_model, TestArch)
    self.assertEqual(orig_model(2), 4)
def maybe_override_output_dir(cfg: CfgNode, output_dir: Optional[str]) -> None:
    """Overrides the output directory if `output_dir` is not None."""
    if output_dir is not None and output_dir != cfg.OUTPUT_DIR:
        # log before overriding, otherwise both values in the message would
        # show the new directory
        logger.warning(
            f"Override cfg.OUTPUT_DIR ({cfg.OUTPUT_DIR}) to be the same as "
            f"output_dir {output_dir}"
        )
        cfg.OUTPUT_DIR = output_dir
def maybe_override_output_dir(cfg: CfgNode, output_dir: str):
    if cfg.OUTPUT_DIR != output_dir:
        with temp_defrost(cfg):
            logger.warning(
                "Override cfg.OUTPUT_DIR ({}) to be the same as output_dir {}".format(
                    cfg.OUTPUT_DIR, output_dir
                )
            )
            cfg.OUTPUT_DIR = output_dir
def create_cfg_from_cli_args(args, default_cfg):
    """
    Instead of loading from defaults.py, this binary only includes the
    necessary configs, building them from scratch and overriding them from
    args. There are two levels of config:
        _C: the config system used by this binary, which is a subset of the
            training config. It is overridden by configurable_cfg, and can
            also be overridden by args.opts for convenience.
        configurable_cfg: common configs that the user should explicitly
            specify in the args.
    """
    _C = CN()
    _C.INPUT = default_cfg.INPUT
    _C.DATASETS = default_cfg.DATASETS
    _C.DATALOADER = default_cfg.DATALOADER
    _C.TEST = default_cfg.TEST
    if hasattr(default_cfg, "D2GO_DATA"):
        _C.D2GO_DATA = default_cfg.D2GO_DATA
    if hasattr(default_cfg, "TENSORBOARD"):
        _C.TENSORBOARD = default_cfg.TENSORBOARD

    # NOTE: configs below might not be necessary, but must be added to make
    # the code work
    _C.MODEL = CN()
    _C.MODEL.META_ARCHITECTURE = default_cfg.MODEL.META_ARCHITECTURE
    _C.MODEL.MASK_ON = default_cfg.MODEL.MASK_ON
    _C.MODEL.KEYPOINT_ON = default_cfg.MODEL.KEYPOINT_ON
    _C.MODEL.LOAD_PROPOSALS = default_cfg.MODEL.LOAD_PROPOSALS
    assert _C.MODEL.LOAD_PROPOSALS is False, "caffe2 model doesn't support"

    _C.OUTPUT_DIR = args.output_dir

    configurable_cfg = [
        "DATASETS.TEST", args.datasets,
        "INPUT.MIN_SIZE_TEST", args.min_size,
        "INPUT.MAX_SIZE_TEST", args.max_size,
    ]

    cfg = _C.clone()
    cfg.merge_from_list(configurable_cfg)
    cfg.merge_from_list(args.opts)
    return cfg
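# Hypothetical invocation sketch (not from the original source): the args
# namespace only needs the fields the function reads above; default_cfg
# would typically come from a runner's get_default_cfg(). All concrete
# values here are illustrative assumptions.
from types import SimpleNamespace

args = SimpleNamespace(
    output_dir="/tmp/demo_output",
    datasets=("coco_2017_val",),  # becomes DATASETS.TEST
    min_size=320,                 # becomes INPUT.MIN_SIZE_TEST
    max_size=640,                 # becomes INPUT.MAX_SIZE_TEST
    opts=[],                      # extra "KEY value" overrides
)
cfg = create_cfg_from_cli_args(args, default_cfg)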
def run_with_cmdline_args(args):
    cfg, output_dir, runner = prepare_for_launch(args)

    inference_config = None
    if args.inference_config_file:
        inference_config = CfgNode(
            CfgNode.load_yaml_with_base(args.inference_config_file)
        )

    return main(
        cfg,
        output_dir,
        runner,
        # binary specific optional arguments
        predictor_types=args.predictor_types,
        compare_accuracy=args.compare_accuracy,
        skip_if_fail=args.skip_if_fail,
        inference_config=inference_config,
    )
def test_diff_cfg_with_new_allowed(self):
    """Diff config with new keys and new_allowed set to True"""
    # create base config
    cfg1 = CfgNode()
    cfg1.A = CfgNode()
    cfg1.A.set_new_allowed(True)
    cfg1.A.Y = 2

    # case 3: new_allowed set, new config has new keys
    cfg2 = cfg1.clone()
    cfg2.A.X = 2

    gt = CfgNode()
    gt.A = CfgNode()
    gt.A.X = 2
    self.assertEqual(gt, get_diff_cfg(cfg1, cfg2))
def test_get_diff_cfg(self):
    """Check config that is diff from default config, no new keys"""
    # create base config
    cfg1 = CfgNode()
    cfg1.A = CfgNode()
    cfg1.A.Y = 2

    # case 1: new_allowed not set, new config has only old keys
    cfg2 = cfg1.clone()
    cfg2.set_new_allowed(False)
    cfg2.A.Y = 3

    gt = CfgNode()
    gt.A = CfgNode()
    gt.A.Y = 3
    self.assertEqual(gt, get_diff_cfg(cfg1, cfg2))
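# A minimal sketch of the diff logic that the three get_diff_cfg tests in
# this section exercise (hypothetical; d2go's actual get_diff_cfg
# implementation may differ): recurse over the new config, keep values that
# changed, and raise KeyError for new keys unless the old node allows them.
def get_diff_cfg(old_cfg, new_cfg):
    def _diff(old, new):
        diff = CfgNode()
        for key, value in new.items():
            if key not in old:
                if not old.is_new_allowed():
                    raise KeyError(f"New key {key} found but new_allowed is False")
                diff[key] = value
            elif isinstance(value, CfgNode):
                sub_diff = _diff(old[key], value)
                if len(sub_diff) > 0:
                    diff[key] = sub_diff
            elif old[key] != value:
                diff[key] = value
        return diff

    return _diff(old_cfg, new_cfg)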
def test_modeling_hook_copy(self):
    """Create model with modeling hook, the model could be copied"""
    cfg = CfgNode()
    cfg.MODEL = CfgNode()
    cfg.MODEL.DEVICE = "cpu"
    cfg.MODEL.META_ARCHITECTURE = "TestArch"
    cfg.MODEL.MODELING_HOOKS = ["PlusOneHook", "TimesTwoHook"]
    model = build_model(cfg)
    self.assertEqual(model(2), 10)

    model_copy = copy.deepcopy(model)

    orig_model = model.unapply_modeling_hooks()
    self.assertIsInstance(orig_model, TestArch)
    self.assertEqual(orig_model(2), 4)

    orig_model_copy = model_copy.unapply_modeling_hooks()
    self.assertEqual(orig_model_copy(2), 4)
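# Hypothetical sketch of the hooks the two modeling-hook tests above assume
# (the real PlusOneHook / TimesTwoHook and d2go's hook API live elsewhere
# and may differ). With TestArch(2) == 4, applying PlusOneHook and then
# TimesTwoHook gives (4 + 1) * 2 == 10, matching the assertions.
import torch.nn as nn

class _ArithmeticWrapper(nn.Module):
    """Wraps a model so its output becomes (output + add) * mul."""

    def __init__(self, wrapped, add=0, mul=1):
        super().__init__()
        self.wrapped = wrapped
        self.add = add
        self.mul = mul

    def forward(self, x):
        return (self.wrapped(x) + self.add) * self.mul

class PlusOneHook:
    def apply(self, model):
        return _ArithmeticWrapper(model, add=1)

    def unapply(self, model):
        return model.wrapped

class TimesTwoHook:
    def apply(self, model):
        return _ArithmeticWrapper(model, mul=2)

    def unapply(self, model):
        return model.wrapped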
def setup_after_launch(
    cfg: CfgNode,
    output_dir: str,
    runner: Optional[BaseRunner] = None,
    _scale_world_size: bool = True,  # HACK: temporarily allow lightning_train_net to bypass this
):
    """
    Binary-level setup after entering DDP, including
        - creating working directory
        - setting up logger
        - logging environment
        - printing and dumping config
        - (optional) initializing runner
    """
    create_dir_on_global_main_process(output_dir)
    setup_loggers(output_dir)
    log_system_info()

    cfg.freeze()
    maybe_override_output_dir(cfg, output_dir)
    logger.info("Running with full config:\n{}".format(cfg))
    dump_cfg(cfg, os.path.join(output_dir, "config.yaml"))

    if runner:
        logger.info("Initializing runner ...")
        runner = initialize_runner(runner, cfg)
        logger.info("Running with runner: {}".format(runner))

    # save the diff config
    if runner:
        default_cfg = runner.get_default_cfg()
        dump_cfg(
            get_diff_cfg(default_cfg, cfg),
            os.path.join(output_dir, "diff_config.yaml"),
        )
    else:
        # TODO: support getting default_cfg without runner
        pass

    # scale the config after dumping, so that the dumped config files keep
    # the original world size
    if _scale_world_size:
        auto_scale_world_size(cfg, new_world_size=comm.get_world_size())
def test_merge_from_list_with_new_allowed(self):
    """
    YACS's merge_from_list doesn't take new_allowed into account; D2Go
    overrides its behavior, and this test covers it.
    """
    # new_allowed is not set
    cfg = CfgNode()
    cfg.A = CfgNode()
    cfg.A.X = 1
    self.assertRaises(Exception, cfg.merge_from_list, ["A.Y", "2"])

    # new_allowed is set for the sub key
    cfg = CfgNode()
    cfg.A = CfgNode(new_allowed=True)
    cfg.A.X = 1
    cfg.merge_from_list(["A.Y", "2"])
    # note that the string will be converted to a number
    self.assertEqual(cfg.A.Y, 2)

    # however, new_allowed is not set for the root key
    self.assertRaises(Exception, cfg.merge_from_list, ["B", "3"])
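# A sketch of how the override could honor new_allowed, shown as a method
# body of the D2Go CfgNode (hypothetical; the real D2Go implementation may
# differ). It reuses YACS's value decoding/coercion helpers, which exist in
# yacs.config.
from yacs.config import _check_and_coerce_cfg_value_type

def merge_from_list(self, cfg_list):
    assert len(cfg_list) % 2 == 0, "Expect a list of key-value pairs"
    for full_key, v in zip(cfg_list[0::2], cfg_list[1::2]):
        d = self
        key_list = full_key.split(".")
        # walk down to the parent node of the final key
        for subkey in key_list[:-1]:
            assert subkey in d, f"Non-existent key: {full_key}"
            d = d[subkey]
        subkey = key_list[-1]
        # the D2Go-specific part: allow new keys on nodes with new_allowed
        if subkey not in d and not d.is_new_allowed():
            raise KeyError(f"Non-existent key: {full_key}")
        value = self._decode_cfg_value(v)  # e.g. "2" -> 2
        if subkey in d:
            value = _check_and_coerce_cfg_value_type(value, d[subkey], subkey, full_key)
        d[subkey] = value
    return self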
@staticmethod
def get_default_cfg():
    _C = super(GeneralizedRCNNRunner, GeneralizedRCNNRunner).get_default_cfg()
    _C.EXPORT_CAFFE2 = CfgNode()
    _C.EXPORT_CAFFE2.USE_HEATMAP_MAX_KEYPOINT = False
    _C.RCNN_PREPARE_FOR_EXPORT = "default_rcnn_prepare_for_export"
    _C.RCNN_PREPARE_FOR_QUANT = "default_rcnn_prepare_for_quant"
    _C.RCNN_PREPARE_FOR_QUANT_CONVERT = "default_rcnn_prepare_for_quant_convert"
    return _C
def _setup_after_launch(cfg: CN, output_dir: str, runner):
    """
    Set things up after entering DDP, including
        - creating working directory
        - setting up logger
        - logging environment
        - initializing runner
    """
    create_dir_on_global_main_process(output_dir)
    comm.synchronize()
    setup_loggers(output_dir)
    cfg.freeze()
    if cfg.OUTPUT_DIR != output_dir:
        with temp_defrost(cfg):
            logger.warning(
                "Override cfg.OUTPUT_DIR ({}) to be the same as output_dir {}".format(
                    cfg.OUTPUT_DIR, output_dir
                )
            )
            cfg.OUTPUT_DIR = output_dir
    dump_cfg(cfg, os.path.join(output_dir, "config.yaml"))
def get_default_cfg():
    """
    Override `get_default_cfg` for adding non-common config.
    """
    from detectron2.config import get_cfg as get_d2_cfg

    cfg = get_d2_cfg()
    # upgrade from D2's CfgNode to D2Go's CfgNode
    cfg = CfgNode.cast_from_other_class(cfg)
    cfg.SOLVER.AUTO_SCALING_METHODS = ["default_scale_d2_configs"]
    return cfg
def test_deit_model(self):
    cfg = CN()
    cfg.MODEL = CN()
    add_deit_backbone_config(cfg)
    build_model = BACKBONE_REGISTRY.get("deit_d2go_model_wrapper")
    deit_models = {
        "8X-7-RM_4": 170,
        "DeiT-Tiny": 224,
        "DeiT-Small": 224,
        "32X-1-RM_2": 221,
        "8X-7": 160,
        "32X-1": 256,
    }
    deit_model_weights = {
        "8X-7-RM_4": "manifold://mobile_vision_workflows/tree/workflows/kyungminkim/20210511/deit_[model]deit_scaling_distill_[bs]128_[mcfg]8X-7-RM_4_.OIXarYpbZw/checkpoint_best.pth",
        "DeiT-Tiny": "manifold://mobile_vision_workflows/tree/workflows/cl114/DeiT-official-ckpt/deit_tiny_distilled_patch16_224-b40b3cf7.pth",
        "DeiT-Small": "manifold://mobile_vision_workflows/tree/workflows/cl114/DeiT-official-ckpt/deit_small_distilled_patch16_224-649709d9.pth",
        "32X-1-RM_2": "manifold://mobile_vision_workflows/tree/workflows/kyungminkim/20210511/deit_[model]deit_scaling_distill_[bs]64_[mcfg]32X-1-RM_2_.xusuFyNMdD/checkpoint_best.pth",
        "8X-7": "manifold://mobile_vision_workflows/tree/workflows/cl114/scaled_best/8X-7.pth",
        "32X-1": "manifold://mobile_vision_workflows/tree/workflows/cl114/scaled_best/32X-1.pth",
    }
    for model_name, org_size in deit_models.items():
        print("model_name", model_name)
        cfg.MODEL.DEIT.MODEL_CONFIG = (
            f"manifold://mobile_vision_workflows/tree/workflows/wbc/deit/model_cfgs/{model_name}.json"
        )
        cfg.MODEL.DEIT.WEIGHTS = deit_model_weights[model_name]
        model = build_model(cfg, None)
        model.eval()
        # check that the model handles inputs whose size differs from the
        # original training resolution
        for input_size_h in [org_size, 192, 224, 256, 320]:
            for input_size_w in [org_size, 192, 224, 256, 320]:
                x = torch.rand(1, 3, input_size_h, input_size_w)
                y = model(x)
                print(f"x.shape: {x.shape}, y.shape: {y.shape}")
def get_default_cfg():
    """
    Override `get_default_cfg` for adding non-common config.
    """
    from detectron2.config import get_cfg as get_d2_cfg

    cfg = get_d2_cfg()
    # upgrade from D2's CfgNode to D2Go's CfgNode
    cfg = CfgNode.cast_from_other_class(cfg)

    try:
        from d2go.runner import get_unintentional_added_configs_during_runner_import

        for key in get_unintentional_added_configs_during_runner_import():
            cfg.register_deprecated_key(key)
    except ImportError:
        pass

    cfg.SOLVER.AUTO_SCALING_METHODS = ["default_scale_d2_configs"]
    return cfg
def do_train(
    cfg: CfgNode, trainer: pl.Trainer, task: GeneralizedRCNNTask
) -> Dict[str, str]:
    """Runs the training loop with the given trainer and task.

    Args:
        cfg: The normalized ConfigNode for this D2Go Task.
        trainer: PyTorch Lightning trainer.
        task: Lightning module instance.

    Returns:
        A map of model name to trained model config path.
    """
    with EventStorage() as storage:
        task.storage = storage
        trainer.fit(task)
        final_ckpt = os.path.join(cfg.OUTPUT_DIR, FINAL_MODEL_CKPT)
        trainer.save_checkpoint(final_ckpt)  # for validation monitor

        trained_cfg = cfg.clone()
        with temp_defrost(trained_cfg):
            trained_cfg.MODEL.WEIGHTS = final_ckpt
        model_configs = dump_trained_model_configs(
            cfg.OUTPUT_DIR, {"model_final": trained_cfg}
        )
    return model_configs
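# Hypothetical usage sketch (construction details are assumptions, not from
# the original source): build a Lightning trainer and a task from the cfg,
# then run do_train and read back the trained model's config path.
import pytorch_lightning as pl

task = GeneralizedRCNNTask(cfg)  # assumed constructor taking the cfg
trainer = pl.Trainer(
    max_steps=cfg.SOLVER.MAX_ITER,
    default_root_dir=cfg.OUTPUT_DIR,
)
model_configs = do_train(cfg, trainer, task)
print(model_configs["model_final"])  # path to the trained model's config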
def _get_default_config():
    cfg = CfgNode()

    cfg.INPUT = CfgNode()
    cfg.INPUT.CROP = CfgNode()
    cfg.INPUT.CROP.ENABLED = False
    cfg.INPUT.CROP.SIZE = (0.9, 0.9)
    cfg.INPUT.CROP.TYPE = "relative_range"

    cfg.MODEL = CfgNode()
    cfg.MODEL.MIN_DIM_SIZE = 360

    cfg.INFERENCE_SDK = CfgNode()
    cfg.INFERENCE_SDK.MODEL = CfgNode()
    cfg.INFERENCE_SDK.MODEL.SCORE_THRESHOLD = 0.8
    cfg.INFERENCE_SDK.IOU_TRACKER = CfgNode()
    cfg.INFERENCE_SDK.IOU_TRACKER.IOU_THRESHOLD = 0.15
    cfg.INFERENCE_SDK.ENABLE_ID_TRACKING = True

    return cfg
def dump_cfg(cfg: CfgNode, path: str) -> None:
    if comm.is_main_process():
        with PathManager.open(path, "w") as f:
            f.write(cfg.dump())
        logger.info("Full config saved to {}".format(path))
def get_default_config():
    cfg = CfgNode()
    cfg.D2GO_DATA = CfgNode()
    cfg.D2GO_DATA.AUG_OPS = CfgNode()
    return cfg
def _test1(cfg):
    cfg.TEST1 = CfgNode()
    cfg.TEST1.X = 1
    return cfg
def _test2(cfg):
    cfg.TEST2 = CfgNode()
    cfg.TEST2.Y = 2
    return cfg