def test_default_dataset(self):
    """Smoke-test train/test loaders built from a locally generated toy dataset.

    The train loader is infinite, so it is only sampled for a few batches;
    the test loader must yield exactly one batch per registered image.
    """
    runner = create_runner("d2go.runner.GeneralizedRCNNRunner")
    cfg = runner.get_default_cfg()
    cfg.DATASETS.TRAIN = ["default_dataset_train"]
    cfg.DATASETS.TEST = ["default_dataset_test"]
    with make_temp_directory("detectron2go_tmp_dataset") as dataset_dir:
        image_dir = os.path.join(dataset_dir, "images")
        os.makedirs(image_dir)
        image_generator = LocalImageGenerator(image_dir, width=80, height=60)

        with register_toy_dataset(
            "default_dataset_train", image_generator, num_images=3
        ):
            train_loader = runner.build_detection_train_loader(cfg)
            # Training loaders have infinite length: stop after 7 batches.
            for batch_index, batch in enumerate(train_loader):
                self.assertIsNotNone(batch)
                if batch_index == 6:
                    break

        with register_toy_dataset(
            "default_dataset_test", image_generator, num_images=3
        ):
            test_loader = runner.build_detection_test_loader(
                cfg, dataset_name="default_dataset_test"
            )
            # The test loader is finite; it must cover all 3 images once.
            collected = list(test_loader)
            self.assertEqual(len(collected), 3)
def get(
    config_path,
    trained: bool = False,
    device: Optional[str] = None,
    runner="d2go.runner.GeneralizedRCNNRunner",
):
    """
    Get a model specified by relative path under Detectron2's official
    ``configs/`` directory.

    Args:
        config_path (str): config file name relative to d2go's "configs/"
            directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
        trained (bool): see :func:`get_config`.
        device (str or None): overwrite the device in config, if given.

    Returns:
        nn.Module: a d2go model. Will be in training mode.

    Example:
    ::
        from d2go import model_zoo
        model = model_zoo.get("faster_rcnn_fbnetv3a_C4.yaml", trained=True)
    """
    cfg = get_config(config_path, trained)

    # Fall back to CPU when no device was requested and CUDA is unavailable.
    if device is None and not torch.cuda.is_available():
        device = "cpu"
    if device is not None:
        cfg.MODEL.DEVICE = device

    runner_instance = create_runner(runner)
    model = runner_instance.build_model(cfg)
    # Load the weights referenced by the (possibly model-zoo) config.
    DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
    return model
def create_fake_detection_data_loader(height, width, is_train):
    """Yield a train or test detection data loader over a generated toy dataset.

    Fix: the original entered ``make_temp_directory`` twice in a row; the outer
    temp directory was created, immediately shadowed by the inner identical
    context manager, and never used. A single context manager is sufficient.

    Args:
        height, width (int): size of the generated images.
        is_train (bool): yield a training loader (infinite) if True, otherwise
            a test loader.

    Yields:
        a detection data loader; the temp dir and dataset registration stay
        alive while the caller consumes the generator.
    """
    runner = create_runner("d2go.runner.GeneralizedRCNNRunner")
    cfg = runner.get_default_cfg()
    cfg.DATASETS.TRAIN = ["default_dataset_train"]
    cfg.DATASETS.TEST = ["default_dataset_test"]
    with make_temp_directory("detectron2go_tmp_dataset") as dataset_dir:
        image_dir = os.path.join(dataset_dir, "images")
        os.makedirs(image_dir)
        image_generator = LocalImageGenerator(image_dir, width=width, height=height)
        if is_train:
            with register_toy_dataset(
                "default_dataset_train", image_generator, num_images=3
            ):
                yield runner.build_detection_train_loader(cfg)
        else:
            with register_toy_dataset(
                "default_dataset_test", image_generator, num_images=3
            ):
                yield runner.build_detection_test_loader(
                    cfg, dataset_name="default_dataset_test"
                )
def create_detection_data_loader_on_toy_dataset(
    cfg, height, width, is_train, runner=None
):
    """
    Args:
        cfg (CfgNode): the config used to create data loader, it can control
            things like resizing, augmentation.
        height, width (int): the height/width of the image files (not the
            resized image size)
        is_train (bool): training or testing
    """
    if runner is None:
        runner = create_runner("d2go.runner.GeneralizedRCNNRunner")

    # Point the config at the toy datasets registered below.
    train_name = "_toy_dataset_train_"
    test_name = "_toy_dataset_test_"
    cfg.DATASETS.TRAIN = [train_name]
    cfg.DATASETS.TEST = [test_name]

    if is_train:
        with register_toy_coco_dataset(
            train_name, num_images=3, image_size=(width, height)
        ):
            yield runner.build_detection_train_loader(cfg)
    else:
        with register_toy_coco_dataset(
            test_name, num_images=3, image_size=(width, height)
        ):
            yield runner.build_detection_test_loader(cfg, dataset_name=test_name)
def prepare_for_launch(args):
    """
    Load config, figure out working directory, create runner.
        - when args.config_file is empty, returned cfg will be the default one
        - returned output_dir will always be non empty, args.output_dir has
            higher priority than cfg.OUTPUT_DIR.
    """
    logger.info(args)
    runner = create_runner(args.runner)
    cfg = runner.get_default_cfg()
    if args.config_file:
        # NOTE(review): the file is opened via reroute_config_path() for
        # printing, but merge_from_file() receives the original path —
        # confirm merge_from_file reroutes internally, otherwise the printed
        # config and the merged config may come from different files.
        with PathManager.open(reroute_config_path(args.config_file), "r") as f:
            print("Loaded config file {}:\n{}".format(args.config_file, f.read()))
        cfg.merge_from_file(args.config_file)
        # CLI overrides (KEY VALUE pairs) take priority over the file.
        cfg.merge_from_list(args.opts)
    else:
        # No config file given: build the config purely from CLI arguments.
        cfg = create_cfg_from_cli_args(args, default_cfg=cfg)
    cfg.freeze()
    # At least one source for the output directory must exist.
    assert args.output_dir or args.config_file
    output_dir = args.output_dir or cfg.OUTPUT_DIR
    return cfg, output_dir, runner
def test_export_torchvision_format():
    # End-to-end export: d2go model -> int8 torchscript predictor ->
    # torchvision-style detection wrapper -> mobile-optimized scripted model.
    cfg_name = 'faster_rcnn_fbnetv3a_dsmask_C4.yaml'
    pytorch_model = model_zoo.get(cfg_name, trained=True)

    from typing import List, Dict

    class Wrapper(torch.nn.Module):
        # Adapts the exported d2go predictor to the torchvision detection
        # interface: takes a list of CHW float tensors, returns (inputs,
        # [{"boxes", "labels", "scores"}]).
        def __init__(self, model):
            super().__init__()
            self.model = model
            # Mapping from the model's contiguous class indices back to the
            # original (non-contiguous) COCO category ids.
            coco_idx_list = [
                1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19,
                20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38,
                39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
                56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75,
                76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90, 91,
            ]
            self.coco_idx = torch.tensor(coco_idx_list)

        def forward(self, inputs: List[torch.Tensor]):
            # Scale [0, 1] input to [0, 255] and add a batch dimension.
            x = inputs[0].unsqueeze(0) * 255
            # Resize so the shorter side becomes 320 pixels.
            scale = 320.0 / min(x.shape[-2], x.shape[-1])
            x = torch.nn.functional.interpolate(
                x,
                scale_factor=scale,
                mode="bilinear",
                align_corners=True,
                recompute_scale_factor=True,
            )
            out = self.model(x[0])
            res: Dict[str, torch.Tensor] = {}
            # Boxes are predicted in resized coordinates; map back to the
            # original image scale.
            res["boxes"] = out[0] / scale
            res["labels"] = torch.index_select(self.coco_idx, 0, out[1])
            res["scores"] = out[2]
            return inputs, [res]

    # NOTE(review): h, w are computed but unused below — presumably leftover
    # from a tracing-input setup; confirm before removing.
    size_divisibility = max(pytorch_model.backbone.size_divisibility, 10)
    h, w = size_divisibility, size_divisibility * 2
    runner = create_runner("d2go.runner.GeneralizedRCNNRunner")
    cfg = model_zoo.get_config(cfg_name)
    datasets = list(cfg.DATASETS.TRAIN)
    data_loader = runner.build_detection_test_loader(cfg, datasets)
    # Quantize (int8) and export the model via tracing.
    predictor_path = convert_and_export_predictor(
        cfg,
        copy.deepcopy(pytorch_model),
        "torchscript_int8@tracing",
        './',
        data_loader,
    )
    orig_model = torch.jit.load(os.path.join(predictor_path, "model.jit"))
    wrapped_model = Wrapper(orig_model)
    # optionally do a forward
    wrapped_model([torch.rand(3, 600, 600)])
    scripted_model = torch.jit.script(wrapped_model)
    optimized_model = optimize_for_mobile(scripted_model)
    optimized_model.save("D2Go/d2go_optimized.pt")
def test_detr_res50_export(self):
    """Scripting a DETR model with a ResNet-50 backbone must preserve outputs."""
    runner = create_runner("d2go.projects.detr.runner.DETRRunner")
    cfg = runner.get_default_cfg()
    cfg.MODEL.DEVICE = "cpu"
    # DETR: 6 encoder / 6 decoder layers, 100 queries, 2048 FFN dim.
    self._set_detr_cfg(cfg, 6, 6, 100, 2048)
    # ResNet-50 backbone configuration.
    cfg.MODEL.BACKBONE.NAME = "build_resnet_backbone"
    cfg.MODEL.RESNETS.DEPTH = 50
    cfg.MODEL.RESNETS.STRIDE_IN_1X1 = False
    cfg.MODEL.RESNETS.OUT_FEATURES = ["res2", "res3", "res4", "res5"]
    # Build the model, take the inner DETR module, and script it.
    eager_model = runner.build_model(cfg).eval().detr
    scripted = torch.jit.script(eager_model)
    self._assert_model_output(eager_model, scripted)
def test_disk_cached_dataloader(self):
    """Test the data loader backed by disk cache.

    Verifies that each loader creates exactly one cache dir while alive and
    releases it when deleted.

    Fix: the batch checks used ``assertTrue(value, expected)``, which treats
    the second argument as the failure *message* and therefore never compared
    the values — replaced with ``assertEqual``.
    """
    height = 6
    width = 8
    runner = create_runner("d2go.runner.GeneralizedRCNNRunner")
    cfg = runner.get_default_cfg()
    cfg.OUTPUT_DIR = self.output_dir
    cfg.DATALOADER.NUM_WORKERS = 2

    def _test_data_loader(data_loader):
        # The "height"/"width" fields carry the original image size.
        first_batch = next(iter(data_loader))
        self.assertEqual(first_batch[0]["height"], height)
        self.assertEqual(first_batch[0]["width"], width)

    # enable the disk cache
    cfg.merge_from_list(["D2GO_DATA.DATASETS.DISK_CACHE.ENABLED", "True"])
    with enable_disk_cached_dataset(cfg):
        # no cache dir in the beginning
        self.assertEqual(self._count_cache_dirs(), 0)

        with create_detection_data_loader_on_toy_dataset(
            cfg, height, width, is_train=True
        ) as train_loader:
            # train loader should create one cache dir
            self.assertEqual(self._count_cache_dirs(), 1)
            _test_data_loader(train_loader)

            with create_detection_data_loader_on_toy_dataset(
                cfg, height, width, is_train=False
            ) as test_loader:
                # test loader should create another cache dir
                self.assertEqual(self._count_cache_dirs(), 2)
                _test_data_loader(test_loader)

            # test loader should release its cache
            del test_loader
            self.assertEqual(self._count_cache_dirs(), 1)

        # no cache dir in the end
        del train_loader
        self.assertEqual(self._count_cache_dirs(), 0)
def create_fake_detection_data_loader(height, width, is_train):
    """Yield a train/test loader over a toy COCO dataset, resized so the
    loader's output matches the requested height/width exactly."""
    runner = create_runner("d2go.runner.GeneralizedRCNNRunner")
    cfg = runner.get_default_cfg()
    cfg.DATASETS.TRAIN = ["default_dataset_train"]
    cfg.DATASETS.TEST = ["default_dataset_test"]

    # Constrain resizing so images keep their (height, width) extent.
    shorter, longer = sorted((width, height))
    cfg.INPUT.MIN_SIZE_TRAIN = (shorter,)
    cfg.INPUT.MAX_SIZE_TRAIN = longer
    cfg.INPUT.MIN_SIZE_TEST = shorter
    cfg.INPUT.MAX_SIZE_TEST = longer

    if is_train:
        with register_toy_coco_dataset("default_dataset_train", num_images=3):
            yield runner.build_detection_train_loader(cfg)
    else:
        with register_toy_coco_dataset("default_dataset_test", num_images=3):
            yield runner.build_detection_test_loader(
                cfg, dataset_name="default_dataset_test"
            )
def test_detr_fbnet_export(self):
    """Scripting a DETR model with an FBNet backbone must preserve outputs;
    also prints a FLOP breakdown for the eager model."""
    runner = create_runner("d2go.projects.detr.runner.DETRRunner")
    cfg = runner.get_default_cfg()
    cfg.MODEL.DEVICE = "cpu"
    # DETR: 3 encoder / 3 decoder layers, 50 queries, 256 FFN dim.
    self._set_detr_cfg(cfg, 3, 3, 50, 256)
    # FBNet backbone configuration.
    cfg.MODEL.BACKBONE.NAME = "FBNetV2C4Backbone"
    cfg.MODEL.FBNET_V2.ARCH = "FBNetV3_A_dsmask_C5"
    cfg.MODEL.FBNET_V2.WIDTH_DIVISOR = 8
    cfg.MODEL.FBNET_V2.OUT_FEATURES = ["trunk4"]
    # Build the model and take the inner DETR module.
    eager_model = runner.build_model(cfg).eval().detr
    print(eager_model)
    scripted = torch.jit.script(eager_model)
    self._assert_model_output(eager_model, scripted)
    # print flops
    table = flop_count_table(
        FlopCountAnalysis(eager_model, ([torch.rand(3, 224, 320)],))
    )
    print(table)
def test_default_dataset(self):
    """Smoke-test train/test loaders built from a registered toy COCO dataset."""
    runner = create_runner("d2go.runner.GeneralizedRCNNRunner")
    cfg = runner.get_default_cfg()
    cfg.DATASETS.TRAIN = ["default_dataset_train"]
    cfg.DATASETS.TEST = ["default_dataset_test"]

    with register_toy_coco_dataset("default_dataset_train", num_images=3):
        train_loader = runner.build_detection_train_loader(cfg)
        # Training loaders have infinite length: stop after 7 batches.
        for batch_index, batch in enumerate(train_loader):
            self.assertIsNotNone(batch)
            if batch_index == 6:
                break

    with register_toy_coco_dataset("default_dataset_test", num_images=3):
        test_loader = runner.build_detection_test_loader(
            cfg, dataset_name="default_dataset_test"
        )
        # The test loader is finite; it must cover all 3 images once.
        collected = list(test_loader)
        self.assertEqual(len(collected), 3)
def get_config(config_path, trained: bool = False, runner="d2go.runner.GeneralizedRCNNRunner"):
    """
    Returns a config object for a model in model zoo.

    Args:
        config_path (str): config file name relative to d2go's "configs/"
            directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
        trained (bool): If True, will set ``MODEL.WEIGHTS`` to trained model zoo
            weights. If False, the checkpoint specified in the config file's
            ``MODEL.WEIGHTS`` is used instead; this will typically (though not
            always) initialize a subset of weights using an ImageNet
            pre-trained model, while randomly initializing the other weights.

    Returns:
        CfgNode: a config object
    """
    # Start from the runner's default config, then overlay the zoo config file.
    runner_instance = create_runner(runner)
    cfg = runner_instance.get_default_cfg()
    cfg.merge_from_file(get_config_file(config_path))
    if trained:
        # Point at the released, trained checkpoint for this config.
        cfg.MODEL.WEIGHTS = get_checkpoint_url(config_path)
    return cfg
def test_create_runner(self):
    """create_runner should resolve a fully-qualified class name to the class.

    Fix: ``assertTrue(a == b)`` hides the compared values on failure; since
    classes compare by identity, ``assertIs`` is both equivalent and gives a
    diagnostic failure message.
    """
    task_cls = create_runner(
        f"{GeneralizedRCNNTask.__module__}.{GeneralizedRCNNTask.__name__}"
    )
    self.assertIs(task_cls, GeneralizedRCNNTask)
    # NOTE(review): this fragment is the tail of a function (presumably
    # build_config) whose ``def`` line is outside the visible chunk.
    if opts:
        # Apply CLI KEY VALUE overrides on top of the loaded config.
        cfg.merge_from_list(opts)
    return cfg


def argument_parser():
    # Extend the shared distributed-launch parser with a GPU-count option.
    parser = basic_argument_parser(distributed=True, requires_output_dir=False)
    parser.add_argument(
        "--num-gpus", type=int, default=0, help="number of GPUs per machine"
    )
    return parser


if __name__ == "__main__":
    args = argument_parser().parse_args()
    # Resolve the runner class from its dotted path, defaulting to
    # GeneralizedRCNNTask when --runner is not given.
    task_cls = create_runner(args.runner) if args.runner else GeneralizedRCNNTask
    cfg = build_config(args.config_file, task_cls, args.opts)
    ret = main(
        cfg,
        args.output_dir,
        task_cls,
        eval_only=False,  # eval_only
        num_machines=args.num_machines,
        num_gpus=args.num_gpus,
        num_processes=args.num_processes,
    )
    # Only the rank-0 process reports the result.
    if get_rank() == 0:
        print(ret)
def test_create_runner(self):
    """create_runner on a runner-class path should return a usable instance.

    Fix: ``assertTrue(isinstance(...))`` reports only "False is not true" on
    failure; ``assertIsInstance`` is equivalent and reports the actual type.
    """
    runner = create_runner(".".join([
        default_runner.Detectron2GoRunner.__module__,
        default_runner.Detectron2GoRunner.__name__,
    ]))
    self.assertIsInstance(runner, default_runner.Detectron2GoRunner)