Example #1
# this generator is consumed as a context manager (see Example #10), so it needs
# the contextlib.contextmanager decorator
@contextlib.contextmanager
def create_fake_detection_data_loader(height, width, is_train):
    with make_temp_directory("detectron2go_tmp_dataset") as dataset_dir:
        runner = create_runner("d2go.runner.GeneralizedRCNNRunner")
        cfg = runner.get_default_cfg()
        cfg.DATASETS.TRAIN = ["default_dataset_train"]
        cfg.DATASETS.TEST = ["default_dataset_test"]

        with make_temp_directory("detectron2go_tmp_dataset") as dataset_dir:
            image_dir = os.path.join(dataset_dir, "images")
            os.makedirs(image_dir)
            image_generator = LocalImageGenerator(image_dir,
                                                  width=width,
                                                  height=height)

            if is_train:
                with register_toy_dataset("default_dataset_train",
                                          image_generator,
                                          num_images=3):
                    train_loader = runner.build_detection_train_loader(cfg)
                    yield train_loader
            else:
                with register_toy_dataset("default_dataset_test",
                                          image_generator,
                                          num_images=3):
                    test_loader = runner.build_detection_test_loader(
                        cfg, dataset_name="default_dataset_test")
                    yield test_loader
Example #2
    def test_script_only_model(self):
        def _validate(predictor):
            outputs = predictor(
                [torch.tensor(1),
                 torch.tensor(2),
                 torch.tensor(3)])
            self.assertEqual(len(outputs), 3)
            self.assertEqual(
                outputs, [torch.tensor(0),
                          torch.tensor(2),
                          torch.tensor(6)])

        # Method 1: explicitly set jit_mode to "script"
        with make_temp_directory("test_test_script_only_model") as tmp_dir:
            model = ScriptingOnlyModel()
            predictor = self._export_simple_model("explicit",
                                                  model,
                                                  None,
                                                  tmp_dir,
                                                  predictor_type="torchscript")
            _validate(predictor)

        # Method 2: using torchscript@scripting as predictor type
        with make_temp_directory("test_test_script_only_model") as tmp_dir:
            model = ScriptingOnlyModel()
            predictor = self._export_simple_model(
                "implicit",
                model,
                None,
                tmp_dir,
                predictor_type="torchscript@scripting")
            _validate(predictor)
Example #3
# used as a context manager (see Example #20), hence the decorator
@contextlib.contextmanager
def _register_toy_dataset(
    dataset_name, image_generator, num_images, num_classes=-1, num_keypoints=0
):
    json_dataset, meta_data = create_toy_dataset(
        image_generator,
        num_images=num_images,
        num_classes=num_classes,
        num_keypoints=num_keypoints,
    )

    with make_temp_directory("detectron2go_tmp_dataset") as tmp_dir:
        json_file = os.path.join(tmp_dir, "{}.json".format(dataset_name))
        with open(json_file, "w") as f:
            json.dump(json_dataset, f)

        split_dict = {
            IM_DIR: image_generator.get_image_dir(),
            ANN_FN: json_file,
            "meta_data": meta_data,
        }
        register_dataset_split(dataset_name, split_dict)

        try:
            yield
        finally:
            DatasetCatalog.remove(dataset_name)
            MetadataCatalog.remove(dataset_name)
Example #4
    @classmethod
    def test_export_and_load(
        cls, model, input_args, export_method, export_kwargs, output_checker
    ):
        """
        Illustrate the life-cycle of export and load, used for testing.
        """
        with make_temp_directory("test_export_and_load") as save_path:
            # run the original model
            assert isinstance(model, nn.Module), model
            assert isinstance(input_args, (list, tuple)), input_args
            original_output = model(*input_args)
            # export the model
            model.eval()  # TODO: decide where eval() should be called
            load_kwargs = cls.export(
                model, input_args, save_path, export_method, **export_kwargs
            )
            # sanity check for load_kwargs
            assert isinstance(load_kwargs, dict), load_kwargs
            assert json.dumps(load_kwargs), load_kwargs
            # load the model back
            loaded_model = cls.load(save_path, **load_kwargs)
            # run the loaded model
            assert isinstance(loaded_model, nn.Module), loaded_model
            new_output = loaded_model(*input_args)
            # compare outputs
            output_checker(new_output, original_output)
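For context, a minimal usage sketch of this helper; the MyExportTest class name, the "torchscript" export_method value, and the empty export_kwargs are illustrative assumptions and do not come from the listing above.

import torch
import torch.nn as nn

def _check_outputs(new_output, original_output):
    # the output_checker simply compares exported vs. original outputs
    torch.testing.assert_close(new_output, original_output)

model = nn.Linear(4, 2)
inputs = (torch.rand(1, 4),)
# MyExportTest is a hypothetical subclass providing cls.export / cls.load
MyExportTest.test_export_and_load(
    model, inputs, export_method="torchscript", export_kwargs={}, output_checker=_check_outputs
)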
Example #5
    def test_coco_conversions(self):
        test_data_0 = {
            "info": {},
            "imgs": {
                "img_1": {
                    "file_name": "0.jpg",
                    "width": 600,
                    "height": 600,
                    "id": "img_1",
                }
            },
            "anns": {0: {"id": 0, "image_id": "img_1", "bbox": [30, 30, 60, 20]}},
            "imgToAnns": {"img_1": [0]},
            "cats": {},
        }
        test_data_1 = copy.deepcopy(test_data_0)
        test_data_1["imgs"][123] = test_data_1["imgs"].pop("img_1")
        test_data_1["imgs"][123]["id"] = 123
        test_data_1["anns"][0]["image_id"] = 123
        test_data_1["imgToAnns"][123] = test_data_1["imgToAnns"].pop("img_1")

        for test_data, exp_output in [(test_data_0, [0, 0]), (test_data_1, [123, 123])]:
            with make_temp_directory("detectron2go_tmp_dataset") as tmp_dir:
                src_json = os.path.join(tmp_dir, "source.json")
                out_json = os.path.join(tmp_dir, "output.json")

                with open(src_json, "w") as h_in:
                    json.dump(test_data, h_in)

                out_json = extended_coco.convert_coco_text_to_coco_detection_json(
                    src_json, out_json
                )

                self.assertEqual(out_json["images"][0]["id"], exp_output[0])
                self.assertEqual(out_json["annotations"][0]["image_id"], exp_output[1])
Example #6
    def test_build_model(self):
        cfg = self._get_default_cfg()
        cfg.INPUT.MIN_SIZE_TRAIN = (60,)
        cfg.MODEL.KMEANS_ANCHORS.KMEANS_ANCHORS_ON = True
        cfg.MODEL.KMEANS_ANCHORS.NUM_CLUSTERS = 3
        cfg.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG = 5
        cfg.MODEL.KMEANS_ANCHORS.DATASETS = ("toy_dataset",)

        cfg.MODEL.DEVICE = "cpu"
        cfg.MODEL.ANCHOR_GENERATOR.NAME = "KMeansAnchorGenerator"

        with make_temp_directory("detectron2go_tmp_dataset") as dataset_dir:
            image_dir = os.path.join(dataset_dir, "images")
            os.makedirs(image_dir)
            image_generator = LocalImageGenerator(image_dir, width=80, height=60)
            with register_toy_dataset(
                "toy_dataset",
                image_generator,
                num_images=cfg.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG,
            ):
                model = self.runner.build_model(cfg)
                trainer = SimpleTrainer(model, data_loader=[], optimizer=None)
                trainer_hooks = [compute_kmeans_anchors_hook(self.runner, cfg)]
                trainer.register_hooks(trainer_hooks)
                trainer.before_train()
                anchor_generator = model.proposal_generator.anchor_generator
                cell_anchors = [x for x in anchor_generator.cell_anchors]
                gt_anchors = np.array(
                    [
                        [-20, -15, 20, 15]  # toy_dataset's bbox is half size of image
                        for _ in range(cfg.MODEL.KMEANS_ANCHORS.NUM_CLUSTERS)
                    ]
                )
                np.testing.assert_allclose(cell_anchors[0], gt_anchors)
Example #7
    def test_coco_injection(self):

        with make_temp_directory("detectron2go_tmp_dataset") as tmp_dir:
            image_dir, json_file = create_test_images_and_dataset_json(tmp_dir)

            runner = Detectron2GoRunner()
            cfg = runner.get_default_cfg()
            cfg.merge_from_list(
                [
                    str(x)
                    for x in [
                        "D2GO_DATA.DATASETS.COCO_INJECTION.NAMES",
                        ["inj_ds1", "inj_ds2"],
                        "D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS",
                        [image_dir, "/mnt/fair"],
                        "D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES",
                        [json_file, "inj_ds2"],
                    ]
                ]
            )

            runner.register(cfg)
            inj_ds1 = DatasetCatalog.get("inj_ds1")
            self.assertEqual(len(inj_ds1), 10)
            for dic in inj_ds1:
                self.assertEqual(dic["width"], 80)
                self.assertEqual(dic["height"], 60)
Example #8
    def test_sub_dataset(self):
        with make_temp_directory("detectron2go_tmp_dataset") as tmp_dir:
            image_dir, json_file = create_test_images_and_dataset_json(tmp_dir)

            runner = Detectron2GoRunner()
            cfg = runner.get_default_cfg()
            cfg.merge_from_list(
                [
                    str(x)
                    for x in [
                        "D2GO_DATA.DATASETS.COCO_INJECTION.NAMES",
                        ["inj_ds"],
                        "D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS",
                        [image_dir],
                        "D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES",
                        [json_file],
                        "DATASETS.TEST",
                        ("inj_ds",),
                        "D2GO_DATA.TEST.MAX_IMAGES",
                        1,
                    ]
                ]
            )

            runner.register(cfg)
            with maybe_subsample_n_images(cfg) as new_cfg:
                test_loader = runner.build_detection_test_loader(
                    new_cfg, new_cfg.DATASETS.TEST[0]
                )
                self.assertEqual(len(test_loader), 1)
Example #9
    def test_default_dataset(self):
        runner = create_runner("d2go.runner.GeneralizedRCNNRunner")
        cfg = runner.get_default_cfg()
        cfg.DATASETS.TRAIN = ["default_dataset_train"]
        cfg.DATASETS.TEST = ["default_dataset_test"]

        with make_temp_directory("detectron2go_tmp_dataset") as dataset_dir:
            image_dir = os.path.join(dataset_dir, "images")
            os.makedirs(image_dir)
            image_generator = LocalImageGenerator(image_dir,
                                                  width=80,
                                                  height=60)

            with register_toy_dataset("default_dataset_train",
                                      image_generator,
                                      num_images=3):
                train_loader = runner.build_detection_train_loader(cfg)
                for i, data in enumerate(train_loader):
                    self.assertIsNotNone(data)
                    # the training loader has infinite length, so stop after a few batches
                    if i == 6:
                        break

            with register_toy_dataset("default_dataset_test",
                                      image_generator,
                                      num_images=3):
                test_loader = runner.build_detection_test_loader(
                    cfg, dataset_name="default_dataset_test")
                all_data = []
                for data in test_loader:
                    all_data.append(data)
                self.assertEqual(len(all_data), 3)
Example #10
    def _test_export(self, predictor_type, compare_match=True):
        size_divisibility = max(self.test_model.backbone.size_divisibility, 10)
        h, w = size_divisibility, size_divisibility * 2
        with create_fake_detection_data_loader(h, w, is_train=False) as data_loader:
            inputs = next(iter(data_loader))

            with make_temp_directory(
                "test_export_{}".format(predictor_type)
            ) as tmp_dir:
                # TODO: the export may change the model itself; this needs to be fixed
                model_to_export = copy.deepcopy(self.test_model)
                predictor_path = convert_and_export_predictor(
                    self.cfg, model_to_export, predictor_type, tmp_dir, data_loader
                )

                predictor = create_predictor(predictor_path)
                predictor_outputs = predictor(inputs)
                _validate_outputs(inputs, predictor_outputs)

                if compare_match:
                    with torch.no_grad():
                        pytorch_outputs = self.test_model(inputs)

                    assert_instances_allclose(
                        predictor_outputs[0]["instances"],
                        pytorch_outputs[0]["instances"],
                    )
Example #11
    def test_fetch_checkpoints_local(self):
        with make_temp_directory("test") as output_dir:
            output_dir = Path(output_dir)
            for i in range(5):
                create_file(output_dir / f"model_{i}.pth")
            create_file(output_dir / "model_final.pth")
            checkpoints = list(fetch_checkpoints_till_final(output_dir))
            assert len(checkpoints) == 6
Example #12
    def test_import_from_file(self):
        with make_temp_directory("test_import_from_file") as dir:
            filename = os.path.join(dir, "my_module.py")
            with open(filename, "w") as f:
                f.write("ANSWER = 42\n")

            my_module = import_file("my_module", filename)
            self.assertEqual(my_module.ANSWER, 42)
Example #13
def trace_and_save_torchscript(
    model: nn.Module,
    inputs: Tuple[torch.Tensor],
    output_path: str,
    mobile_optimization: Optional[MobileOptimizationConfig] = None,
    _extra_files: Optional[Dict[str, bytes]] = None,
):
    logger.info("Tracing and saving TorchScript to {} ...".format(output_path))
    PathManager.mkdirs(output_path)
    if _extra_files is None:
        _extra_files = {}

    # TODO: patch_builtin_len depends on D2; we should either copy the function or
    # dynamically register D2's version.
    from detectron2.export.torchscript_patch import patch_builtin_len

    with torch.no_grad(), patch_builtin_len():
        script_model = torch.jit.trace(model, inputs)

    with make_temp_directory("trace_and_save_torchscript") as tmp_dir:

        @contextlib.contextmanager
        def _synced_local_file(rel_path):
            remote_file = os.path.join(output_path, rel_path)
            local_file = os.path.join(tmp_dir, rel_path)
            yield local_file
            PathManager.copy_from_local(local_file,
                                        remote_file,
                                        overwrite=True)

        with _synced_local_file("model.jit") as model_file:
            torch.jit.save(script_model, model_file, _extra_files=_extra_files)

        with _synced_local_file("data.pth") as data_file:
            torch.save(inputs, data_file)

        if mobile_optimization is not None:
            logger.info("Applying optimize_for_mobile ...")
            liteopt_model = optimize_for_mobile(
                script_model,
                optimization_blocklist=mobile_optimization.optimization_blocklist,
                preserved_methods=mobile_optimization.preserved_methods,
                backend=mobile_optimization.backend,
            )
            with _synced_local_file("mobile_optimized.ptl") as lite_path:
                liteopt_model._save_for_lite_interpreter(lite_path)
            # liteopt_model(*inputs)  # sanity check
            op_names = torch.jit.export_opnames(liteopt_model)
            logger.info("Operator names from lite interpreter:\n{}".format(
                "\n".join(op_names)))

            logger.info("Applying augment_model_with_bundled_inputs ...")
            augment_model_with_bundled_inputs(liteopt_model, [inputs])
            liteopt_model.run_on_bundled_input(0)  # sanity check
            with _synced_local_file(
                    "mobile_optimized_bundled.ptl") as lite_path:
                liteopt_model._save_for_lite_interpreter(lite_path)
Example #14
    def test_model_info(self):
        with make_temp_directory("test_model_info") as tmp_dir:
            _save_test_model(tmp_dir)
            model_info = ModelInfo(path=tmp_dir, type="torchscript")
            # NOTE: decide if load_model is a public API or class method of ModelInfo
            from mobile_cv.predictor.model_wrappers import load_model

            model = load_model(model_info, model_root="")
            self.assertEqual(torch.tensor(2), model(torch.tensor(1)))
Example #15
    def test_simple_two_part_model(self):
        with make_temp_directory("test_simple_two_part_model") as tmp_dir:
            model = TwoPartSimpleModel()
            predictor = self._export_simple_model(
                None, model, torch.tensor(1), tmp_dir, predictor_type="torchscript"
            )
            x = torch.tensor(42)
            self.assertEqual(predictor(x), model(x))
Example #16
    def test_fetch_lightning_checkpoints_local(self):
        with make_temp_directory("test") as output_dir:
            output_dir = Path(output_dir)
            ext = ModelCheckpoint.FILE_EXTENSION
            for i in range(5):
                create_file(output_dir / f"step={i}{ext}")
            create_file(output_dir / f"model_final{ext}")
            create_file(output_dir / f"{ModelCheckpoint.CHECKPOINT_NAME_LAST}{ext}")
            checkpoints = list(fetch_checkpoints_till_final(output_dir))
            self.assertEqual(len(checkpoints), 6)
Example #17
    def test_export_torchvision_format(self):
        runner = GeneralizedRCNNRunner()
        cfg = runner.get_default_cfg()
        cfg.merge_from_file("detectron2go://mask_rcnn_fbnetv3a_dsmask_C4.yaml")
        cfg.merge_from_list(get_quick_test_config_opts())

        cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
        pytorch_model = runner.build_model(cfg, eval_only=True)

        from typing import Dict, List

        class Wrapper(torch.nn.Module):
            def __init__(self, model):
                super().__init__()
                self.model = model

            def forward(self, inputs: List[torch.Tensor]):
                x = inputs[0].unsqueeze(0) * 255
                scale = 320.0 / min(x.shape[-2], x.shape[-1])
                x = torch.nn.functional.interpolate(
                    x,
                    scale_factor=scale,
                    mode="bilinear",
                    align_corners=True,
                    recompute_scale_factor=True,
                )
                out = self.model(x[0])
                res: Dict[str, torch.Tensor] = {}
                res["boxes"] = out[0] / scale
                res["labels"] = out[2]
                res["scores"] = out[1]
                return inputs, [res]

        size_divisibility = max(pytorch_model.backbone.size_divisibility, 10)
        h, w = size_divisibility, size_divisibility * 2
        with create_detection_data_loader_on_toy_dataset(
                cfg, h, w, is_train=False) as data_loader:
            with make_temp_directory(
                    "test_export_torchvision_format") as tmp_dir:
                predictor_path = convert_and_export_predictor(
                    cfg,
                    copy.deepcopy(pytorch_model),
                    "torchscript",
                    tmp_dir,
                    data_loader,
                )

                orig_model = torch.jit.load(
                    os.path.join(predictor_path, "model.jit"))
                wrapped_model = Wrapper(orig_model)
                # optionally do a forward
                wrapped_model([torch.rand(3, 600, 600)])
                scripted_model = torch.jit.script(wrapped_model)
                scripted_model.save(os.path.join(tmp_dir, "new_file.pt"))
Example #18
    def test_default_cfg_dump_and_load(self):
        default_cfg = GeneralizedRCNNRunner().get_default_cfg()

        cfg = default_cfg.clone()
        with make_temp_directory("detectron2go_tmp") as tmp_dir:
            file_name = os.path.join(tmp_dir, "config.yaml")
            # this is the same as the one in fblearner_launch_utils_detectron2go.py
            with open(file_name, "w") as f:
                f.write(cfg.dump(default_flow_style=False))

            # check if the dumped config file can be merged
            cfg.merge_from_file(file_name)
Example #19
    def test_load_arch_defs(self):
        """Test arch def str-to-dict conversion compatible with merging"""
        default_cfg = GeneralizedRCNNRunner().get_default_cfg()
        cfg = default_cfg.clone()
        cfg.merge_from_file(get_resource_path("arch_def_merging.yaml"))

        with make_temp_directory("detectron2go_tmp") as tmp_dir:
            # Dump out config with arch def
            file_name = os.path.join(tmp_dir, "test_archdef_config.yaml")
            with open(file_name, "w") as f:
                f.write(cfg.dump())

            # Attempt to reload the config
            another_cfg = default_cfg.clone()
            another_cfg.merge_from_file(file_name)
Example #20
# this generator yields once and is meant to be used as a context manager
@contextlib.contextmanager
def register_toy_coco_dataset(
    dataset_name, num_images=3, image_size=(5, 10), num_classes=-1, num_keypoints=0
):
    width, height = image_size
    with make_temp_directory("detectron2go_tmp_dataset") as dataset_dir:
        image_dir = os.path.join(dataset_dir, "images")
        os.makedirs(image_dir)
        image_generator = LocalImageGenerator(image_dir, width=width, height=height)

        with _register_toy_dataset(
            dataset_name,
            image_generator,
            num_images=num_images,
            num_classes=num_classes,
            num_keypoints=num_keypoints,
        ):
            yield
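A short usage sketch, assuming the contextlib.contextmanager decorator added above and that each generated image yields one dataset dict; the dataset name is illustrative.

from detectron2.data import DatasetCatalog

# the dataset is registered only inside the with-block and removed afterwards
with register_toy_coco_dataset("my_toy_dataset", num_images=3, image_size=(5, 10)):
    dataset_dicts = DatasetCatalog.get("my_toy_dataset")
    assert len(dataset_dicts) == 3  # assumes one dict per generated image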
Example #21
    def test_create_predictor(self):
        with make_temp_directory("test_model_info") as tmp_dir:
            # define the predictor
            model_a_path = os.path.join(tmp_dir, "model_A")
            predictor_info = PredictorInfo(
                model=ModelInfo(path=model_a_path, type="torchscript"),
                preprocess_info=FuncInfo.gen_func_info(TestPreprocess,
                                                       params={"weight": 2.0}),
            )

            # simulating exporting to predictor
            _save_test_model(model_a_path)
            with open(os.path.join(tmp_dir, "predictor_info.json"), "w") as f:
                json.dump(predictor_info.to_dict(), f)

            predictor = create_predictor(tmp_dir)
            # y = (x * 2) + 1
            self.assertEqual(torch.tensor(5), predictor(torch.tensor(2)))
Example #22
def export_optimize_and_save_torchscript(
    model: nn.Module,
    inputs: Optional[Tuple[Any]],
    output_path: str,
    *,
    jit_mode: Optional[str] = DEFAULT_JIT_MODE,
    torchscript_filename: str = "model.jit",
    mobile_optimization: Optional[MobileOptimizationConfig] = None,
    _extra_files: Optional[Dict[str, bytes]] = None,
) -> str:
    """
    The primary function for exporting PyTorch model to TorchScript.

    Args:
        model (nn.Module): the model to export. When given a ScriptModule, skip the export
            and only optimize and save model.
        inputs (tuple or None): input arguments of model, can be called as model(*inputs).
            Will not be used when scripting the model.
        output_path (str): directory that the model will be saved.
        jit_mode (str): trace/script or None if the model is already a ScriptModule.
        torchscript_filename (str): the filename of non-mobile-optimized model.
        mobile_optimization (MobileOptimizationConfig): when provided, the mobile optimization
            will be applied.
        _extra_files (Dict[str, bytes]): when provided, extra files will be saved.

    Returns:
        (str): filename of the final model no matter optmized or not.
    """

    logger.info("Export, optimize and saving TorchScript to {} ...".format(
        output_path))
    PathManager.mkdirs(output_path)
    if _extra_files is None:
        _extra_files = {}

    if isinstance(model, torch.jit.ScriptModule):
        if jit_mode is not None:
            logger.info(
                "The input model is already a ScriptModule, skip the jit step")
    elif jit_mode == "trace":
        logger.info("Tracing the model ...")
        with torch.no_grad():
            script_model = torch.jit.trace(model, inputs)
    elif jit_mode == "script":
        logger.info("Scripting the model ...")
        script_model = torch.jit.script(model)
    else:
        raise ValueError("Unsupported jit_mode: {}".format(jit_mode))

    with make_temp_directory(
            "export_optimize_and_save_torchscript") as tmp_dir:

        @contextlib.contextmanager
        def _synced_local_file(rel_path):
            remote_file = os.path.join(output_path, rel_path)
            local_file = os.path.join(tmp_dir, rel_path)
            yield local_file
            PathManager.copy_from_local(local_file,
                                        remote_file,
                                        overwrite=True)

        with _synced_local_file(torchscript_filename) as model_file:
            logger.info(f"Saving torchscript model to: {torchscript_filename}")
            torch.jit.save(script_model, model_file, _extra_files=_extra_files)
        dump_torchscript_IR(script_model,
                            os.path.join(output_path, "torchscript_IR"))

        data_filename = "data.pth"
        with _synced_local_file(data_filename) as data_file:
            logger.info(f"Saving example data to: {data_filename}")
            torch.save(inputs, data_file)

        if mobile_optimization is not None:
            logger.info("Applying optimize_for_mobile ...")
            liteopt_model = optimize_for_mobile(
                script_model,
                optimization_blocklist=mobile_optimization.optimization_blocklist,
                preserved_methods=mobile_optimization.preserved_methods,
                backend=mobile_optimization.backend,
            )
            torchscript_filename = mobile_optimization.torchscript_filename
            with _synced_local_file(torchscript_filename) as lite_path:
                logger.info(
                    f"Saving mobile optimized model to: {torchscript_filename}"
                )
                liteopt_model._save_for_lite_interpreter(
                    lite_path, _extra_files=_extra_files)

            op_names = torch.jit.export_opnames(liteopt_model)
            logger.info("Operator names from lite interpreter:\n{}".format(
                "\n".join(op_names)))

            logger.info("Applying augment_model_with_bundled_inputs ...")
            # make all tensors zero-like to save storage
            iters = recursive_iterate(inputs)
            for x in iters:
                if isinstance(x, torch.Tensor):
                    iters.send(torch.zeros_like(x).contiguous())
            inputs = iters.value
            augment_model_with_bundled_inputs(liteopt_model, [inputs])

            # For non-CPU backends (e.g. Metal, Vulkan) the bundled inputs need to be
            # converted with `torch.to(<myDevice>)` in order to predict successfully.
            # This is a temporary bypass until PT Edge supports automatic backend
            # conversion in the bundled inputs interface, or until we can auto-add an
            # input tensor conversion op to Metal and Vulkan models.
            target_backend = mobile_optimization.backend.lower()
            if target_backend == "cpu":
                # Sanity check by running
                logger.info(
                    "Running sanity check for the mobile optimized model ...")
                liteopt_model(*liteopt_model.get_all_bundled_inputs()[0])
            name, ext = os.path.splitext(torchscript_filename)
            input_bundled_path = name + "_bundled" + ext
            with _synced_local_file(input_bundled_path) as lite_path:
                logger.info(
                    f"Saving input bundled model to: {input_bundled_path}")
                liteopt_model._save_for_lite_interpreter(lite_path)

        return torchscript_filename
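To make the call pattern concrete, here is a minimal usage sketch based only on the signature and docstring above; TinyModel and the ./exported output directory are illustrative assumptions, and the mobile-optimization path is not exercised.

import torch
import torch.nn as nn

class TinyModel(nn.Module):  # hypothetical toy model, for illustration only
    def forward(self, x):
        return x * 2

model = TinyModel()
example_inputs = (torch.rand(1, 3),)

# trace the model, save it as "model.jit" under ./exported, and get the filename back
saved_filename = export_optimize_and_save_torchscript(
    model,
    example_inputs,
    output_path="./exported",
    jit_mode="trace",
    torchscript_filename="model.jit",
)
print(saved_filename)  # "model.jit" when no mobile optimization is requested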
Example #23
def trace_and_save_torchscript(
    model: nn.Module,
    inputs: Tuple[torch.Tensor],
    output_path: str,
    torchscript_filename: str = "model.jit",
    mobile_optimization: Optional[MobileOptimizationConfig] = None,
    _extra_files: Optional[Dict[str, bytes]] = None,
):
    logger.info("Tracing and saving TorchScript to {} ...".format(output_path))
    PathManager.mkdirs(output_path)
    if _extra_files is None:
        _extra_files = {}

    with torch.no_grad():
        script_model = torch.jit.trace(model, inputs)

    with make_temp_directory("trace_and_save_torchscript") as tmp_dir:

        @contextlib.contextmanager
        def _synced_local_file(rel_path):
            remote_file = os.path.join(output_path, rel_path)
            local_file = os.path.join(tmp_dir, rel_path)
            yield local_file
            PathManager.copy_from_local(local_file,
                                        remote_file,
                                        overwrite=True)

        with _synced_local_file(torchscript_filename) as model_file:
            torch.jit.save(script_model, model_file, _extra_files=_extra_files)

        with _synced_local_file("data.pth") as data_file:
            torch.save(inputs, data_file)

        if mobile_optimization is not None:
            logger.info("Applying optimize_for_mobile ...")
            liteopt_model = optimize_for_mobile(
                script_model,
                optimization_blocklist=mobile_optimization.optimization_blocklist,
                preserved_methods=mobile_optimization.preserved_methods,
                backend=mobile_optimization.backend,
            )
            torchscript_filename = mobile_optimization.torchscript_filename
            with _synced_local_file(torchscript_filename) as lite_path:
                liteopt_model._save_for_lite_interpreter(
                    lite_path, _extra_files=_extra_files)
            # liteopt_model(*inputs)  # sanity check
            op_names = torch.jit.export_opnames(liteopt_model)
            logger.info("Operator names from lite interpreter:\n{}".format(
                "\n".join(op_names)))

            logger.info("Applying augment_model_with_bundled_inputs ...")
            # make all tensors zero-like to save storage
            iters = recursive_iterate(inputs)
            for x in iters:
                if isinstance(x, torch.Tensor):
                    iters.send(torch.zeros_like(x).contiguous())
            inputs = iters.value
            augment_model_with_bundled_inputs(liteopt_model, [inputs])
            liteopt_model(
                *liteopt_model.get_all_bundled_inputs()[0])  # sanity check
            name, ext = os.path.splitext(torchscript_filename)
            with _synced_local_file(name + "_bundled" + ext) as lite_path:
                liteopt_model._save_for_lite_interpreter(lite_path)

        return torchscript_filename
Example #24
def maskrcnn_export_caffe2_vs_torchvision_opset_format_example(self):
    with make_temp_directory("export_demo") as tmp_dir:
        # use a fake dataset for ci
        dataset_name = create_local_dataset(tmp_dir, 5, 224, 224)
        config_list = [
            "DATASETS.TRAIN",
            (dataset_name, ),
            "DATASETS.TEST",
            (dataset_name, ),
        ]
        # START_WIKI_EXAMPLE_TAG
        runner = GeneralizedRCNNRunner()
        cfg = runner.get_default_cfg()
        cfg.merge_from_file("detectron2go://mask_rcnn_fbnetv3a_dsmask_C4.yaml")
        cfg.merge_from_list(get_quick_test_config_opts())
        cfg.merge_from_list(config_list)

        # equivalent to running:
        #   exporter.par --runner GeneralizedRCNNRunner --config-file config.yaml --predictor-types torchscript torchscript@c2_ops --output-dir tmp_dir
        _ = main(
            cfg,
            tmp_dir,
            runner,
            predictor_types=["torchscript@c2_ops", "torchscript"],
        )

        # the paths can be fetched from the return value of main; here we just use hard-coded values
        torchvision_ops_model = torch.jit.load(
            os.path.join(tmp_dir, "torchscript", "model.jit"))
        caffe2_ops_model = torch.jit.load(
            os.path.join(tmp_dir, "torchscript@c2_ops", "model.jit"))

        # Running inference using torchvision-style format
        image = torch.zeros(1, 64, 96)  # chw 3D tensor
        # The exported model can run on both cpu/gpu
        device = "cuda:0" if torch.cuda.is_available() else "cpu"
        torchvision_ops_model = torchvision_ops_model.to(device)
        torchvision_style_outputs = torchvision_ops_model(
            image)  # suppose N instances are detected
        # NOTE: the outputs are flattened tensors of the real output (which is a dict);
        # they're ordered by the dict keys, which is deterministic for a given model but
        # can be hard to figure out from the model.jit file alone. The predictor_info.json
        # in the same directory contains the `outputs_schema`, which indicates how the
        # final output is constructed from the flattened tensors.
        (
            pred_boxes,  # torch.Size([N, 4])
            pred_classes,  # torch.Size([N])
            pred_masks,  # torch.Size([N, 1, Hmask, Wmask])
            scores,  # torch.Size([N])
            image_sizes,  # torch.Size([2])
        ) = torchvision_style_outputs
        self.assertTrue(
            all(x.device == torch.device(device)
                for x in torchvision_style_outputs[:4]),
            torchvision_style_outputs,
        )
        torch.testing.assert_close(image_sizes, torch.tensor([64, 96]))

        # Running inference using caffe2-style format
        data = torch.zeros(1, 1, 64, 96)
        im_info = torch.tensor([[64, 96, 1.0]])
        caffe2_style_outputs = caffe2_ops_model([data, im_info])
        # NOTE: the output order is determined by the order in which the tensors are created
        # in the forward function; it also follows the order of the original Caffe2 model.
        roi_bbox_nms = caffe2_style_outputs[0]  # torch.Size([N, 4])
        roi_score_nms = caffe2_style_outputs[1]  # torch.Size([N])
        roi_class_nms = caffe2_style_outputs[2]  # torch.Size([N])
        mask_fcn_probs = caffe2_style_outputs[3]  # torch.Size([N, Cmask, Hmask, Wmask])

        # relations between torchvision-style outputs and caffe2-style outputs
        torch.testing.assert_close(pred_boxes,
                                   roi_bbox_nms,
                                   check_device=False)
        torch.testing.assert_close(pred_classes,
                                   roi_class_nms.to(torch.int64),
                                   check_device=False)
        torch.testing.assert_close(
            pred_masks,
            mask_fcn_probs[:, roi_class_nms.to(torch.int64), :, :],
            check_device=False,
        )
        torch.testing.assert_close(scores, roi_score_nms, check_device=False)