Example #1
        def _test_export(self, predictor_type, compare_match=True):
            size_divisibility = max(self.test_model.backbone.size_divisibility, 10)
            h, w = size_divisibility, size_divisibility * 2
            with create_fake_detection_data_loader(h, w, is_train=False) as data_loader:
                inputs = next(iter(data_loader))

                with make_temp_directory(
                    "test_export_{}".format(predictor_type)
                ) as tmp_dir:
                    # TODO: the export may change the model itself; this needs to be fixed
                    model_to_export = copy.deepcopy(self.test_model)
                    predictor_path = convert_and_export_predictor(
                        self.cfg, model_to_export, predictor_type, tmp_dir, data_loader
                    )

                    predictor = create_predictor(predictor_path)
                    predictor_outputs = predictor(inputs)
                    _validate_outputs(inputs, predictor_outputs)

                    if compare_match:
                        with torch.no_grad():
                            pytorch_outputs = self.test_model(inputs)

                        assert_instances_allclose(
                            predictor_outputs[0]["instances"],
                            pytorch_outputs[0]["instances"],
                        )
Example #2
def test_export_torchvision_format():
    cfg_name = 'faster_rcnn_fbnetv3a_dsmask_C4.yaml'
    pytorch_model = model_zoo.get(cfg_name, trained=True)

    from typing import List, Dict

    class Wrapper(torch.nn.Module):
        def __init__(self, model):
            super().__init__()
            self.model = model
            # map the model's contiguous class indices back to COCO category ids
            coco_idx_list = [
                1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19,
                20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38,
                39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
                56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75,
                76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90, 91
            ]

            self.coco_idx = torch.tensor(coco_idx_list)

        def forward(self, inputs: List[torch.Tensor]):
            # inputs holds CHW float tensors in [0, 1]; batch and rescale to [0, 255]
            x = inputs[0].unsqueeze(0) * 255
            # resize so the shorter side becomes 320
            scale = 320.0 / min(x.shape[-2], x.shape[-1])
            x = torch.nn.functional.interpolate(x,
                                                scale_factor=scale,
                                                mode="bilinear",
                                                align_corners=True,
                                                recompute_scale_factor=True)
            out = self.model(x[0])
            res: Dict[str, torch.Tensor] = {}
            res["boxes"] = out[0] / scale
            res["labels"] = torch.index_select(self.coco_idx, 0, out[1])
            res["scores"] = out[2]
            return inputs, [res]

    size_divisibility = max(pytorch_model.backbone.size_divisibility, 10)
    h, w = size_divisibility, size_divisibility * 2

    runner = create_runner("d2go.runner.GeneralizedRCNNRunner")
    cfg = model_zoo.get_config(cfg_name)
    datasets = list(cfg.DATASETS.TRAIN)

    data_loader = runner.build_detection_test_loader(cfg, datasets)

    predictor_path = convert_and_export_predictor(
        cfg,
        copy.deepcopy(pytorch_model),
        "torchscript_int8@tracing",
        './',
        data_loader,
    )

    orig_model = torch.jit.load(os.path.join(predictor_path, "model.jit"))
    wrapped_model = Wrapper(orig_model)
    # optionally run a forward pass to sanity-check the wrapper
    wrapped_model([torch.rand(3, 600, 600)])
    scripted_model = torch.jit.script(wrapped_model)
    optimized_model = optimize_for_mobile(scripted_model)
    optimized_model.save("D2Go/d2go_optimized.pt")
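
For reference, the optimized TorchScript file saved above can be loaded back and called with the same input format the Wrapper expects (a list holding a single CHW float tensor). A minimal sketch, assuming the saved path from the example:

import torch

# load the optimized TorchScript model saved above
loaded = torch.jit.load("D2Go/d2go_optimized.pt")

# the wrapper takes a list with one CHW float tensor and returns
# (inputs, [{"boxes": ..., "labels": ..., "scores": ...}])
_, detections = loaded([torch.rand(3, 600, 600)])
print(detections[0]["boxes"].shape)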
Example #3
    def test_export_torchvision_format(self):
        runner = GeneralizedRCNNRunner()
        cfg = runner.get_default_cfg()
        cfg.merge_from_file("detectron2go://mask_rcnn_fbnetv3a_dsmask_C4.yaml")
        cfg.merge_from_list(get_quick_test_config_opts())

        cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
        pytorch_model = runner.build_model(cfg, eval_only=True)

        from typing import List, Dict

        class Wrapper(torch.nn.Module):
            def __init__(self, model):
                super().__init__()
                self.model = model

            def forward(self, inputs: List[torch.Tensor]):
                x = inputs[0].unsqueeze(0) * 255
                scale = 320.0 / min(x.shape[-2], x.shape[-1])
                x = torch.nn.functional.interpolate(
                    x,
                    scale_factor=scale,
                    mode="bilinear",
                    align_corners=True,
                    recompute_scale_factor=True,
                )
                out = self.model(x[0])
                res: Dict[str, torch.Tensor] = {}
                res["boxes"] = out[0] / scale
                res["labels"] = out[2]
                res["scores"] = out[1]
                return inputs, [res]

        size_divisibility = max(pytorch_model.backbone.size_divisibility, 10)
        h, w = size_divisibility, size_divisibility * 2
        with create_fake_detection_data_loader(h, w,
                                               is_train=False) as data_loader:
            with make_temp_directory(
                    "test_export_torchvision_format") as tmp_dir:
                predictor_path = convert_and_export_predictor(
                    cfg,
                    copy.deepcopy(pytorch_model),
                    "torchscript@tracing",
                    tmp_dir,
                    data_loader,
                )

                orig_model = torch.jit.load(
                    os.path.join(predictor_path, "model.jit"))
                wrapped_model = Wrapper(orig_model)
                # optionally run a forward pass to sanity-check the wrapper
                wrapped_model([torch.rand(3, 600, 600)])
                scripted_model = torch.jit.script(wrapped_model)
                scripted_model.save(os.path.join(tmp_dir, "new_file.pt"))
Example #4
def main(
    cfg,
    output_dir,
    runner,
    # binary specific optional arguments
    predictor_types: typing.List[str],
    compare_accuracy: bool = False,
    skip_if_fail: bool = False,
    inference_config: Optional[CfgNode] = None,
):
    cfg = copy.deepcopy(cfg)
    setup_after_launch(cfg, output_dir, runner)

    with temp_defrost(cfg):
        cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
    model = runner.build_model(cfg, eval_only=True)

    # NOTE: the train dataset is used to avoid leakage, since the data might be
    # used for calibration during quantization; the test loader is used to make
    # sure inference behaviour is followed (augmentations are not applied).
    datasets = cfg.DATASETS.TRAIN[0]
    data_loader = runner.build_detection_test_loader(cfg, datasets)

    logger.info("Running the pytorch model and print FLOPS ...")
    first_batch = next(iter(data_loader))
    input_args = (first_batch, )
    flops_utils.print_model_flops(model, input_args)

    predictor_paths: typing.Dict[str, str] = {}
    for typ in predictor_types:
        # convert_and_export_predictor might alter the model; copy it before calling
        pytorch_model = copy.deepcopy(model)
        try:
            predictor_path = convert_and_export_predictor(
                cfg, pytorch_model, typ, output_dir, data_loader)
            logger.info(
                f"Predictor type {typ} has been exported to {predictor_path}")
            predictor_paths[typ] = predictor_path
        except Exception as e:
            logger.warning(f"Export {typ} predictor failed: {e}")
            if not skip_if_fail:
                raise e

    _add_inference_config(predictor_paths, inference_config)

    ret = {"predictor_paths": predictor_paths, "accuracy_comparison": {}}
    if compare_accuracy:
        raise NotImplementedError()
        # NOTE: dict for metrics of all exported models (and original pytorch model)
        # ret["accuracy_comparison"] = accuracy_comparison

    return ret
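
A hypothetical driver for this entry point, reusing the runner and config helpers seen in the other examples; the output directory and predictor types below are illustrative, not part of the original:

from d2go.runner import GeneralizedRCNNRunner

runner = GeneralizedRCNNRunner()
cfg = runner.get_default_cfg()
cfg.merge_from_file("detectron2go://mask_rcnn_fbnetv3a_dsmask_C4.yaml")

ret = main(
    cfg,
    "./export_output",  # illustrative output directory
    runner,
    predictor_types=["torchscript@tracing", "torchscript_int8@tracing"],
    skip_if_fail=True,
)
print(ret["predictor_paths"])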
Example #5
        def _test_export(self, predictor_type, compare_match=True):
            h, w = _get_input_dim(self.test_model)
            dl = _get_data_loader(h, w, False)
            inputs = next(iter(dl))

            output_dir = os.path.join(self.test_dir, "test_export")
            predictor_path = convert_and_export_predictor(
                self.cfg, self.test_model, predictor_type, output_dir, dl)

            predictor = create_predictor(predictor_path)
            predictor_outputs = predictor(inputs)
            self.assertEqual(len(predictor_outputs), len(inputs))

            with torch.no_grad():
                pytorch_outputs = self.test_model(inputs)
                self.assertEqual(len(pytorch_outputs), len(inputs))

            if compare_match:
                for predictor_output, pytorch_output in zip(
                        predictor_outputs, pytorch_outputs):
                    torch.testing.assert_allclose(predictor_output["sem_seg"],
                                                  pytorch_output["sem_seg"])
Example #6
cfg.merge_from_file(r"/content/config1.yml")
cfg.QUANTIZATION.BACKEND = 'qnnpack'
runner = GeneralizedRCNNRunner()
model = runner.build_model(cfg, eval_only=True)
model.cpu()

datasets = cfg.DATASETS.TRAIN[0]

cfg.QUANTIZATION.BACKEND = 'qnnpack'
data_loader = runner.build_detection_test_loader(cfg, datasets)


predictor_path = convert_and_export_predictor(
  copy.deepcopy(cfg),
  copy.deepcopy(model),
  "torchscript_int8@tracing",
  './',
  data_loader
)

# recover the logging level (`previous_level` is assumed to have been saved
# earlier in the notebook, before logging was disabled)
logging.disable(previous_level)

with open('config2.yml', 'w') as f:
    f.write(cfg.dump())

# prediction with the int8 model
import detectron2
import cv2
import os
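
The fragment ends after these imports; a minimal sketch of what the prediction step might look like, using the create_predictor helper seen in Examples #1 and #5 (the import path, image path, and input format are assumptions):

import cv2
import torch
from mobile_cv.predictor.api import create_predictor  # assumed import path

predictor = create_predictor(predictor_path)

# build a detectron2-style input: a list of dicts with a CHW uint8 "image" tensor
img = cv2.imread("/content/test.jpg")  # illustrative path
tensor = torch.from_numpy(img).permute(2, 0, 1)
outputs = predictor([{"image": tensor}])
print(outputs[0]["instances"])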