    def test_dump_IR_tracing(self):
        cfg = get_cfg()
        cfg.MODEL.RESNETS.DEPTH = 18
        cfg.MODEL.RESNETS.RES2_OUT_CHANNELS = 64

        class Mod(nn.Module):
            def forward(self, x):
                return tuple(self.m(x).values())

        model = Mod()
        model.m = build_backbone(cfg)
        model.eval()

        with torch.no_grad():
            ts_model = torch.jit.trace(model, (torch.rand(2, 3, 224, 224), ))

        with tempfile.TemporaryDirectory(prefix="detectron2_test") as d:
            dump_torchscript_IR(ts_model, d)
            # check that the files are created
            for name in [
                    "model_ts_code", "model_ts_IR", "model_ts_IR_inlined",
                    "model"
            ]:
                fname = os.path.join(d, name + ".txt")
                self.assertTrue(os.stat(fname).st_size > 0, fname)
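
For reference, a minimal sketch (not part of the original test) of saving the traced module above and reloading it elsewhere; the file path is a placeholder:

# sketch: persist and reload the traced backbone wrapper from the test above
torch.jit.save(ts_model, "/tmp/backbone_ts.pt")       # hypothetical path
reloaded = torch.jit.load("/tmp/backbone_ts.pt")
with torch.no_grad():
    features = reloaded(torch.rand(2, 3, 224, 224))   # tuple of feature maps, one per backbone output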
Example #2
def export_tracing(torch_model, data_loader):
    inputs = next(iter(data_loader))
    images = torch_model.preprocess_image(inputs)
    images_tensor = images.tensor  # to run with FP16, use 'images.tensor.half()' instead

    if isinstance(torch_model, GeneralizedRCNN):

        def inference(model, inputs):
            # use do_postprocess=False so it returns the ROI masks
            inst = model.inference(inputs, do_postprocess=False)[0]
            return [{"instances": inst}]

    else:
        inference = None  # assume that we just call the model directly

    traceable_model = TracingAdapter(torch_model, images_tensor, inference)

    ts_model = torch.jit.trace(traceable_model, (images_tensor,))
    with PathManager.open(os.path.join(args.output, "model.ts"), "wb") as f:
        torch.jit.save(ts_model, f)
    dump_torchscript_IR(ts_model, args.output)

    logger.info("Inputs schema: " + str(traceable_model.inputs_schema))
    logger.info("Outputs schema: " + str(traceable_model.outputs_schema))

    return ts_model
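
A hedged sketch of how this exporter might be driven; it assumes args and logger are module-level globals of the surrounding script, and the config file and dataset are placeholders:

from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import build_detection_test_loader
from detectron2.modeling import build_model

cfg = get_cfg()
cfg.merge_from_file("config.yaml")                     # hypothetical config file
model = build_model(cfg)
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)   # load trained weights
model.eval()

data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
ts_model = export_tracing(model, data_loader)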
Example #3
    def test_dump_IR_function(self):
        @torch.jit.script
        def gunc(x, y):
            return x + y

        def func(x, y):
            return x + y + gunc(x, y)

        ts_model = torch.jit.trace(func, (torch.rand(3), torch.rand(3)))
        with tempfile.TemporaryDirectory(prefix="detectron2_test") as d:
            dump_torchscript_IR(ts_model, d)
            for name in ["model_ts_code", "model_ts_IR", "model_ts_IR_inlined"]:
                fname = os.path.join(d, name + ".txt")
                self.assertTrue(os.stat(fname).st_size > 0, fname)
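
The dumped text files roughly mirror attributes that can be inspected directly on the traced function; a sketch using the same func as above:

ts_fn = torch.jit.trace(func, (torch.rand(3), torch.rand(3)))
print(ts_fn.code)    # Python-like TorchScript source, roughly what model_ts_code.txt contains
print(ts_fn.graph)   # TorchScript IR graph, roughly what model_ts_IR.txt contains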
Example #4
def export_tracing(torch_model, inputs):
    assert TORCH_VERSION >= (1, 8)
    image = inputs[0]["image"]
    inputs = [{"image": image}]  # remove other unused keys

    if isinstance(torch_model, GeneralizedRCNN):

        def inference(model, inputs):
            # use do_postprocess=False so it returns the ROI masks
            inst = model.inference(inputs, do_postprocess=False)[0]
            return [{"instances": inst}]

    else:
        inference = None  # assume that we just call the model directly

    traceable_model = TracingAdapter(torch_model, inputs, inference)

    if args.format == "torchscript":
        ts_model = torch.jit.trace(traceable_model, (image, ))
        with PathManager.open(os.path.join(args.output, "model.ts"),
                              "wb") as f:
            torch.jit.save(ts_model, f)
        dump_torchscript_IR(ts_model, args.output)
    elif args.format == "onnx":
        # NOTE onnx export currently failing in pytorch
        with PathManager.open(os.path.join(args.output, "model.onnx"),
                              "wb") as f:
            torch.onnx.export(traceable_model, (image, ), f, opset_version=11)
    logger.info("Inputs schema: " + str(traceable_model.inputs_schema))
    logger.info("Outputs schema: " + str(traceable_model.outputs_schema))

    if args.format != "torchscript":
        return None
    if not isinstance(torch_model, (GeneralizedRCNN, RetinaNet)):
        return None

    def eval_wrapper(inputs):
        """
        The exported model does not contain the final resize step, which is typically
        unused in deployment but needed for evaluation. We add it manually here.
        """
        input = inputs[0]
        instances = traceable_model.outputs_schema(ts_model(
            input["image"]))[0]["instances"]
        postprocessed = detector_postprocess(instances, input["height"],
                                             input["width"])
        return [{"instances": postprocessed}]

    return eval_wrapper
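
A hedged sketch of plugging the returned eval_wrapper into detectron2's evaluation loop; cfg, args, and the sample batch are assumed to be set up by the surrounding script:

from detectron2.data import build_detection_test_loader
from detectron2.evaluation import COCOEvaluator, inference_on_dataset

wrapper = export_tracing(torch_model, sample_inputs)   # sample_inputs: one batch in detectron2 format
if wrapper is not None:                                # only returned for torchscript + RCNN/RetinaNet
    data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
    evaluator = COCOEvaluator(cfg.DATASETS.TEST[0], output_dir=args.output)
    print(inference_on_dataset(wrapper, data_loader, evaluator))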
Example #5
def export_scripting(torch_model):
    assert TORCH_VERSION >= (1, 8)
    fields = {
        "proposal_boxes": Boxes,
        "objectness_logits": Tensor,
        "pred_boxes": Boxes,
        "scores": Tensor,
        "pred_classes": Tensor,
        "pred_masks": Tensor,
        "pred_keypoints": torch.Tensor,
        "pred_keypoint_heatmaps": torch.Tensor,
    }
    assert args.format == "torchscript", "Scripting only supports torchscript format."

    class ScriptableAdapterBase(nn.Module):
        # Use this adapter to work around https://github.com/pytorch/pytorch/issues/46944
        # by returning dicts instead of Instances. Otherwise the exported model is not deployable.
        def __init__(self):
            super().__init__()
            self.model = torch_model
            self.eval()

    if isinstance(torch_model, GeneralizedRCNN):

        class ScriptableAdapter(ScriptableAdapterBase):
            def forward(
                self,
                inputs: Tuple[Dict[str,
                                   torch.Tensor]]) -> List[Dict[str, Tensor]]:
                instances = self.model.inference(inputs, do_postprocess=False)
                return [i.get_fields() for i in instances]

    else:

        class ScriptableAdapter(ScriptableAdapterBase):
            def forward(
                self,
                inputs: Tuple[Dict[str,
                                   torch.Tensor]]) -> List[Dict[str, Tensor]]:
                instances = self.model(inputs)
                return [i.get_fields() for i in instances]

    ts_model = scripting_with_instances(ScriptableAdapter(), fields)
    with PathManager.open(os.path.join(args.output, "model.ts"), "wb") as f:
        torch.jit.save(ts_model, f)
    dump_torchscript_IR(ts_model, args.output)
    # TODO inference in Python now missing postprocessing glue code
    return None
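
Because the adapter returns plain field dicts rather than Instances objects, the exported model can be reloaded and run with stock torch; a sketch with a placeholder image tensor:

import os
import torch

scripted = torch.jit.load(os.path.join(args.output, "model.ts"))
image = torch.rand(3, 480, 640)             # placeholder CHW image tensor
outputs = scripted(({"image": image},))     # tuple with one input dict per image
fields = outputs[0]                         # dict of predicted fields for the first image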
Example #6
def export_caffe2_tracing(cfg, torch_model, inputs):
    tracer = Caffe2Tracer(cfg, torch_model, inputs)
    if args.format == "caffe2":
        caffe2_model = tracer.export_caffe2()
        caffe2_model.save_protobuf(args.output)
        # draw the caffe2 graph
        caffe2_model.save_graph(os.path.join(args.output, "model.svg"), inputs=inputs)
        return caffe2_model
    elif args.format == "onnx":
        onnx_model = tracer.export_onnx()
        onnx.save(onnx_model, os.path.join(args.output, "model.onnx"))
    elif args.format == "torchscript":
        ts_model = tracer.export_torchscript()
        with PathManager.open(os.path.join(args.output, "model.ts"), "wb") as f:
            torch.jit.save(ts_model, f)
        dump_torchscript_IR(ts_model, args.output)
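
A hedged sketch of calling this Caffe2 tracer; the sample batch is assumed to come from a detectron2 data loader, and only the "caffe2" format returns a model object that can be run directly in Python:

first_batch = next(iter(data_loader))              # hypothetical data loader in detectron2 format
exported = export_caffe2_tracing(cfg, torch_model, first_batch)
if exported is not None:                           # only the "caffe2" branch returns a Caffe2Model
    outputs = exported(first_batch)                # Caffe2Model supports direct inference in Python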
Example #7
def export_scripting(torch_model):
    assert TORCH_VERSION >= (1, 8)
    fields = {
        "proposal_boxes": Boxes,
        "objectness_logits": Tensor,
        "pred_boxes": Boxes,
        "scores": Tensor,
        "pred_classes": Tensor,
        "pred_masks": Tensor,
        "pred_keypoints": torch.Tensor,
        "pred_keypoint_heatmaps": torch.Tensor,
    }
    assert args.format == "torchscript", "Scripting only supports torchscript format."
    ts_model = scripting_with_instances(torch_model, fields)
    with PathManager.open(os.path.join(args.output, "model.ts"), "wb") as f:
        torch.jit.save(ts_model, f)
    dump_torchscript_IR(ts_model, args.output)
    # TODO inference in Python now missing postprocessing glue code
    return None
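
Unlike Example #5, this variant scripts torch_model directly, so the model must already be built, loaded, and switched to eval mode; a hedged sketch:

from detectron2.checkpoint import DetectionCheckpointer
from detectron2.modeling import build_model

torch_model = build_model(cfg)                         # assumes a scriptable meta-architecture
DetectionCheckpointer(torch_model).load(cfg.MODEL.WEIGHTS)
torch_model.eval()
export_scripting(torch_model)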
Example #8
def export_optimize_and_save_torchscript(
    model: nn.Module,
    inputs: Optional[Tuple[Any]],
    output_path: str,
    *,
    jit_mode: Optional[str] = DEFAULT_JIT_MODE,
    torchscript_filename: str = "model.jit",
    mobile_optimization: Optional[MobileOptimizationConfig] = None,
    _extra_files: Optional[Dict[str, bytes]] = None,
) -> str:
    """
    The primary function for exporting PyTorch model to TorchScript.

    Args:
        model (nn.Module): the model to export. When given a ScriptModule, the export
            step is skipped and the model is only optimized and saved.
        inputs (tuple or None): input arguments of the model, so that it can be called
            as model(*inputs). Not used when scripting the model.
        output_path (str): directory where the model will be saved.
        jit_mode (str): trace/script, or None if the model is already a ScriptModule.
        torchscript_filename (str): the filename of the non-mobile-optimized model.
        mobile_optimization (MobileOptimizationConfig): when provided, mobile optimization
            will be applied.
        _extra_files (Dict[str, bytes]): when provided, extra files will be saved.

    Returns:
        (str): filename of the final model, whether optimized or not.
    """

    logger.info("Export, optimize and saving TorchScript to {} ...".format(
        output_path))
    PathManager.mkdirs(output_path)
    if _extra_files is None:
        _extra_files = {}

    if isinstance(model, torch.jit.ScriptModule):
        if jit_mode is not None:
            logger.info(
                "The input model is already a ScriptModule, skip the jit step")
    elif jit_mode == "trace":
        logger.info("Tracing the model ...")
        with torch.no_grad():
            script_model = torch.jit.trace(model, inputs)
    elif jit_mode == "script":
        logger.info("Scripting the model ...")
        script_model = torch.jit.script(model)
    else:
        raise ValueError("Unsupported jit_mode: {}".format(jit_mode))

    with make_temp_directory(
            "export_optimize_and_save_torchscript") as tmp_dir:

        @contextlib.contextmanager
        def _synced_local_file(rel_path):
            remote_file = os.path.join(output_path, rel_path)
            local_file = os.path.join(tmp_dir, rel_path)
            yield local_file
            PathManager.copy_from_local(local_file,
                                        remote_file,
                                        overwrite=True)

        with _synced_local_file(torchscript_filename) as model_file:
            logger.info(f"Saving torchscript model to: {torchscript_filename}")
            torch.jit.save(script_model, model_file, _extra_files=_extra_files)
        dump_torchscript_IR(script_model,
                            os.path.join(output_path, "torchscript_IR"))

        data_filename = "data.pth"
        with _synced_local_file(data_filename) as data_file:
            logger.info(f"Saving example data to: {data_filename}")
            torch.save(inputs, data_file)

        if mobile_optimization is not None:
            logger.info("Applying optimize_for_mobile ...")
            liteopt_model = optimize_for_mobile(
                script_model,
                optimization_blocklist=mobile_optimization.optimization_blocklist,
                preserved_methods=mobile_optimization.preserved_methods,
                backend=mobile_optimization.backend,
            )
            torchscript_filename = mobile_optimization.torchscript_filename
            with _synced_local_file(torchscript_filename) as lite_path:
                logger.info(
                    f"Saving mobile optimized model to: {torchscript_filename}"
                )
                liteopt_model._save_for_lite_interpreter(
                    lite_path, _extra_files=_extra_files)

            op_names = torch.jit.export_opnames(liteopt_model)
            logger.info("Operator names from lite interpreter:\n{}".format(
                "\n".join(op_names)))

            logger.info("Applying augment_model_with_bundled_inputs ...")
            # make all tensors zero-like to save storage
            iters = recursive_iterate(inputs)
            for x in iters:
                if isinstance(x, torch.Tensor):
                    iters.send(torch.zeros_like(x).contiguous())
            inputs = iters.value
            augment_model_with_bundled_inputs(liteopt_model, [inputs])

            # For non-cpu backends (e.g. Metal, Vulkan) the bundled inputs need to be
            # converted with `torch.to(<myDevice>)` in order to predict successfully
            # This is a temporary bypass until PT Edge supports automatic backend
            # conversion in the bundled inputs interface, or we can auto-add an input tensor
            # conversion op to Metal and Vulkan models.
            target_backend = mobile_optimization.backend.lower()
            if target_backend == "cpu":
                # Sanity check by running
                logger.info(
                    "Running sanity check for the mobile optimized model ...")
                liteopt_model(*liteopt_model.get_all_bundled_inputs()[0])
            name, ext = os.path.splitext(torchscript_filename)
            input_bundled_path = name + "_bundled" + ext
            with _synced_local_file(input_bundled_path) as lite_path:
                logger.info(
                    f"Saving input bundled model to: {input_bundled_path}")
                liteopt_model._save_for_lite_interpreter(lite_path)

        return torchscript_filename
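
A hedged usage sketch with a placeholder torchvision model; the output directory is hypothetical, and mobile_optimization is omitted so only the plain TorchScript file is written:

import torch
import torchvision

model = torchvision.models.resnet18().eval()       # placeholder model
example_inputs = (torch.rand(1, 3, 224, 224),)
saved_name = export_optimize_and_save_torchscript(
    model,
    example_inputs,
    output_path="/tmp/ts_export",                  # hypothetical output directory
    jit_mode="trace",
)
print(saved_name)                                  # "model.jit" unless a mobile-optimized file replaced it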