Example #1
def export_tracing(torch_model, inputs):
    assert TORCH_VERSION >= (1, 8)
    # RetinaNet is supported but needs a slightly different wrapper.
    # TODO wrapper should be automatically generated
    assert isinstance(torch_model, GeneralizedRCNN)
    image = inputs[0]["image"]

    class WrapModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.torch_model = torch_model

        def forward(self, image):
            inputs = [{"image": image}]
            outputs = self.torch_model.inference(inputs,
                                                 do_postprocess=False)[0]
            outputs = outputs.get_fields()
            from detectron2.utils.analysis import _flatten_to_tuple

            return _flatten_to_tuple(outputs)

    from detectron2.export.torchscript_patch import patch_builtin_len

    with torch.no_grad(), patch_builtin_len():
        assert (args.format == "torchscript"
                ), "Tracing method only supports torchscript format for now."
        ts_model = torch.jit.trace(WrapModel(), (image, ))
        ts_model.save(os.path.join(args.output, "model.ts"))
        dump_torchscript_IR(ts_model, args.output)
        # NOTE onnx export fails in pytorch
        # if args.format == "onnx":
        #     torch.onnx.export(WrapModel(), (image,), os.path.join(args.output, "model.onnx"))

    # TODO: Python inference is not supported yet; the postprocessing glue code is missing
    return None
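For reference, a model saved this way could be reloaded and run roughly as follows. This is a hedged sketch, not part of the original script: the output directory and the reuse of the tracing image are assumptions, and the result is the flattened tuple produced by _flatten_to_tuple rather than an Instances object.

import os
import torch

ts_model = torch.jit.load(os.path.join(args.output, "model.ts"))
with torch.no_grad():
    flat_outputs = ts_model(image)  # same CHW image tensor format used for tracing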
Example #2

    def test_dump_IR_tracing(self):
        cfg = get_cfg()
        cfg.MODEL.RESNETS.DEPTH = 18
        cfg.MODEL.RESNETS.RES2_OUT_CHANNELS = 64

        class Mod(nn.Module):
            def forward(self, x):
                return tuple(self.m(x).values())

        model = Mod()
        model.m = build_backbone(cfg)
        model.eval()

        with torch.no_grad():
            ts_model = torch.jit.trace(model, (torch.rand(2, 3, 224, 224), ))

        with tempfile.TemporaryDirectory(prefix="detectron2_test") as d:
            dump_torchscript_IR(ts_model, d)
            # check that the files are created
            for name in [
                    "model_ts_code", "model_ts_IR", "model_ts_IR_inlined",
                    "model"
            ]:
                fname = os.path.join(d, name + ".txt")
                self.assertTrue(os.stat(fname).st_size > 0, fname)
Example #3
def export_tracing(torch_model, inputs):
    assert TORCH_VERSION >= (1, 8)
    image = inputs[0]["image"]
    inputs = [{"image": image}]  # remove other unused keys

    if isinstance(torch_model, GeneralizedRCNN):

        def inference(model, inputs):
            # use do_postprocess=False so it returns ROI mask
            inst = model.inference(inputs, do_postprocess=False)[0]
            return [{"instances": inst}]

    else:
        inference = None  # assume that we just call the model directly

    traceable_model = TracingAdapter(torch_model, inputs, inference)

    if args.format == "torchscript":
        ts_model = torch.jit.trace(traceable_model, (image, ))
        with PathManager.open(os.path.join(args.output, "model.ts"),
                              "wb") as f:
            torch.jit.save(ts_model, f)
        dump_torchscript_IR(ts_model, args.output)
    elif args.format == "onnx":
        # NOTE onnx export currently failing in pytorch
        with PathManager.open(os.path.join(args.output, "model.onnx"),
                              "wb") as f:
            torch.onnx.export(traceable_model, (image, ), f)
    logger.info("Inputs schema: " + str(traceable_model.inputs_schema))
    logger.info("Outputs schema: " + str(traceable_model.outputs_schema))

    if args.format != "torchscript":
        return None
    if not isinstance(torch_model, (GeneralizedRCNN, RetinaNet)):
        return None

    def eval_wrapper(inputs):
        """
        The exported model does not contain the final resize step, which is typically
        unused in deployment but needed for evaluation. We add it manually here.
        """
        input = inputs[0]
        instances = traceable_model.outputs_schema(ts_model(
            input["image"]))[0]["instances"]
        postprocessed = detector_postprocess(instances, input["height"],
                                             input["width"])
        return [{"instances": postprocessed}]

    return eval_wrapper
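The returned eval_wrapper is a plain callable that takes the usual list-of-dicts inputs, so it can be dropped into detectron2's standard evaluation loop, much like the caffe2 evaluation block in Example #6. A hedged sketch; the dataset, config, and output directory are assumptions:

from detectron2.data import build_detection_test_loader
from detectron2.evaluation import COCOEvaluator, inference_on_dataset, print_csv_format

wrapper = export_tracing(torch_model, inputs)
if wrapper is not None:
    dataset = cfg.DATASETS.TEST[0]
    data_loader = build_detection_test_loader(cfg, dataset)
    evaluator = COCOEvaluator(dataset, cfg, True, args.output)
    metrics = inference_on_dataset(wrapper, data_loader, evaluator)
    print_csv_format(metrics)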
Example #4
def export_caffe2_tracing(cfg, torch_model, inputs):
    tracer = Caffe2Tracer(cfg, torch_model, inputs)
    if args.format == "caffe2":
        caffe2_model = tracer.export_caffe2()
        caffe2_model.save_protobuf(args.output)
        # draw the caffe2 graph
        caffe2_model.save_graph(os.path.join(args.output, "model.svg"), inputs=inputs)
        return caffe2_model
    elif args.format == "onnx":
        onnx_model = tracer.export_onnx()
        onnx.save(onnx_model, os.path.join(args.output, "model.onnx"))
    elif args.format == "torchscript":
        ts_model = tracer.export_torchscript()
        ts_model.save(os.path.join(args.output, "model.ts"))
        dump_torchscript_IR(ts_model, args.output)
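The protobuf files written by save_protobuf can later be reloaded in Python without the original model code, and the reloaded model accepts the same list-of-dicts inputs. A hedged sketch; the output directory and sample inputs are assumptions:

from detectron2.export import Caffe2Model

caffe2_model = Caffe2Model.load_protobuf(args.output)
outputs = caffe2_model(inputs)  # same input format as passed to Caffe2Tracer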
Example #5
def export_scripting(torch_model):
    assert TORCH_VERSION >= (1, 8)
    fields = {
        "proposal_boxes": Boxes,
        "objectness_logits": Tensor,
        "pred_boxes": Boxes,
        "scores": Tensor,
        "pred_classes": Tensor,
        "pred_masks": Tensor,
        "pred_keypoints": torch.Tensor,
        "pred_keypoint_heatmaps": torch.Tensor,
    }
    # TODO: it may also be possible to export to ONNX format
    assert args.format == "torchscript", "Scripting only supports torchscript format."
    ts_model = export_torchscript_with_instances(torch_model, fields)
    ts_model.save(os.path.join(args.output, "model.ts"))
    dump_torchscript_IR(ts_model, args.output)
    # TODO: Python inference is not supported yet; the postprocessing glue code is missing
    return None
Example #6
    data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
    first_batch = next(iter(data_loader))

    # convert and save caffe2 model
    tracer = Caffe2Tracer(cfg, torch_model, first_batch)
    if args.format == "caffe2":
        caffe2_model = tracer.export_caffe2()
        caffe2_model.save_protobuf(args.output)
        # draw the caffe2 graph
        caffe2_model.save_graph(os.path.join(args.output, "model.svg"),
                                inputs=first_batch)
    elif args.format == "onnx":
        onnx_model = tracer.export_onnx()
        onnx.save(onnx_model, os.path.join(args.output, "model.onnx"))
    elif args.format == "torchscript":
        ts_model = tracer.export_torchscript()
        ts_model.save(os.path.join(args.output, "model.ts"))
        from detectron2.export.torchscript import dump_torchscript_IR

        dump_torchscript_IR(ts_model, args.output)

    # run evaluation with the converted model
    if args.run_eval:
        assert args.format == "caffe2", "Python inference in other format is not yet supported."
        dataset = cfg.DATASETS.TEST[0]
        data_loader = build_detection_test_loader(cfg, dataset)
        # NOTE: hard-coded evaluator. change to the evaluator for your dataset
        evaluator = COCOEvaluator(dataset, cfg, True, args.output)
        metrics = inference_on_dataset(caffe2_model, data_loader, evaluator)
        print_csv_format(metrics)