Example No. 1
from typing import Callable, Dict, NamedTuple, Optional, Union

import torch.nn as nn

# FuncInfo, IdentityPreprocess, IdentityPostprocess, and NaiveRunFunc come
# from the surrounding predictor package.


class PredictorExportConfig(NamedTuple):
    """
    Stores the information needed for exporting a predictor.

    Args:
        model (any nested iterable structure of nn.Module): the model(s) to be exported
            (via tracing/onnx or scripting). These can be sub-models when the predictor
            consists of multiple models in deployable format, and/or when pre/post
            processing is excluded due to tracing requirements or hardware
            incompatibility.
        data_generator (Callable): a function that generates all data needed for
            tracing, such that data = data_generator(x); the returned data has the
            same nested structure as model. The data for each model is passed as
            positional arguments, i.e. model(*data).
        model_export_method (str or Dict[str, str]): the export method for each
            sub-model (e.g. "torchscript" or "onnx"), following the same nested
            structure as the model.
        model_export_kwargs (Dict): additional kwargs used when exporting each
            sub-model; it follows the same nested structure as the model and may
            contain information such as whether the model is scriptable.

        preprocess_info (FuncInfo): info for the predictor's preprocess
        postprocess_info (FuncInfo): info for the predictor's postprocess
        run_func_info (FuncInfo): info for the predictor's run_func
    """

    model: Union[nn.Module, Dict[str, nn.Module]]
    data_generator: Optional[Callable] = None
    model_export_method: Optional[Union[str, Dict[str, str]]] = None
    model_export_kwargs: Optional[Union[Dict, Dict[str, Dict]]] = None

    preprocess_info: FuncInfo = FuncInfo.gen_func_info(IdentityPreprocess, params={})
    postprocess_info: FuncInfo = FuncInfo.gen_func_info(IdentityPostprocess, params={})
    run_func_info: FuncInfo = FuncInfo.gen_func_info(NaiveRunFunc, params={})
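
To make the contract concrete, here is a minimal usage sketch (the toy module and the "torchscript" method string are illustrative assumptions, not taken from the source): data_generator returns a tuple with the same nesting as model, so the exporter can call model(*data).

import torch.nn as nn


class ToyModel(nn.Module):  # hypothetical module, for illustration only
    def forward(self, x):
        return x * 2


export_config = PredictorExportConfig(
    model=ToyModel(),
    # A single un-nested model takes a single positional argument, so the
    # generator returns a 1-tuple: the exporter calls model(*data).
    data_generator=lambda x: (x,),
    model_export_method="torchscript",
)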
Example No. 2
def default_rcnn_prepare_for_export(self, cfg, inputs, predictor_type):
    pytorch_model = self

    if ("@c2_ops" in predictor_type or "caffe2" in predictor_type
            or "onnx" in predictor_type):
        from detectron2.export.caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP

        C2MetaArch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[
            cfg.MODEL.META_ARCHITECTURE]
        c2_compatible_model = C2MetaArch(cfg, pytorch_model)

        preprocess_info = FuncInfo.gen_func_info(
            D2Caffe2MetaArchPreprocessFunc,
            params=D2Caffe2MetaArchPreprocessFunc.get_params(
                cfg, c2_compatible_model),
        )
        postprocess_info = FuncInfo.gen_func_info(
            D2Caffe2MetaArchPostprocessFunc,
            params=D2Caffe2MetaArchPostprocessFunc.get_params(
                cfg, c2_compatible_model),
        )

        preprocess_func = preprocess_info.instantiate()
        model_export_kwargs = {}
        if "torchscript" in predictor_type:
            model_export_kwargs["force_disable_tracing_adapter"] = True

        return PredictorExportConfig(
            model=c2_compatible_model,
            # Caffe2MetaArch takes a single tuple as input (the return value of
            # preprocess_func); data_generator must return all positional args as a tuple.
            data_generator=lambda x: (preprocess_func(x),),
            model_export_method=predictor_type.replace("@c2_ops", "", 1),
            model_export_kwargs=model_export_kwargs,
            preprocess_info=preprocess_info,
            postprocess_info=postprocess_info,
        )

    else:
        do_postprocess = cfg.RCNN_EXPORT.INCLUDE_POSTPROCESS
        preprocess_info = FuncInfo.gen_func_info(
            D2RCNNInferenceWrapper.Preprocess, params={})
        preprocess_func = preprocess_info.instantiate()
        return PredictorExportConfig(
            model=D2RCNNInferenceWrapper(
                pytorch_model,
                do_postprocess=do_postprocess,
            ),
            data_generator=lambda x: (preprocess_func(x),),
            model_export_method=predictor_type,
            preprocess_info=preprocess_info,
            postprocess_info=FuncInfo.gen_func_info(
                D2RCNNInferenceWrapper.Postprocess,
                params={"detector_postprocess_done_in_model": do_postprocess},
            ),
        )
Example No. 3
    def prepare_for_export(self, cfg, inputs, predictor_type):
        preprocess_info = FuncInfo.gen_func_info(
            PreprocessFunc,
            params={
                "size_divisibility": self.backbone.size_divisibility,
                "device": str(self.device),
            },
        )
        postprocess_info = FuncInfo.gen_func_info(
            PostprocessFunc,
            params={},
        )

        preprocess_func = preprocess_info.instantiate()

        return PredictorExportConfig(
            model=ModelWrapper(self),
            data_generator=lambda x: (preprocess_func(x),),
            preprocess_info=preprocess_info,
            postprocess_info=postprocess_info,
        )
Example No. 4
def default_rcnn_prepare_for_export(self, cfg, inputs, predictor_type):

    if "torchscript" in predictor_type and "@tracing" in predictor_type:
        return PredictorExportConfig(
            model=D2RCNNTracingWrapper(self),
            data_generator=D2RCNNTracingWrapper.generator_trace_inputs,
            run_func_info=FuncInfo.gen_func_info(D2RCNNTracingWrapper.RunFunc,
                                                 params={}),
        )

    if cfg.MODEL.META_ARCHITECTURE in META_ARCH_CAFFE2_EXPORT_TYPE_MAP:
        C2MetaArch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[
            cfg.MODEL.META_ARCHITECTURE]
        c2_compatible_model = C2MetaArch(cfg, self)

        preprocess_info = FuncInfo.gen_func_info(
            D2Caffe2MetaArchPreprocessFunc,
            params=D2Caffe2MetaArchPreprocessFunc.get_params(
                cfg, c2_compatible_model),
        )
        postprocess_info = FuncInfo.gen_func_info(
            D2Caffe2MetaArchPostprocessFunc,
            params=D2Caffe2MetaArchPostprocessFunc.get_params(
                cfg, c2_compatible_model),
        )

        preprocess_func = preprocess_info.instantiate()

        return PredictorExportConfig(
            model=c2_compatible_model,
            # Caffe2MetaArch takes a single tuple as input (the return value of
            # preprocess_func); data_generator must return all positional args as a tuple.
            data_generator=lambda x: (preprocess_func(x),),
            preprocess_info=preprocess_info,
            postprocess_info=postprocess_info,
        )

    raise NotImplementedError("Can't determine prepare_for_tracing!")
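
For context, a hedged sketch of how this hook might be invoked; the rcnn_model, cfg, and sample_inputs names are assumptions for illustration:

# The predictor_type string selects the branch: "torchscript@tracing" routes
# to the D2RCNNTracingWrapper path above, otherwise the Caffe2 path is tried.
export_config = default_rcnn_prepare_for_export(
    rcnn_model, cfg, sample_inputs, predictor_type="torchscript@tracing"
)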
Example No. 5
    def test_create_predictor(self):
        with make_temp_directory("test_model_info") as tmp_dir:
            # define the predictor
            model_a_path = os.path.join(tmp_dir, "model_A")
            predictor_info = PredictorInfo(
                model=ModelInfo(path=model_a_path, type="torchscript"),
                preprocess_info=FuncInfo.gen_func_info(TestPreprocess,
                                                       params={"weight": 2.0}),
            )

            # simulating exporting to predictor
            _save_test_model(model_a_path)
            with open(os.path.join(tmp_dir, "predictor_info.json"), "w") as f:
                json.dump(predictor_info.to_dict(), f)

            predictor = create_predictor(tmp_dir)
            # y = (x * 2) + 1
            self.assertEqual(torch.tensor(5), predictor(torch.tensor(2)))
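
The test relies on a helper that is not shown; below is a minimal sketch of what _save_test_model might look like, assuming the saved model contributes the "+ 1" in y = (x * 2) + 1 (the module name and the exact on-disk layout expected by create_predictor are assumptions):

import torch


class _PlusOne(torch.nn.Module):  # hypothetical module name
    def forward(self, x):
        return x + 1


def _save_test_model(path: str) -> None:
    # Script a trivial "plus one" model and save it where ModelInfo points;
    # combined with TestPreprocess(weight=2.0), predictor(2) == (2 * 2) + 1 == 5.
    torch.jit.script(_PlusOne()).save(path)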
Example No. 6
def d2_meta_arch_prepare_for_export(self, cfg, inputs, predictor_type):

    if "torchscript" in predictor_type and "@tracing" in predictor_type:

        def inference_func(model, image):
            inputs = [{"image": image}]
            return model.inference(inputs, do_postprocess=False)[0]

        def data_generator(x):
            return (x[0]["image"],)

        image = data_generator(inputs)[0]
        wrapper = TracingAdapter(self, image, inference_func)
        wrapper.eval()

        # HACK: outputs_schema can only be obtained after running tracing, but
        # PredictorExportConfig requires a pre-defined postprocessing function, so
        # tracing has to run twice.
        logger.info("tracing the model to get outputs_schema ...")
        with torch.no_grad(), patch_builtin_len():
            _ = torch.jit.trace(wrapper, (image,))
        outputs_schema_json = json.dumps(
            wrapper.outputs_schema, default=dataclass_object_dump
        )

        return PredictorExportConfig(
            model=wrapper,
            data_generator=data_generator,
            preprocess_info=FuncInfo.gen_func_info(
                D2TracingAdapterPreprocessFunc, params={}
            ),
            postprocess_info=FuncInfo.gen_func_info(
                D2TracingAdapterPostFunc,
                params={"outputs_schema_json": outputs_schema_json},
            ),
        )

    if cfg.MODEL.META_ARCHITECTURE in META_ARCH_CAFFE2_EXPORT_TYPE_MAP:
        C2MetaArch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[cfg.MODEL.META_ARCHITECTURE]
        c2_compatible_model = C2MetaArch(cfg, self)

        preprocess_info = FuncInfo.gen_func_info(
            D2Caffe2MetaArchPreprocessFunc,
            params=D2Caffe2MetaArchPreprocessFunc.get_params(cfg, c2_compatible_model),
        )
        postprocess_info = FuncInfo.gen_func_info(
            D2Caffe2MetaArchPostprocessFunc,
            params=D2Caffe2MetaArchPostprocessFunc.get_params(cfg, c2_compatible_model),
        )

        preprocess_func = preprocess_info.instantiate()

        return PredictorExportConfig(
            model=c2_compatible_model,
            # Caffe2MetaArch takes a single tuple as input (the return value of
            # preprocess_func); data_generator must return all positional args as a tuple.
            data_generator=lambda x: (preprocess_func(x),),
            preprocess_info=preprocess_info,
            postprocess_info=postprocess_info,
        )

    raise NotImplementedError("Can't determine prepare_for_tracing!")
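
json.dumps cannot serialize dataclass instances natively, which is why a default hook is passed above. A minimal sketch of what dataclass_object_dump might do (an assumption; the real implementation may also record the class path so the schema can be reconstructed later):

import dataclasses


def dataclass_object_dump(obj):
    # Assumed behavior: turn a dataclass instance into a plain dict so that
    # json.dumps can serialize the traced outputs_schema.
    if dataclasses.is_dataclass(obj):
        return dataclasses.asdict(obj)
    raise TypeError(f"{type(obj)!r} is not JSON serializable")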
Example No. 7
    def test_func_info(self):
        test_preprocess_info = FuncInfo(
            name=f"{__name__}.TestPreprocess", params={"weight": 2}
        )
        test_preprocess = test_preprocess_info.instantiate()
        self.assertEqual(4, test_preprocess(2))
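
The TestPreprocess helper used here (and in Example No. 5) is not shown; the assertions pin its behavior to y = x * weight, so a plausible reconstruction (an assumption, not the source code) is:

class TestPreprocess:
    def __init__(self, weight: float):
        self.weight = weight

    def __call__(self, x):
        # Scale the input by the configured weight: with weight=2,
        # test_preprocess(2) == 4, matching the assertion above.
        return x * self.weight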