Example #1
def default_export_predictor(cfg, pytorch_model, predictor_type, output_dir,
                             data_loader):
    # The default implementation acts based on the PredictorExportConfig returned
    # by calling "prepare_for_export". It exports all sub-models in the standard
    # way according to the "predictor_type".
    assert hasattr(pytorch_model, "prepare_for_export"), pytorch_model
    inputs = next(iter(data_loader))
    export_config = pytorch_model.prepare_for_export(cfg, inputs,
                                                     predictor_type)
    model_inputs = (export_config.data_generator(inputs) if
                    export_config.data_generator is not None else (inputs, ))

    predictor_path = os.path.join(output_dir, predictor_type)
    PathManager.mkdirs(predictor_path)

    predictor_init_kwargs = {
        "preprocess_info": export_config.preprocess_info,
        "postprocess_info": export_config.postprocess_info,
        "run_func_info": export_config.run_func_info,
    }

    if isinstance(export_config.model, dict):
        models_info = {}
        for name, model in export_config.model.items():
            save_path = os.path.join(predictor_path, name)
            model_info = _export_single_model(
                predictor_path=predictor_path,
                model=model,
                input_args=model_inputs[name]
                if model_inputs is not None else None,
                save_path=save_path,
                model_export_method=(
                    predictor_type if export_config.model_export_method is None
                    else export_config.model_export_method[name]),
                model_export_kwargs=(
                    {} if export_config.model_export_kwargs is None else
                    export_config.model_export_kwargs[name]),
            )
            models_info[name] = model_info
        predictor_init_kwargs["models"] = models_info
    else:
        # for a single model, the exported files are placed directly under
        # `predictor_path`, together with predictor_info.json
        save_path = predictor_path
        model_info = _export_single_model(
            predictor_path=predictor_path,
            model=export_config.model,
            input_args=model_inputs,
            save_path=save_path,
            model_export_method=export_config.model_export_method
            or predictor_type,
            model_export_kwargs=export_config.model_export_kwargs or {},
        )
        predictor_init_kwargs["model"] = model_info

    # assemble predictor
    predictor_info = PredictorInfo(**predictor_init_kwargs)
    with PathManager.open(os.path.join(predictor_path, "predictor_info.json"),
                          "w") as f:
        json.dump(predictor_info.to_dict(), f, indent=4)

    return predictor_path
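
For orientation, here is a minimal usage sketch of the helper above. The cfg, model, and data_loader names are hypothetical placeholders; only the keyword names and the return value come from the example itself.

# Hypothetical usage sketch; cfg/model/data_loader are placeholders, not
# defined in the example above.
predictor_path = default_export_predictor(
    cfg=cfg,                       # experiment config passed to prepare_for_export
    pytorch_model=model,           # nn.Module implementing prepare_for_export()
    predictor_type="torchscript",  # also names the export sub-directory
    output_dir="/tmp/export",      # placeholder output location
    data_loader=data_loader,       # only one batch is drawn from it
)
# predictor_path now holds predictor_info.json plus the exported model files.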
Example #2
    def test_predictor_info(self):
        pinfo = PredictorInfo(
            model=ModelInfo(path="some_path", type="some_type"))
        dic = pinfo.to_dict()
        another_pinfo = PredictorInfo.from_dict(dic)
        self.assertTrue(isinstance(another_pinfo.model, ModelInfo))
        self.assertEqual(another_pinfo.model.type, "some_type")
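
Because Example #1 writes the same dict to predictor_info.json, the round trip shown in this test also applies to the file on disk. A minimal sketch, assuming only the standard library plus the PredictorInfo/ModelInfo classes already used above:

import json
import os

# Sketch: re-create the PredictorInfo that Example #1 serialized;
# predictor_path is assumed to be the directory it returned.
with open(os.path.join(predictor_path, "predictor_info.json")) as f:
    pinfo = PredictorInfo.from_dict(json.load(f))
assert isinstance(pinfo.model, ModelInfo)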
Example #3
def default_export_predictor(cfg, pytorch_model, predictor_type, output_dir,
                             data_loader):
    # The default implementation acts based on the PredictorExportConfig returned
    # by calling "prepare_for_export". It exports all sub-models in the standard
    # way according to the "predictor_type".
    assert hasattr(pytorch_model, "prepare_for_export"), pytorch_model
    inputs = next(iter(data_loader))
    export_config = pytorch_model.prepare_for_export(
        cfg, inputs, export_scheme=predictor_type)

    predictor_path = os.path.join(output_dir, predictor_type)
    PathManager.mkdirs(predictor_path)

    # TODO: also support multiple models from nested dict in the default implementation
    assert isinstance(export_config.model,
                      nn.Module), "Currently only a single model is supported"
    model = export_config.model
    input_args = (export_config.data_generator(inputs)
                  if export_config.data_generator is not None else None)
    model_export_kwargs = export_config.model_export_kwargs or {}
    # the default implementation assumes model type is the same as the predictor type
    model_type = predictor_type
    model_path = predictor_path  # may be a sub-directory for multiple models

    standard_model_export(
        model,
        model_type=model_type,
        save_path=model_path,
        input_args=input_args,
        **model_export_kwargs,
    )
    model_rel_path = os.path.relpath(model_path, predictor_path)

    # assemble predictor
    predictor_info = PredictorInfo(
        model=ModelInfo(path=model_rel_path, type=model_type),
        preprocess_info=export_config.preprocess_info,
        postprocess_info=export_config.postprocess_info,
        run_func_info=export_config.run_func_info,
    )
    with PathManager.open(os.path.join(predictor_path, "predictor_info.json"),
                          "w") as f:
        json.dump(predictor_info.to_dict(), f, indent=4)

    return predictor_path
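
Both exporter variants assume the model implements prepare_for_export and returns a PredictorExportConfig (named in the comments above). The sketch below is a rough reconstruction of that hook: the field names follow the attribute reads in Examples #1 and #3, but the exact constructor signature, its defaults, and the MyExportableModel class are assumptions.

import torch.nn as nn

# Rough sketch of the model-side hook the exporters assume; field names are
# taken from the attribute reads above, the constructor signature is a guess.
class MyExportableModel(nn.Module):
    def forward(self, x):
        return x

    def prepare_for_export(self, cfg, inputs, export_scheme):
        return PredictorExportConfig(
            model=self,                # single nn.Module (the only case Example #3 handles)
            data_generator=None,       # None -> exporter falls back to the raw inputs
            preprocess_info=None,      # FuncInfo for pre-processing (see Example #4)
            postprocess_info=None,
            run_func_info=None,
            model_export_method=None,  # None -> defaults to predictor_type
            model_export_kwargs=None,  # None -> defaults to {}
        )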
Example #4
    def test_create_predictor(self):
        with make_temp_directory("test_model_info") as tmp_dir:
            # define the predictor
            model_a_path = os.path.join(tmp_dir, "model_A")
            predictor_info = PredictorInfo(
                model=ModelInfo(path=model_a_path, type="torchscript"),
                preprocess_info=FuncInfo.gen_func_info(TestPreprocess,
                                                       params={"weight": 2.0}),
            )

            # simulating exporting to predictor
            _save_test_model(model_a_path)
            with open(os.path.join(tmp_dir, "predictor_info.json"), "w") as f:
                json.dump(predictor_info.to_dict(), f)

            predictor = create_predictor(tmp_dir)
            # y = (x * 2) + 1
            self.assertEqual(torch.tensor(5), predictor(torch.tensor(2)))
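
The test depends on two fixtures defined elsewhere in the suite. The sketch below is a plausible reconstruction consistent with the y = (x * 2) + 1 assertion (TestPreprocess scales by weight, the saved model adds 1); both definitions, and the on-disk model file name, are guesses rather than the actual helpers.

import os
import torch

# Guessed reconstructions of the fixtures used above, consistent with the
# "y = (x * 2) + 1" assertion; not the actual helpers from the test suite.
class TestPreprocess(object):
    def __init__(self, weight):
        self.weight = weight

    def __call__(self, x):
        # scale the input; with weight=2.0 this is the "x * 2" half
        return x * self.weight


def _save_test_model(path):
    # a TorchScript model that adds 1 (the "+ 1" half); the file name that
    # create_predictor expects inside the model directory is a guess
    class AddOne(torch.nn.Module):
        def forward(self, x):
            return x + 1

    os.makedirs(path, exist_ok=True)
    torch.jit.script(AddOne()).save(os.path.join(path, "model.jit"))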