Example #1
File: api.py Project: yeonh2/d2go
def _export_single_model(
    predictor_path,
    model,
    input_args,
    save_path,
    model_export_method,
    model_export_kwargs,
):
    assert isinstance(model, nn.Module), model
    # model_export_method either inherits ModelExportMethod or is a key in the registry
    model_export_method_str = None
    if isinstance(model_export_method, str):
        model_export_method_str = model_export_method
        model_export_method = ModelExportMethodRegistry.get(model_export_method)
    assert issubclass(model_export_method, ModelExportMethod), model_export_method

    load_kwargs = model_export_method.export(
        model=model,
        input_args=input_args,
        save_path=save_path,
        export_method=model_export_method_str,
        **model_export_kwargs,
    )
    assert isinstance(load_kwargs, dict)
    model_rel_path = os.path.relpath(save_path, predictor_path)
    return ModelInfo(
        path=model_rel_path,
        export_method="{}.{}".format(
            model_export_method.__module__, model_export_method.__qualname__
        ),
        load_kwargs=load_kwargs,
    )
Example #2
    def test_predictor_info(self):
        pinfo = PredictorInfo(model=ModelInfo(path="some_path", type="some_type"))
        dic = pinfo.to_dict()
        another_pinfo = PredictorInfo.from_dict(dic)
        self.assertTrue(isinstance(another_pinfo.model, ModelInfo))
        self.assertEqual(another_pinfo.model.type, "some_type")
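This round trip is what default_export_predictor (Example #4) relies on when it writes predictor_info.json; a minimal sketch, assuming to_dict() returns plain JSON-serializable data, as that example's json.dump call suggests:

import json

pinfo = PredictorInfo(model=ModelInfo(path="some_path", type="some_type"))
serialized = json.dumps(pinfo.to_dict(), indent=4)
restored = PredictorInfo.from_dict(json.loads(serialized))
assert restored.model.type == "some_type"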
Example #3
    def test_model_info(self):
        with make_temp_directory("test_model_info") as tmp_dir:
            _save_test_model(tmp_dir)
            model_info = ModelInfo(path=tmp_dir, type="torchscript")
            # NOTE: decide if load_model is a public API or class method of ModelInfo
            from mobile_cv.predictor.model_wrappers import load_model

            model = load_model(model_info, model_root="")
            self.assertEqual(torch.tensor(2), model(torch.tensor(1)))
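_save_test_model is not shown in this listing; a plausible reconstruction, consistent with the assertion that the loaded model maps 1 to 2, scripts and saves an "add one" module. The "model.jit" file name inside the directory is an assumption about the layout load_model expects:

import os
import torch

class _AddOne(torch.nn.Module):
    def forward(self, x):
        return x + 1

def _save_test_model(save_dir):
    # Hypothetical helper: script the module so that model(1) == 2 as the test
    # asserts; the "model.jit" file name is assumed, not confirmed here.
    scripted = torch.jit.script(_AddOne())
    scripted.save(os.path.join(save_dir, "model.jit"))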
Example #4
def default_export_predictor(cfg, pytorch_model, predictor_type, output_dir,
                             data_loader):
    # The default implementation acts based on the PredictorExportConfig returned by
    # calling "prepare_for_export". It'll export all sub models in standard way
    # according to the "predictor_type".
    assert hasattr(pytorch_model, "prepare_for_export"), pytorch_model
    inputs = next(iter(data_loader))
    export_config = pytorch_model.prepare_for_export(
        cfg, inputs, export_scheme=predictor_type)

    predictor_path = os.path.join(output_dir, predictor_type)
    PathManager.mkdirs(predictor_path)

    # TODO: also support multiple models from nested dict in the default implementation
    assert isinstance(export_config.model,
                      nn.Module), "Currently support single model"
    model = export_config.model
    input_args = (export_config.data_generator(inputs)
                  if export_config.data_generator is not None else None)
    model_export_kwargs = export_config.model_export_kwargs or {}
    # the default implementation assumes model type is the same as the predictor type
    model_type = predictor_type
    model_path = predictor_path  # may be a sub dir for multiple models

    standard_model_export(
        model,
        model_type=model_type,
        save_path=model_path,
        input_args=input_args,
        **model_export_kwargs,
    )
    model_rel_path = os.path.relpath(model_path, predictor_path)

    # assemble predictor
    predictor_info = PredictorInfo(
        model=ModelInfo(path=model_rel_path, type=model_type),
        preprocess_info=export_config.preprocess_info,
        postprocess_info=export_config.postprocess_info,
        run_func_info=export_config.run_func_info,
    )
    with PathManager.open(os.path.join(predictor_path, "predictor_info.json"),
                          "w") as f:
        json.dump(predictor_info.to_dict(), f, indent=4)

    return predictor_path
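default_export_predictor only reads a handful of fields from the export config: model, data_generator, model_export_kwargs, and the three FuncInfo fields. A hedged sketch of a model exposing prepare_for_export, assuming PredictorExportConfig accepts these fields as keyword arguments and leaves the rest at their defaults:

import torch.nn as nn

class MyExportableModel(nn.Module):
    def forward(self, x):
        return x * 2

    def prepare_for_export(self, cfg, inputs, export_scheme):
        # Assumption: PredictorExportConfig takes the fields read by
        # default_export_predictor as keyword arguments.
        return PredictorExportConfig(
            model=self,
            # turn one data-loader batch into the positional args used for export
            data_generator=lambda batch: (batch,),
        )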
Example #5
    def test_create_predictor(self):
        with make_temp_directory("test_model_info") as tmp_dir:
            # define the predictor
            model_a_path = os.path.join(tmp_dir, "model_A")
            predictor_info = PredictorInfo(
                model=ModelInfo(path=model_a_path, type="torchscript"),
                preprocess_info=FuncInfo.gen_func_info(TestPreprocess,
                                                       params={"weight": 2.0}),
            )

            # simulating exporting to predictor
            _save_test_model(model_a_path)
            with open(os.path.join(tmp_dir, "predictor_info.json"), "w") as f:
                json.dump(predictor_info.to_dict(), f)

            predictor = create_predictor(tmp_dir)
            # y = (x * 2) + 1
            self.assertEqual(torch.tensor(5), predictor(torch.tensor(2)))
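TestPreprocess is also not shown here; given the `y = (x * 2) + 1` comment and the weight of 2.0, it presumably scales the input before the saved "add one" model runs. A hedged reconstruction:

class TestPreprocess(object):
    # Hypothetical reconstruction: FuncInfo.gen_func_info(TestPreprocess,
    # params={"weight": 2.0}) suggests a callable constructed from the params.
    def __init__(self, weight):
        self.weight = weight

    def __call__(self, x):
        return x * self.weight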
Example #6
def _export_single_model(
        predictor_path,
        model,
        input_args,
        save_path,
        model_export_method,
        model_export_kwargs,
        predictor_type,  # TODO: remove this after refactoring ModelInfo
):
    assert isinstance(model, nn.Module), model
    load_kwargs = ModelExportMethodRegistry.get(model_export_method).export(
        model=model,
        input_args=input_args,
        save_path=save_path,
        **model_export_kwargs,
    )
    assert isinstance(load_kwargs, dict)  # TODO: save this in predictor_info
    model_rel_path = os.path.relpath(save_path, predictor_path)
    return ModelInfo(path=model_rel_path, type=predictor_type)