Example #1
def test_model_multiple_asset_load(working_dir, monkeypatch):
    monkeypatch.setenv("MODELKIT_ASSETS_DIR", working_dir)
    with open(os.path.join(working_dir, "something.txt"), "w") as f:
        f.write("OK")

    class SomeModel(Model):
        CONFIGURATIONS = {"a": {"asset": "something.txt"}}

        def _predict(self, item):
            return item

    class SomeModel2(Model):
        CONFIGURATIONS = {"b": {"asset": "something.txt"}}

        def _predict(self, item):
            return item

    fetched = 0

    def fake_fetch_asset(asset_spec, return_info=True):
        nonlocal fetched
        fetched += 1
        return {"path": os.path.join(working_dir, "something.txt")}

    lib = ModelLibrary(models=[SomeModel, SomeModel2],
                       settings={"lazy_loading": True})
    monkeypatch.setattr(lib.assets_manager, "fetch_asset", fake_fetch_asset)
    lib.preload()

    assert fetched == 1
Example #2
def test_model_multiple_load():
    loaded = 0

    class SomeModel(Model):
        CONFIGURATIONS = {"a": {}}

        def _load(self):
            nonlocal loaded
            loaded += 1

        def _predict(self, item):
            return self.some_attribute

    class SomeModel2(Model):
        CONFIGURATIONS = {"b": {"model_dependencies": {"a"}}}

        def _load(self):
            self.some_attribute = "OK"

        def _predict(self, item):
            return self.some_attribute

    lib = ModelLibrary(models=[SomeModel, SomeModel2])
    lib.get("b")
    lib.get("a")
    assert loaded == 1
Example #3
def test_compose_sync_async_generator_fail():
    class SomeAsyncModel(AsyncModel):
        CONFIGURATIONS = {"async_model": {}}

        async def _predict(self, item, **kwargs):
            await asyncio.sleep(0)
            return item

        async def close(self):
            await asyncio.sleep(0)

    class ComposedModel(Model):
        CONFIGURATIONS = {
            "composed_model": {
                "model_dependencies": {"async_model"}
            }
        }

        def _predict(self, item, **kwargs):
            # The following does not currently work, because AsyncToSync does not
            # seem to correctly wrap asynchronous generators
            for r in AsyncToSync(
                self.model_dependencies["async_model"].async_model.predict_gen
            )(iter((item,))):
                break
            return r

    library = ModelLibrary(models=[SomeAsyncModel, ComposedModel])
    m = library.get("composed_model")
    assert isinstance(m.model_dependencies["async_model"], WrappedAsyncModel)
    with pytest.raises(TypeError):
        # raises
        # TypeError: object async_generator can't be used in 'await' expression
        assert m.predict({"hello": "world"}) == {"hello": "world"}

    library.close()
Example #4
def test_rename_dependencies():
    class SomeModel(Model):
        CONFIGURATIONS = {"ok": {}}

        def _predict(self, item):
            return self.configuration_key

    class SomeModel2(Model):
        CONFIGURATIONS = {"boomer": {}}

        def _predict(self, item):
            return self.configuration_key

    class FinalModel(Model):
        CONFIGURATIONS = {
            "model_no_rename": {
                "model_dependencies": {"ok"},
            },
            "model_rename": {
                "model_dependencies": {
                    "ok": "boomer"
                },
            },
        }

        def _predict(self, item):
            return self.model_dependencies["ok"](item)

    lib = ModelLibrary(models=[SomeModel, SomeModel2, FinalModel])
    assert lib.get("model_no_rename")({}) == "ok"
    assert lib.get("model_rename")({}) == "boomer"
Example #5
def test_modellibrary_no_models(monkeypatch):
    monkeypatch.setenv("modelkit_MODELS", "")
    p = ModelLibrary(models=None)
    assert p.configuration == {}
    assert p.required_models == {}

    with pytest.raises(errors.ModelsNotFound):
        # model does not exist
        p.get("some_model")
Example #6
def test_override_asset():
    class TestModel(Model):
        def _load(self):
            pass

        def _predict(self, item, **kwargs):
            return self.asset_path

    class TestDepModel(Model):
        def _predict(self, item, **kwargs):
            return "dep" + self.asset_path

    config = {
        "some_asset":
        ModelConfiguration(
            model_type=TestModel,
            asset="asset/that/does/not/exist",
            model_dependencies={"dep_model"},
        ),
        "dep_model":
        ModelConfiguration(model_type=TestDepModel),
    }
    # The asset does not exist
    with pytest.raises(Exception):
        model_library = ModelLibrary(required_models=["some_asset"],
                                     configuration=config)

    # It does when overridden
    model_library = ModelLibrary(
        required_models={"some_asset": {
            "asset_path": "/the/path"
        }},
        configuration=config,
    )
    model = model_library.get("some_asset")
    assert "/the/path" == model({})

    # Dependent models are loaded properly
    model = model_library.get("dep_model")
    assert "dep" == model({})

    # Finally, it is possible to also specify
    # an asset for the dependent model
    config["dep_model"] = ModelConfiguration(model_type=TestDepModel,
                                             asset="cat/someasset")
    model_library = ModelLibrary(
        required_models={
            "some_asset": {
                "asset_path": "/the/path"
            },
            "dep_model": {
                "asset_path": "/the/dep/path"
            },
        },
        configuration=config,
    )
    # Dependent models are loaded properly
    model = model_library.get("dep_model")
    assert "dep/the/dep/path" == model({})
Example #7
def test_model_sub_class(working_dir, monkeypatch):
    monkeypatch.setenv("MODELKIT_ASSETS_DIR", working_dir)
    with open(os.path.join(working_dir, "something.txt"), "w") as f:
        f.write("OK")

    class BaseAsset(Asset):
        def _load(self):
            assert self.asset_path

    class DerivedAsset(BaseAsset):
        CONFIGURATIONS = {"derived": {"asset": "something.txt"}}

        def _predict(self, item):
            return item

    # Abstract models are not loaded even when explicitly requesting them
    lib = ModelLibrary(models=[DerivedAsset, BaseAsset])
    lib.preload()
    assert ["derived"] == list(lib.models.keys())

    # Abstract models are ignored when walking through modules
    lib = ModelLibrary(models=testmodels)
    lib.preload()
    assert ["derived_asset",
            "derived_model"] == sorted(list(lib.models.keys()))
Example #8
def test_deploy_tf_models(monkeypatch):
    class DummyTFModel(TensorflowModel):
        CONFIGURATIONS = {
            "dummy_tf_model": {
                "asset": "dummy_tf_model:0.0",
                "model_settings": {
                    "output_dtypes": {
                        "lambda": np.float32
                    },
                    "output_tensor_mapping": {
                        "lambda": "nothing"
                    },
                    "output_shapes": {
                        "lambda": (3, 2, 1)
                    },
                },
            }
        }

    with pytest.raises(ValueError):
        lib = ModelLibrary(models=[DummyTFModel],
                           settings={"lazy_loading": True})
        deploy_tf_models(lib, "remote", "remote")

    ref = testing.ReferenceText(
        os.path.join(TEST_DIR, "testdata", "tf_configs"))
    with tempfile.TemporaryDirectory() as tmp_dir:
        monkeypatch.setenv("MODELKIT_ASSETS_DIR", tmp_dir)
        monkeypatch.setenv("MODELKIT_STORAGE_BUCKET", TEST_DIR)
        monkeypatch.setenv("MODELKIT_STORAGE_PREFIX", "testdata")
        monkeypatch.setenv("MODELKIT_STORAGE_PROVIDER", "local")

        shutil.copytree(os.path.join(TEST_DIR, "testdata"),
                        os.path.join(tmp_dir, "testdata"))
        os.makedirs(
            os.path.join(tmp_dir, "testdata", "dummy_tf_model_sub", "0.0"))
        lib = ModelLibrary(models=[DummyTFModel],
                           settings={"lazy_loading": True})
        deploy_tf_models(lib, "local-docker", "local-docker")
        with open(os.path.join(tmp_dir, "local-docker.config")) as f:
            ref.assert_equal("local-docker.config", f.read())

        deploy_tf_models(lib, "remote", "remote")
        with open(os.path.join(tmp_dir, "remote.config")) as f:
            config_data = f.read().replace(TEST_DIR, "STORAGE_BUCKET")
            ref.assert_equal("remote.config", config_data)

        # local process mode depends on the tmp dir above and the platform
        # hence it cannot be tested reliably
        deploy_tf_models(lib, "local-process", "local-process")
Example #9
    def __init__(
        self,
        # ModelLibrary arguments
        settings: Optional[Union[Dict, LibrarySettings]] = None,
        assetsmanager_settings: Optional[dict] = None,
        configuration: Optional[Dict[str, Union[Dict[str, Any],
                                                ModelConfiguration]]] = None,
        models: Optional[LibraryModelsType] = None,
        required_models: Optional[Union[List[str], Dict[str, Any]]] = None,
        # APIRouter arguments
        **kwargs,
    ) -> None:
        # add custom startup/shutdown events
        on_startup = kwargs.pop("on_startup", [])
        # on_startup.append(self._on_startup)
        kwargs["on_startup"] = on_startup
        on_shutdown = kwargs.pop("on_shutdown", [])
        on_shutdown.append(self._on_shutdown)
        kwargs["on_shutdown"] = on_shutdown
        super().__init__(**kwargs)

        self.lib = ModelLibrary(
            required_models=required_models,
            settings=settings,
            assetsmanager_settings=assetsmanager_settings,
            configuration=configuration,
            models=models,
        )
Example #10
async def test_distant_http_model(
    item, params, expected, run_mocked_service, event_loop
):
    async_model_settings = {
        "endpoint": "http://127.0.0.1:8000/api/path/endpoint",
        "async_mode": True,
    }
    sync_model_settings = {
        "endpoint": "http://127.0.0.1:8000/api/path/endpoint",
        "async_mode": False,
    }

    class SomeDistantHTTPModel(DistantHTTPModel):
        CONFIGURATIONS = {
            "some_model_sync": {"model_settings": sync_model_settings},
        }

    class SomeAsyncDistantHTTPModel(AsyncDistantHTTPModel):
        CONFIGURATIONS = {"some_model_async": {"model_settings": async_model_settings}}

    lib_without_params = ModelLibrary(
        models=[SomeDistantHTTPModel, SomeAsyncDistantHTTPModel]
    )
    lib_with_params = ModelLibrary(
        models=[SomeDistantHTTPModel, SomeAsyncDistantHTTPModel],
        configuration={
            "some_model_sync": {
                "model_settings": {**params, **sync_model_settings},
            },
            "some_model_async": {"model_settings": {**params, **async_model_settings}},
        },
    )
    for lib in [lib_without_params, lib_with_params]:
        # Test with asynchronous mode
        m = lib.get("some_model_async")
        with pytest.raises(AssertionError):
            assert expected == m(item, endpoint_params=params)

        res = await m.predict(item, endpoint_params=params)
        assert expected == res
        await lib.aclose()

        # Test with synchronous mode
        m = lib.get("some_model_sync")
        assert expected == m(item, endpoint_params=params)
Example #11
def test_required_models():
    class SomeModel(Model):
        CONFIGURATIONS = {"model": {}}

        def _predict(self, item):
            return item

    class SomeOtherModel(Model):
        CONFIGURATIONS = {"other_model": {}}

        def _predict(self, item):
            return item

    lib = ModelLibrary(required_models=[], models=[SomeModel, SomeOtherModel])
    assert len(lib.models) == 0
    assert lib.required_models == {}

    lib = ModelLibrary(models=[SomeModel, SomeOtherModel])
    assert len(lib.models) == 2
    assert lib.required_models == {"model": {}, "other_model": {}}
Example #12
def test_modellibrary_error_in_load(error):
    class SomeModel(Model):
        CONFIGURATIONS = {"model": {}}

        def _load(self):
            raise error

        def _predict(self, item):
            return item

    library = ModelLibrary(
        models=SomeModel,
        settings={"lazy_loading": True},
    )

    try:
        library.get("model")
        assert False
    except error as err:
        assert "not loaded" not in str(err)
Example #13
def fixture_function(request):
    if necessary_fixtures:
        for fixture_name in necessary_fixtures:
            request.getfixturevalue(fixture_name)
    return ModelLibrary(
        settings=settings,
        assetsmanager_settings=assetsmanager_settings,
        configuration=configuration,
        models=models,
        required_models=required_models,
    )
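
The snippet above references names (settings, configuration, models, required_models, necessary_fixtures) that are not defined locally, so it reads like the inner function of a pytest fixture factory that closes over them. A hypothetical sketch of such a factory (the name make_modellibrary_fixture and its signature are assumptions for illustration):

import pytest

from modelkit.core.library import ModelLibrary


def make_modellibrary_fixture(
    settings=None,
    assetsmanager_settings=None,
    configuration=None,
    models=None,
    required_models=None,
    necessary_fixtures=None,
    scope="module",
):
    # Build a pytest fixture that resolves any prerequisite fixtures first,
    # then returns a configured ModelLibrary.
    @pytest.fixture(scope=scope)
    def fixture_function(request):
        if necessary_fixtures:
            for fixture_name in necessary_fixtures:
                request.getfixturevalue(fixture_name)
        return ModelLibrary(
            settings=settings,
            assetsmanager_settings=assetsmanager_settings,
            configuration=configuration,
            models=models,
            required_models=required_models,
        )

    return fixture_function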
Example #14
async def test_compose_async_sync_async(event_loop):
    class SomeAsyncModel(AsyncModel):
        CONFIGURATIONS = {"async_model": {}}

        async def _predict(self, item):
            await asyncio.sleep(0)
            return item

    class ComposedModel(Model):
        CONFIGURATIONS = {
            "composed_model": {
                "model_dependencies": {"async_model"}
            }
        }

        def _predict(self, item):
            return self.model_dependencies["async_model"].predict(item)

    class SomeAsyncComposedModel(AsyncModel):
        CONFIGURATIONS = {
            "async_composed_model": {
                "model_dependencies": {"composed_model"}
            }
        }

        async def _predict(self, item):
            await asyncio.sleep(0)
            return await self.model_dependencies["composed_model"].predict(item)

    library = ModelLibrary(
        models=[SomeAsyncComposedModel, SomeAsyncModel, ComposedModel])
    m = library.get("async_composed_model")
    res = await m.predict({"hello": "world"})
    assert res == {"hello": "world"}
    async for res in m.predict_gen(iter(({"hello": "world"}, ))):
        assert res == {"hello": "world"}
    res = await m.predict_batch([{"hello": "world"}])
    assert res == [{"hello": "world"}]
    await library.aclose()
Example #15
def test_model_dependencies_bad_get():
    class SomeModel(Model):
        CONFIGURATIONS = {"some_model": {}}

        def _load(self):
            self.some_attribute = "OK"

        def _predict(self, item):
            return self.some_attribute

    class SomeModelDep(Model):
        CONFIGURATIONS = {
            "some_model_dep": {
                "model_dependencies": {"some_model"}
            }
        }

        def _load(self):
            dependencies = [x for x in self.model_dependencies]
            assert dependencies == ["some_model"]

            assert len([x for x in self.model_dependencies.values()])
            assert len([x for x in self.model_dependencies.items()])
            assert len([x for x in self.model_dependencies.keys()])

            assert len(self.model_dependencies) == 1

            self.some_attribute = self.model_dependencies.get(
                "some_model", SomeModel).some_attribute

            with pytest.raises(ValueError):
                self.model_dependencies.get("some_model",
                                            SomeModelDep).some_attribute

        def _predict(self, item):
            return item

    lib = ModelLibrary(models=[SomeModel, SomeModelDep],
                       required_models=["some_model_dep"])
    lib.get("some_model_dep")
Example #16
def test_modellibrary_required_models():
    class SomeModel(Model):
        CONFIGURATIONS = {"yolo": {}, "les simpsons": {}}

        def _predict(self, item):
            return item

    p = ModelLibrary(models=SomeModel)
    m = p.get("yolo")
    assert m
    assert m.configuration_key == "yolo"
    assert m.__class__.__name__ == "SomeModel"
    assert m.model_settings == {}
    assert m.asset_path == ""
    assert m.batch_size is None

    class SomeOtherModel(Model):
        pass

    with pytest.raises(ValueError):
        # "yolo" exists, but it is not an instance of SomeOtherModel
        p.get("yolo", model_type=SomeOtherModel)
Example #17
def test_lazy_loading_dependencies():
    class Model0(Asset):
        CONFIGURATIONS = {"model0": {}}

        def _load(self):
            self.some_attribute = "ok"

    class Model1(Model):
        CONFIGURATIONS = {"model1": {"model_dependencies": {"model0"}}}

        def _load(self):
            self.some_attribute = self.model_dependencies[
                "model0"].some_attribute

        def _predict(self, item):
            return self.some_attribute

    p = ModelLibrary(models=[Model1, Model0], settings={"lazy_loading": True})
    m = p.get("model1")
    assert m({}) == "ok"
    assert m.model_dependencies["model0"].some_attribute == "ok"
    assert m.some_attribute == "ok"
Example #18
def test_override_assets_dir(assetsmanager_settings):
    class TestModel(Model):
        def _predict(self, item, **kwargs):
            return self.asset_path

    model_library = ModelLibrary(
        required_models=["my_model", "my_override_model"],
        configuration={
            "my_model":
            ModelConfiguration(model_type=TestModel, asset="category/asset"),
            "my_override_model":
            ModelConfiguration(model_type=TestModel,
                               asset="category/override-asset"),
        },
        assetsmanager_settings=assetsmanager_settings,
    )

    prediction = model_library.get("my_model").predict({})
    assert prediction.endswith(os.path.join("category", "asset", "1.0"))

    prediction = model_library.get("my_override_model").predict({})
    assert prediction.endswith(
        os.path.join("category", "override-asset", "0.0"))

    model_library_override = ModelLibrary(
        required_models=["my_model", "my_override_model"],
        configuration={
            "my_model":
            ModelConfiguration(model_type=TestModel, asset="category/asset"),
            "my_override_model":
            ModelConfiguration(model_type=TestModel,
                               asset="category/override-asset"),
        },
        settings={
            "override_assets_dir":
            os.path.join(TEST_DIR, "testdata", "override-assets-dir"),
            "lazy_loading":
            True,
        },
        assetsmanager_settings=assetsmanager_settings,
    )

    prediction = model_library_override.get("my_model").predict({})
    assert prediction.endswith(os.path.join("category", "asset", "1.0"))

    prediction = model_library_override.get("my_override_model").predict({})
    assert prediction.endswith(
        os.path.join("category", "override-asset", "0.0"))
Example #19
def test_compose_sync_async():
    class SomeAsyncModel(AsyncModel):
        CONFIGURATIONS = {"async_model": {}}

        async def _predict(self, item, **kwargs):
            await asyncio.sleep(0)
            return item

    class ComposedModel(Model):
        CONFIGURATIONS = {
            "composed_model": {
                "model_dependencies": {"async_model"}
            }
        }

        def _predict(self, item, **kwargs):
            self.model_dependencies["async_model"].predict_batch([item])
            return self.model_dependencies["async_model"].predict(item)

    library = ModelLibrary(models=[SomeAsyncModel, ComposedModel])
    m = library.get("composed_model")
    assert isinstance(m.model_dependencies["async_model"], WrappedAsyncModel)
    assert m.predict({"hello": "world"}) == {"hello": "world"}
Example #20
def test_environment_asset_load(monkeypatch, assetsmanager_settings):
    class TestModel(Model):
        def _load(self):
            assert self.asset_path == "path/to/asset"
            self.data = {"some key": "some data"}

        def _predict(self, item, **kwargs):
            return self.data

    monkeypatch.setenv("MODELKIT_TESTS_TEST_ASSET_FILE", "path/to/asset")

    model_library = ModelLibrary(
        required_models=["some_asset"],
        configuration={
            "some_asset":
            ModelConfiguration(model_type=TestModel, asset="tests/test_asset")
        },
        assetsmanager_settings=assetsmanager_settings,
    )
    model = model_library.get("some_asset")

    predicted = model({})
    assert predicted == {"some key": "some data"}
Example #21
def test_model_library_inexistent_model():
    with pytest.raises(ConfigurationNotFoundException):
        ModelLibrary(required_models=["model_that_does_not_exist"])

    configuration = {
        "existent_model":
        ModelConfiguration(model_type=Model,
                           model_dependencies={"inexistent_model"})
    }
    with pytest.raises(ConfigurationNotFoundException):
        ModelLibrary(required_models=["existent_model"],
                     configuration=configuration)

    p = ModelLibrary(required_models=["model_that_does_not_exist"],
                     settings={"lazy_loading": True})
    with pytest.raises(ConfigurationNotFoundException):
        p.get("model_that_does_not_exist")
    with pytest.raises(ConfigurationNotFoundException):
        p.get("other_model_that_does_not_exist")
Example #22
def test_environment_asset_load_version(monkeypatch, assetsmanager_settings):
    class TestModel(Model):
        def _load(self):
            assert self.asset_path == "path/to/asset"
            self.data = {"some key": "some data"}

        def _predict(self, item, **kwargs):
            return self.data

    monkeypatch.setenv("MODELKIT_TESTS_TEST_ASSET_VERSION", "undef")

    with pytest.raises(InvalidAssetSpecError):
        ModelLibrary(
            required_models=["some_asset"],
            configuration={
                "some_asset":
                ModelConfiguration(model_type=TestModel,
                                   asset="tests/test_asset")
            },
            assetsmanager_settings=assetsmanager_settings,
        )
Example #23
def test_deploy_tf_models_no_asset():
    np = pytest.importorskip("numpy")

    class DummyTFModelNoAsset(TensorflowModel):
        CONFIGURATIONS = {
            "dummy_non_tf_model": {
                "model_settings": {
                    "output_dtypes": {
                        "lambda": np.float32
                    },
                    "output_tensor_mapping": {
                        "lambda": "nothing"
                    },
                    "output_shapes": {
                        "lambda": (3, 2, 1)
                    },
                }
            }
        }

    lib = ModelLibrary(models=DummyTFModelNoAsset,
                       settings={"lazy_loading": True})
    with pytest.raises(ValueError):
        deploy_tf_models(lib, "local-docker")
Example #24
def test_deploy_tf_models_no_tf_model():
    class DummyNonTFModel(Model):
        CONFIGURATIONS = {"dummy_non_tf_model": {}}

    lib = ModelLibrary(models=DummyNonTFModel, settings={"lazy_loading": True})
    deploy_tf_models(lib, "local-docker")
Example #25
from typing import Any, Dict

from modelkit.core.library import ModelLibrary
from modelkit.core.model import Model


class SomeModel(Model[str, str]):
    CONFIGURATIONS: Dict[str, Any] = {"dependent": {}}

    def _predict(self, item):
        return item


class SomeOtherModel(Model[str, str]):
    CONFIGURATIONS: Dict[str, Any] = {
        "something": {
            "model_dependencies": {"dependent"}
        }
    }

    def _predict(self, item):
        m = self.model_dependencies.get("dependent", SomeModel)
        res = m.predict(item)
        return res


lib = ModelLibrary(models=[SomeModel, SomeOtherModel])

m2 = lib.get("something", model_type=SomeOtherModel)
m2.predict("str")
Example #26
from typing import Any, Dict

from modelkit.core.library import ModelLibrary
from modelkit.core.model import Model


class SomeModelNoOtherFun(Model):
    CONFIGURATIONS: Dict[str, Any] = {"something2": {}}


lib = ModelLibrary(models=SomeModelNoOtherFun)

m_get = lib.get("something2", model_type=SomeModelNoOtherFun)
# SomeModelNoOtherFun does not define this method, so this call is expected to
# fail; the snippet reads like a deliberate negative example.
m_get.do_something_model_does_not()
Example #27
def test_describe(monkeypatch):
    monkeypatch.setenv(
        "MODELKIT_ASSETS_DIR", os.path.join(TEST_DIR, "testdata", "test-bucket")
    )

    class SomeSimpleValidatedModelWithAsset(Model[str, str]):
        """
        This is a summary

        that also has plenty more text
        """

        CONFIGURATIONS = {"some_model_a": {"asset": "assets-prefix"}}

        def _predict(self, item):
            return item

    class SomeSimpleValidatedModelA(Model[str, str]):
        """
        This is a summary

        that also has plenty more text
        """

        CONFIGURATIONS = {"some_model_a": {}}

        def _predict(self, item):
            return item

    class ItemModel(pydantic.BaseModel):
        string: str

    class ResultModel(pydantic.BaseModel):
        sorted: str

    class A:
        def __init__(self):
            self.x = 1
            self.y = 2

    class SomeComplexValidatedModelA(Model[ItemModel, ResultModel]):
        """
        More complex

        With **a lot** of documentation
        """

        CONFIGURATIONS = {
            "some_complex_model_a": {
                "model_dependencies": ["some_model_a"],
                "asset": os.path.join(
                    TEST_DIR,
                    "testdata",
                    "test-bucket",
                    "assets-prefix",
                    "category",
                    "asset",
                    "0.0",
                ),
                "model_settings": {"batch_size": 128},
            }
        }

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.some_object = A()

        def _predict(self, item):
            return item

    # test without a console and no models
    library = ModelLibrary()
    library.describe()

    # test with assets
    library = ModelLibrary(
        models=[
            SomeSimpleValidatedModelA,
            SomeSimpleValidatedModelWithAsset,
            SomeComplexValidatedModelA,
        ]
    )
    library.describe()

    # test with models but not assets
    library = ModelLibrary(
        models=[SomeSimpleValidatedModelA, SomeComplexValidatedModelA]
    )
    console = Console()

    with console.capture() as capture:
        library.describe(console=console)

    if platform.system() != "Windows":
        # Output is different on Windows platforms since
        # modelkit.utils.memory cannot track memory increment
        # and write it
        r = ReferenceText(os.path.join(TEST_DIR, "testdata"))
        captured = capture.get()
        EXCLUDED = ["load time", "load memory", "asset", "category/asset", os.path.sep]
        captured = "\n".join(
            line
            for line in captured.split("\n")
            if not any(x in line for x in EXCLUDED)
        )
        r.assert_equal("library_describe.txt", captured)
Example #28
from typing import Any, Dict

from modelkit.core.library import ModelLibrary
from modelkit.core.model import Model


class SomeModel(Model):
    CONFIGURATIONS: Dict[str, Any] = {"something": {}}

    def do_something_model_does_not(self):
        return True


lib = ModelLibrary(models=SomeModel)

m2 = lib.get("something", model_type=SomeModel)
m2.do_something_model_does_not()