Example #1
    def setup_method(self, method):
        self._logger = logger
        self._logger.info(
            f"Setting up test {self.__class__.__name__}::{method.__name__}")
        self._test_env = {}
        self._old_env = {}
        self._setup_env(self._get_env_from_file())

        self._run_db = get_run_db()

        # the dbpath is already configured on the test startup before this stage
        # so even though we set the env var, we still need to directly configure
        # it in mlconf.
        mlconf.dbpath = self._test_env["MLRUN_DBPATH"]

        set_environment(
            artifact_path="/User/data",
            project=self.project_name,
        )

        self.custom_setup()

        self._logger.info(
            f"Finished setting up test {self.__class__.__name__}::{method.__name__}"
        )
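The `_setup_env` and `_get_env_from_file` helpers are not part of this excerpt. A minimal sketch of what they could look like, assuming a plain KEY=VALUE env file (the file path is a placeholder), that `os` is imported at module level, and that `self._old_env` is later used by a matching teardown to restore the original environment:

    def _get_env_from_file(self) -> dict:
        # Assumed format: one KEY=VALUE pair per line; the path is a placeholder.
        env = {}
        with open("tests/test.env") as env_file:
            for line in env_file:
                line = line.strip()
                if line and not line.startswith("#"):
                    key, _, value = line.partition("=")
                    env[key] = value
        return env

    def _setup_env(self, env: dict):
        # Remember the previous values so a teardown can restore them, then
        # export the test values and keep them for direct lookups such as
        # self._test_env["MLRUN_DBPATH"] above.
        for key, value in env.items():
            self._old_env[key] = os.environ.get(key)
            os.environ[key] = value
        self._test_env.update(env)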
Example #2
    def _generate_runtime(self):
        # This is following the steps in
        # https://docs.mlrun.org/en/latest/runtimes/dask-mlrun.html#set-up-the-environment
        mlconf.remote_host = "http://remote_host"
        os.environ["V3IO_USERNAME"] = self.v3io_user

        mlrun.set_environment(
            project=self.project,
            access_key=self.v3io_access_key,
            artifact_path=self.artifact_path,
        )
        dask_cluster = mlrun.new_function(
            self.name,
            project=self.project,
            kind="dask",
            image=self.image_name,
        )

        dask_cluster.apply(auto_mount())

        dask_cluster.spec.min_replicas = 1
        dask_cluster.spec.max_replicas = 4

        dask_cluster.spec.remote = True
        dask_cluster.spec.service_type = "NodePort"

        return dask_cluster
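For context, the MLRun Dask guide referenced in the comment deploys such a runtime simply by asking it for a client. A minimal usage sketch for the returned function (the function name and image below are placeholders, not from the original):

import mlrun
from mlrun.platforms import auto_mount

# Build a remote Dask runtime roughly as in the example above.
dask_cluster = mlrun.new_function("my-dask", kind="dask", image="mlrun/ml-base")
dask_cluster.apply(auto_mount())
dask_cluster.spec.remote = True

# Accessing .client deploys the cluster (if it is not running yet) and returns
# a dask.distributed.Client connected to the remote scheduler.
client = dask_cluster.client
print(client)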
Example #3
import os

import v3io.dataplane
from dotenv import load_dotenv
from mlrun import code_to_function, set_environment
from mlrun.platforms import mount_v3io


def main():
    # Load environment variables
    load_dotenv()

    # Setup project
    project_name, artifact_path = set_environment(
        project="remote-model-deployment",
        artifact_path=os.getenv("MLRUN_ARTIFACT_PATH"),
        api_path=os.getenv("MLRUN_DBPATH"),
        access_key=os.getenv("V3IO_ACCESS_KEY"),
    )
    print(f"Creating project '{project_name}'")

    # Push assets to V3IO
    v3io_client = v3io.dataplane.Client()
    push_to_v3io(v3io_client, "assets/model.pkl", "nick/tmp/model.pkl")

    # Create MLRun function
    serving_fn = code_to_function(
        name="serving",
        kind="serving",
        image="mlrun/mlrun",
        filename="assets/model_server.py",
    ).apply(mount_v3io())
    print(f"Creating function '{serving_fn.metadata.name}'")

    # Configure MLRun function
    serving_fn.spec.default_class = "ClassifierModel"
    serving_fn.add_model("my_model", model_path="/User/tmp/model.pkl")

    # Deploy
    addr = serving_fn.deploy()

    # Test model inference
    test_inference(addr)
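`push_to_v3io` and `test_inference` are local helpers that are not shown here. A rough sketch of what they might do, assuming the v3io-py object API and the standard MLRun serving v2 inference route (the container name and sample payload are assumptions):

import requests


def push_to_v3io(v3io_client, local_path, remote_path, container="users"):
    # Upload a local file into the V3IO data layer so the serving function
    # can read it through the mounted /User path.
    with open(local_path, "rb") as model_file:
        v3io_client.object.put(
            container=container, path=remote_path, body=model_file.read()
        )


def test_inference(addr):
    # Call the deployed serving function over HTTP; MLRun serving exposes
    # models under /v2/models/<model-name>/infer.
    resp = requests.post(
        f"{addr}/v2/models/my_model/infer",
        json={"inputs": [[5.1, 3.5, 1.4, 0.2]]},
    )
    print(resp.status_code, resp.json())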
Example #4
import os

import yaml
import mlrun.feature_store as fs
from mlrun import set_environment


def main():
    # Load config
    with open("config.yaml") as f:
        config = yaml.safe_load(f)

    # Set remote credentials
    os.environ["MLRUN_DBPATH"] = config["MLRUN_DBPATH"]
    os.environ["MLRUN_ARTIFACT_PATH"] = config["MLRUN_ARTIFACT_PATH"]
    os.environ["V3IO_USERNAME"] = config["V3IO_USERNAME"]
    os.environ["V3IO_API"] = config["V3IO_API"]
    os.environ["V3IO_ACCESS_KEY"] = config["V3IO_ACCESS_KEY"]

    # Setup project
    project_name, artifact_path = set_environment(
        project="remote-model-deployment",
        artifact_path=os.getenv("MLRUN_ARTIFACT_PATH"),
        api_path=os.getenv("MLRUN_DBPATH"),
        access_key=os.getenv("V3IO_ACCESS_KEY"),
    )
    print(f"Setting project environment for '{project_name}'")

    # Read the offline feature vector back and preview the first rows
    vector = fs.get_offline_features("level-3-demo/heart-disease-train")
    print(vector.to_dataframe().head())