def deploy(project_root_directory, model_key, input_shape, model_uri, host, port, db, password,
           socket_timeout, socket_connect_timeout, socket_keepalive, socket_keepalive_options,
           connection_pool, unix_socket_path, encoding, encoding_errors, charset, errors,
           decode_responses, retry_on_timeout, ssl, ssl_keyfile, ssl_certfile, ssl_cert_reqs,
           ssl_ca_certs, max_connections, backend):
    """Trace a logged PyTorch MLflow model and store the TorchScript blob in Redis.

    Downloads the artifact referenced by ``model_uri``, registers any model code
    on ``sys.path``, warns on a PyTorch version mismatch, traces the model with
    ``torch.jit.trace`` using a ones-tensor of shape ``input_shape``, serializes
    the traced program, and hands the bytes to ``set_redismodel``.

    :param project_root_directory: Directory appended to ``sys.path`` so project
        model code can be imported while loading the model.
    :param model_key: Redis key under which the traced model blob is stored.
    :param input_shape: Whitespace-separated integer dimensions of the example
        input used for tracing, e.g. ``"1 3 224 224"``.
    :param model_uri: MLflow model URI of the logged PyTorch model.
    :param backend: ``'CPU'`` selects the CPU device; any other value selects CUDA.

    The remaining parameters are forwarded verbatim to ``redis.StrictRedis``.
    """
    # Pass the connection settings straight through — the old code wrapped them
    # in dict(...) only to immediately unpack it with **, which added nothing.
    redis_client = redis.StrictRedis(
        host=host,
        port=port,
        db=db,
        password=password,
        socket_timeout=socket_timeout,
        socket_connect_timeout=socket_connect_timeout,
        socket_keepalive=socket_keepalive,
        socket_keepalive_options=socket_keepalive_options,
        connection_pool=connection_pool,
        unix_socket_path=unix_socket_path,
        encoding=encoding,
        encoding_errors=encoding_errors,
        charset=charset,
        errors=errors,
        decode_responses=decode_responses,
        retry_on_timeout=retry_on_timeout,
        ssl=ssl,
        ssl_keyfile=ssl_keyfile,
        ssl_certfile=ssl_certfile,
        ssl_cert_reqs=ssl_cert_reqs,
        ssl_ca_certs=ssl_ca_certs,
        max_connections=max_connections,
    )
    device = 'cpu' if backend == 'CPU' else 'cuda'

    model_path = _download_artifact_from_uri(model_uri)
    pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
    code_subpath = pyfunc_conf.get(pyfunc.CODE)
    if code_subpath is not None:
        pyfunc_utils._add_code_to_system_path(
            code_path=os.path.join(model_path, code_subpath))

    pytorch_conf = _get_flavor_configuration(model_path=model_path, flavor_name=FLAVOR_NAME)
    if torch.__version__ != pytorch_conf["pytorch_version"]:
        _logger.warning(
            "Stored model version '%s' does not match installed PyTorch version '%s'",
            pytorch_conf["pytorch_version"], torch.__version__)

    sys.path.append(project_root_directory)
    # NOTE(review): the model is loaded from the remote URI even though the
    # artifact was already downloaded to ``model_path`` above — confirm whether
    # the second fetch is intentional before changing it.
    model = mlflow.pytorch._load_model(path=model_uri + '/data').to(device)

    # str.split() with no argument tolerates repeated/leading whitespace; the
    # previous split(' ') yielded empty fields there and int('') raised ValueError.
    x = torch.ones([int(dim) for dim in input_shape.split()]).to(device)
    traced_net = torch.jit.trace(model, x)

    blob_stream = io.BytesIO()
    torch.jit.save(traced_net, blob_stream)
    blob_stream.seek(0)
    set_redismodel(redis_client, blob_stream.read(), model_key, backend)
def load_model(model_uri, **kwargs):
    """Load a PyTorch model from a local file or a run.

    :param model_uri: The location, in URI format, of the MLflow model, for example:

                      - ``/Users/me/path/to/local/model``
                      - ``relative/path/to/local/model``
                      - ``s3://my_bucket/path/to/model``
                      - ``runs:/<mlflow_run_id>/run-relative/path/to/model``
                      - ``models:/<model_name>/<model_version>``
                      - ``models:/<model_name>/<stage>``

                      For more information about supported URI schemes, see
                      `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
                      artifact-locations>`_.

    :param kwargs: kwargs to pass to ``torch.load`` method.
    :return: A PyTorch model.

    .. code-block:: python
        :caption: Example

        import torch
        import mlflow
        import mlflow.pytorch

        # Set values
        model_path_dir = ...
        run_id = "96771d893a5e46159d9f3b49bf9013e2"
        pytorch_model = mlflow.pytorch.load_model("runs:/" + run_id + "/" + model_path_dir)
        y_pred = pytorch_model(x_new_data)
    """
    import torch

    artifact_dir = _download_artifact_from_uri(artifact_uri=model_uri)

    # The pyfunc flavor is optional: if it is absent there is simply no extra
    # model code to place on sys.path.
    try:
        flavor_conf = _get_flavor_configuration(
            model_path=artifact_dir, flavor_name=pyfunc.FLAVOR_NAME
        )
    except MlflowException:
        flavor_conf = {}
    extra_code = flavor_conf.get(pyfunc.CODE)
    if extra_code is not None:
        pyfunc_utils._add_code_to_system_path(
            code_path=os.path.join(artifact_dir, extra_code)
        )

    torch_conf = _get_flavor_configuration(model_path=artifact_dir, flavor_name=FLAVOR_NAME)
    saved_version = torch_conf["pytorch_version"]
    if saved_version != torch.__version__:
        _logger.warning(
            "Stored model version '%s' does not match installed PyTorch version '%s'",
            saved_version,
            torch.__version__,
        )

    data_path = os.path.join(artifact_dir, torch_conf["model_data"])
    return _load_model(path=data_path, **kwargs)
def load_model(model_uri, **kwargs):
    """Load a PyTorch model from a local file or a run.

    :param model_uri: The location, in URI format, of the MLflow model, for example:

                      - ``/Users/me/path/to/local/model``
                      - ``relative/path/to/local/model``
                      - ``s3://my_bucket/path/to/model``
                      - ``runs:/<mlflow_run_id>/run-relative/path/to/model``

                      For more information about supported URI schemes, see the
                      `Artifacts Documentation <https://www.mlflow.org/docs/latest/tracking.html#
                      supported-artifact-stores>`_.

    :param kwargs: kwargs to pass to ``torch.load`` method.
    :return: A PyTorch model.

    >>> import torch
    >>> import mlflow
    >>> import mlflow.pytorch
    >>> # set values
    >>> run_id = "96771d893a5e46159d9f3b49bf9013e2"
    >>> model_uri = "runs:/" + run_id + "/model"
    >>> pytorch_model = mlflow.pytorch.load_model(model_uri)
    >>> y_pred = pytorch_model(x_new_data)
    """
    # Fixed docstring: the old one said "(if ``run_id`` is ``None``)" and its
    # example called load_model(model_path_dir, run_id), neither of which
    # matches this signature (leftovers from the pre-URI API).
    import torch
    local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)
    # The pyfunc flavor is optional; absence just means no model code to add
    # to sys.path.
    try:
        pyfunc_conf = _get_flavor_configuration(model_path=local_model_path,
                                                flavor_name=pyfunc.FLAVOR_NAME)
    except MlflowException:
        pyfunc_conf = {}
    code_subpath = pyfunc_conf.get(pyfunc.CODE)
    if code_subpath is not None:
        pyfunc_utils._add_code_to_system_path(
            code_path=os.path.join(local_model_path, code_subpath))
    pytorch_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)
    if torch.__version__ != pytorch_conf["pytorch_version"]:
        _logger.warning(
            "Stored model version '%s' does not match installed PyTorch version '%s'",
            pytorch_conf["pytorch_version"], torch.__version__)
    torch_model_artifacts_path = os.path.join(local_model_path, pytorch_conf['model_data'])
    return _load_model(path=torch_model_artifacts_path, **kwargs)
def load_model(path, run_id=None, **kwargs):
    """Load a PyTorch model from a local file (if ``run_id`` is ``None``) or a run.

    :param path: Local filesystem path or run-relative artifact path to the model saved
                 by :py:func:`mlflow.pytorch.log_model`.
    :param run_id: Run ID. If provided, combined with ``path`` to identify the model.
    :param kwargs: kwargs to pass to ``torch.load`` method.

    >>> import torch
    >>> import mlflow
    >>> import mlflow.pytorch
    >>> # set values
    >>> model_path_dir = ...
    >>> run_id = "96771d893a5e46159d9f3b49bf9013e2"
    >>> pytorch_model = mlflow.pytorch.load_model(model_path_dir, run_id)
    >>> y_pred = pytorch_model(x_new_data)
    """
    # Resolve a run-relative artifact path first, then normalize to absolute.
    model_dir = path
    if run_id is not None:
        model_dir = mlflow.tracking.utils._get_model_log_dir(model_name=model_dir, run_id=run_id)
    model_dir = os.path.abspath(model_dir)

    # The pyfunc flavor may be missing entirely; that only means there is no
    # extra model code to register on sys.path.
    try:
        flavor_conf = _get_flavor_configuration(model_path=model_dir,
                                                flavor_name=pyfunc.FLAVOR_NAME)
    except MlflowException:
        flavor_conf = {}
    extra_code = flavor_conf.get(pyfunc.CODE)
    if extra_code is not None:
        pyfunc_utils._add_code_to_system_path(
            code_path=os.path.join(model_dir, extra_code))

    torch_conf = _get_flavor_configuration(model_path=model_dir, flavor_name=FLAVOR_NAME)
    if torch_conf["pytorch_version"] != torch.__version__:
        _logger.warning(
            "Stored model version '%s' does not match installed PyTorch version '%s'",
            torch_conf["pytorch_version"], torch.__version__)

    return _load_model(path=os.path.join(model_dir, torch_conf['model_data']), **kwargs)
def load_model(model_uri, dst_path=None, **kwargs):
    """Load a PyTorch model from a local file or a run.

    :param model_uri: The location, in URI format, of the MLflow model, for example:

                      - ``/Users/me/path/to/local/model``
                      - ``relative/path/to/local/model``
                      - ``s3://my_bucket/path/to/model``
                      - ``runs:/<mlflow_run_id>/run-relative/path/to/model``
                      - ``models:/<model_name>/<model_version>``
                      - ``models:/<model_name>/<stage>``

                      For more information about supported URI schemes, see
                      `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
                      artifact-locations>`_.
    :param dst_path: The local filesystem path to which to download the model artifact.
                     This directory must already exist. If unspecified, a local output
                     path will be created.
    :param kwargs: kwargs to pass to ``torch.load`` method.
    :return: A PyTorch model.

    .. code-block:: python
        :caption: Example

        import torch
        import mlflow.pytorch

        # Define, train, and log a model
        with mlflow.start_run() as run:
            mlflow.pytorch.log_model(model, "model")

        # Inference after loading the logged model
        model_uri = "runs:/{}/model".format(run.info.run_id)
        loaded_model = mlflow.pytorch.load_model(model_uri)
        for x in [4.0, 6.0, 30.0]:
            X = torch.Tensor([[x]])
            y_pred = loaded_model(X)
            print("predict X: {}, y_pred: {:.2f}".format(x, y_pred.data.item()))

    .. code-block:: text
        :caption: Output

        predict X: 4.0, y_pred: 7.57
        predict X: 6.0, y_pred: 11.64
        predict X: 30.0, y_pred: 60.48
    """
    import torch

    artifact_dir = _download_artifact_from_uri(artifact_uri=model_uri, output_path=dst_path)

    # A missing pyfunc flavor is not an error — there is simply no model code
    # to place on sys.path.
    try:
        flavor_conf = _get_flavor_configuration(
            model_path=artifact_dir, flavor_name=pyfunc.FLAVOR_NAME
        )
    except MlflowException:
        flavor_conf = {}
    extra_code = flavor_conf.get(pyfunc.CODE)
    if extra_code is not None:
        pyfunc_utils._add_code_to_system_path(
            code_path=os.path.join(artifact_dir, extra_code)
        )

    torch_conf = _get_flavor_configuration(model_path=artifact_dir, flavor_name=FLAVOR_NAME)
    saved_version = torch_conf["pytorch_version"]
    if saved_version != torch.__version__:
        _logger.warning(
            "Stored model version '%s' does not match installed PyTorch version '%s'",
            saved_version,
            torch.__version__,
        )

    data_path = os.path.join(artifact_dir, torch_conf["model_data"])
    return _load_model(path=data_path, **kwargs)