Code example #1
 def predict(self, model_uri, input_path, output_path, content_type,
             json_format):
     """
     Generate predictions using a generic Python model saved with MLflow.
     Return the prediction results as JSON.
     """
     with TempDir() as tmp:
         local_path = _download_artifact_from_uri(model_uri,
                                                  output_path=tmp.path())
         if not self._no_conda and ENV in self._config:
             conda_env_path = os.path.join(local_path, self._config[ENV])
             # NOTE: We invoke main in the pyfunc scoring server that belongs to the
             # current conda environment. The model environment may contain a version of
             # mlflow that differs from the one in the currently active environment; this
             # is intended. The scoring server must stay consistent with the outer mlflow
             # even though the model being loaded may depend on a different mlflow
             # version. The hope is that the scoring server is self-contained enough not
             # to have external mlflow dependencies that would be incompatible across
             # mlflow versions.
             if input_path is None:
                 input_path = "__stdin__"
             if output_path is None:
                 output_path = "__stdout__"
             command = "python {0} predict {1} {2} {3} {4} {5}".format(
                 scoring_server.__file__, shlex_quote(local_path),
                 shlex_quote(input_path), shlex_quote(output_path),
                 content_type, json_format)
             return scoring_server._execute_in_conda_env(
                 conda_env_path, command)
         else:
             scoring_server._predict(local_path, input_path, output_path,
                                     content_type, json_format)
Code example #2
 def predict(
     self,
     model_uri,
     input_path,
     output_path,
     content_type,
     json_format,
 ):
     """
     Generate predictions using a generic Python model saved with MLflow.
     Return the prediction results as JSON.
     """
     local_path = _download_artifact_from_uri(model_uri)
     # NB: Absolute Windows paths do not work with mlflow APIs; use a file URI to
     # ensure platform compatibility.
     local_uri = path_to_local_file_uri(local_path)
     if not self._no_conda and ENV in self._config:
         conda_env_path = os.path.join(local_path, self._config[ENV])
         command = (
             'python -c "from mlflow.pyfunc.scoring_server import _predict; _predict('
             'model_uri={model_uri}, '
             'input_path={input_path}, '
             'output_path={output_path}, '
             'content_type={content_type}, '
             'json_format={json_format})"').format(
                 model_uri=repr(local_uri),
                 input_path=repr(input_path),
                 output_path=repr(output_path),
                 content_type=repr(content_type),
                 json_format=repr(json_format))
         return _execute_in_conda_env(conda_env_path, command,
                                      self._install_mlflow)
     else:
         scoring_server._predict(local_uri, input_path, output_path,
                                 content_type, json_format)
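The repr() calls above are what make this inlining safe: repr of a Python string yields a valid quoted string literal, and repr(None) round-trips as well, so each argument survives being embedded in the python -c payload. A quick illustration:

 # Illustration: repr() renders each argument as a quoted Python literal.
 >>> "model_uri={model_uri}".format(model_uri=repr("file:///tmp/model"))
 "model_uri='file:///tmp/model'"
 >>> repr(None)  # a None input_path/output_path is inlined verbatim
 'None'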
Code example #3
File: backend.py  Project: bkbonde/mlflow
 def predict(self, model_uri, input_path, output_path, content_type,
             json_format):
     """
     Generate predictions using a generic Python model saved with MLflow.
     Return the prediction results as JSON.
     """
     local_path = _download_artifact_from_uri(model_uri)
     # NB: Absolute Windows paths do not work with mlflow APIs; use a file URI to
     # ensure platform compatibility.
     local_uri = path_to_local_file_uri(local_path)
     command = (
         'python -c "from mlflow.pyfunc.scoring_server import _predict; _predict('
         "model_uri={model_uri}, "
         "input_path={input_path}, "
         "output_path={output_path}, "
         "content_type={content_type}, "
         'json_format={json_format})"').format(
             model_uri=repr(local_uri),
             input_path=repr(input_path),
             output_path=repr(output_path),
             content_type=repr(content_type),
             json_format=repr(json_format),
         )
     if self._env_manager == _EnvManager.CONDA and ENV in self._config:
         conda_env_path = os.path.join(local_path, self._config[ENV])
         conda_env_name = get_or_create_conda_env(
             conda_env_path,
             env_id=self._env_id,
             capture_output=False,
             env_root_dir=self._env_root_dir,
         )
         return _execute_in_conda_env(conda_env_name,
                                      command,
                                      self._install_mlflow,
                                      env_root_dir=self._env_root_dir)
     elif self._env_manager == _EnvManager.VIRTUALENV:
         activate_cmd = _get_or_create_virtualenv(
             local_path, self._env_id, env_root_dir=self._env_root_dir)
         return _execute_in_virtualenv(activate_cmd,
                                       command,
                                       self._install_mlflow,
                                       env_root_dir=self._env_root_dir)
     else:
         scoring_server._predict(local_uri, input_path, output_path,
                                 content_type, json_format)
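In recent mlflow releases this method is reached through the mlflow models predict CLI, which selects one of the branches above via --env-manager. A hedged sketch of that invocation from Python (flag names follow recent mlflow releases; verify them against your installed version):

 # Hedged sketch: invoking the CLI that routes into this backend method.
 import subprocess

 subprocess.run(
     [
         "mlflow", "models", "predict",
         "--model-uri", "runs:/<run_id>/model",
         "--input-path", "input.json",
         "--output-path", "output.json",
         "--content-type", "json",
         "--env-manager", "virtualenv",  # or "conda" / "local"
     ],
     check=True,
 )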