def serve(model_uri, port, host, no_conda):
    """
    Serve a pyfunc model saved with MLflow by launching a webserver on the
    specified host and port. For information about the input data formats
    accepted by the webserver, see the following documentation:
    https://www.mlflow.org/docs/latest/models.html#pyfunc-deployment.
    """
    # Materialize the model locally so the scoring server can load it.
    local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)
    model_env_file = _load_model_env(path=local_model_path)
    # Unless conda is explicitly disabled, re-exec this command inside the
    # conda environment declared by the model (if it declared one).
    use_conda = model_env_file is not None and not no_conda
    if use_conda:
        return _rerun_in_conda(os.path.join(local_model_path, model_env_file))
    app = scoring_server.init(load_pyfunc(local_model_path))
    app.run(port=port, host=host)
def serve(model_path, run_id, port, host, no_conda):
    """
    Serve a PythonFunction model saved with MLflow.

    If a ``run_id`` is specified, ``model-path`` is treated as an artifact
    path within that run; otherwise it is treated as a local path.
    """
    # Resolve a run-relative artifact path to a concrete local directory.
    resolved_path = _get_model_log_dir(model_path, run_id) if run_id else model_path
    env_file = _load_model_env(resolved_path)
    # Re-launch inside the model's declared conda environment unless the
    # caller opted out with --no-conda.
    if env_file is not None and not no_conda:
        return _rerun_in_conda(os.path.join(resolved_path, env_file))
    app = scoring_server.init(load_pyfunc(resolved_path))
    app.run(port=port, host=host)
def predict(model_uri, input_path, output_path, no_conda):
    """
    Load a pandas DataFrame and run a python_function model saved with MLflow
    against it. Write the prediction results as a CSV-formatted pandas
    DataFrame to ``output_path``, or to stdout when no output path is given.
    """
    local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)
    model_env_file = _load_model_env(path=local_model_path)
    # Unless conda is explicitly disabled, re-exec this command inside the
    # conda environment declared by the model (if it declared one).
    if not no_conda and model_env_file is not None:
        conda_env_path = os.path.join(local_model_path, model_env_file)
        return _rerun_in_conda(conda_env_path)
    model = load_pyfunc(local_model_path)
    df = pandas.read_csv(input_path)
    result = model.predict(df)
    # Fix: the output file was previously opened without ever being closed,
    # which can leave the CSV truncated if buffered data is never flushed.
    # A context manager guarantees the handle is closed; stdout is left open.
    if output_path:
        with open(output_path, 'w') as out_stream:
            pandas.DataFrame(data=result).to_csv(out_stream, header=False, index=False)
    else:
        pandas.DataFrame(data=result).to_csv(sys.stdout, header=False, index=False)
def serve(model_path, run_id, port, host, no_conda):
    """
    Serve a pyfunc model saved with MLflow by launching a webserver on the
    specified host and port. For information about the input data formats
    accepted by the webserver, see the following documentation:
    https://www.mlflow.org/docs/latest/models.html#pyfunc-deployment. If a
    ``run_id`` is specified, ``model-path`` is treated as an artifact path
    within that run; otherwise it is treated as a local path.
    """
    # A run id means model_path is run-relative; resolve it to a local dir.
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)
    env_file = _load_model_env(model_path)
    use_conda = not no_conda and env_file is not None
    if use_conda:
        # Hand off to a fresh process running inside the model's conda env.
        return _rerun_in_conda(os.path.join(model_path, env_file))
    pyfunc_model = load_pyfunc(model_path)
    scoring_server.init(pyfunc_model).run(port=port, host=host)
def predict(model_path, run_id, input_path, output_path, no_conda):
    """
    Load a pandas DataFrame and run a python_function model saved with MLflow
    against it. Write the prediction results as a CSV-formatted pandas
    DataFrame to ``output_path``, or to stdout when no output path is given.
    If a ``run-id`` is specified, ``model-path`` is treated as an artifact
    path within that run; otherwise it is treated as a local path.
    """
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)
    model_env_file = _load_model_env(model_path)
    # Unless conda is explicitly disabled, re-exec this command inside the
    # conda environment declared by the model (if it declared one).
    if not no_conda and model_env_file is not None:
        conda_env_path = os.path.join(model_path, model_env_file)
        return _rerun_in_conda(conda_env_path)
    model = load_pyfunc(model_path)
    df = pandas.read_csv(input_path)
    result = model.predict(df)
    # Fix: the output file was previously opened without ever being closed,
    # which can leave the CSV truncated if buffered data is never flushed.
    # A context manager guarantees the handle is closed; stdout is left open.
    if output_path:
        with open(output_path, 'w') as out_stream:
            pandas.DataFrame(data=result).to_csv(out_stream, header=False, index=False)
    else:
        pandas.DataFrame(data=result).to_csv(sys.stdout, header=False, index=False)