Example #1
File: keras.py  Project: yzongyue/mlflow
def get_default_conda_env():
    """
    :return: The default Conda environment for MLflow Models produced by calls to
             :func:`save_model()` and :func:`log_model()`.
    """
    import keras
    import tensorflow as tf

    return _mlflow_conda_env(
        additional_conda_deps=[
            "keras={}".format(keras.__version__),
            # The Keras pyfunc representation requires the TensorFlow
            # backend for Keras. Therefore, the conda environment must
            # include TensorFlow
            "tensorflow=={}".format(tf.__version__),
        ],
        additional_pip_deps=None,
        additional_conda_channels=None)
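For reference, `_mlflow_conda_env` returns a plain dictionary in conda environment.yml
form. A minimal sketch of the expected shape, based on the output shown later in
Example #6's docstring; the exact Python version, channels, and pins are placeholders
that depend on the caller's environment:

# Hypothetical illustration only; versions below are placeholders.
expected_env = {
    "name": "mlflow-env",
    "channels": ["defaults", "conda-forge"],
    "dependencies": [
        "python=3.7.5",
        "keras=2.3.1",
        "tensorflow==1.15.0",
        "pip",
        {"pip": ["mlflow"]},
    ],
}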
Example #2
def get_default_conda_env():
    """
    :return: The default Conda environment for MLflow Models produced by calls to
             :func:`save_model()` and :func:`log_model()`. This Conda environment
             contains the current version of PySpark that is installed on the caller's
             system. ``dev`` versions of PySpark are replaced with stable versions in
             the resulting Conda environment (e.g., if you are running PySpark version
             ``2.4.5.dev0``, invoking this method produces a Conda environment with a
             dependency on PySpark version ``2.4.5``).
    """
    import re

    import pyspark

    # Strip the suffix from `dev` versions of PySpark, which are not
    # available for installation from Anaconda or PyPI
    pyspark_version = re.sub(r"(\.?)dev.*", "", pyspark.__version__)

    return _mlflow_conda_env(
        additional_pip_deps=["pyspark=={}".format(pyspark_version)])
Example #3
def _get_default_conda_env() -> Optional[Dict[str, Any]]:
    """Get default Conda environment.

    :return: The default Conda environment for MLflow Models produced by calls to
             :func:`save_model()` and :func:`log_model()`.
    """
    import torch
    import fonduer

    return _mlflow_conda_env(
        additional_conda_deps=[
            "pytorch={}".format(torch.__version__),  # type: ignore
            "psycopg2",
            "pip",
        ],
        additional_pip_deps=["fonduer=={}".format(fonduer.__version__)],
        additional_conda_channels=["pytorch"],
    )
Example #4
def get_default_conda_env():
    """
    :return: The default Conda environment for MLflow Models produced by calls to
             :func:`save_model()` and :func:`log_model()`.
    """
    import onnx
    import onnxruntime
    return _mlflow_conda_env(
        additional_conda_deps=None,
        additional_pip_deps=[
            "onnx=={}".format(onnx.__version__),
            # The ONNX pyfunc representation requires the OnnxRuntime
            # inference engine. Therefore, the conda environment must
            # include OnnxRuntime
            "onnxruntime=={}".format(onnxruntime.__version__),
        ],
        additional_conda_channels=None,
    )
Example #5
File: keras.py  Project: radzak/mlflow
def get_default_conda_env(include_cloudpickle=False, keras_module=None):
    """
    :return: The default Conda environment for MLflow Models produced by calls to
             :func:`save_model()` and :func:`log_model()`.
    """
    from distutils.version import LooseVersion

    import tensorflow as tf

    # If we use tf.keras, we only need to declare a dependency on tensorflow.
    conda_deps = []
    pip_deps = []
    if keras_module is None:
        import keras

        keras_module = keras
    if keras_module.__name__ == "keras":
        # Temporary fix: the created conda environment has issues installing keras >= 2.3.1
        if LooseVersion(keras_module.__version__) < LooseVersion("2.3.1"):
            conda_deps.append("keras=={}".format(keras_module.__version__))
        else:
            pip_deps.append("keras=={}".format(keras_module.__version__))
    if include_cloudpickle:
        import cloudpickle

        pip_deps.append("cloudpickle=={}".format(cloudpickle.__version__))
    # Temporary fix: conda-forge currently does not have tensorflow > 1.14
    # The Keras pyfunc representation requires the TensorFlow
    # backend for Keras. Therefore, the conda environment must
    # include TensorFlow
    if LooseVersion(tf.__version__) <= LooseVersion("1.13.2"):
        conda_deps.append("tensorflow=={}".format(tf.__version__))
    else:
        pip_deps.append("tensorflow=={}".format(tf.__version__))

    # Tensorflow<2.4 does not work with h5py>=3.0.0
    # see https://github.com/tensorflow/tensorflow/issues/44467
    if LooseVersion(tf.__version__) < LooseVersion("2.4"):
        pip_deps.append("h5py<3.0.0")

    return _mlflow_conda_env(
        additional_conda_deps=conda_deps,
        additional_pip_deps=pip_deps,
        additional_conda_channels=None,
    )
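The branching above hinges on `LooseVersion` comparisons. A small standalone check of the
version boundaries used in this example:

from distutils.version import LooseVersion

assert LooseVersion("2.3.0") < LooseVersion("2.3.1")     # keras pinned via conda
assert LooseVersion("1.13.2") <= LooseVersion("1.13.2")  # tensorflow pinned via conda
assert LooseVersion("2.3.1") < LooseVersion("2.4")       # h5py<3.0.0 pin applies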
Example #6
def get_default_conda_env():
    """
    :return: The default Conda environment as a dictionary for MLflow Models produced by calls to
             :func:`save_model()` and :func:`log_model()`.

    .. code-block:: python
        :caption: Example

        import mlflow.pytorch

        # Log PyTorch model
        with mlflow.start_run() as run:
            mlflow.pytorch.log_model(model, "model")

        # Fetch the associated conda environment
        env = mlflow.pytorch.get_default_conda_env()
        print("conda env: {}".format(env))

    .. code-block:: text
        :caption: Output

        conda env: {'name': 'mlflow-env',
                    'channels': ['defaults', 'conda-forge', 'pytorch'],
                    'dependencies': ['python=3.7.5', 'pytorch=1.5.1',
                                     'torchvision=0.6.1',
                                     'pip', {'pip': ['mlflow', 'cloudpickle==1.6.0']}]}
    """
    import torch
    import torchvision
    import cloudpickle

    return _mlflow_conda_env(
        additional_conda_deps=[
            "pytorch={}".format(torch.__version__),
            "torchvision={}".format(torchvision.__version__),
        ],
        additional_pip_deps=[
            # We include CloudPickle in the default environment because
            # it's required by the default pickle module used by `save_model()`
            # and `log_model()`: `mlflow.pytorch.pickle_module`.
            "cloudpickle=={}".format(cloudpickle.__version__)
        ],
        additional_conda_channels=["pytorch"],
    )
Example #7
def get_default_conda_env(include_cloudpickle=False):
    """
    :return: The default Conda environment as a dictionary for MLflow Models produced by calls to
             :func:`save_model()` and :func:`log_model()`.


    .. code-block:: python
        :caption: Example

        import mlflow.fastai

        # Start MLflow session and log the fastai learner model
        with mlflow.start_run():
            model.fit(epochs, learning_rate)
            mlflow.fastai.log_model(model, "model")

        # Fetch the default conda environment
        env = mlflow.fastai.get_default_conda_env()
        print("conda environment: {}".format(env))

    .. code-block:: text
        :caption: Output

        conda environment: {'name': 'mlflow-env',
                            'channels': ['defaults', 'conda-forge'],
                            'dependencies': ['python=3.7.5', 'fastai=1.0.61',
                                             'pip', {'pip': ['mlflow']}]}
    """

    import fastai

    pip_deps = None
    if include_cloudpickle:
        import cloudpickle

        pip_deps = ["cloudpickle=={}".format(cloudpickle.__version__)]
    return _mlflow_conda_env(
        additional_conda_deps=["fastai={}".format(fastai.__version__)],
        additional_pip_deps=pip_deps,
        additional_conda_channels=None,
    )
Example #8
def get_default_conda_env():
    """
    :return: The default Conda environment for MLflow Models produced by calls to
             :func:`save_model()` and :func:`log_model()`.
    """
    import torch
    import torchvision
    import cloudpickle

    return _mlflow_conda_env(
        additional_conda_deps=[
            "pytorch={}".format(torch.__version__),
            "torchvision={}".format(torchvision.__version__),
        ],
        additional_pip_deps=[
            # We include CloudPickle in the default environment because
            # it's required by the default pickle module used by `save_model()`
            # and `log_model()`: `mlflow.pytorch.pickle_module`.
            "cloudpickle=={}".format(cloudpickle.__version__)
        ],
        additional_conda_channels=["pytorch"],
    )
Example #9
def driver():
    # NOTE: this excerpt assumes script-level context not shown here:
    # `Net`, `device`, `args`, `train_loader`, `test_loader`, `train`, `test`.
    import warnings

    import cloudpickle
    import torchvision
    import mlflow
    import mlflow.pytorch
    from torch import optim

    warnings.filterwarnings("ignore")
    # Dependencies for deploying the model
    pytorch_index = "https://download.pytorch.org/whl/"
    pytorch_version = "cpu/torch-1.1.0-cp36-cp36m-linux_x86_64.whl"
    deps = [
        "cloudpickle=={}".format(cloudpickle.__version__),
        pytorch_index + pytorch_version,
        "torchvision=={}".format(torchvision.__version__),
        "Pillow=={}".format("6.0.0")
    ]
    with mlflow.start_run() as run:
        model = Net().to(device)
        optimizer = optim.SGD(model.parameters(),
                              lr=args.lr,
                              momentum=args.momentum)
        for epoch in range(1, args.epochs + 1):
            train(args, model, device, train_loader, optimizer, epoch)
            test(args, model, device, test_loader)
        # Log model to run history using MLflow
        if args.save_model:
            model_env = _mlflow_conda_env(additional_pip_deps=deps)
            mlflow.pytorch.log_model(model, "model", conda_env=model_env)
    return run
Example #10
File: shap.py  Project: szczeles/mlflow
def _merge_environments(shap_environment, model_environment):
    """
    Merge conda environments of underlying model and shap.

    :param shap_environment: SHAP conda environment.
    :param model_environment: Underlying model conda environment.
    """
    # Merge the channels from the two environments and drop the "conda-forge"
    # channel if present, since it's added back later in `_mlflow_conda_env`.
    merged_conda_channels = _union_lists(
        shap_environment["channels"], model_environment["channels"]
    )
    merged_conda_channels = [x for x in merged_conda_channels if x != "conda-forge"]

    shap_conda_deps, shap_pip_deps = _get_conda_and_pip_dependencies(shap_environment)
    model_conda_deps, model_pip_deps = _get_conda_and_pip_dependencies(model_environment)

    merged_conda_deps = _union_lists(shap_conda_deps, model_conda_deps)
    merged_pip_deps = _union_lists(shap_pip_deps, model_pip_deps)
    return _mlflow_conda_env(
        additional_conda_deps=merged_conda_deps,
        additional_pip_deps=merged_pip_deps,
        additional_conda_channels=merged_conda_channels,
    )
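This function leans on two helpers the excerpt does not show. A minimal sketch of what they
plausibly do, assuming an order-preserving union and the standard conda environment layout
(an illustration, not MLflow's actual implementation):

def _union_lists(l1, l2):
    # Order-preserving union: keep l1, then append items of l2 not already seen.
    return l1 + [x for x in l2 if x not in l1]

def _get_conda_and_pip_dependencies(conda_env):
    # Split a conda environment dict's "dependencies" into plain conda packages
    # and the requirements nested under a {"pip": [...]} entry.
    conda_deps, pip_deps = [], []
    for dep in conda_env.get("dependencies", []):
        if isinstance(dep, dict) and "pip" in dep:
            pip_deps.extend(dep["pip"])
        else:
            conda_deps.append(dep)
    return conda_deps, pip_deps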
Example #11
def get_default_conda_env():
    """
    :return: The default Conda environment for MLflow Models produced by calls to
             :func:`save_model()` and :func:`log_model()`.
    """
    return _mlflow_conda_env(additional_pip_deps=get_default_pip_requirements())
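Here the default environment is built from a flavor-specific `get_default_pip_requirements()`
helper rather than inline version strings. A minimal sketch of that pattern, assuming the
common case of pinning the flavor's library to the locally installed version (the library
name is purely illustrative):

import lightgbm

def get_default_pip_requirements():
    # Pin the flavor library to whatever version is installed in the current
    # Python environment, mirroring the inline examples above.
    return ["lightgbm=={}".format(lightgbm.__version__)]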
Example #12
import logging

import pyspark

from mlflow.models import Model
from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.utils.model_utils import _get_flavor_configuration
from mlflow.utils.file_utils import TempDir

FLAVOR_NAME = "spark"

# Default temporary directory on DFS. Used to write / read from Spark ML models.
DFS_TMP = "/tmp/mlflow"
_SPARK_MODEL_PATH_SUB = "sparkml"

DEFAULT_CONDA_ENV = _mlflow_conda_env(
    additional_conda_deps=[
        "pyspark={}".format(pyspark.__version__),
    ],
    additional_pip_deps=None,
    additional_conda_channels=None,
)

_logger = logging.getLogger(__name__)


def log_model(spark_model,
              artifact_path,
              conda_env=None,
              jars=None,
              dfs_tmpdir=None,
              sample_input=None):
    """
    Log a Spark MLlib model as an MLflow artifact for the current run. This uses the
Example #13
def keras_custom_env(tmpdir):
    conda_env = os.path.join(str(tmpdir), "conda_env.yml")
    _mlflow_conda_env(conda_env,
                      additional_conda_deps=["keras", "tensorflow", "pytest"])
    return conda_env
Example #14
def custom_env(tmpdir):
    conda_env_path = os.path.join(tmpdir.strpath, "conda_env.yml")
    _mlflow_conda_env(conda_env_path,
                      additional_pip_deps=["catboost", "pytest"])
    return conda_env_path
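Examples #13 and #14 (and the similar helpers below) read like pytest fixtures whose
@pytest.fixture decorator was dropped by the excerpt. A hedged sketch of how such a fixture
is typically consumed in a test (the test body here is hypothetical):

import os

def test_custom_env_file_is_written(custom_env):
    # The fixture returns the path of the conda_env.yml it generated.
    assert os.path.basename(custom_env) == "conda_env.yml"
    assert os.path.exists(custom_env)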
Example #15
def get_default_conda_env():
    return _mlflow_conda_env(
        additional_conda_deps=None,
        additional_pip_deps=["fasttext=={}".format(_get_installed_fasttext_version())],
        additional_conda_channels=None,
    )
Example #16
def get_default_conda_env(include_cloudpickle=False):  # pylint: disable=unused-argument
    """
    :return: The default Conda environment for MLflow Models produced by calls to
             :func:`save_model()` and :func:`log_model()`.
    """
    return _mlflow_conda_env(additional_pip_deps=get_default_pip_requirements())
Example #17
def prophet_custom_env(tmpdir):
    conda_env = os.path.join(str(tmpdir), "conda_env.yml")
    _mlflow_conda_env(conda_env, additional_pip_deps=["pystan", "prophet", "pytest"])
    return conda_env
Example #18
def lgb_custom_env(tmpdir):
    conda_env = os.path.join(str(tmpdir), "conda_env.yml")
    _mlflow_conda_env(conda_env, additional_pip_deps=["lightgbm", "pytest"])
    return conda_env
Example #19
def pytorch_custom_env(tmpdir):
    conda_env = os.path.join(str(tmpdir), "conda_env.yml")
    _mlflow_conda_env(conda_env,
                      additional_pip_deps=["pytorch", "torchvision", "pytest"])
    return conda_env
Example #20
def spark_conda_env(tmpdir):
    conda_env = os.path.join(str(tmpdir), "conda_env.yml")
    _mlflow_conda_env(
        conda_env, additional_pip_deps=["pyspark=={}".format(pyspark_version)])
    return conda_env
Example #21
def pyfunc_custom_env(tmpdir):
    conda_env = os.path.join(str(tmpdir), "conda_env.yml")
    _mlflow_conda_env(
        conda_env,
        additional_conda_deps=["scikit-learn", "pytest", "cloudpickle"])
    return conda_env
Example #22
def spacy_custom_env(tmpdir):
    conda_env = os.path.join(str(tmpdir), "conda_env.yml")
    _mlflow_conda_env(conda_env, additional_pip_deps=["pytest", "spacy"])
    return conda_env
Example #23
def gluon_custom_env(tmpdir):
    conda_env = os.path.join(str(tmpdir), "conda_env.yml")
    _mlflow_conda_env(conda_env, additional_conda_deps=["mxnet", "pytest"])
    return conda_env
Example #24
import sklearn

from mlflow import pyfunc
from mlflow.exceptions import MlflowException
from mlflow.models import Model
from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE, INTERNAL_ERROR
from mlflow.protos.databricks_pb2 import RESOURCE_ALREADY_EXISTS
import mlflow.tracking
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.utils.model_utils import _get_flavor_configuration

FLAVOR_NAME = "sklearn"

DEFAULT_CONDA_ENV = _mlflow_conda_env(
    additional_conda_deps=[
        "scikit-learn={}".format(sklearn.__version__),
    ],
    additional_pip_deps=None,
    additional_conda_channels=None,
)

SERIALIZATION_FORMAT_PICKLE = "pickle"
SERIALIZATION_FORMAT_CLOUDPICKLE = "cloudpickle"

SUPPORTED_SERIALIZATION_FORMATS = [
    SERIALIZATION_FORMAT_PICKLE, SERIALIZATION_FORMAT_CLOUDPICKLE
]


def save_model(sk_model,
               path,
               conda_env=None,
Example #25
def pmdarima_custom_env(tmpdir):
    conda_env = os.path.join(str(tmpdir), "conda_env.yml")
    _mlflow_conda_env(conda_env, additional_pip_deps=["pmdarima"])
    return conda_env
Example #26
optimizer = keras.optimizers.SGD(lr=args.learning_rate,
                                 momentum=args.momentum,
                                 nesterov=True)

model.compile(optimizer=optimizer,
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

model.fit(x_train, y_train, epochs=args.epochs, batch_size=args.batch_size)

test_loss, test_acc = model.evaluate(x_test, y_test, verbose=2)

conda_env = _mlflow_conda_env(
    additional_conda_deps=[
        "keras=={}".format(keras.__version__),
        "tensorflow=={}".format(tf.__version__),
    ],
    additional_pip_deps=[
        "cloudpickle=={}".format(cloudpickle.__version__),
    ],
)


class KerasMnistCNN(PythonModel):
    def load_context(self, context):
        import tensorflow as tf
        from keras import backend as K
        self.graph = tf.Graph()
        with self.graph.as_default():
            K.set_learning_phase(0)
            self.model = mlflow.keras.load_model(
                context.artifacts["keras-model"])

    def predict(self, context, input_df):
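The excerpt stops before the pyfunc model is logged. A hedged sketch of how the pieces above
are typically wired together (the artifact path names are assumptions, not part of the
original script):

with mlflow.start_run():
    mlflow.keras.log_model(model, "keras-model")
    mlflow.pyfunc.log_model(
        "keras-mnist-cnn",
        python_model=KerasMnistCNN(),
        artifacts={"keras-model": mlflow.get_artifact_uri("keras-model")},
        conda_env=conda_env,
    )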
Example #27
import keras
import tensorflow as tf

from mlflow import pyfunc
from mlflow.models import Model
import mlflow.tracking
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.utils.model_utils import _get_flavor_configuration

FLAVOR_NAME = "keras"

DEFAULT_CONDA_ENV = _mlflow_conda_env(
    additional_conda_deps=[
        "keras={}".format(keras.__version__),
        # The Keras pyfunc representation requires the TensorFlow
        # backend for Keras. Therefore, the conda environment must
        # include TensorFlow
        "tensorflow=={}".format(tf.__version__),
    ],
    additional_pip_deps=None,
    additional_conda_channels=None,
)


def save_model(keras_model, path, conda_env=None, mlflow_model=Model()):
    """
    Save a Keras model to a path on the local file system.

    :param keras_model: Keras model to be saved.
    :param path: Local path where the model is to be saved.
    :param conda_env: Either a dictionary representation of a Conda environment or the path to a
                      Conda environment yaml file. If provided, this describes the environment
Example #28
def pyfunc_custom_env_dict():
    return _mlflow_conda_env(
        additional_conda_deps=["scikit-learn", "pytest", "cloudpickle"],
        # Install the local MLflow source tree in editable mode so the
        # environment under test uses the development copy of mlflow.
        additional_pip_deps=["-e " + os.path.dirname(mlflow.__path__[0])])
Example #29
def h2o_custom_env(tmpdir):
    conda_env = os.path.join(str(tmpdir), "conda_env.yml")
    _mlflow_conda_env(conda_env,
                      additional_conda_deps=["pytest"],
                      additional_pip_deps=["h2o"])
    return conda_env
Example #30
import logging

import torch
import torchvision

import mlflow.pyfunc.utils as pyfunc_utils
from mlflow import pyfunc
from mlflow.exceptions import MlflowException
from mlflow.models import Model
import mlflow.tracking
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.utils.file_utils import _copy_file_or_tree
from mlflow.utils.model_utils import _get_flavor_configuration

FLAVOR_NAME = "pytorch"

DEFAULT_CONDA_ENV = _mlflow_conda_env(
    additional_conda_deps=[
        "pytorch={}".format(torch.__version__),
        "torchvision={}".format(torchvision.__version__),
    ],
    additional_pip_deps=None,
    additional_conda_channels=[
        "pytorch",
    ],
)

_logger = logging.getLogger(__name__)


def log_model(pytorch_model, artifact_path, conda_env=None, code_paths=None, **kwargs):
    """
    Log a PyTorch model as an MLflow artifact for the current run.

    :param pytorch_model: PyTorch model to be saved. Must accept a single ``torch.FloatTensor`` as
                          input and produce a single output tensor. Any code dependencies of the
                          model's class, including the class definition itself, should be