Example #1
def test_wrong_target_name():
    with pytest.raises(
            MlflowException,
            match=
            'No plugin found for managing model deployments to "wrong_target"'
    ):
        deployments.get_deploy_client("wrong_target")
Example #2
    def setUp(self):
        target_uri = 'openshift'
        self.openshift_client = get_deploy_client(target_uri)
        self.deployment_name = APP_NAME + ''.join(
            random.choices(string.ascii_lowercase, k=6))

        self.saved_env = os.environ["AWS_ACCESS_KEY_ID"]
Example #3
def test_plugin_raising_error():
    client = deployments.get_deploy_client(f_target)
    # special case to raise error
    os.environ["raiseError"] = "True"
    with pytest.raises(RuntimeError):
        client.list_deployments()
    os.environ["raiseError"] = "False"
Example #4
def predict():
    data = ["This year business is good", "Fortnite, Football And Soccer, And Their Surprising Similarities"]
    client = get_deploy_client('torchserve')
    for d in data:
        data_json = json.dumps({'data': [d], 'uuid': 'str'})
        res = client.predict('news_classification_test', data_json)
        inspect(text=d, category=res)
Example #5
def test_create_cli_success_without_version():
    model_file = "MODEL_FILE={model_file_path}".format(
        model_file_path=model_file_path)
    handler_file = "HANDLER={handler_file_path}".format(
        handler_file_path=handler_file_path)
    _ = deployments.get_deploy_client(f_target)
    runner = CliRunner()
    res = runner.invoke(
        cli.create_deployment,
        [
            "-f",
            f_flavor,
            "-m",
            f_model_uri,
            "-t",
            f_target,
            "--name",
            f_deployment_id,
            "-C",
            model_file,
            "-C",
            handler_file,
        ],
    )
    assert "{} deployment {} is created".format(
        f_flavor, f_deployment_name_version) in res.stdout
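Each "-C KEY=VALUE" option in the CLI invocation above ends up as one entry in the config dict passed to create_deployment. A sketch of the equivalent direct client call, reusing the fixture names (f_target, f_model_uri, etc.) assumed by the surrounding test module:

# Equivalent client-side call to the CLI invocation above; each "-C KEY=VALUE"
# option becomes one config entry.
client = deployments.get_deploy_client(f_target)
client.create_deployment(
    f_deployment_id,
    f_model_uri,
    f_flavor,
    config={"MODEL_FILE": model_file_path, "HANDLER": handler_file_path},
)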
Example #6
def predict(parser_args):

    with open(parser_args["input_file_path"], "r") as fp:
        text = fp.read()
        plugin = get_deploy_client(parser_args["target"])
        prediction = plugin.predict(parser_args["deployment_name"],
                                    json.dumps(text))
        print("Prediction Result {}".format(prediction))
Example #7
def test_create_wrong_handler_exception():
    client = deployments.get_deploy_client(f_target)
    with pytest.raises(Exception, match="Unable to create mar file"):
        client.create_deployment(
            f_deployment_id,
            f_model_uri,
            f_flavor,
            config={"VERSION": model_version, "MODEL_FILE": model_file_path, "HANDLER": f_dummy},
        )
Example #8
def test_create_success():
    client = deployments.get_deploy_client(f_target)
    ret = client.create_deployment(f_deployment_id, f_model_uri, f_flavor, config={})
    assert isinstance(ret, dict)
    assert ret["name"] == f_deployment_id
    assert ret["flavor"] == f_flavor

    ret2 = client.create_deployment(f_deployment_id, f_model_uri)
    assert ret2["flavor"] is None
Example #9
def test_create_wrong_model_exception():
    client = deployments.get_deploy_client(f_target)
    with pytest.raises(Exception, match="No such file or directory"):
        client.create_deployment(
            f_deployment_id,
            f_model_uri,
            f_flavor,
            config={"VERSION": model_version, "MODEL_FILE": f_dummy, "HANDLER": handler_file_path},
        )
Example #10
def test_create_no_handler_exception():
    client = deployments.get_deploy_client(f_target)
    with pytest.raises(Exception, match="Config Variable HANDLER - missing"):
        client.create_deployment(
            f_deployment_id,
            f_model_uri,
            f_flavor,
            config={"VERSION": model_version, "MODEL_FILE": model_file_path},
        )
Example #11
def predict(parser_args):
    plugin = get_deploy_client(parser_args["target"])
    img = plt.imread(os.path.join(parser_args["input_file_path"]))
    mnist_transforms = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])

    image_tensor = mnist_transforms(img)
    prediction = plugin.predict(parser_args["deployment_name"], image_tensor)
    print("Prediction Result {}".format(prediction))
Example #12
def test_create_deployment_no_version():
    client = deployments.get_deploy_client(f_target)
    ret = client.create_deployment(
        f_deployment_id,
        f_model_uri,
        f_flavor,
        config={"MODEL_FILE": model_file_path, "HANDLER": handler_file_path},
    )
    assert isinstance(ret, dict)
    assert ret["name"] == f_deployment_name_version
    assert ret["flavor"] == f_flavor
Example #13
def deploy():
    # This can be set as an environment variable
    mlflow.set_tracking_uri("http://localhost:5005")
    client = get_deploy_client('torchserve')
    # path = Path('').absolute() / 'models'
    # client.create_deployment('news_classification_test', f'file://{path}',
    client.create_deployment('news_classification_test', f'models:/BertModel/2',
                             config={
                                 'MODEL_FILE': 'src/bert_classifier/train.py',
                                 'HANDLER': 'src/bert_classifier/handler.py'
                             })
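As the comment in deploy() notes, the tracking URI can also come from the environment instead of an explicit mlflow.set_tracking_uri() call; a minimal sketch, assuming the same local server on port 5005:

import os

# MLFLOW_TRACKING_URI is read by MLflow when resolving "models:/..." URIs,
# so setting it has the same effect as the set_tracking_uri() call above.
os.environ["MLFLOW_TRACKING_URI"] = "http://localhost:5005"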
Example #14
def test_get_success(deployment_name):
    client = deployments.get_deploy_client(f_target)
    ret = client.get_deployment(deployment_name)
    print("Return value is ", json.loads(ret["deploy"]))
    if deployment_name == f_deployment_id:
        assert json.loads(ret["deploy"])[0]["modelName"] == f_deployment_id
    elif deployment_name == f_deployment_name_version:
        assert (
            json.loads(ret["deploy"])[0]["modelVersion"] == f_deployment_name_version.split("/")[1]
        )
    else:
        assert len(json.loads(ret["deploy"])) == 2
Example #15
def test_explain_with_no_target_implementation():
    from unittest import mock
    from mlflow_test_plugin import fake_deployment_plugin

    mock_error = MlflowException("MOCK ERROR")
    target_client = deployments.get_deploy_client(f_target)
    plugin = fake_deployment_plugin.PluginDeploymentClient
    with mock.patch.object(plugin, "explain",
                           return_value=mock_error) as mock_explain:
        res = target_client.explain(f_target, "test")
        assert type(res) == MlflowException
        mock_explain.assert_called_once()
Example #16
def test_list_success():
    client = deployments.get_deploy_client(f_target)
    ret = client.list_deployments()
    # The deployment created earlier should appear in the listing
    is_name_present = False
    for deployment in ret:
        if list(deployment.keys())[0] == f_deployment_id:
            is_name_present = True
            break
    assert is_name_present
Example #17
def test_target_uri_parsing():
    deployments.get_deploy_client(f_target)
    deployments.get_deploy_client(
        "{target}:/somesuffix".format(target=f_target))
    with pytest.raises(MlflowException):
        deployments.get_deploy_client(
            "{target}://somesuffix".format(target=f_target))
Example #18
    def setUp(self):
        target_uri = 'openshift'
        self.openshift_client = get_deploy_client(target_uri)
        self.deployment_name = APP_NAME + ''.join(
            random.choices(string.ascii_lowercase, k=6))

        self.openshift_client.create_deployment(
            self.deployment_name,
            MODEL_URI_1,
            config={
                "docker_registry": DOCKER_REGISTRY,
                "image": IMAGE,
                "tag": TAG,
            },
        )
Example #19
def create_deployment(parser_args):
    plugin = get_deploy_client(parser_args["target"])
    config = {
        "MODEL_FILE": parser_args["model_file"],
        "HANDLER": parser_args["handler"],
        "EXTRA_FILES": "source_vocab.pt,index_to_name.json",
    }
    result = plugin.create_deployment(
        name=parser_args["deployment_name"],
        model_uri=parser_args["serialized_file"],
        config=config,
    )

    print("Deployment {result} created successfully".format(
        result=result["name"]))
Example #20
def create_deployment(parser_args):
    plugin = get_deploy_client(parser_args["target"])
    config = {
        "MODEL_FILE": parser_args["model_file"],
        "HANDLER": parser_args["handler"],
        "EXTRA_FILES": parser_args["extra_files"],
    }

    if parser_args["export_path"] != "":
        config["EXPORT_PATH"] = parser_args["export_path"]

    result = plugin.create_deployment(
        name=parser_args["deployment_name"],
        model_uri=parser_args["registered_model_uri"],
        config=config,
    )

    print("Deployment {result} created successfully".format(
        result=result["name"]))
Example #21
def predict(parser_args):
    plugin = get_deploy_client(parser_args["target"])
    input_file = parser_args["input_file_path"]
    if not os.path.exists(input_file):
        raise Exception("Unable to locate input file : {}".format(input_file))
    else:
        with open(input_file) as fp:
            input_data = fp.read()

    data = json.loads(input_data).get("data")
    import pandas as pd

    df = pd.read_json(data[0])
    for column in df.columns:
        df[column] = df[column].astype("double")

    prediction = plugin.predict(deployment_name=parser_args["deployment_name"],
                                df=input_data)
    print("Prediction Result {}".format(prediction))
Example #22
def run_local(name, model_uri, flavor=None, config=None):
    config = config or {}  # config defaults to None; guard it before calling .get()
    device = config.get("device", "cpu")
    if device.lower() == "gpu":
        docker_image = "pytorch/torchserve:latest-gpu"
    else:
        docker_image = "pytorch/torchserve:latest"

    client = docker.from_env()
    client.containers.run(
        image=docker_image,
        auto_remove=True,
        ports={
            _DEFAULT_TORCHSERVE_LOCAL_INFERENCE_PORT: _DEFAULT_TORCHSERVE_LOCAL_INFERENCE_PORT,
            _DEFAULT_TORCHSERVE_LOCAL_MANAGEMENT_PORT: _DEFAULT_TORCHSERVE_LOCAL_MANAGEMENT_PORT,
        },
        detach=True,
    )

    for _ in range(10):
        url = "http://localhost:{port}/ping".format(
            port=_DEFAULT_TORCHSERVE_LOCAL_INFERENCE_PORT)
        try:
            resp = requests.get(url)
            if resp.status_code != 200:
                time.sleep(6)
                continue
            else:
                break
        except requests.exceptions.ConnectionError:
            time.sleep(6)
    else:
        raise RuntimeError(
            "Could not start the torchserve docker container. You can "
            "try setting up torchserve locally"
            " and call the ``create`` API with target_uri as given in "
            "the example command below (this will set the host as "
            "localhost and port as 8080)\n\n"
            "    mlflow deployments create -t torchserve -m <modeluri> ...\n\n"
        )
    plugin = get_deploy_client("torchserve")
    plugin.create_deployment(name, model_uri, flavor, config)
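The readiness loop above polls TorchServe's /ping endpoint for roughly a minute before giving up. A standalone sketch of the same health check, assuming the default TorchServe inference port 8080:

import time
import requests

def wait_for_torchserve(port=8080, retries=10, delay=6):
    # Returns True once GET /ping answers 200, mirroring the loop in run_local.
    url = "http://localhost:{}/ping".format(port)
    for _ in range(retries):
        try:
            if requests.get(url).status_code == 200:
                return True
        except requests.exceptions.ConnectionError:
            pass
        time.sleep(delay)
    return False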
Example #23
def test_create_cli_success_without_version():
    _ = deployments.get_deploy_client(f_target)
    res = runner.invoke(
        cli.create_deployment,
        [
            "-f",
            f_flavor,
            "-m",
            f_model_uri,
            "-t",
            f_target,
            "--name",
            f_deployment_id,
            "-C",
            model_file,
            "-C",
            handler_file,
        ],
    )
    assert "{} deployment {} is created".format(
        f_flavor, f_deployment_name_version) in res.stdout
Example #24
def predict(parser_args):
    plugin = get_deploy_client(parser_args["target"])
    # Read the image as binary and base64-encode it for the request payload
    with open(parser_args["input_file_path"], "rb") as image:
        image_read = image.read()
    image_64_encode = base64.b64encode(image_read)
    bytes_array = image_64_encode.decode("utf-8")
    request = {"data": bytes_array}

    inference_type = parser_args["inference_type"]
    if inference_type == "explanation":
        result = plugin.explain(parser_args["deployment_name"],
                                json.dumps(request))
    else:
        result = plugin.predict(parser_args["deployment_name"],
                                json.dumps(request))

    print("Prediction Result {}".format(result))

    output_path = parser_args["output_file_path"]
    if output_path:
        with open(output_path, "w") as fp:
            fp.write(result)
Example #25
def test_create_cli_version_success():
    version = "VERSION={version}".format(version="1.0")
    _ = deployments.get_deploy_client(f_target)
    res = runner.invoke(
        cli.create_deployment,
        [
            "-f",
            f_flavor,
            "-m",
            f_model_uri,
            "-t",
            f_target,
            "--name",
            f_deployment_id,
            "-C",
            model_file,
            "-C",
            handler_file,
            "-C",
            version,
        ],
    )
    assert "{} deployment {} is created".format(f_flavor, f_deployment_id +
                                                "/1.0") in res.stdout
Example #26
    def setUp(self):
        self.client_ = get_deploy_client('triton')
Example #27
def register(parser_args):
    plugin = get_deploy_client(parser_args["target"])
    plugin.register_model(mar_file_path=parser_args["mar_file_name"])
    print("Registered Successfully")
Example #28
    def setUp(self):
        target_uri = 'openshift'
        self.openshift_client = get_deploy_client(target_uri)
        self.deployment_name = APP_NAME + ''.join(
            random.choices(string.ascii_lowercase, k=6))
Example #29
def test_update_deployment_success(deployment_name, config):
    client = deployments.get_deploy_client(f_target)
    ret = client.update_deployment(deployment_name, config)
    assert ret["flavor"] is None
Example #30
def test_wrong_target_name():
    with pytest.raises(MlflowException):
        deployments.get_deploy_client("wrong_target")