# Example no. 1
    def predict(self, deployment_name, df):
        """
        Predict on the specified deployment using the provided dataframe.

        Compute predictions on the pandas DataFrame ``df`` using the specified deployment.
        Note that the input/output types of this method matches that of `mlflow pyfunc predict`
        (we accept a pandas DataFrame as input and return either a pandas DataFrame,
        pandas Series, or numpy array as output).

        :param deployment_name: Name of deployment to predict against
        :param df: Pandas DataFrame to use for inference
        :return: A pandas DataFrame, pandas Series, or numpy array
        :raises MlflowException: If the deployment cannot be retrieved, the
            service has no scoring URI, or the scoring request fails.
        """
        try:
            service = Webservice(self.workspace, deployment_name)
        except Exception as e:
            raise MlflowException(
                'Failure retrieving deployment to predict against') from e

        # Take in DF, parse to json using split orient
        input_data = _get_jsonable_obj(df, pandas_orient='split')

        if not service.scoring_uri:
            raise MlflowException(
                'Error attempting to call webservice, scoring_uri unavailable. '
                'This could be due to a failed deployment, or the service is not ready yet.\n'
                'Current State: {}\n'
                'Errors: {}'.format(service.state, service.error))

        # Build the request body once so the initial call and the
        # post-reauthentication retry send identical payloads. (Previously the
        # retry sent the bare ``input_data`` without the ``input_data`` wrapper
        # or JSON serialization, giving the service a differently-shaped body.)
        payload = json.dumps({'input_data': input_data})

        # Pass split orient json to webservice
        # Take records orient json from webservice
        resp = ClientBase._execute_func(service._webservice_session.post,
                                        service.scoring_uri,
                                        data=payload)

        if resp.status_code == 401:
            # Credentials may have rotated; refresh them on the SAME session
            # object used for the POSTs above/below. (Previously the headers
            # were updated on ``service._session`` while the requests went
            # through ``service._webservice_session``, so the retry reused the
            # stale credentials and would 401 again.)
            if service.auth_enabled:
                service_keys = service.get_keys()
                service._webservice_session.headers.update(
                    {'Authorization': 'Bearer ' + service_keys[0]})
            elif service.token_auth_enabled:
                service_token, refresh_token_time = service.get_access_token()
                service._refresh_token_time = refresh_token_time
                service._webservice_session.headers.update(
                    {'Authorization': 'Bearer ' + service_token})
            resp = ClientBase._execute_func(service._webservice_session.post,
                                            service.scoring_uri,
                                            data=payload)

        if resp.status_code == 200:
            # Parse records orient json to df
            return parse_json_input(json.dumps(resp.json()), orient='records')
        else:
            raise MlflowException('Failure during prediction:\n'
                                  'Response Code: {}\n'
                                  'Headers: {}\n'
                                  'Content: {}'.format(resp.status_code,
                                                       resp.headers,
                                                       resp.content))
# Example no. 2
def test_deployed_model_service():
    """Smoke-test a deployed webservice: keys exist, URI is reachable, and a
    scoring request returns a valid probability distribution.

    Relies on module-level ``ws``, ``deployment_name``, and ``test_sample``
    being defined elsewhere in the file.
    """
    service = Webservice(ws, deployment_name)
    assert service is not None

    key1, key2 = service.get_keys()
    uri = service.scoring_uri

    assert key1 is not None
    assert uri.startswith('http')

    headers = {
        'Content-Type': 'application/json',
        'Authorization': f'Bearer {key1}'
    }
    response = requests.post(uri, test_sample, headers=headers)
    # Use == for value comparison: ``is 200`` tests object identity, which is
    # CPython-implementation-dependent for ints and a SyntaxWarning on 3.8+.
    assert response.status_code == 200
    # Predicted class probabilities should sum to ~1.
    assert abs(1 - sum(response.json()['predict_proba'][0])) < 0.01
from azureml.core import Workspace
from azureml.core import Webservice

# Load the workspace from the local config.json written by `az ml` / the portal.
ws = Workspace.from_config()

# Inspect the 'lpr' webservice: scoring endpoint, auth keys, and recent logs.
service = Webservice(ws, 'lpr')
# Fixed typo: the attribute is ``scoring_uri``; ``scoring_ui`` raised AttributeError.
scoring_uri = service.scoring_uri
primary, secondary = service.get_keys()
print(primary)
print(service.get_logs())