Example #1
    def test_root(self):
        model_app = ModelApp([])
        app = model_app.app.test_client()
        resp = app.get('/')
        self.assertEqual(resp.status_code, 200)

        model_app = ModelApp([], expose_docs=True)
        app = model_app.app.test_client()
        resp = app.get('/')
        self.assertEqual(resp.status_code, 302)
Example #2
    def setUpClass(cls):
        # DO NOT set app.testing = True here.
        # Doing so *disables* error handling in the application and instead
        # passes errors on to the test client (in our case, instances of
        # unittest.TestCase).
        # In this class we actually want to test the application's error handling
        # and thus do not set this attribute.
        # See http://flask.pocoo.org/docs/0.12/api/#flask.Flask.test_client

        prediction_service = PredictionService(name='failing-model',
                                               api_version='B',
                                               model=None,
                                               meta={
                                                   '1': 'one',
                                                   'two': 2
                                               })

        cls.model_app = ModelApp([prediction_service])
        flask_app = cls.model_app.app

        @flask_app.route('/test-error-handling/', methods=['POST'])
        def test_error():
            flask.request.get_json(force=True)
            raise Exception('exceptional testing of exceptions')

        cls.app_test_client = flask_app.test_client()
Example #3
 def test_readiness_not_ready1(self):
     model_app = ModelApp([])
     app = model_app.app.test_client()
     resp_alive = app.get('/-/alive')
     resp_ready = app.get('/-/ready')
     expected_data = {
         'request_id': '123',
         'porter_version': __version__,
         'deployed_on': cn.HEALTH_CHECK_VALUES.DEPLOYED_ON,
         'services': {},
         'app_meta': {
             'description': '<div></div><div><p>(porter v0.15.3)</p></div>',
             'expose_docs': False,
             'name': None,
             'version': None
         },
     }
     self.assertEqual(resp_alive.status_code, 200)
     self.assertEqual(resp_ready.status_code, 503)
     alive_response = json.loads(resp_alive.data)
     ready_response = json.loads(resp_ready.data)
     self.assertEqual(alive_response, expected_data)
     self.assertEqual(ready_response, expected_data)
     # make sure the defined schema matches reality
     sc.health_check.validate(alive_response)  # should not raise exception
     sc.health_check.validate(ready_response)  # should not raise exception
Example #4
 def test_custom2(self, mock_app, mock_make_openapi_spec):
     model_app = ModelApp([], docs_prefix='/docs', docs_url='/d/', docs_json_url='/d.json', expose_docs=True)
     expected_calls = [
         mock.call('/docs/d/'),
         mock.call('/docs/assets/swagger-ui/<path:filename>'),
         mock.call('/docs/d.json'),
     ]
     model_app.app.route.assert_has_calls(expected_calls, any_order=True)
Example #5
 def test_defaults(self, mock_app, mock_make_openapi_spec):
     model_app = ModelApp([], expose_docs=True)
     expected_calls = [
         mock.call('/docs/'),
         mock.call('/assets/swagger-ui/<path:filename>'),
         mock.call('/_docs.json'),
     ]
     model_app.app.route.assert_has_calls(expected_calls, any_order=True)
Example #6
    def test_readiness_not_ready2(self, mock_init):
        mock_init.return_value = None

        class C(PredictionService):
            status = 'NOTREADY'
            request_schemas = {}
            response_schemas = {}

        svc = C()
        svc.name = 'foo'
        svc.api_version = 'bar'
        svc.meta = {'k': 1}
        svc.id = 'foo:bar'
        svc.endpoint = '/foo/bar/'
        svc.route_kwargs = {}

        model_app = ModelApp([svc])
        app = model_app.app.test_client()

        resp_alive = app.get('/-/alive')
        resp_ready = app.get('/-/ready')
        expected_data = {
            'request_id': '123',
            'porter_version': __version__,
            'deployed_on': cn.HEALTH_CHECK_VALUES.DEPLOYED_ON,
            'services': {
                'foo:bar': {
                    'status': 'NOTREADY',
                    'endpoint': '/foo/bar/',
                    'model_context': {
                        'model_name': 'foo',
                        'api_version': 'bar',
                        'model_meta': {
                            'k': 1
                        }
                    }
                }
            },
            'app_meta': {
                'description': '<div></div><div><p>(porter v0.15.3)</p></div>',
                'expose_docs': False,
                'name': None,
                'version': None
            },
        }
        self.assertEqual(resp_alive.status_code, 200)
        self.assertEqual(resp_ready.status_code, 503)
        alive_response = json.loads(resp_alive.data)
        ready_response = json.loads(resp_ready.data)
        self.assertEqual(alive_response, expected_data)
        self.assertEqual(ready_response, expected_data)
        # make sure the defined schema matches reality
        sc.health_check.validate(alive_response)  # should not raise exception
        sc.health_check.validate(ready_response)  # should not raise exception
Example #7
 def test_constructor_fail_on_duplicate_services(self, mock_app):
     class service1:
         id = 'service1'
         endpoint = '/an/endpoint'
         route_kwargs = {}
         request_schemas = {}
         response_schemas = {}
         name = 'service1'
     class service2:
         id = 'service1'
         endpoint = '/foobar'
         route_kwargs = {}
         request_schemas = {}
         response_schemas = {}
         name = 'service2'
     with self.assertRaisesRegex(ValueError, 'service has already been added'):
         model_app = ModelApp([service1, service2])
Example #8
    def test_docs_paths(self):
        class SC(BaseService):
            def serve(self):
                pass

            def status(self):
                pass

            action = 'endpoint'

        service = SC(name='the', api_version='v1')
        model_app = ModelApp([service], expose_docs=True)
        docs_json = model_app.docs_json
        expected_keys = ['openapi', 'info', 'paths', 'components']
        self.assertEqual(set(docs_json.keys()), set(expected_keys))
        expected_paths = ['/the/v1/endpoint', '/-/alive', '/-/ready']
        self.assertEqual(set(docs_json['paths'].keys()), set(expected_paths))
Example #9
    def test_constructor_schema_handling(self, mock_make_openapi_spec, mock_app):
        class service1:
            id = 'service1'
            endpoint = '/an/endpoint'
            route_kwargs = {'foo': 1, 'bar': 'baz'}
            request_schemas = {'GET': object()}
            response_schemas = {'POST': object()}
            name = 'service1'
        class service2:
            id = 'service2'
            endpoint = '/foobar'
            route_kwargs = {'methods': ['GET']}
            request_schemas = {'GET': object()}
            response_schemas = None
            name = 'service2'
        class service3:
            id = 'service3'
            endpoint = '/supa/dupa'
            route_kwargs = {'methods': ['GET'], 'strict_slashes': True}
            request_schemas = {'GET': object(), 'POST': object()}
            response_schemas = {'GET': object()}
            name = 'service3'

        # add the services and validate they were routed with the correct
        # parameters.
        model_app = ModelApp([service1, service2, service3],
                             expose_docs=True, docs_url='/custom/docs/url/')

        # verify that the schemas were correctly registered
        expected_request_schemas = {
            service1.endpoint: service1.request_schemas,
            service2.endpoint: service2.request_schemas,
            service3.endpoint: service3.request_schemas
        }
        self.assertEqual(model_app._request_schemas, expected_request_schemas)

        health_check_responses = [schemas.ResponseSchema(schemas.health_check, 200)]
        expected_response_schemas = {
            service1.endpoint: service1.response_schemas,
            # service 2 did not have a response schema
            service3.endpoint: service3.response_schemas,
            '/-/alive': ModelApp._health_check_response_schemas,
            '/-/ready': ModelApp._health_check_response_schemas
        }
        self.assertEqual(model_app._response_schemas, expected_response_schemas)
Example #10
    def test_constructor_routing(self, mock__route_endpoint, mock_make_openapi_spec):
        class service1:
            id = 'service1'
            endpoint = '/an/endpoint'
            route_kwargs = {'foo': 1, 'bar': 'baz'}
            request_schemas = {'GET': object()}
            response_schemas = {'POST': object()}
            name = 'service1'
        class service2:
            id = 'service2'
            endpoint = '/foobar'
            route_kwargs = {'methods': ['GET']}
            request_schemas = {'GET': object()}
            response_schemas = None
            name = 'service2'
        class service3:
            id = 'service3'
            endpoint = '/supa/dupa'
            route_kwargs = {'methods': ['GET'], 'strict_slashes': True}
            request_schemas = {'GET': object(), 'POST': object()}
            response_schemas = {'GET': object()}
            name = 'service3'

        # add the services and validate they were routed with the correct
        # parameters.
        model_app = ModelApp([service1, service2, service3],
                             expose_docs=True, docs_url='/custom/docs/url/')

        expected_calls = [
            mock.call(service1.endpoint, service1, service1.route_kwargs,
                      request_schemas=service1.request_schemas,
                      response_schemas=service1.response_schemas,
                      additional_params={'GET': {'tags': [service1.name]},
                                         'POST': {'tags': [service1.name]}}),
            mock.call(service2.endpoint, service2, service2.route_kwargs,
                      request_schemas=service2.request_schemas,
                      response_schemas=service2.response_schemas,
                      additional_params={'GET': {'tags': [service2.name]}}),
            mock.call(service3.endpoint, service3, service3.route_kwargs,
                      request_schemas=service3.request_schemas,
                      response_schemas=service3.response_schemas,
                      additional_params={'GET': {'tags': [service3.name]},
                                         'POST': {'tags': [service3.name]}})
        ]
        mock__route_endpoint.assert_has_calls(expected_calls, any_order=True)
Example #11
 def test_can_serve_swagger_files(self):
     model_app = ModelApp([], expose_docs=True)
     app = model_app.app.test_client()
     files = [
         'favicon-16x16.png',
         'favicon-32x32.png',
         'index.html',
         'oauth2-redirect.html',
         'swagger-ui-bundle.js',
         'swagger-ui-bundle.js.map',
         'swagger-ui-standalone-preset.js',
         'swagger-ui-standalone-preset.js.map',
         'swagger-ui.css',
         'swagger-ui.css.map',
         'swagger-ui.js',
         'swagger-ui.js.map',
         'swagger_template.html',
     ]
     for filename in files:
         resp = app.get(f'/assets/swagger-ui/{filename}')
         self.assertEqual(resp.status_code, 200)
Example #12
from porter.datascience import BaseModel
from porter.services import ModelApp, PredictionService
from porter.utils import JSONLogFormatter


class Model(BaseModel):
    def predict(self, X):
        return (X['foo'] % 3) * X['bar']


prediction_svc = PredictionService(model=Model(),
                                   name='my-model',
                                   api_version='v1',
                                   batch_prediction=True,
                                   log_api_calls=True)

app = ModelApp([prediction_svc])

if __name__ == '__main__':
    import logging
    stream_handler = logging.StreamHandler()
    formatter = JSONLogFormatter('asctime', 'levelname', 'module', 'name',
                                 'message', 'request_id', 'request_data',
                                 'response_data', 'service_class', 'event')
    stream_handler.setFormatter(formatter)
    logger = logging.getLogger('porter')
    logger.setLevel('INFO')
    logger.addHandler(stream_handler)
    app.run()
Example #13
    # The version of the model. Returned to the client in the prediction
    # response. Required.
    api_version='v1',
    # preprocessor.process() is called on the POST request data before
    # predicting. Optional.
    preprocessor=preprocessor,
    # postprocessor.process() is called on the model's predictions before
    # returning to the user. Optional.
    postprocessor=Postprocessor(),
    # The input schema is used to validate the payload of the POST request.
    # Optional.
    feature_schema=feature_schema,
    # Whether to validate the request data.
    validate_request_data=True,
    # Whether the API will accept an array of JSON objects to predict on or
    # a single JSON object only.
    batch_prediction=True
)

# The model app is simply a wrapper around the `flask.Flask` object.
model_app = ModelApp([prediction_service])

if __name__ == '__main__':
    # you can run this with `gunicorn app:model_app`, or
    # simply execute this script with Python and send POST requests
    # to localhost:8000/supa-dupa-model/prediction/
    model_app.run(port=8000)
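
# A client sketch, purely illustrative and not part of the original example;
# run it in a separate process once the app above is serving. It assumes the
# app is listening locally on port 8000 at the URL from the comment above.
# The feature names and the per-record 'id' fields are placeholders; the real
# request contract is defined by the feature_schema, which this snippet does
# not show.
import json
import urllib.request

payload = [
    {'id': 1, 'feature1': 10, 'feature2': 0.5},
    {'id': 2, 'feature1': 20, 'feature2': 1.5},
]
req = urllib.request.Request(
    'http://localhost:8000/supa-dupa-model/prediction/',
    data=json.dumps(payload).encode('utf-8'),
    headers={'Content-Type': 'application/json'},
    method='POST')
with urllib.request.urlopen(req) as resp:
    print(json.loads(resp.read()))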
Example #14
    def setUpClass(cls):
        # define objects for model 1
        class Preprocessor1(BasePreProcessor):
            def process(self, X):
                X = X.copy()  # silence SettingWithCopyWarning
                X['feature2'] = X.feature2.astype(str)
                return X

        class Model1(BaseModel):
            feature2_map = {str(x + 1): x for x in range(5)}

            def predict(self, X):
                return X['feature1'] * X.feature2.map(self.feature2_map)

        class Postprocessor1(BasePostProcessor):
            def process(self, X_input, X_preprocessed, predictions):
                return predictions * -1

        feature_schema1 = sc.Object(properties={
            'feature1': sc.Number(),
            'feature2': sc.Number(),
        })

        # define objects for model 2
        class Preprocessor2(BasePreProcessor):
            def process(self, X):
                X['feature3'] = range(len(X))
                return X

        class Model2(BaseModel):
            def predict(self, X):
                return X['feature1'] + X['feature3']

        feature_schema2 = sc.Object(properties={'feature1': sc.Number()})

        def user_check(X):
            if (X.feature1 == 0).any():
                raise exc.UnprocessableEntity

        # define objects for model 3
        class Model3(BaseModel):
            def predict(self, X):
                return X['feature1'] * -1

        feature_schema3 = sc.Object(properties={'feature1': sc.Number()})
        wrong_prediction_schema3 = sc.Number(additional_params=dict(minimum=0))

        cls.prediction_service_error = E = Exception(
            'this mock service failed during prediction')

        class ModelFailing(BaseModel):
            def predict(self, X):
                raise E

        # define configs and add services to app
        prediction_service1 = PredictionService(model=Model1(),
                                                name='a-model',
                                                api_version='v0',
                                                action='predict',
                                                preprocessor=Preprocessor1(),
                                                postprocessor=Postprocessor1(),
                                                feature_schema=feature_schema1,
                                                validate_request_data=True,
                                                batch_prediction=True)
        prediction_service2 = PredictionService(model=Model2(),
                                                name='anotherModel',
                                                api_version='v1',
                                                namespace='n/s/',
                                                preprocessor=Preprocessor2(),
                                                postprocessor=None,
                                                feature_schema=feature_schema2,
                                                validate_request_data=True,
                                                batch_prediction=True,
                                                additional_checks=user_check)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            prediction_service3 = PredictionService(
                model=Model3(),
                name='model-3',
                api_version='v0.0-alpha',
                preprocessor=None,
                postprocessor=None,
                feature_schema=feature_schema3,
                validate_request_data=True,
                validate_response_data=True,
                batch_prediction=False,
                meta={
                    'algorithm': 'randomforest',
                    'lasttrained': 1
                })
            prediction_service4 = PredictionService(
                model=Model3(),
                name='model-4',
                api_version='v0.0-alpha',
                preprocessor=None,
                postprocessor=None,
                feature_schema=feature_schema3,
                validate_request_data=True,
                validate_response_data=True,
                batch_prediction=False,
                meta={
                    'algorithm': 'randomforest',
                    'lasttrained': 1
                })
            prediction_service5 = PredictionService(
                model=Model3(),
                name='model-5',
                api_version='v0.0-alpha',
                preprocessor=None,
                postprocessor=None,
                feature_schema=feature_schema3,
                prediction_schema=wrong_prediction_schema3,
                validate_request_data=True,
                validate_response_data=True,
                batch_prediction=False,
                meta={
                    'algorithm': 'randomforest',
                    'lasttrained': 1
                })
        prediction_service_failing = PredictionService(
            model=ModelFailing(),
            name='failing-model',
            api_version='v1',
            action='fail',
        )
        cls.model_app = ModelApp([
            prediction_service1,
            prediction_service2,
            prediction_service3,
            prediction_service4,
            prediction_service5,
            prediction_service_failing,
        ])
        cls.app = cls.model_app.app.test_client()
Example #15
    def test_readiness_ready_ready2(self, mock_init):
        mock_init.return_value = None
        svc1 = PredictionService()
        svc1.name = 'model1'
        svc1.api_version = '1.0.0'
        svc1.id = 'model1:1.0.0'
        svc1.endpoint = '/model1/1.0.0/prediction'
        svc1.meta = {'foo': 1, 'bar': 2}
        svc1.response_schemas = {}
        svc1.request_schemas = {}
        svc2 = PredictionService()
        svc2.name = 'model2'
        svc2.api_version = 'v0'
        svc2.id = 'model2:v0'
        svc2.endpoint = '/model2/v0/prediction'
        svc2.meta = {'foo': 1}
        svc2.response_schemas = {}
        svc2.request_schemas = {}

        model_app = ModelApp([svc1, svc2])
        app = model_app.app.test_client()

        resp_alive = app.get('/-/alive')
        resp_ready = app.get('/-/ready')
        expected_data = {
            'request_id': '123',
            'porter_version': __version__,
            'deployed_on': cn.HEALTH_CHECK_VALUES.DEPLOYED_ON,
            'app_meta': {
                'description': '<div></div><div><p>(porter v0.15.3)</p></div>',
                'expose_docs': False,
                'name': None,
                'version': None
            },
            'services': {
                'model1:1.0.0': {
                    'status': 'READY',
                    'endpoint': '/model1/1.0.0/prediction',
                    'model_context': {
                        'model_name': 'model1',
                        'api_version': '1.0.0',
                        'model_meta': {
                            'foo': 1,
                            'bar': 2
                        },
                    }
                },
                'model2:v0': {
                    'status': 'READY',
                    'endpoint': '/model2/v0/prediction',
                    'model_context': {
                        'model_name': 'model2',
                        'api_version': 'v0',
                        'model_meta': {
                            'foo': 1
                        },
                    }
                }
            }
        }
        self.assertEqual(resp_alive.status_code, 200)
        self.assertEqual(resp_ready.status_code, 200)
        alive_response = json.loads(resp_alive.data)
        ready_response = json.loads(resp_ready.data)
        self.assertEqual(alive_response, expected_data)
        self.assertEqual(ready_response, expected_data)
        # make sure the defined schema matches reality
        sc.health_check.validate(alive_response)  # should not raise exception
        sc.health_check.validate(ready_response)  # should not raise exception
Example #16
        raise PorterException('input cannot include zeros', code=422)


input_schema = sc.Array(item_type=sc.Number(), reference_name='InputSchema')
output_schema = sc.Number(reference_name='OutputSchema')
service_kw = dict(input_schema=input_schema,
                  output_schema=output_schema,
                  validate_request_data=True)

sum_service = FunctionService('sum',
                              sum,
                              name='math',
                              api_version='v1',
                              **service_kw)
prod_service = FunctionService('prod',
                               prod,
                               name='math',
                               api_version='v1',
                               additional_checks=check_for_zeros,
                               **service_kw)

app = ModelApp(
    [sum_service, prod_service],
    name='FunctionService Example',
    description='Expose arbitrary callable functions by subclassing BaseService.',
    expose_docs=True)

if __name__ == '__main__':
    app.run()
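
# The definitions referenced above ('sum', 'prod', 'check_for_zeros', and the
# PorterException import) live in a part of this example that is not shown.
# 'sum' can simply be the Python builtin. Below is a minimal sketch of a
# compatible 'prod', assuming the request payload arrives as the list of
# numbers declared by input_schema; this is an assumption, not the original
# code.
import math


def prod(data):
    # multiply all of the numbers in the request payload together
    # (math.prod requires Python 3.8+)
    return math.prod(data)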
Example #17
feature_schema = Object(
    'Inputs to the ratings model',
    properties=dict(
        user_id=Integer('The user ID.'),
        title_id=Integer('The title ID.'),
        is_tv=Boolean('Whether the content is a TV show.'),
        genre=String('The genre.',
                     additional_params={'enum': ['comedy', 'action', 'drama']}),
        average_rating=Number('The title\'s average rating.',
                              additional_params={'minimum': 0, 'maximum': 10}),
    ),
    reference_name='RatingsModelFeatures'
)

# build the prediction service
prediction_service = PredictionService(
    model=my_model,
    name='my-model',
    api_version='v1',
    feature_schema=feature_schema,
    validate_request_data=True)

app = ModelApp(
    [prediction_service],
    name='Example Model',
    description='Minimal example of a model with input validation and documentation.',
    expose_docs=True)

if __name__ == '__main__':
    app.run()
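
# A hedged illustration of a record that satisfies the RatingsModelFeatures
# schema above: integer ids, a boolean is_tv flag, a genre restricted to the
# declared enum, and an average_rating between 0 and 10. Whether the service
# expects a single object or an array of such objects depends on the
# batch_prediction setting, which is left at its default here.
example_record = {
    'user_id': 42,
    'title_id': 1001,
    'is_tv': True,
    'genre': 'comedy',
    'average_rating': 7.5,
}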
Example #18
 def test_liveness_live(self):
     model_app = ModelApp([])
     app = model_app.app.test_client()
     resp = app.get('/-/alive')
     self.assertEqual(resp.status_code, 200)
import threading
import urllib.request

from porter.services import ModelApp, PredictionService

service1 = PredictionService(model=None, name='a-model', api_version='0.0.0')

service2 = PredictionService(model=None,
                             name='yet-another-model',
                             api_version='1.0.0')

service3 = PredictionService(model=None,
                             name='yet-another-yet-another-model',
                             api_version='1.0.0-alpha',
                             meta={'arbitrary details': 'about the model'})

model_app = ModelApp([service1, service2, service3])


def get(url):
    with urllib.request.urlopen(url) as f:
        return f.read()


def run_app(model_app):
    t = threading.Thread(target=model_app.run, daemon=True)
    t.start()


class Shhh:
    """Silence flask logging."""
    def __init__(self):
Example #20
By default you can find the OpenAPI documentation at the endpoint `/docs/`, but
this too can be customized.
"""

# define the services we want to serve from
services = [
    instance_prediction_service,
    batch_prediction_service,
    probabilistic_service,
    spark_interface_service,
    custom_service]


model_app = ModelApp(
    services,
    name='Example Model',
    description='An unhelpful description of what this application does.',
    expose_docs=True)


"""
These are just some convenience functions to test the example.
"""


class Shhh:
    """Silence flask logging."""

    def __init__(self):
        self.devnull = open(os.devnull, 'w')
        self.stdout = sys.stdout