def model_interface(model_meta: 'core.Model'):
    """
    Creates an interface from given model with the only `predict` method.

    Methods signature is determined via metadata associated with given model.

    :param model_meta: model to create interface for
    :return: instance of :class:`.Interface` implementation
    """
    rlogger.debug('Creating interface for model %s', model_meta)
    # Bind the (de)serialization types from metadata so the generated method
    # signature carries them as annotations.
    input_type, output_type = model_meta.input_meta, model_meta.output_meta

    class MLModelInterface(Interface):
        """Interface exposing a single `predict` method for one model."""

        def __init__(self, model):
            self.model = model

        @expose
        def predict(self, vector: input_type) -> output_type:
            rlogger.debug('predicting given %s', vector)
            prediction = self.model.predict(vector)
            rlogger.debug('prediction: %s', prediction)
            return output_type.serialize(prediction)

    return MLModelInterface(model_meta.wrapper)
def create_interface_routes(app, interface: Interface):
    """Register one POST route on the aiohttp *app* per method exposed by *interface*."""
    for name in interface.exposed_methods():
        signature = interface.exposed_method_signature(name)
        rlogger.debug('registering %s with input type %s and output type %s',
                      name, signature.args, signature.output)
        spec = create_spec(name, signature)
        handler = create_executor_function(interface, name, spec)
        app.router.add_post('/' + name, handler)
def run(self, interface: Interface):
    """Build an aiohttp application exposing *interface* over HTTP and serve it.

    Registers interface routes, the schema route and misc routes, mounts
    Swagger UI at ``/apidocs``, then blocks in ``web.run_app``.

    :param interface: runtime interface to expose via HTTP
    """
    app = web.Application()
    create_interface_routes(app, interface)
    create_schema_route(app, interface)
    create_misc_routes(app)
    # Swagger UI v3 served from /apidocs, built from the registered routes.
    setup_swagger(app, swagger_url="/apidocs", ui_version=3)
    rlogger.debug('Running aiohttp on %s:%s', HTTPServerConfig.host, HTTPServerConfig.port)
    # Blocking call: runs the event loop until the server is stopped.
    web.run_app(app, host=HTTPServerConfig.host, port=HTTPServerConfig.port)
def ef():
    """Flask view: execute the closed-over *method* on *interface* for the current request.

    Returns a file download for stream-like results, otherwise a JSON envelope
    ``{'ok': True, 'data': ...}``. Execution failures are re-raised as
    :class:`FlaskServerError`.
    """
    # Pull and deserialize the method's arguments from the active flask request.
    data = _extract_request_data(interface.exposed_method_args(method))
    try:
        result = interface.execute(method, data)
        # Stream-like results (anything with .read) are returned as a file download.
        if hasattr(result, 'read'):
            rlogger.debug('Got response for [%s]: <binary content>', flask.g.ebonite_id)
            return send_file(result, attachment_filename=getattr(result, 'name', None))
        response = {'ok': True, 'data': result}
        rlogger.debug('Got response for [%s]: %s', flask.g.ebonite_id, response)
        if VALIDATE:
            # Optionally check the payload against the generated response schema.
            validate(response, specs=spec, definition='response_{}'.format(method))
        return jsonify(response)
    except ExecutionError as e:
        # Translate interface-level execution failures to an HTTP server error.
        raise FlaskServerError(*e.args)
def run(self, interface: Interface):
    """
    Starts flask service

    :param interface: runtime interface to expose via HTTP
    """
    global current_app
    app = self._create_app()
    self._prepare_app(app, interface)
    # Publish the prepared app at module level for external runners (e.g. uwsgi).
    current_app = app
    if not FlaskConfig.run_flask:
        rlogger.debug('Skipping direct flask application run')
        return
    rlogger.debug('Running flask on %s:%s', HTTPServerConfig.host, HTTPServerConfig.port)
    app.run(HTTPServerConfig.host, HTTPServerConfig.port)
def _extract_request_data(method_args):
    """Extract and deserialize the invoked method's arguments from the active flask request.

    JSON bodies are deserialized field-by-field using the type declared for
    each argument; any other content type is treated as a form submission and
    form fields plus uploaded files are collected as-is.

    :param method_args: argument descriptors (with ``name`` and ``type``) of the method being invoked
    :return: dict mapping argument names to (deserialized) values
    :raises WrongArgumentsError: if a JSON body contains an argument name not declared by the method
    """
    args = {a.name: a for a in method_args}
    if request.content_type == 'application/json':
        request_data = request.json
        try:
            # Deserialize each field with the type declared for that argument.
            request_data = {k: deserialize(v, args[k].type) for k, v in request_data.items()}
        except KeyError:
            # An unknown key means the client sent arguments the method doesn't declare.
            raise WrongArgumentsError(args.keys(), request_data.keys())
    else:
        # Non-JSON request: merge plain form fields with uploaded files.
        request_data = dict(itertools.chain(request.form.items(), request.files.items()))
    rlogger.debug('Got request[%s] with data %s', flask.g.ebonite_id, request_data)
    return request_data
def model_interface(model_meta: Model):
    """
    Creates an interface from given model with methods exposed by wrapper

    Methods signature is determined via metadata associated with given model.

    :param model_meta: model to create interface for
    :return: instance of :class:`.Interface` implementation
    """
    rlogger.debug('Creating interface for model %s', model_meta)

    class MLModelInterface(Interface):
        """Interface exposing every method the model wrapper declares."""

        def __init__(self, model):
            self.model = model
            # Copy the class-level registries so this instance gets its own,
            # extended with one entry per method exposed by the wrapper.
            exposed = {**self.exposed}
            executors = {**self.executors}
            for name in self.model.exposed_methods:
                in_type, out_type = self.model.method_signature(name)
                # Every exposed method takes a single 'vector' argument.
                exposed[name] = Signature([Field("vector", in_type, False)], Field(None, out_type, False))
                executors[name] = self._exec_factory(name, out_type)
            self.exposed = exposed
            self.executors = executors

        def _exec_factory(self, name, out_type):
            # Build the executor closure for one exposed wrapper method.
            model = self.model

            def _exec(**kwargs):
                input_data = kwargs['vector']
                rlogger.debug('calling %s given %s', name, input_data)
                output_data = model.call_method(name, input_data)
                rlogger.debug('%s returned: %s', name, output_data)
                return out_type.serialize(output_data)

            if model_meta.description is not None:
                # Propagate the model description so it appears in generated docs.
                _exec.__doc__ = model_meta.description
            return _exec

    return MLModelInterface(model_meta.wrapper)
async def ef(request):
    """aiohttp handler: execute the closed-over *method* on *interface* for one request.

    JSON bodies are deserialized via the interface metadata; any other content
    type is read as a POST form and each uploaded part is passed through as its
    file object. ``bytes`` results are returned as PNG content, everything else
    as a JSON response.

    :param request: incoming :class:`aiohttp.web.Request`
    :return: :class:`aiohttp.web.Response` with the execution result or an error body
    """
    # Per-request id used to correlate log records across the request lifecycle.
    ebonite_id = str(uuid.uuid4())
    # Fix: the format string has two placeholders but only request.headers was
    # passed, so the logging call produced a formatting error instead of a record.
    rlogger.debug('Headers for [%s]: %s', ebonite_id, request.headers)
    try:
        if request.content_type == 'application/json':
            request_data = BaseHTTPServer._deserialize_json(
                interface, method, await request.json())
        else:
            # Form/multipart body: expose each uploaded part as its file object.
            request_data = {
                k: v.file for k, v in dict(await request.post()).items()
            }
        result = BaseHTTPServer._execute_method(interface, method, request_data, ebonite_id)
        if isinstance(result, bytes):
            return web.Response(body=result, content_type='image/png')
        return web.json_response(result)
    except MalformedHTTPRequestException as e:
        return web.json_response(e.response_body(), status=e.code())
def pipeline_interface(pipeline_meta: Pipeline):
    """
    Creates an interface from given pipeline with `run` method

    Method signature is determined via metadata associated with given pipeline.

    :param pipeline_meta: pipeline to create interface for
    :return: instance of :class:`.Interface` implementation
    """
    rlogger.debug('Creating interface for pipeline %s', pipeline_meta)

    class PipelineInterface(Interface):
        """Interface exposing a single `run` method for one pipeline."""

        def __init__(self, pipeline):
            self.pipeline = pipeline

        @expose
        def run(self, data: pipeline_meta.input_data) -> pipeline_meta.output_data:
            rlogger.debug('running pipeline given %s', data)
            output_data = self.pipeline.run(data)
            rlogger.debug('run returned: %s', output_data)
            return pipeline_meta.output_data.serialize(output_data)

    # NOTE: unlike model_interface, the metadata object itself (not a wrapper)
    # is passed in — `run` is called on pipeline_meta directly.
    return PipelineInterface(pipeline_meta)
def _execute_method(interface: Interface, method: str, request_data, ebonite_id: str):
    """Execute *method* on *interface*, translating failures to HTTP exceptions.

    :param interface: runtime interface holding the method
    :param method: name of the exposed method to call
    :param request_data: already-deserialized arguments for the method
    :param ebonite_id: per-request id used to correlate log records
    :return: raw ``bytes`` for binary results, otherwise ``{'ok': True, 'data': ...}``
    :raises MalformedHTTPRequestException: on execution or serialization failure
    """
    rlogger.debug('Got request for [%s]: %s', ebonite_id, request_data)
    try:
        result = interface.execute(method, request_data)
    except (ExecutionError, SerializationError) as e:
        raise MalformedHTTPRequestException(e.args[0])
    if isinstance(result, bytes):
        rlogger.debug('Got response for [%s]: <binary content>', ebonite_id)
        return result
    # Fix: ebonite_id was missing from the log arguments — the format string has
    # two placeholders, so the original call produced a logging error.
    rlogger.debug('Got response for [%s]: %s', ebonite_id, result)
    return {'ok': True, 'data': result}
def create_interface_routes(app, interface: Interface):
    """Attach one flask route per method exposed by *interface*."""
    for name in interface.exposed_methods():
        signature = interface.exposed_method_signature(name)
        rlogger.debug('registering %s with input type %s and output type %s',
                      name, signature.args, signature.output)
        _register_method(app, interface, name, signature)
def create_schema_route(app, interface: Interface):
    """Expose the interface descriptor as JSON under /interface.json (flask)."""
    descriptor = InterfaceDescriptor.from_interface(interface).to_dict()
    rlogger.debug('Creating /interface.json route with schema: %s', descriptor)
    app.add_url_rule('/interface.json', 'schema', lambda: jsonify(descriptor))
def _exec(**kwargs):
    """Run one wrapper method on the payload passed under the 'vector' key and serialize the result."""
    payload = kwargs['vector']
    rlogger.debug('calling %s given %s', name, payload)
    result = model.call_method(name, payload)
    rlogger.debug('%s returned: %s', name, result)
    return out_type.serialize(result)
def run(self, data: pipeline_meta.input_data) -> pipeline_meta.output_data:
    """Execute the wrapped pipeline on *data* and serialize its output."""
    rlogger.debug('running pipeline given %s', data)
    result = self.pipeline.run(data)
    rlogger.debug('run returned: %s', result)
    return pipeline_meta.output_data.serialize(result)
def create_schema_route(app, interface: Interface):
    """Expose the interface descriptor as JSON under /interface.json (aiohttp)."""
    descriptor = InterfaceDescriptor.from_interface(interface).to_dict()
    rlogger.debug('Creating /interface.json route with schema: %s', descriptor)

    def schema_handler(request):
        return web.json_response(descriptor)

    app.router.add_get('/interface.json', schema_handler)
def predict(self, vector: input_type) -> output_type:
    """Run the model's predict on *vector* and serialize the result."""
    rlogger.debug('predicting given %s', vector)
    prediction = self.model.predict(vector)
    rlogger.debug('prediction: %s', prediction)
    return output_type.serialize(prediction)