Example #1
0
 def from_http_request(self, req: HTTPRequest) -> InferenceTask[BinaryIO]:
     """Build an InferenceTask carrying one binary stream from an HTTP request.

     A multipart/form-data request must contain exactly one file field.
     Any other request with a non-empty body is wrapped as an in-memory
     BytesIO stream.  Requests fitting neither case are discarded with
     HTTP 400.

     :param req: the incoming HTTPRequest
     :return: an InferenceTask whose data is a file-like object, or a
         discarded task on invalid input
     """
     if req.parsed_headers.content_type == 'multipart/form-data':
         _, _, files = HTTPRequest.parse_form_data(req)
         if len(files) != 1:
             task = InferenceTask(data=None)
             task.discard(
                 http_status=400,
                 # BUGFIX: the original message read "one and at least one
                 # file", which contradicts the exactly-one check above.
                 err_msg=
                 f"BentoML#{self.__class__.__name__} requires exactly"
                 " one file at a time, if you just upgraded from"
                 " bentoml 0.7, you may need to use MultiFileAdapter instead",
             )
         else:
             # Exactly one file present: take it regardless of field name.
             input_file = next(iter(files.values()))
             task = InferenceTask(
                 context=InferenceContext(http_headers=req.parsed_headers),
                 data=input_file,
             )
     elif req.body:
         # Non-multipart request with a body: treat the raw body as the file.
         task = InferenceTask(
             context=InferenceContext(http_headers=req.parsed_headers),
             data=io.BytesIO(req.body),
         )
     else:
         task = InferenceTask(data=None)
         task.discard(
             http_status=400,
             err_msg=
             f'BentoML#{self.__class__.__name__} unexpected HTTP request'
             ' format',
         )
     return task
Example #2
0
 def from_aws_lambda_event(self, event):
     """Build an InferenceTask from an AWS Lambda (API Gateway) proxy event.

     The base64-encoded body of an ``image/*`` request is decoded into a
     FileLike named after the subtype (e.g. ``default.png``).  Any other
     content type is discarded with HTTP 400.

     :param event: the Lambda proxy event dict (``headers`` / ``body`` keys)
     :return: an InferenceTask, discarded on unsupported content types
     """
     # Read the header once; default to "" so a missing Content-Type
     # falls through to the discard branch instead of raising KeyError.
     content_type = event["headers"].get("Content-Type", "")
     # BUGFIX: standard MIME types are "image/png", "image/jpeg", ... —
     # the original prefix "images/" never matches a real Content-Type.
     if content_type.startswith("image/"):
         img_bytes = base64.b64decode(event["body"])
         _, ext = content_type.split('/')
         f = FileLike(bytes_=img_bytes, name=f"default.{ext}")
         task = InferenceTask(data=(f, ))
     else:
         task = InferenceTask(data=None)
         task.discard(
             http_status=400,
             err_msg="BentoML currently doesn't support Content-Type: "
             "{content_type} for AWS Lambda".format(
                 content_type=content_type),
         )
     return task
Example #3
0
 def from_http_request(self, req: HTTPRequest) -> MultiImgTask:
     """Build a multi-image InferenceTask from an HTTP request.

     With a single declared input name, either multipart/form-data (the
     first uploaded file is taken) or a raw image body is accepted.  With
     multiple declared inputs, only multipart/form-data is accepted and
     form fields are matched against ``self.input_names``.  Invalid
     requests are discarded with HTTP 400.

     :param req: the incoming HTTPRequest
     :return: an InferenceTask whose data is a tuple of file-like objects
     """
     if len(self.input_names) == 1:
         # broad parsing while single input
         if req.parsed_headers.content_type == 'multipart/form-data':
             _, _, files = HTTPRequest.parse_form_data(req)
             if not any(files):
                 task = InferenceTask(data=None)
                 task.discard(
                     http_status=400,
                     # BUGFIX: restored the missing space between "inputs"
                     # and "fields" (the concatenated f-strings previously
                     # produced "inputsfields"), matching the other branches.
                     err_msg=
                     f"BentoML#{self.__class__.__name__} requires inputs "
                     f"fields {self.input_names}",
                 )
             else:
                 f = next(iter(files.values()))
                 task = InferenceTask(
                     context=InferenceContext(
                         http_headers=req.parsed_headers),
                     data=(f, ),
                 )
         else:
             # for images/*
             task = InferenceTask(
                 context=InferenceContext(http_headers=req.parsed_headers),
                 data=(io.BytesIO(req.body), ),
             )
     elif req.parsed_headers.content_type == 'multipart/form-data':
         _, _, files = HTTPRequest.parse_form_data(req)
         files = tuple(files.get(k) for k in self.input_names)
         # Merged the two previously identical discard branches: reject when
         # no declared field matched, or when some fields are missing and
         # None values are not allowed.
         if not any(files) or (not all(files) and not self.allow_none):
             task = InferenceTask(data=None)
             task.discard(
                 http_status=400,
                 err_msg=f"BentoML#{self.__class__.__name__} requires inputs "
                 f"fields {self.input_names}",
             )
         else:
             task = InferenceTask(
                 context=InferenceContext(http_headers=req.parsed_headers),
                 data=files,
             )
     else:
         task = InferenceTask(data=None)
         task.discard(
             http_status=400,
             err_msg=
             f"BentoML#{self.__class__.__name__} with multiple inputs "
             "accepts requests with Content-Type: multipart/form-data only",
         )
     return task
Example #4
0
 def from_http_request(self, req: HTTPRequest) -> MultiFileTask:
     """Build a multi-file InferenceTask from a multipart HTTP request.

     Only multipart/form-data is accepted; form fields are matched against
     ``self.input_names``.  Requests with no matching field, or with
     missing fields when ``self.allow_none`` is false, are discarded with
     HTTP 400.

     NOTE(review): this variant reads ``req.headers`` and passes
     ``http_headers=`` directly, while sibling adapters use
     ``req.parsed_headers`` wrapped in an InferenceContext — confirm this
     asymmetry is intentional before unifying.

     :param req: the incoming HTTPRequest
     :return: an InferenceTask whose data is a tuple of file-like objects
     """
     if req.headers.content_type != 'multipart/form-data':
         task = InferenceTask(data=None)
         task.discard(
             http_status=400,
             err_msg=
             f"BentoML#{self.__class__.__name__} only accepts requests "
             "with Content-Type: multipart/form-data",
         )
     else:
         _, _, files = HTTPRequest.parse_form_data(req)
         files = tuple(files.get(k) for k in self.input_names)
         # Merged the two previously identical discard branches: reject when
         # no declared field matched, or when some fields are missing and
         # None values are not allowed.
         if not any(files) or (not all(files) and not self.allow_none):
             task = InferenceTask(data=None)
             task.discard(
                 http_status=400,
                 err_msg=f"BentoML#{self.__class__.__name__} requires inputs "
                 f"fields {self.input_names}",
             )
         else:
             task = InferenceTask(
                 http_headers=req.headers,
                 data=files,
             )
     return task
Example #5
0
 def from_http_request(self, req: HTTPRequest) -> InferenceTask[bytes]:
     """Build a raw-bytes InferenceTask, transparently gunzipping the body.

     Supports Content-Encoding "gzip"/"x-gzip" (decompressed), "" and
     "identity" (passed through).  Malformed gzip bodies are discarded
     with HTTP 400; any other encoding is discarded with HTTP 415.

     :param req: the incoming HTTPRequest
     :return: an InferenceTask whose data is the (decoded) body bytes
     """
     # zlib is only needed to classify decompression failures below.
     import zlib

     encoding = req.parsed_headers.content_encoding
     if encoding in {"gzip", "x-gzip"}:
         # https://tools.ietf.org/html/rfc7230#section-4.2.3
         try:
             return InferenceTask(
                 http_headers=req.parsed_headers,
                 data=gzip.decompress(req.body),
             )
         # BUGFIX: gzip.decompress can also raise EOFError (truncated
         # stream) and zlib.error (corrupt deflate data), not only
         # OSError/BadGzipFile — catching OSError alone let malformed
         # bodies escape as server errors.
         except (OSError, EOFError, zlib.error):
             task = InferenceTask(data=None)
             task.discard(http_status=400,
                          err_msg="Gzip decompression error")
             return task
     elif encoding in ["", "identity"]:
         return InferenceTask(
             http_headers=req.parsed_headers,
             data=req.body,
         )
     else:
         task = InferenceTask(data=None)
         task.discard(http_status=415, err_msg="Unsupported Media Type")
         return task
Example #6
0
 def predict_strict_json(self, input_data, task: InferenceTask = None):
     """Predict on one JSON payload, enforcing the application/json type.

     :param input_data: a single parsed JSON value to predict on
     :param task: the originating InferenceTask; when provided, requests
         whose Content-Type is not application/json are discarded
     :return: the model's prediction, or None when the task was discarded
     """
     # BUGFIX: the declared default ``task=None`` previously crashed with
     # AttributeError on ``task.http_headers``; only enforce the
     # content-type check when a task is actually supplied.
     if task is not None and task.http_headers.content_type != "application/json":
         task.discard(http_status=400, err_msg="application/json only")
         return
     result = self.artifacts.model.predict_json([input_data])[0]
     return result