def pack(self, data):  # pylint:disable=arguments-differ
    """Pack a Keras model (plus optional custom objects) into this artifact.

    Args:
        data: either a keras / tf.keras model instance, or a dict with key
            'model' and optionally 'custom_objects'.

    Returns:
        A _KerasModelArtifactWrapper holding the model.

    Raises:
        MissingDependencyException: if tensorflow cannot be imported.
        InvalidArgument: if `data` is not a keras/tf.keras model.
    """
    try:
        import tensorflow as tf
    except ImportError:
        raise MissingDependencyException(
            "Tensorflow package is required to use KerasModelArtifact. BentoML "
            "currently only support using Keras with Tensorflow backend.")

    if isinstance(data, dict):
        model = data['model']
        # Fall back to the artifact-level custom_objects when not supplied
        # (dict.get replaces the previous membership-test-then-subscript).
        custom_objects = data.get('custom_objects', self.custom_objects)
    else:
        model = data
        custom_objects = self.custom_objects

    if not isinstance(model, tf.keras.models.Model):
        error_msg = (
            "KerasModelArtifact#pack expects model argument to be type: "
            "keras.engine.network.Network, tf.keras.models.Model, or their "
            "aliases, instead got type: {}".format(type(model)))
        try:
            import keras

            if not isinstance(model, keras.engine.network.Network):
                raise InvalidArgument(error_msg)
            # Remember which keras implementation produced this model so it
            # can be restored with the same module later.
            self._keras_module_name = keras.__name__
        except ImportError:
            raise InvalidArgument(error_msg)

    self.bind_keras_backend_session()
    # Build the predict function eagerly (private Keras API) so the model is
    # usable outside the thread that created it — TODO confirm still needed
    # on newer tf versions.
    model._make_predict_function()
    return _KerasModelArtifactWrapper(self, model, custom_objects)
def read_dataframes_from_json_n_csv(
    # NOTE(review): despite the annotation, `datas` appears to carry raw
    # json/csv payloads (str or rows), not DataFrames — confirm with callers.
    datas: Iterable["pd.DataFrame"],
    content_types: Iterable[str],
    orient: str = None,
) -> ("pd.DataFrame", Iterable[slice]):
    '''
    Load dataframes from multiple raw payloads in json or csv format,
    efficiently.

    Background: each call of pandas.read_csv or pandas.read_json costs about
    100ms, no matter how many lines it contains. Concatenating the json/csv
    payloads before a single read_csv call improves performance.
    '''
    # pandas is imported lazily at module level; a falsy value means it is
    # unavailable.
    if not pd:
        raise MissingDependencyException('pandas required')
    try:
        # Normalize each input into a (csv_text, table_id) pair; non-string
        # inputs are treated as rows of cells, quoted and joined into csv.
        rows_csv_with_id = [
            (tds if isinstance(tds, str) else ','.join(map(_csv_quote, tds)),
             table_id)
            for tds, table_id in _dataframe_csv_from_input(
                datas, content_types, itertools.repeat(orient))
        ]
    except (TypeError, ValueError) as e:
        raise BadInput('Invalid input format for DataframeInput') from e

    str_csv = [r for r, _ in rows_csv_with_id]
    df_str_csv = '\n'.join(str_csv)
    # Single read_csv over the concatenated payloads — this is the whole
    # point of the batching.
    df_merged = pd.read_csv(StringIO(df_str_csv), index_col=None)

    # NOTE(review): the first table id is dropped here — presumably it
    # belongs to the header row; confirm against _gen_slice's expectations.
    dfs_id = [i for _, i in rows_csv_with_id][1:]
    slices = _gen_slice(dfs_id)
    return df_merged, slices
def _load_tf_saved_model(path):
    """Load a SavedModel from `path`, supporting both TF1 and TF2 runtimes."""
    try:
        import tensorflow as tf
        from tensorflow.python.training.tracking.tracking import AutoTrackable
    except ImportError:
        raise MissingDependencyException(
            "Tensorflow package is required to use TfSavedModelArtifact")

    # TF2 can load directly; under TF1 we go through the compat.v2 loader.
    if tf.__version__.startswith('2'):
        return tf.saved_model.load(path)

    loaded = tf.compat.v2.saved_model.load(path)
    if isinstance(loaded, AutoTrackable) and not hasattr(loaded, "__call__"):
        logger.warning(
            '''Importing SavedModels from TensorFlow 1.x.
`outputs = imported(inputs)` is not supported in bento service due to
tensorflow API.

Recommended usage:

```python
from tensorflow.python.saved_model import signature_constants

imported = tf.saved_model.load(path_to_v1_saved_model)
wrapped_function = imported.signatures[
    signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
wrapped_function(tf.ones([]))
```

See https://www.tensorflow.org/api_docs/python/tf/saved_model/load for
details.
''')
    return loaded
def save(self, dst):
    """Serialize the packed object as a SavedModel under `dst`."""
    try:
        import tensorflow as tf
    except ImportError:
        raise MissingDependencyException(
            "Tensorflow package is required to use TfSavedModelArtifact.")

    export_path = self.spec._saved_model_path(dst)
    if tf.__version__.startswith('2'):
        return tf.saved_model.save(
            self.obj,
            export_path,
            signatures=self.signatures,
            options=self.options,
        )

    # TF1's saver has no `options` parameter; warn that it is dropped.
    if self.options:
        logger.warning(
            "Parameter 'options: %s' is ignored when using Tensorflow "
            "version 1",
            str(self.options),
        )
    return tf.saved_model.save(
        self.obj, export_path, signatures=self.signatures
    )
def __init__(
    self,
    name,
    custom_objects=None,
    model_extension=".h5",
    store_as_json_and_weights=False,
):
    """Keras model artifact; requires an importable TensorFlow backend.

    Args:
        name: artifact name.
        custom_objects: optional dict of custom Keras objects for loading.
        model_extension: file extension used when storing the model.
        store_as_json_and_weights: store architecture json + weights instead
            of a single model file.
    """
    super(KerasModelArtifact, self).__init__(name)

    # Fail fast if tensorflow is unavailable — everything below needs it.
    try:
        import tensorflow as tf
    except ImportError:
        raise MissingDependencyException(
            "Tensorflow package is required to use KerasModelArtifact. BentoML "
            "currently only support using Keras with Tensorflow backend."
        )

    self._model_extension = model_extension
    self._store_as_json_and_weights = store_as_json_and_weights
    # Assume the tf.keras implementation until pack() learns otherwise.
    self._keras_module_name = tf.keras.__name__
    self.custom_objects = custom_objects
    # Backend session/graph are captured later by bind_keras_backend_session.
    self.graph = None
    self.sess = None
def pack(
    self,
    easyocr_model,
    metadata=None,
    recog_network="english_g2",
    lang_list=None,
    detect_model="craft_mlt_25k",
    gpu=False,
):  # pylint:disable=arguments-differ
    """Pack an easyocr Reader plus the parameters needed to rebuild it.

    Args:
        easyocr_model: an easyocr.easyocr.Reader instance.
        metadata: unused; kept for artifact API compatibility.
        recog_network: recognition network name used to create the reader.
        lang_list: languages the reader was built for; defaults to ['en'].
        detect_model: detection model name used to create the reader.
        gpu: whether the reader runs on GPU.

    Raises:
        MissingDependencyException: easyocr missing or older than 1.3.
        InvalidArgument: `easyocr_model` is not an easyocr Reader.
    """
    try:
        import easyocr  # noqa # pylint: disable=unused-import
    except ImportError:
        raise MissingDependencyException(
            "easyocr>=1.3 package is required to use EasyOCRArtifact")

    # Compare version components numerically: the previous string comparison
    # would wrongly reject e.g. "1.10" (< "1.3" lexicographically), and being
    # an `assert` it vanished entirely under `python -O`.
    major_minor = tuple(
        int(part) for part in easyocr.__version__.split(".")[:2])
    if major_minor < (1, 3):
        raise MissingDependencyException(
            "easyocr>=1.3 package is required to use EasyOCRArtifact")

    # isinstance (rather than exact type check) also accepts Reader
    # subclasses, which is backward-compatible.
    if not isinstance(easyocr_model, easyocr.easyocr.Reader):
        raise InvalidArgument(
            "'easyocr_model' must be of type easyocr.easyocr.Reader")

    if not lang_list:
        lang_list = ['en']
    self._model = easyocr_model
    self._detect_model = detect_model
    self._recog_network = recog_network
    self._gpu = gpu
    # Stored so load() can reconstruct an equivalent Reader later.
    self._model_params = {
        "lang_list": lang_list,
        "recog_network": recog_network,
        "gpu": gpu,
    }
    return self
def _get_onnx_inference_session(self):
    """Create an onnxruntime InferenceSession for the packed ONNX model.

    Raises:
        MissingDependencyException: onnxruntime is not installed.
        BentoMLException: unsupported backend, or no model source available.
    """
    # Guard clause: only the onnxruntime backend is implemented.
    if self.spec.backend != "onnxruntime":
        raise BentoMLException(
            f'"{self.spec.backend}" runtime is currently not supported for '
            f'OnnxModelArtifact'
        )
    try:
        import onnxruntime
    except ImportError:
        raise MissingDependencyException(
            '"onnxruntime" package is required for inferencing with onnx '
            'runtime as backend'
        )

    # Prefer the in-memory proto when available; fall back to the file path.
    if self._model_proto:
        logger.info(
            "Initializing onnxruntime InferenceSession with onnx.ModelProto "
            "instance"
        )
        return onnxruntime.InferenceSession(
            self._model_proto.SerializeToString()
        )
    if self._onnx_model_path:
        logger.info(
            "Initializing onnxruntime InferenceSession from onnx file:"
            f"'{self._onnx_model_path}'"
        )
        return onnxruntime.InferenceSession(self._onnx_model_path)
    raise BentoMLException("OnnxModelArtifact in bad state")
def __init__(
    self,
    typ: str = "frame",
    orient: Optional[str] = None,
    columns: Sequence[str] = None,
    dtype: Mapping[str, object] = None,
    input_dtypes: Mapping[str, object] = None,
    **base_kwargs,
):
    """DataframeInput adapter configuration.

    Args:
        typ: only "frame" is implemented.
        orient: pandas JSON orient; must be a valid pandas option.
        columns: expected column names.
        dtype: column dtypes, either a mapping keyed by column name or a
            positional list/tuple.
        input_dtypes: deprecated alias for `dtype`.

    Raises:
        MissingDependencyException: pandas is not installed.
        NotImplementedError: `typ` is not "frame".
    """
    super().__init__(**base_kwargs)
    # `input_dtypes` is the legacy spelling; `dtype` wins when both given.
    dtype = dtype if dtype is not None else input_dtypes

    # Verify pandas imported properly and retry import if it has failed initially
    if pandas is None:
        raise MissingDependencyException(
            "Missing required dependency 'pandas' for DataframeInput, install "
            "with `pip install pandas`")
    if typ != "frame":
        raise NotImplementedError()
    assert not orient or orient in PANDAS_DATAFRAME_TO_JSON_ORIENT_OPTIONS, (
        f"Invalid option 'orient'='{orient}', valid options are "
        f"{PANDAS_DATAFRAME_TO_JSON_ORIENT_OPTIONS}")
    assert (columns is None or dtype is None
            or set(dtype) == set(columns)), "dtype must match columns"

    self.typ = typ
    self.orient = orient
    self.columns = columns
    if isinstance(dtype, (list, tuple)):
        # Positional dtypes: map column index -> dtype. dict(enumerate(...))
        # replaces the previous comprehension that shadowed `dtype`.
        self.dtype = dict(enumerate(dtype))
    else:
        self.dtype = dtype
def _get_torch_script_model(model_path): try: from torch import jit except ImportError: raise MissingDependencyException( '"torch" package is required for inference with ' 'PytorchLightningModelArtifact') return jit.load(model_path)
def _get_onnxmlir_inference_session(self):
    """Create an onnx-mlir ExecutionSession over the compiled model .so."""
    try:
        # The arch/OS-specific PyRuntime shared library must already be on
        # the python path for this import to succeed.
        from PyRuntime import ExecutionSession
    except ImportError:
        raise MissingDependencyException(
            "PyRuntime package library must be in python path")
    return ExecutionSession(self._model_so_path, "run_main_graph")
def load(self, path):
    """Read the fasttext model file under `path` and pack it."""
    try:
        import fasttext  # noqa # pylint: disable=unused-import
    except ImportError:
        raise MissingDependencyException(
            "fasttext package is required to use FasttextModelArtifact")
    return self.pack(fasttext.load_model(self._model_file_path(path)))
def _import_imageio_imread():
    """Return imageio's `imread` function.

    Raises:
        MissingDependencyException: imageio is not installed.
    """
    try:
        import imageio
    except ImportError:
        raise MissingDependencyException(
            "imageio package is required to use LegacyImageInput")
    return imageio.imread
def _import_imageio_imread():
    """Return imageio's `imread` function.

    Raises:
        MissingDependencyException: imageio is not installed.
    """
    try:
        import imageio
    except ImportError:
        raise MissingDependencyException(
            "imageio package is required to use FastaiImageHandler")
    return imageio.imread
def _import_fastai_vision():
    """Return the fastai.vision module.

    Raises:
        MissingDependencyException: fastai (or its vision extra) is missing.
    """
    try:
        import fastai.vision
    except ImportError:
        raise MissingDependencyException(
            "fastai.vision package is required to use FastaiImageHandler")
    return fastai.vision
def pack(self, fasttext_model):  # pylint:disable=arguments-differ
    """Store a trained fasttext model on this artifact and return self."""
    # Import check only — fail fast if the runtime dependency is missing.
    try:
        import fasttext  # noqa # pylint: disable=unused-import
    except ImportError:
        raise MissingDependencyException(
            "fasttext package is required to use FasttextModelArtifact")
    self._model = fasttext_model
    return self
def __init__(self, spec, model):
    """Wrapper holding a loaded fasttext model for serving."""
    super(_FasttextModelArtifactWrapper, self).__init__(spec)
    # Import check only — fail fast if the runtime dependency is missing.
    try:
        import fasttext  # noqa # pylint: disable=unused-import
    except ImportError:
        raise MissingDependencyException(
            "fasttext package is required to use FasttextModelArtifact")
    self._model = model
def pack(self, model, metadata: dict = None):  # pylint: disable=unused-argument
    """Attach a trained Gluon model to this artifact and return self.

    `metadata` is accepted for artifact API symmetry but not used.
    """
    # Import check only — fail fast if mxnet is missing at pack time.
    try:
        import mxnet  # noqa # pylint: disable=unused-import
    except ImportError:
        raise MissingDependencyException(
            "mxnet package is required to use GluonModelArtifact")
    self._model = model
    return self
def pack(
    self, obj, signatures=None, options=None
):  # pylint:disable=arguments-differ
    """
    Pack the TensorFlow Trackable object `obj` to [SavedModel format].

    Args:
        obj: A trackable object to export, or a path-like pointing at an
            already-saved SavedModel directory.
        signatures: Optional, either a `tf.function` with an input signature
            specified or the result of `f.get_concrete_function` on a
            `@tf.function`-decorated function `f`, in which case `f` will be
            used to generate a signature for the SavedModel under the default
            serving signature key. `signatures` may also be a dictionary, in
            which case it maps from signature keys to either `tf.function`
            instances with input signatures or concrete functions. The keys
            of such a dictionary may be arbitrary strings, but will typically
            be from the `tf.saved_model.signature_constants` module.
        options: Optional, `tf.saved_model.SaveOptions` object that specifies
            options for saving.

    Raises:
        ValueError: If `obj` is not trackable.
    """
    if not _is_path_like(obj):
        # Export the live object into a managed temporary directory,
        # recycling any directory from an earlier pack() call.
        if self._tmpdir is not None:
            self._tmpdir.cleanup()
        else:
            self._tmpdir = tempfile.TemporaryDirectory()
        try:
            import tensorflow as tf

            TF2 = tf.__version__.startswith('2')
        except ImportError:
            raise MissingDependencyException(
                "Tensorflow package is required to use TfSavedModelArtifact."
            )
        if TF2:
            tf.saved_model.save(
                obj,
                self._tmpdir.name,
                signatures=signatures,
                options=options,
            )
        else:
            # BUG FIX: previously this tested `self.options` (an unrelated
            # attribute) while warning about — and logging — the `options`
            # argument; test the argument actually being ignored under TF1.
            if options:
                logger.warning(
                    "Parameter 'options: %s' is ignored when using Tensorflow "
                    "version 1",
                    str(options),
                )
            tf.saved_model.save(
                obj,
                self._tmpdir.name,
                signatures=signatures,
            )
        self._path = self._tmpdir.name
    else:
        # Already on disk: just remember where it lives.
        self._path = obj
    return self
def _assert_azure_cli_available():
    """Raise MissingDependencyException unless the `az` CLI is runnable."""
    try:
        _call_az_cli(
            command=['az', 'account', 'show'], message='show Azure account'
        )
    except FileNotFoundError:
        # `az` binary not on PATH — point the user at the install docs.
        raise MissingDependencyException(
            'azure cli is required for this deployment. Please visit '
            'https://docs.microsoft.com/en-us/cli/azure/install-azure-cli '
            'for instructions')
def pack(self, model, metadata=None, input_model_yaml=None):  # pylint:disable=arguments-differ
    """Attach a Detectron model and its input config yaml to this artifact."""
    # Import check only — fail fast if detectron2 is missing at pack time.
    try:
        import detectron2  # noqa # pylint: disable=unused-import
    except ImportError:
        raise MissingDependencyException(
            "Detectron package is required to use DetectronModelArtifact")
    self._input_model_yaml = input_model_yaml
    self._model = model
    return self
def load(self, path):
    """Load the LightGBM booster stored under `path` and pack it."""
    try:
        import lightgbm as lgb
    except ImportError:
        raise MissingDependencyException(
            "lightgbm package is required to use LightGBMModelArtifact")
    booster = lgb.Booster(model_file=self._model_file_path(path))
    return self.pack(booster)
def _import_fastai2_module():
    """Return the top-level fastai (v2) module after importing its basics.

    Raises:
        MissingDependencyException: fastai v2 is not installed.
    """
    try:
        import fastai.basics  # noqa
    except ImportError:
        raise MissingDependencyException(
            "fastai2 package is required to use "
            "bentoml.artifacts.Fastai2ModelArtifact")
    # `import fastai.basics` also binds the top-level `fastai` name.
    return fastai
def get_serving_default_function(m):
    """Return `m`'s serving-default signature function, or None if absent."""
    try:
        import tensorflow as tf
    except ImportError:
        raise MissingDependencyException(
            "Tensorflow package is required to use TfSavedModelArtifact")
    default_key = tf.compat.v2.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY
    return m.signatures.get(default_key)
def bind_keras_backend_session(self):
    """Capture the Keras backend session and graph for later inference."""
    try:
        import tensorflow as tf
    except ImportError:
        raise MissingDependencyException(
            "Tensorflow package is required to use KerasModelArtifact. BentoML "
            "currently only support using Keras with Tensorflow backend.")
    session = tf.compat.v1.keras.backend.get_session()
    self.sess = session
    self.graph = session.graph
def __init__(self, name: str):
    """PaddlePaddle model artifact; requires paddlepaddle to be installed."""
    super(PaddlePaddleModelArtifact, self).__init__(name)
    # Populated later by pack()/load().
    self._model = None
    self._predictor = None
    self._model_path = None
    # `paddle` is resolved at module import time; None means unavailable.
    if paddle is None:
        raise MissingDependencyException(
            "paddlepaddle package is required to use PaddlePaddleModelArtifact"
        )
def __init__(self, name):
    """Question-answering transformers artifact; requires `transformers`."""
    super(QandaTransformersModelArtifact, self).__init__(name)
    # Filled in later when a model is packed/loaded.
    self._model = None
    self._tokenizer_type = None
    self._model_type = 'AutoModelForQuestionAnswering'
    # `transformers` is resolved at module import time; None means missing.
    if transformers is None:
        raise MissingDependencyException(
            "the transformers package is required to use QandaTransformersModelArtifact"
        )
def ensure_node_available_or_raise():
    """Verify the `node` binary is on PATH and runs successfully."""
    try:
        subprocess.check_output(['node', '--version'])
    except FileNotFoundError:
        # Binary not installed at all.
        raise MissingDependencyException(
            'Node is required for Yatai web UI. Please visit '
            'www.nodejs.org for instructions')
    except subprocess.CalledProcessError as error:
        # Binary present but the version probe failed.
        raise BentoMLException('Error executing node command: {}'.format(
            error.output.decode()))
def ensure_docker_available_or_raise():
    """Verify the `docker` binary is on PATH and the daemon responds."""
    try:
        subprocess.check_output(['docker', 'info'])
    except FileNotFoundError:
        # Binary not installed at all.
        raise MissingDependencyException(
            'Docker is required for this deployment. Please visit '
            'www.docker.com for instructions')
    except subprocess.CalledProcessError as error:
        # Binary present but `docker info` failed (e.g. daemon not running).
        raise BentoMLException('Error executing docker command: {}'.format(
            error.output.decode()))
def load(self, path):
    """Start/connect an h2o cluster, load the saved model, and pack it."""
    try:
        import h2o
    except ImportError:
        raise MissingDependencyException(
            "h2o package is required to use H2oModelArtifact")
    # h2o models can only be deserialized through a running h2o instance.
    h2o.init()
    return self.pack(h2o.load_model(self._model_file_path(path)))
def load(self, path):
    """Load the saved XGBoost booster from `path` and pack it."""
    try:
        import xgboost as xgb
    except ImportError:
        raise MissingDependencyException(
            "xgboost package is required to use XgboostModelArtifact")
    booster = xgb.Booster()
    booster.load_model(self._model_file_path(path))
    return self.pack(booster)