コード例 #1
0
def test_duplicated_artifact_name():
    """Declaring two artifacts under the same name must raise InvalidArgument."""
    with pytest.raises(InvalidArgument) as exc_info:
        # Both artifacts are registered as "model"; BentoML should reject this
        # at class-decoration time.
        @bentoml.artifacts(
            [PickleArtifact("model"), PickleArtifact("model")]
        )
        class ExampleBentoService(  # pylint: disable=unused-variable
            bentoml.BentoService
        ):
            pass

    assert "Duplicated artifact name `model` detected" in str(exc_info.value)
コード例 #2
0
def pack_models(path):
    """Build and persist the two test models under *path*.

    Saves a plain ``PickleModel`` through ``PickleArtifact`` and a tiny
    ``RandomForestRegressor`` through ``SklearnModelArtifact``.
    """
    from sklearn.ensemble import RandomForestRegressor

    PickleArtifact("model").pack(PickleModel()).save(path)

    # Toy training set: the digits 0..9, each repeated 100 times, with the
    # target equal to the feature value.
    features = [[digit] for _ in range(100) for digit in range(10)]
    targets = [digit for _ in range(100) for digit in range(10)]

    regressor = RandomForestRegressor(n_estimators=2)
    regressor.fit(features, targets)
    SklearnModelArtifact("sk_model").pack(regressor).save(path)
コード例 #3
0
ファイル: service.py プロジェクト: harshitsinghai77/BentoML
import bentoml
from bentoml.adapters import (
    DataframeInput,
    FileInput,
    ImageInput,
    JsonInput,
    MultiImageInput,
)
from bentoml.frameworks.sklearn import SklearnModelArtifact
from bentoml.handlers import DataframeHandler  # deprecated
from bentoml.service.artifacts.pickle import PickleArtifact
from bentoml.types import InferenceResult, InferenceTask


@bentoml.env(infer_pip_packages=True)
@bentoml.artifacts([PickleArtifact("model"), SklearnModelArtifact('sk_model')])
class ExampleService(bentoml.BentoService):
    """
    Example BentoService class made for testing purpose
    """
    @bentoml.api(
        input=DataframeInput(dtype={"col1": "int"}),
        mb_max_latency=1000,
        mb_max_batch_size=2000,
        batch=True,
    )
    def predict_dataframe(self, df):
        """Batch API: forward the adapter-provided DataFrame to the pickled model.

        The adapter declares ``col1`` as int; micro-batching is capped at
        2000 rows / 1000 ms. Returns whatever the artifact's
        ``predict_dataframe`` produces, unchanged.
        """
        return self.artifacts.model.predict_dataframe(df)

    @bentoml.api(DataframeHandler, dtype={"col1": "int"},
                 batch=True)  # deprecated
コード例 #4
0
# Run inference on the first CUDA GPU when one is available, else on the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


def to_numpy(tensor):
    """Convert a torch tensor to an independent NumPy array.

    The tensor is detached from the autograd graph, moved to the CPU, and
    cloned, so the returned array never shares memory with *tensor* and the
    conversion also works for tensors with ``requires_grad=True``.

    The original branched on ``tensor.requires_grad``, but ``detach()`` is
    valid (and graph-wise a no-op) for tensors that do not require grad, so a
    single expression covers both cases.
    """
    return tensor.detach().cpu().clone().numpy()


@env(infer_pip_packages=False,
     pip_packages=['onnxruntime-gpu'],
     requirements_txt_file="./requirements.txt",
     docker_base_image="bentoml/model-server:0.12.1-py38-gpu")
@artifacts([
    OnnxModelArtifact('model', backend='onnxruntime-gpu'),
    PickleArtifact('tokenizer'),
    PickleArtifact('vocab')
])
class OnnxService(BentoService):
    def __init__(self):
        """Initialize the service and the class-id -> label-name lookup."""
        super().__init__()
        # Human-readable category names keyed by 1-based integer class id
        # (presumably the AG News label set — confirm against the model).
        self.news_label = {
            1: 'World',
            2: 'Sports',
            3: 'Business',
            4: 'Sci/Tec'
        }

    def classify_categories(self, sentence):
        text_pipeline, _ = get_pipeline(self.artifacts.tokenizer,
                                        self.artifacts.vocab)
コード例 #5
0
import bentoml
from bentoml.adapters import (  # FastaiImageInput,
    DataframeInput, ImageInput, JsonInput, LegacyImageInput, LegacyJsonInput,
)
from bentoml.service.artifacts.pickle import PickleArtifact
from bentoml.handlers import DataframeHandler  # deprecated


@bentoml.artifacts([PickleArtifact("model")])
@bentoml.env(infer_pip_packages=True)
class ExampleBentoService(bentoml.BentoService):
    """
    Example BentoService class made for testing purpose
    """
    @bentoml.api(input=DataframeInput(),
                 mb_max_latency=1000,
                 mb_max_batch_size=2000)
    def predict(self, df):
        """An API for testing simple bento model service.

        Forwards the adapter-provided DataFrame to the pickled model's
        ``predict`` and returns its result unchanged. Micro-batching is
        capped at 2000 rows / 1000 ms.
        """
        return self.artifacts.model.predict(df)

    @bentoml.api(input=DataframeInput(dtype={"col1": "int"}))
    def predict_dataframe(self, df):
        """predict_dataframe expects a DataFrame as input.

        The adapter declares column ``col1`` as int; the frame is passed
        straight through to the pickled model's ``predict_dataframe``.
        """
        return self.artifacts.model.predict_dataframe(df)

    @bentoml.api(DataframeHandler, dtype={"col1": "int"})  # deprecated
    def predict_dataframe_v1(self, df):
        """predict_dataframe expects dataframe as input
コード例 #6
0
from bentoml import BentoService, api, artifacts, env
from bentoml.adapters import JsonInput, JsonOutput
from bentoml.frameworks.pytorch import PytorchModelArtifact
from bentoml.service.artifacts.pickle import PickleArtifact
from train import get_pipeline
import torch

# Prefer the first CUDA device when available; otherwise fall back to the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


@env(conda_dependencies=['pytorch', 'torchtext', 'cudatoolkit=11.1'], conda_channels=['pytorch', 'nvidia'],
     infer_pip_packages=False, requirements_txt_file="./requirements.txt",
     docker_base_image="aarnphm/model-server:0.13.0-python3.8-slim-runtime")
@artifacts([PytorchModelArtifact("model"), PickleArtifact("tokenizer"), PickleArtifact("vocab")])
class PytorchService(BentoService):
    def __init__(self):
        """Initialize the service and the class-id -> label-name lookup."""
        super().__init__()
        # Human-readable category names keyed by 1-based integer class id,
        # matching the ``argmax(1) + 1`` ids produced in classify_categories.
        self.news_label = {1: 'World',
                           2: 'Sports',
                           3: 'Business',
                           4: 'Sci/Tec'}

    def classify_categories(self, sentence):
        """Encode *sentence*, run the model, and return the 1-based class id."""
        text_pipeline, _ = get_pipeline(self.artifacts.tokenizer, self.artifacts.vocab)
        # Inference only — no gradients needed.
        with torch.no_grad():
            encoded = torch.tensor(text_pipeline(sentence)).to(device)
            logits = self.artifacts.model(encoded, offsets=torch.tensor([0]).to(device))
            # argmax gives a 0-based index; labels are keyed from 1.
            return logits.argmax(1).item() + 1

    @api(input=JsonInput(), output=JsonOutput())