Example #1
from PIL import Image

from torch.autograd import Variable
from torchvision import transforms

import bentoml
from bentoml.artifact import PytorchModelArtifact
from bentoml.handlers import ImageHandler


# CIFAR-10 label names, index-aligned with the model's output class ids.
classes = (
    'plane', 'car', 'bird', 'cat', 'deer',
    'dog', 'frog', 'horse', 'ship', 'truck',
)

@bentoml.env(pip_dependencies=['torch', 'numpy', 'torchvision', 'scikit-learn'])
@bentoml.artifacts([PytorchModelArtifact('net')])
class ImageClassifier(bentoml.BentoService):
    """BentoML service exposing a CIFAR-10-style PyTorch classifier.

    The `net` artifact is the trained PyTorch model; `predict` maps a raw
    image (numpy array delivered by ImageHandler) to one of the ten label
    names in the module-level `classes` tuple.
    """

    @bentoml.api(ImageHandler)
    def predict(self, img):
        """Classify one image and return its label name (str).

        Args:
            img: image as a numpy array (H, W, C), as provided by ImageHandler.
        """
        # The model expects 32x32 inputs (CIFAR-10 image size).
        img = Image.fromarray(img).resize((32, 32))
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])

        input_data = transform(img)

        # `torch.autograd.Variable` has been a deprecated no-op since
        # PyTorch 0.4 — pass the tensor directly. unsqueeze(0) adds the
        # batch dimension.
        outputs = self.artifacts.net(input_data.unsqueeze(0))
        _, output_classes = outputs.max(dim=1)

        # Convert the 1-element index tensor to a plain Python int before
        # indexing the label tuple — explicit and robust across torch
        # versions, rather than relying on implicit Tensor.__index__.
        return classes[output_classes.item()]
Example #2
# Output labels for the binary ant-vs-bee classifier, index-aligned with the
# model's output neurons.
CLASSES = ['ant', 'bee']

# Standard ImageNet channel statistics used by pretrained torchvision models.
_IMAGENET_MEAN = [0.485, 0.456, 0.406]
_IMAGENET_STD = [0.229, 0.224, 0.225]

# Preprocessing pipeline: resize, center-crop to 224x224, convert to a
# tensor, and normalize so the image can be fed to the model for prediction.
TRANSFORM = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(_IMAGENET_MEAN, _IMAGENET_STD),
])


@bentoml.env(pip_dependencies=['torch', 'torchvision'])
# Defines the artifact that is used to deserialize the model
@bentoml.artifacts([PytorchModelArtifact('model')])
class AntOrBeeClassifier(bentoml.BentoService):

    # The actual API definition. Requires an ImageHandler to
    # accept the incoming img (a numpy array).
    @bentoml.api(ImageHandler)
    def predict(self, img):
        # NOTE(review): this snippet is truncated — the forward pass and the
        # return statement are missing from the captured source.

        # Convert the numpy image to a PIL image for the torchvision transforms.
        img = Image.fromarray(img)
        # Apply the preprocessing pipeline; returns a tensor of shape (3, 224, 224).
        img = TRANSFORM(img)

        # Switch the model to eval mode (disables dropout / batch-norm updates).
        self.artifacts.model.eval()
        # Performs forward prop
Example #3
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224,
                                                          0.225]),
])
beam_size = 3  # beam-search width used by ImageCaptioner.gen_caption

# Word map produced during training: token (str) -> integer id.
word_map_file = 'word_map.json'
# Use a context manager so the file handle is closed deterministically —
# the original `json.load(open(...))` leaked the open handle.
with open(word_map_file, 'r') as f:
    word_map = json.load(f)
vocab_size = len(word_map)
# Reverse mapping: integer id -> token, for decoding generated captions.
rev_word_map = {v: k for k, v in word_map.items()}


@bentoml.env(pip_dependencies=['torch', 'torchvision'])
@bentoml.artifacts(
    [PytorchModelArtifact('encoder'),
     PytorchModelArtifact('decoder')])
class ImageCaptioner(bentoml.BentoService):
    # Image-captioning service: encodes the image, then (in the missing
    # remainder) beam-searches a caption with the decoder artifact.
    @bentoml.api(ImageHandler)
    def gen_caption(self, img):
        # NOTE(review): this snippet is truncated — the decoder / beam-search
        # portion is missing from the captured source.

        # Beam width: number of candidate captions kept per decoding step.
        k = beam_size

        # Convert the numpy image to PIL and preprocess it.
        # NOTE(review): `transform` is not defined in the visible source —
        # presumably the torchvision pipeline defined above this example;
        # verify against the full file.
        img = Image.fromarray(img)
        img = transform(img)
        # (3, 224, 224)
        img = img.unsqueeze(0)

        # Run the encoder in eval mode (inference only).
        self.artifacts.encoder.eval()
        # (1, enc_img_size, enc_img_size, encoder_dim)