Example #1
# Imports used by this snippet; AlibiExplainer, ExplainerMethod,
# EXPLAINER_FILENAME, and parse_args come from the surrounding
# alibiexplainer package.
import logging
import os
import sys

import dill
import kserve


def main():
    args, extra = parse_args(sys.argv[1:])
    # Pretrained Alibi explainer

    alibi_model = None
    if args.storage_uri is not None:
        alibi_model_path = os.path.join(
            kserve.Storage.download(args.storage_uri), EXPLAINER_FILENAME
        )
        with open(alibi_model_path, "rb") as f:
            logging.info("Loading Alibi model")
            alibi_model = dill.load(f)

    explainer = AlibiExplainer(
        args.model_name,
        args.predictor_host,
        ExplainerMethod(args.command),
        extra,
        alibi_model,
    )
    explainer.load()
    kserve.ModelServer().start(models=[explainer], nest_asyncio=True)


if __name__ == "__main__":
    main()
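
Once started, the explainer answers on KServe's V1 data plane, so explanations come back from the model's :explain route. A minimal client sketch; the host, port, model name, and instance shape are illustrative assumptions:

import requests

# Hypothetical endpoint; the instance shape must match whatever the wrapped
# predictor behind --predictor_host expects.
resp = requests.post(
    "http://localhost:8080/v1/models/my-model:explain",
    json={"instances": [[0.1, 0.2, 0.3]]},
)
print(resp.json())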
Example #2
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import kserve
import argparse
from .bert_transformer import BertTransformer

DEFAULT_MODEL_NAME = "model"

parser = argparse.ArgumentParser(parents=[kserve.model_server.parser])
parser.add_argument('--model_name',
                    default=DEFAULT_MODEL_NAME,
                    help='The name that the model is served under.')
parser.add_argument('--predictor_host',
                    help='The URL for the model predict function',
                    required=True)

args, _ = parser.parse_known_args()

if __name__ == "__main__":
    transformer = BertTransformer(args.model_name,
                                  predictor_host=args.predictor_host)
    server = kserve.ModelServer()
    server.start(models=[transformer])
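
Because the parser is built with parents=[kserve.model_server.parser], the inherited kserve server flags parse together with the two defined here. A quick sketch with made-up argument values:

# Parse an explicit argv list instead of the process arguments.
demo_args, _ = parser.parse_known_args(
    ["--model_name", "bert", "--predictor_host", "bert-predictor:8080"]
)
print(demo_args.model_name)  # -> "bert"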
Example #3
        # Partial snippet from the model's predict() method: `inputs` holds the
        # request's "instances" list, and base64, io, PIL.Image, torch, and
        # torchvision.transforms are imported at module level in the full file.
        # Input follows the Tensorflow V1 HTTP API for binary values
        # https://www.tensorflow.org/tfx/serving/api_rest#encoding_binary_values
        data = inputs[0]["image"]["b64"]

        raw_img_data = base64.b64decode(data)
        input_image = Image.open(io.BytesIO(raw_img_data))

        preprocess = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])

        input_tensor = preprocess(input_image)
        input_batch = input_tensor.unsqueeze(0)

        output = self.model(input_batch)

        # Convert logits to class probabilities before ranking.
        probabilities = torch.nn.functional.softmax(output, dim=1)[0]

        values, top_5 = torch.topk(probabilities, 5)

        return {"predictions": values.tolist()}


if __name__ == "__main__":
    # Instantiate and load the model before starting the server.
    model = AlexNetModel("custom-model")
    model.load()
    kserve.ModelServer(workers=1).start([model])
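
A request body for this handler follows the TensorFlow V1 binary-value encoding cited in the comments above. A sketch for building one; the image path is illustrative:

import base64
import json

# Encode an image file into the {"image": {"b64": ...}} structure the
# predict method reads from each instance.
with open("cat.jpg", "rb") as f:
    encoded = base64.b64encode(f.read()).decode("utf-8")

payload = json.dumps({"instances": [{"image": {"b64": encoded}}]})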
Example #4
# Partial snippet: kserve, typing.Dict, sklearn's StandardScaler and
# LogisticRegression, and AIF360's load_preproc_data_german are imported
# at module level in the full file.
class KServeSampleModel(kserve.Model):
    def __init__(self, name: str):
        super().__init__(name)
        self.name = name
        self.ready = False

    def load(self):
        dataset_orig = load_preproc_data_german(['age'])
        scale_orig = StandardScaler()
        X_train = scale_orig.fit_transform(dataset_orig.features)
        y_train = dataset_orig.labels.ravel()

        lmod = LogisticRegression()
        lmod.fit(X_train, y_train, sample_weight=dataset_orig.instance_weights)

        self.model = lmod
        # Keep the fitted scaler so inference reuses the training statistics.
        self.scaler = scale_orig
        self.ready = True

    def predict(self, request: Dict) -> Dict:
        inputs = request["instances"]

        # Transform with the training-time scaler; fitting a fresh scaler on
        # each request would normalize by the request's own statistics.
        scaled_input = self.scaler.transform(inputs)

        predictions = self.model.predict(scaled_input)

        return {"predictions": predictions.tolist()}


if __name__ == "__main__":
    model = KServeSampleModel("german-credit")
    model.load()
    kserve.ModelServer(workers=1).start([model])
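
The served model then accepts V1 predict calls carrying raw feature rows. A hypothetical request; host, port, and the feature values are illustrative, and the preprocessed German credit dataset fixes the real feature count:

import requests

resp = requests.post(
    "http://localhost:8080/v1/models/german-credit:predict",
    json={"instances": [[1.0, 0.0, 25.0, 1.0]]},  # illustrative features
)
print(resp.json())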
Example #5
DEFAULT_MODEL_NAME = "default"
DEFAULT_LOCAL_MODEL_DIR = "/tmp/model"
DEFAULT_NTHREAD = 1

parser = argparse.ArgumentParser(parents=[kserve.model_server.parser])  # pylint:disable=c-extension-no-member
parser.add_argument('--model_dir',
                    required=True,
                    help='A URI pointer to the model directory')
parser.add_argument('--model_name',
                    default=DEFAULT_MODEL_NAME,
                    help='The name that the model is served under.')
parser.add_argument('--nthread',
                    default=DEFAULT_NTHREAD,
                    type=int,
                    help='Number of threads for LightGBM to use.')
args, _ = parser.parse_known_args()

if __name__ == "__main__":
    model = LightGBMModel(args.model_name, args.model_dir, args.nthread)
    try:
        model.load()
    except ModelMissingError:
        logging.error(
            f"Failed to load model {args.model_name} from dir {args.model_dir}; "
            f"trying to load from the model repository.")
    model_repository = LightGBMModelRepository(args.model_dir, args.nthread)
    # LightGBM doesn't support multi-process, so the number of http server workers should be 1.
    kfserver = kserve.ModelServer(workers=1,
                                  registered_models=model_repository)  # pylint:disable=c-extension-no-member
    kfserver.start([model] if model.ready else [])
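
Whether or not the initial load succeeded, the running server reports per-model status on the V1 route, which shows if the repository fallback picked the model up. A sketch with an assumed host and port:

import requests

# Check readiness of the "default" model (DEFAULT_MODEL_NAME above).
resp = requests.get("http://localhost:8080/v1/models/default")
print(resp.json())  # includes the model's ready state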
Example #6
parser.add_argument(
    '--min_weight',
    default=DEFAULT_MIN_WEIGHT,
    help='The minimum weight needed by a pixel to be considered useful as an explanation.')
parser.add_argument(
    '--positive_only',
    default=DEFAULT_POSITIVE_ONLY,
    help='Whether or not to show only the explanations that positively indicate a classification.')
parser.add_argument('--explainer_type',
                    default=DEFAULT_EXPLAINER_TYPE,
                    help='What type of model explainer to use.')

parser.add_argument('--predictor_host',
                    help='The host for the predictor.',
                    required=True)
args, _ = parser.parse_known_args()

if __name__ == "__main__":
    model = AIXModel(name=args.model_name,
                     predictor_host=args.predictor_host,
                     segm_alg=args.segmentation_algorithm,
                     num_samples=args.num_samples,
                     top_labels=args.top_labels,
                     min_weight=args.min_weight,
                     positive_only=args.positive_only,
                     explainer_type=args.explainer_type)
    model.load()
    kserve.ModelServer().start([model], nest_asyncio=True)
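
Like the other explainers, this one is queried through the V1 explain route. Everything below (host, port, model name, and the flattened-image payload) is an illustrative assumption:

import requests

# A LIME-style image explainer usually receives the same instances the
# predictor consumes; here a 28x28 grayscale image flattened to 784 values.
resp = requests.post(
    "http://localhost:8080/v1/models/aix-model:explain",
    json={"instances": [[0.0] * 784]},
)
print(resp.json())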
Example #7
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import kserve
import argparse

from pytorchserver import PyTorchModel

DEFAULT_MODEL_NAME = "model"
DEFAULT_LOCAL_MODEL_DIR = "/tmp/model"
DEFAULT_MODEL_CLASS_NAME = "PyTorchModel"

parser = argparse.ArgumentParser(parents=[kserve.model_server.parser])
parser.add_argument('--model_dir',
                    required=True,
                    help='A URI pointer to the model directory')
parser.add_argument('--model_name',
                    default=DEFAULT_MODEL_NAME,
                    help='The name that the model is served under.')
parser.add_argument('--model_class_name',
                    default=DEFAULT_MODEL_CLASS_NAME,
                    help='The class name for the model.')
args, _ = parser.parse_known_args()

if __name__ == "__main__":
    model = PyTorchModel(args.model_name, args.model_class_name,
                         args.model_dir)
    model.load()
    kserve.ModelServer().start([model])
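
--model_class_name names the Python class the serialized model in --model_dir corresponds to, so pytorchserver can resolve it when loading. A hypothetical class of that shape, purely for illustration; the real definition must match the one used at training time:

import torch


# Illustrative stand-in for the class named by --model_class_name.
class Net(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = torch.nn.Linear(4, 2)

    def forward(self, x):
        return self.fc(x)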
Example #8
import argparse
import logging

import kserve
from kserve.model import ModelMissingError
from xgbserver import XGBoostModel, XGBoostModelRepository

DEFAULT_MODEL_NAME = "default"
DEFAULT_LOCAL_MODEL_DIR = "/tmp/model"
DEFAULT_NTHREAD = 1

parser = argparse.ArgumentParser(parents=[kserve.model_server.parser])  # pylint:disable=c-extension-no-member
parser.add_argument('--model_dir', required=True,
                    help='A URI pointer to the model directory')
parser.add_argument('--model_name', default=DEFAULT_MODEL_NAME,
                    help='The name that the model is served under.')
parser.add_argument('--nthread', default=DEFAULT_NTHREAD, type=int,
                    help='Number of threads for XGBoost to use.')
args, _ = parser.parse_known_args()

if __name__ == "__main__":
    model = XGBoostModel(args.model_name, args.model_dir, args.nthread)
    try:
        model.load()
    except ModelMissingError:
        logging.error(f"fail to locate model file for model {args.model_name} under dir {args.model_dir},"
                      f"trying loading from model repository.")

    kserve.ModelServer(registered_models=XGBoostModelRepository(args.model_dir, args.nthread))\
        .start([model] if model.ready else [])
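
For model.load() to succeed, --model_dir must already contain a serialized booster. A sketch of producing one; the model.bst file name is an assumption about what xgbserver searches for:

import numpy as np
import xgboost as xgb

# Train a tiny booster and save it where --model_dir points.
X, y = np.random.rand(20, 4), np.random.randint(2, size=20)
booster = xgb.train({"objective": "binary:logistic"}, xgb.DMatrix(X, label=y))
booster.save_model("/tmp/model/model.bst")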
Example #9
                # Find the name and value by splitting the string
                name, value = line.split(separator, 1)

                # Assign key value pair to dict
                # strip() removes white space from the ends of strings
                keys[name.strip()] = value.strip()

    keys["model_snapshot"] = json.loads(keys["model_snapshot"])

    models = keys["model_snapshot"]["models"]
    # Collect all the model names (the snapshot maps name -> versions)
    model_names = list(models.keys())
    if not model_names:
        model_names = [DEFAULT_MODEL_NAME]
    print(f"Wrapper : Model names {model_names}")
    return model_names


if __name__ == "__main__":
    model_names = parse_config()
    models = []
    for model_name in model_names:
        transformer = ImageTransformer(model_name, predictor_host=args.predictor_host)
        models.append(transformer)
    kserve.ModelServer(
        registered_models=TransformerModelRepository(args.predictor_host)
    ).start(models=models)
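
parse_config reads a TorchServe-style properties file: one key=value pair per line, with model_snapshot carrying JSON whose "models" object maps model names to versions. A hypothetical input the code above would accept:

# Illustrative config.properties content; only model_snapshot is consumed
# by the parsing above.
SAMPLE_CONFIG = """\
inference_address=http://0.0.0.0:8085
model_snapshot={"name": "startup.cfg", "models": {"mnist": {"1.0": {}}}}
"""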
Example #10
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import logging

import kserve
from sklearnserver import SKLearnModel, SKLearnModelRepository
from kserve.model import ModelMissingError

DEFAULT_MODEL_NAME = "model"
DEFAULT_LOCAL_MODEL_DIR = "/tmp/model"

parser = argparse.ArgumentParser(parents=[kserve.model_server.parser])
parser.add_argument('--model_dir', required=True,
                    help='A URI pointer to the model binary')
parser.add_argument('--model_name', default=DEFAULT_MODEL_NAME,
                    help='The name that the model is served under.')
args, _ = parser.parse_known_args()

if __name__ == "__main__":
    model = SKLearnModel(args.model_name, args.model_dir)
    try:
        model.load()
    except ModelMissingError:
        logging.error(f"Failed to locate model file for model {args.model_name} under dir {args.model_dir}; "
                      f"trying to load from the model repository.")

    kserve.ModelServer(registered_models=SKLearnModelRepository(args.model_dir)).start([model] if model.ready else [])
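
Analogously, --model_dir must already hold a serialized estimator before the server starts. A sketch of producing one; model.joblib as the expected file name is an assumption:

import joblib
from sklearn.linear_model import LogisticRegression

# Fit a trivial model and persist it where --model_dir points.
clf = LogisticRegression().fit([[0.0], [1.0], [2.0], [3.0]], [0, 0, 1, 1])
joblib.dump(clf, "/tmp/model/model.joblib")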