Example #1
def EndpointSpec(framework, storage_uri, service_account):
    if framework == "tensorflow":
        return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(
            tensorflow=V1alpha2TensorflowSpec(storage_uri=storage_uri),
            service_account_name=service_account,
        ))
    elif framework == "pytorch":
        return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(
            pytorch=V1alpha2PyTorchSpec(storage_uri=storage_uri),
            service_account_name=service_account,
        ))
    elif framework == "sklearn":
        return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(
            sklearn=V1alpha2SKLearnSpec(storage_uri=storage_uri),
            service_account_name=service_account,
        ))
    elif framework == "xgboost":
        return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(
            xgboost=V1alpha2XGBoostSpec(storage_uri=storage_uri),
            service_account_name=service_account,
        ))
    elif framework == "onnx":
        return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(
            onnx=V1alpha2ONNXSpec(storage_uri=storage_uri),
            service_account_name=service_account,
        ))
    elif framework == "tensorrt":
        return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(
            tensorrt=V1alpha2TensorRTSpec(storage_uri=storage_uri),
            service_account_name=service_account,
        ))
    else:
        raise ValueError("Error: No matching framework: " + framework)
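For context, a minimal usage sketch follows. It mirrors the InferenceService wiring used in the later examples; the service name, bucket URI, and service account are placeholders rather than values from the original example.

endpoint_spec = EndpointSpec(
    framework="sklearn",
    storage_uri="gs://your-bucket/models/sklearn/iris",  # placeholder URI
    service_account="k8s-sa")                            # placeholder account
isvc = V1alpha2InferenceService(
    api_version=api_version,
    kind=constants.KFSERVING_KIND,
    metadata=client.V1ObjectMeta(name="my-isvc",
                                 namespace=KFSERVING_TEST_NAMESPACE),
    spec=V1alpha2InferenceServiceSpec(default=endpoint_spec))
KFServing.create(isvc)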
Example #2
def test_sklearn_kfserving():
    service_name = "isvc-sklearn"
    default_endpoint_spec = V1alpha2EndpointSpec(
        predictor=V1alpha2PredictorSpec(
            min_replicas=1,
            sklearn=V1alpha2SKLearnSpec(
                storage_uri="gs://kfserving-samples/models/sklearn/iris",
                resources=V1ResourceRequirements(
                    requests={
                        "cpu": "100m",
                        "memory": "256Mi"
                    },
                    limits={
                        "cpu": "100m",
                        "memory": "256Mi"
                    },
                ),
            ),
        ))

    isvc = V1alpha2InferenceService(
        api_version=api_version,
        kind=constants.KFSERVING_KIND,
        metadata=client.V1ObjectMeta(name=service_name,
                                     namespace=KFSERVING_TEST_NAMESPACE),
        spec=V1alpha2InferenceServiceSpec(default=default_endpoint_spec),
    )

    KFServing.create(isvc)
    KFServing.wait_isvc_ready(service_name, namespace=KFSERVING_TEST_NAMESPACE)
    res = predict(service_name, "./data/iris_input.json")
    assert res["predictions"] == [1, 1]
    KFServing.delete(service_name, KFSERVING_TEST_NAMESPACE)
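The test above reads its request body from ./data/iris_input.json, which is not part of this listing. A plausible reconstruction, based on the TFServing-style "instances" format the sklearn server accepts, is sketched below; the feature values are illustrative only.

# Hypothetical contents of ./data/iris_input.json: two 4-feature iris rows.
# The assertion above expects both rows to be classified as class 1.
iris_input = {
    "instances": [
        [6.8, 2.8, 4.8, 1.4],
        [6.0, 3.4, 4.5, 1.6],
    ]
}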
Example #3
def generate_predictor_spec(self,
                            framework,
                            storage_uri=None,
                            container=None):
    '''Generate a predictor spec according to the framework and
       storage_uri, or a custom container.
    '''
    if framework == 'tensorflow':
        predictor = V1alpha2PredictorSpec(
            tensorflow=V1alpha2TensorflowSpec(storage_uri=storage_uri))
    elif framework == 'onnx':
        predictor = V1alpha2PredictorSpec(onnx=V1alpha2ONNXSpec(
            storage_uri=storage_uri))
    elif framework == 'pytorch':
        predictor = V1alpha2PredictorSpec(pytorch=V1alpha2PyTorchSpec(
            storage_uri=storage_uri))
    elif framework == 'sklearn':
        predictor = V1alpha2PredictorSpec(sklearn=V1alpha2SKLearnSpec(
            storage_uri=storage_uri))
    elif framework == 'triton':
        predictor = V1alpha2PredictorSpec(triton=V1alpha2TritonSpec(
            storage_uri=storage_uri))
    elif framework == 'xgboost':
        predictor = V1alpha2PredictorSpec(xgboost=V1alpha2XGBoostSpec(
            storage_uri=storage_uri))
    elif framework == 'custom':
        predictor = V1alpha2PredictorSpec(custom=V1alpha2CustomSpec(
            container=container))
    else:
        raise RuntimeError("Unsupported framework {}".format(framework))
    return predictor
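A hedged sketch of how this method might be called: the deployer object is hypothetical (only generate_predictor_spec itself appears in the example above), and the storage URI is a placeholder.

# `deployer` is a hypothetical instance of the class that defines
# generate_predictor_spec.
predictor = deployer.generate_predictor_spec(
    framework='sklearn',
    storage_uri='gs://your-bucket/models/sklearn/iris')  # placeholder URI
endpoint_spec = V1alpha2EndpointSpec(predictor=predictor)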
Example #4
def EndpointSpec(framework,
                 storage_uri,
                 service_account_name="k8s-sa",
                 transformer_custom_image=""):
    if framework == 'tensorflow':
        return V1alpha2EndpointSpec(
            predictor=V1alpha2PredictorSpec(
                service_account_name=service_account_name,
                tensorflow=V1alpha2TensorflowSpec(storage_uri=storage_uri)),
            transformer=V1alpha2TransformerSpec(
                min_replicas=1,
                custom=V1alpha2CustomSpec(container=client.V1Container(
                    image=transformer_custom_image,
                    name="kfserving-container"))))
    elif framework == 'pytorch':
        return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(
            pytorch=V1alpha2PyTorchSpec(storage_uri=storage_uri)))
    elif framework == 'sklearn':
        return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(
            sklearn=V1alpha2SKLearnSpec(storage_uri=storage_uri)))
    elif framework == 'xgboost':
        return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(
            xgboost=V1alpha2XGBoostSpec(storage_uri=storage_uri)))
    elif framework == 'onnx':
        return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(
            onnx=V1alpha2ONNXSpec(storage_uri=storage_uri)))
    elif framework == 'tensorrt':
        return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(
            tensorrt=V1alpha2TensorRTSpec(storage_uri=storage_uri)))
    else:
        raise ValueError("Error: No matching framework: " + framework)
Example #5
def test_tabular_explainer():
    service_name = 'isvc-explainer-tabular'
    default_endpoint_spec = V1alpha2EndpointSpec(
        predictor=V1alpha2PredictorSpec(
            sklearn=V1alpha2SKLearnSpec(
                storage_uri='gs://seldon-models/sklearn/income/model',
                resources=V1ResourceRequirements(
                    requests={
                        'cpu': '100m',
                        'memory': '1Gi'
                    },
                    limits={
                        'cpu': '100m',
                        'memory': '1Gi'
                    },
                ),
            ),
        ),
        explainer=V1alpha2ExplainerSpec(
            min_replicas=1,
            alibi=V1alpha2AlibiExplainerSpec(
                type='AnchorTabular',
                storage_uri='gs://seldon-models/sklearn/income/explainer-py36-0.5.2',
                resources=V1ResourceRequirements(
                    requests={
                        'cpu': '100m',
                        'memory': '1Gi'
                    },
                    limits={
                        'cpu': '100m',
                        'memory': '1Gi'
                    },
                ),
            ),
        ),
    )

    isvc = V1alpha2InferenceService(
        api_version=api_version,
        kind=constants.KFSERVING_KIND,
        metadata=client.V1ObjectMeta(name=service_name,
                                     namespace=KFSERVING_TEST_NAMESPACE),
        spec=V1alpha2InferenceServiceSpec(default=default_endpoint_spec))

    KFServing.create(isvc)
    try:
        KFServing.wait_isvc_ready(service_name,
                                  namespace=KFSERVING_TEST_NAMESPACE,
                                  timeout_seconds=720)
    except RuntimeError as e:
        logging.info(
            KFServing.api_instance.get_namespaced_custom_object(
                "serving.knative.dev", "v1", KFSERVING_TEST_NAMESPACE,
                "services", service_name + "-predictor-default"))
        pods = KFServing.core_api.list_namespaced_pod(
            KFSERVING_TEST_NAMESPACE,
            label_selector='serving.kubeflow.org/inferenceservice={}'.format(
                service_name))
        for pod in pods.items:
            logging.info(pod)
        raise e

    res = predict(service_name, './data/income_input.json')
    assert (res["predictions"] == [0])
    precision = explain(service_name, './data/income_input.json')
    assert (precision > 0.9)
    KFServing.delete(service_name, KFSERVING_TEST_NAMESPACE)
Example #6
def EndpointSpec(framework, storage_uri):
    if framework == 'tensorflow':
        return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(
            tensorflow=V1alpha2TensorflowSpec(storage_uri=storage_uri)))
    elif framework == 'pytorch':
        return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(
            pytorch=V1alpha2PyTorchSpec(storage_uri=storage_uri)))
    elif framework == 'sklearn':
        return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(
            sklearn=V1alpha2SKLearnSpec(storage_uri=storage_uri)))
    elif framework == 'xgboost':
        return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(
            xgboost=V1alpha2XGBoostSpec(storage_uri=storage_uri)))
    elif framework == 'onnx':
        return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(
            onnx=V1alpha2ONNXSpec(storage_uri=storage_uri)))
    elif framework == 'tensorrt':
        return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(
            tensorrt=V1alpha2TensorRTSpec(storage_uri=storage_uri)))
    else:
        raise ValueError("Error: No matching framework: " + framework)
Example #7
def EndpointSpec(framework, storage_uri, service_account, min_replicas,
                 max_replicas):

    endpoint_spec = V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(
        service_account_name=service_account,
        min_replicas=(min_replicas if min_replicas >= 0 else None),
        max_replicas=(max_replicas if max_replicas > 0
                      and max_replicas >= min_replicas else None)))
    if framework == "tensorflow":
        endpoint_spec.predictor.tensorflow = V1alpha2TensorflowSpec(
            storage_uri=storage_uri)
        return endpoint_spec

    elif framework == "pytorch":
        endpoint_spec.predictor.pytorch = V1alpha2PyTorchSpec(
            storage_uri=storage_uri)
        return endpoint_spec

    elif framework == "sklearn":
        endpoint_spec.predictor.sklearn = V1alpha2SKLearnSpec(
            storage_uri=storage_uri)
        return endpoint_spec

    elif framework == "xgboost":
        endpoint_spec.predictor.xgboost = V1alpha2XGBoostSpec(
            storage_uri=storage_uri)
        return endpoint_spec

    elif framework == "onnx":
        endpoint_spec.predictor.onnx = V1alpha2ONNXSpec(storage_uri=storage_uri)
        return endpoint_spec

    elif framework == "tensorrt":
        endpoint_spec.predictor.tensorrt = V1alpha2TensorRTSpec(
            storage_uri=storage_uri)
        return endpoint_spec

    else:
        raise ValueError("Error: No matching framework: " + framework)
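The replica guards fall back to None rather than raising, so a quick sketch (all values placeholders) shows which bounds actually land on the predictor:

# Both bounds valid: min_replicas=1 and max_replicas=3 are set on the spec.
spec = EndpointSpec("sklearn", "gs://your-bucket/models/sklearn/iris",
                    "k8s-sa", min_replicas=1, max_replicas=3)

# max_replicas=0 fails the `max_replicas > 0` check, so max_replicas stays
# None and no upper bound is written to the spec.
spec = EndpointSpec("sklearn", "gs://your-bucket/models/sklearn/iris",
                    "k8s-sa", min_replicas=1, max_replicas=0)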
Example #8
def test_kfserving_logger():
    msg_dumper = 'message-dumper'
    default_endpoint_spec = V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(
        min_replicas=1,
        custom=V1alpha2CustomSpec(container=V1Container(
            name="kfserving-container",
            image='gcr.io/knative-releases/knative.dev/eventing-contrib/cmd/event_display',
        ))))

    isvc = V1alpha2InferenceService(
        api_version=api_version,
        kind=constants.KFSERVING_KIND,
        metadata=client.V1ObjectMeta(name=msg_dumper,
                                     namespace=KFSERVING_TEST_NAMESPACE),
        spec=V1alpha2InferenceServiceSpec(default=default_endpoint_spec))

    KFServing.create(isvc)
    KFServing.wait_isvc_ready(msg_dumper, namespace=KFSERVING_TEST_NAMESPACE)

    service_name = 'isvc-logger'
    default_endpoint_spec = V1alpha2EndpointSpec(
        predictor=V1alpha2PredictorSpec(
            min_replicas=1,
            logger=V1alpha2Logger(
                mode="all",
                url="http://message-dumper-predictor-default." +
                KFSERVING_TEST_NAMESPACE),
            sklearn=V1alpha2SKLearnSpec(
                storage_uri='gs://kfserving-samples/models/sklearn/iris',
                resources=V1ResourceRequirements(
                    requests={
                        'cpu': '100m',
                        'memory': '256Mi'
                    },
                    limits={
                        'cpu': '100m',
                        'memory': '256Mi'
                    },
                ),
            ),
        ))

    isvc = V1alpha2InferenceService(
        api_version=api_version,
        kind=constants.KFSERVING_KIND,
        metadata=client.V1ObjectMeta(name=service_name,
                                     namespace=KFSERVING_TEST_NAMESPACE),
        spec=V1alpha2InferenceServiceSpec(default=default_endpoint_spec))

    KFServing.create(isvc)
    KFServing.wait_isvc_ready(service_name, namespace=KFSERVING_TEST_NAMESPACE)
    res = predict(service_name, './data/iris_input.json')
    assert (res["predictions"] == [1, 1])
    pods = KFServing.core_api.list_namespaced_pod(
        KFSERVING_TEST_NAMESPACE,
        label_selector='serving.kubeflow.org/inferenceservice={}'.format(
            msg_dumper))
    for pod in pods.items:
        log = KFServing.core_api.read_namespaced_pod_log(
            name=pod.metadata.name,
            namespace=pod.metadata.namespace,
            container="kfserving-container")
        print(log)
        assert ("org.kubeflow.serving.inference.request" in log)
        assert ("org.kubeflow.serving.inference.response" in log)
    KFServing.delete(service_name, KFSERVING_TEST_NAMESPACE)
    KFServing.delete(msg_dumper, KFSERVING_TEST_NAMESPACE)