Example #1

# Module-level setup assumed by all three examples (mirroring the KFServing
# e2e test suite): SDK imports, a client instance, and the test namespace.
import logging

from kubernetes import client
from kubernetes.client import V1Container, V1ResourceRequirements

from kfserving import (KFServingClient, constants, V1beta1AlibiExplainerSpec,
                       V1beta1ExplainerSpec, V1beta1InferenceService,
                       V1beta1InferenceServiceSpec, V1beta1PredictorSpec,
                       V1beta1SKLearnSpec)

KFServing = KFServingClient()
KFSERVING_TEST_NAMESPACE = 'kfserving-ci-e2e-test'  # adjust to your cluster


def test_tabular_explainer():
    service_name = 'isvc-explainer-tabular'
    predictor = V1beta1PredictorSpec(sklearn=V1beta1SKLearnSpec(
        storage_uri='gs://seldon-models/sklearn/income/model',
        resources=V1ResourceRequirements(
            requests={'cpu': '100m', 'memory': '1Gi'},
            limits={'cpu': '100m', 'memory': '1Gi'})))
    explainer = V1beta1ExplainerSpec(
        min_replicas=1,
        alibi=V1beta1AlibiExplainerSpec(
            name='kfserving-container',
            type='AnchorTabular',
            storage_uri='gs://seldon-models/sklearn/income/explainer-py37-0.6.0',
            resources=V1ResourceRequirements(
                requests={'cpu': '100m', 'memory': '1Gi'},
                limits={'cpu': '100m', 'memory': '1Gi'})))

    isvc = V1beta1InferenceService(
        api_version=constants.KFSERVING_V1BETA1,
        kind=constants.KFSERVING_KIND,
        metadata=client.V1ObjectMeta(name=service_name,
                                     namespace=KFSERVING_TEST_NAMESPACE),
        spec=V1beta1InferenceServiceSpec(predictor=predictor,
                                         explainer=explainer))

    KFServing.create(isvc)
    try:
        KFServing.wait_isvc_ready(service_name,
                                  namespace=KFSERVING_TEST_NAMESPACE,
                                  timeout_seconds=720)
    except RuntimeError:
        logging.info(
            KFServing.api_instance.get_namespaced_custom_object(
                "serving.knative.dev", "v1", KFSERVING_TEST_NAMESPACE,
                "services", service_name + "-predictor-default"))
        pods = KFServing.core_api.list_namespaced_pod(
            KFSERVING_TEST_NAMESPACE,
            label_selector='serving.kubeflow.org/inferenceservice={}'.format(
                service_name))
        for pod in pods.items:
            logging.info(pod)
        raise

    res = predict(service_name, './data/income_input.json')
    assert res["predictions"] == [0]

    # AnchorTabular explanations carry a precision score; expect a
    # high-precision anchor for this input.
    precision = explain(service_name, './data/income_input.json')
    assert precision > 0.9
    KFServing.delete(service_name, KFSERVING_TEST_NAMESPACE)
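
All three examples call predict, explain, explain_art, and explain_aix helpers
from the e2e suite's shared utilities, which this page does not show. The
sketch below illustrates the request pattern such helpers follow, assuming
CLUSTER_IP points at the cluster's ingress gateway; the constant and the
response handling are assumptions for illustration, not the suite's exact code.

import json
from urllib.parse import urlparse

import requests

CLUSTER_IP = '<ingress-gateway address>'  # assumption: how the cluster is reached


def predict(service_name, input_json_path):
    # Resolve the InferenceService's external host, then route the request
    # through the ingress gateway using a Host header.
    isvc = KFServing.get(service_name, namespace=KFSERVING_TEST_NAMESPACE)
    host = urlparse(isvc['status']['url']).netloc
    with open(input_json_path) as f:
        payload = json.load(f)
    url = 'http://{}/v1/models/{}:predict'.format(CLUSTER_IP, service_name)
    response = requests.post(url, json=payload, headers={'Host': host})
    response.raise_for_status()
    return response.json()


def explain(service_name, input_json_path):
    # Same pattern against the :explain verb; for the Alibi AnchorTabular
    # explainer the test reads a precision score out of the response.
    ...
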
Example #2

# Assumes the shared setup from Example #1, plus the ART explainer spec and
# the version strings this variant of the test passes to the client.
from kfserving import V1beta1ARTExplainerSpec

kfserving_version = 'v1beta1'
api_version = constants.KFSERVING_GROUP + '/' + kfserving_version


def test_tabular_explainer():
    service_name = 'art-explainer'
    isvc = V1beta1InferenceService(
        api_version=api_version,
        kind=constants.KFSERVING_KIND,
        metadata=client.V1ObjectMeta(name=service_name,
                                     namespace=KFSERVING_TEST_NAMESPACE),
        spec=V1beta1InferenceServiceSpec(
            predictor=V1beta1PredictorSpec(containers=[
                V1Container(
                    name="predictor",
                    # MNIST predictor image published under the aipipeline org.
                    image='aipipeline/art-server:mnist-predictor',
                    command=[
                        "python", "-m", "sklearnserver", "--model_name",
                        "art-explainer", "--model_dir",
                        "file://sklearnserver/sklearnserver/example_model"
                    ])
            ]),
            explainer=V1beta1ExplainerSpec(min_replicas=1,
                                           art=V1beta1ARTExplainerSpec(
                                               type='SquareAttack',
                                               name='explainer',
                                               config={"nb_classes": "10"}))))

    KFServing.create(isvc, version=kfserving_version)
    try:
        KFServing.wait_isvc_ready(service_name,
                                  namespace=KFSERVING_TEST_NAMESPACE,
                                  timeout_seconds=720)
    except RuntimeError:
        logging.info(
            KFServing.api_instance.get_namespaced_custom_object(
                "serving.knative.dev", "v1", KFSERVING_TEST_NAMESPACE,
                "services", service_name + "-predictor-default"))
        pods = KFServing.core_api.list_namespaced_pod(
            KFSERVING_TEST_NAMESPACE,
            label_selector='serving.kubeflow.org/inferenceservice={}'.format(
                service_name))
        for pod in pods.items:
            logging.info(pod)
        raise

    res = predict(service_name, './data/mnist_input_bw_flat.json')
    assert res["predictions"] == [3]

    # SquareAttack should find an adversarial image whose predicted label
    # differs from the clean prediction of 3.
    adv_prediction = explain_art(service_name, './data/mnist_input_bw.json')
    assert adv_prediction != 3
    KFServing.delete(service_name,
                     KFSERVING_TEST_NAMESPACE,
                     version=kfserving_version)
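
The explain_art helper follows the same :explain request pattern as the
predict sketch after Example #1. ART's SquareAttack searches for an
adversarial variant of the input image, and the test asserts that the model's
label on that variant differs from the clean prediction. The response key
below is an assumption for illustration, not the suite's exact code.

def explain_art(service_name, input_json_path):
    # Sketch: POST to the :explain verb and read back the label the model
    # assigns to the adversarial image found by SquareAttack.
    isvc = KFServing.get(service_name, namespace=KFSERVING_TEST_NAMESPACE)
    host = urlparse(isvc['status']['url']).netloc
    with open(input_json_path) as f:
        payload = json.load(f)
    url = 'http://{}/v1/models/{}:explain'.format(CLUSTER_IP, service_name)
    response = requests.post(url, json=payload, headers={'Host': host})
    response.raise_for_status()
    return response.json()['explanations']['adversarial_prediction']  # assumed key
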
Example #3

# Assumes the shared setup from Example #1, plus the AIX explainer spec and numpy.
import numpy as np

from kfserving import V1beta1AIXExplainerSpec


def test_tabular_explainer():
    service_name = 'aix-explainer'
    predictor = V1beta1PredictorSpec(containers=[
        V1Container(name="predictor",
                    image='aipipeline/rf-predictor:0.4.0',
                    command=[
                        "python", "-m", "rfserver", "--model_name",
                        "aix-explainer"
                    ],
                    resources=V1ResourceRequirements(
                        requests={'cpu': '500m', 'memory': '1Gi'},
                        limits={'cpu': '500m', 'memory': '1Gi'}))
    ])
    explainer = V1beta1ExplainerSpec(
        min_replicas=1,
        aix=V1beta1AIXExplainerSpec(
            name='explainer',
            type='LimeImages',
            resources=V1ResourceRequirements(
                requests={'cpu': '500m', 'memory': '1Gi'},
                limits={'cpu': '500m', 'memory': '1Gi'})))

    isvc = V1beta1InferenceService(
        api_version=constants.KFSERVING_V1BETA1,
        kind=constants.KFSERVING_KIND,
        metadata=client.V1ObjectMeta(name=service_name,
                                     namespace=KFSERVING_TEST_NAMESPACE),
        spec=V1beta1InferenceServiceSpec(predictor=predictor,
                                         explainer=explainer))

    KFServing.create(isvc)
    try:
        KFServing.wait_isvc_ready(service_name,
                                  namespace=KFSERVING_TEST_NAMESPACE,
                                  timeout_seconds=720)
    except RuntimeError:
        logging.info(
            KFServing.api_instance.get_namespaced_custom_object(
                "serving.knative.dev", "v1", KFSERVING_TEST_NAMESPACE,
                "services", service_name + "-predictor-default"))
        pods = KFServing.core_api.list_namespaced_pod(
            KFSERVING_TEST_NAMESPACE,
            label_selector='serving.kubeflow.org/inferenceservice={}'.format(
                service_name))
        for pod in pods.items:
            logging.info(pod)
        raise

    res = predict(service_name, './data/mnist_input.json')
    assert res["predictions"] == [[
        0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    ]]

    # LimeImages returns a per-pixel relevance mask; expect most of the image
    # to be flagged as relevant for this input.
    mask = explain_aix(service_name, './data/mnist_input.json')
    percent_in_mask = np.count_nonzero(mask) / np.size(np.array(mask))
    assert percent_in_mask > 0.6
    KFServing.delete(service_name, KFSERVING_TEST_NAMESPACE)
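
One design note on all three tests: KFServing.delete runs only after every
assertion passes, so a failed assertion leaks the InferenceService into the
test namespace. A sketch of a more defensive shape, with build_isvc standing
in (hypothetically) for the spec-building code shown above:

def test_tabular_explainer_with_cleanup():
    service_name = 'aix-explainer'
    isvc = build_isvc(service_name)  # hypothetical helper assembling the spec
    KFServing.create(isvc)
    try:
        KFServing.wait_isvc_ready(service_name,
                                  namespace=KFSERVING_TEST_NAMESPACE,
                                  timeout_seconds=720)
        res = predict(service_name, './data/mnist_input.json')
        assert res["predictions"][0][2] == 1.0
    finally:
        # Cleanup happens whether the assertions pass or fail.
        KFServing.delete(service_name, KFSERVING_TEST_NAMESPACE)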