import logging
import os

import numpy as np
from kubernetes import client
from kubernetes.client import V1Container, V1ResourceRequirements

from kfserving import KFServingClient
from kfserving import constants
from kfserving import V1alpha2AIXExplainerSpec
from kfserving import V1alpha2AlibiExplainerSpec
from kfserving import V1alpha2CustomSpec
from kfserving import V1alpha2EndpointSpec
from kfserving import V1alpha2ExplainerSpec
from kfserving import V1alpha2InferenceService
from kfserving import V1alpha2InferenceServiceSpec
from kfserving import V1alpha2PredictorSpec
from kfserving import V1alpha2SKLearnSpec

# predict/explain/explain_aix and KFSERVING_TEST_NAMESPACE are shared e2e test
# helpers; the module path below is an assumption based on the usual layout of
# the KFServing e2e test tree.
from ..common.utils import KFSERVING_TEST_NAMESPACE
from ..common.utils import explain, explain_aix, predict

api_version = constants.KFSERVING_GROUP + '/' + constants.KFSERVING_VERSION
KFServing = KFServingClient(
    config_file=os.environ.get('KUBECONFIG', '~/.kube/config'))


def test_tabular_explainer():
    # Deploy an sklearn income classifier with an Alibi AnchorTabular
    # explainer attached.
    service_name = 'isvc-explainer-tabular'
    default_endpoint_spec = V1alpha2EndpointSpec(
        predictor=V1alpha2PredictorSpec(
            sklearn=V1alpha2SKLearnSpec(
                storage_uri='gs://seldon-models/sklearn/income/model',
                resources=V1ResourceRequirements(
                    requests={'cpu': '100m', 'memory': '1Gi'},
                    limits={'cpu': '100m', 'memory': '1Gi'}))),
        explainer=V1alpha2ExplainerSpec(
            min_replicas=1,
            alibi=V1alpha2AlibiExplainerSpec(
                type='AnchorTabular',
                storage_uri='gs://seldon-models/sklearn/income/explainer-py36-0.5.2',
                resources=V1ResourceRequirements(
                    requests={'cpu': '100m', 'memory': '1Gi'},
                    limits={'cpu': '100m', 'memory': '1Gi'}))))

    isvc = V1alpha2InferenceService(
        api_version=api_version,
        kind=constants.KFSERVING_KIND,
        metadata=client.V1ObjectMeta(
            name=service_name, namespace=KFSERVING_TEST_NAMESPACE),
        spec=V1alpha2InferenceServiceSpec(default=default_endpoint_spec))

    KFServing.create(isvc)
    try:
        KFServing.wait_isvc_ready(service_name,
                                  namespace=KFSERVING_TEST_NAMESPACE,
                                  timeout_seconds=720)
    except RuntimeError as e:
        # On timeout, log the underlying Knative service and the pods of the
        # InferenceService before re-raising, to make CI failures debuggable.
        logging.info(KFServing.api_instance.get_namespaced_custom_object(
            'serving.knative.dev', 'v1', KFSERVING_TEST_NAMESPACE,
            'services', service_name + '-predictor-default'))
        pods = KFServing.core_api.list_namespaced_pod(
            KFSERVING_TEST_NAMESPACE,
            label_selector='serving.kubeflow.org/inferenceservice={}'.format(
                service_name))
        for pod in pods.items:
            logging.info(pod)
        raise e

    # The income sample should be classified as class 0, and the anchor
    # explanation should reach better than 0.9 precision.
    res = predict(service_name, './data/income_input.json')
    assert res['predictions'] == [0]
    precision = explain(service_name, './data/income_input.json')
    assert precision > 0.9
    KFServing.delete(service_name, KFSERVING_TEST_NAMESPACE)
def test_aix_explainer():
    # Deploy a custom random-forest MNIST predictor with an AIX LimeImages
    # explainer attached. (Named distinctly from test_tabular_explainer above
    # so pytest collects both tests.)
    service_name = 'aix-explainer'
    default_endpoint_spec = V1alpha2EndpointSpec(
        predictor=V1alpha2PredictorSpec(
            custom=V1alpha2CustomSpec(
                container=V1Container(
                    name='predictor',
                    image='aipipeline/rf-predictor:0.4.0',
                    command=['python', '-m', 'rfserver',
                             '--model_name', 'aix-explainer'],
                    resources=V1ResourceRequirements(
                        requests={'cpu': '500m', 'memory': '1Gi'},
                        limits={'cpu': '500m', 'memory': '1Gi'})))),
        explainer=V1alpha2ExplainerSpec(
            min_replicas=1,
            aix=V1alpha2AIXExplainerSpec(
                type='LimeImages',
                resources=V1ResourceRequirements(
                    requests={'cpu': '500m', 'memory': '1Gi'},
                    limits={'cpu': '500m', 'memory': '1Gi'}))))

    isvc = V1alpha2InferenceService(
        api_version=api_version,
        kind=constants.KFSERVING_KIND,
        metadata=client.V1ObjectMeta(
            name=service_name, namespace=KFSERVING_TEST_NAMESPACE),
        spec=V1alpha2InferenceServiceSpec(default=default_endpoint_spec))

    KFServing.create(isvc)
    try:
        KFServing.wait_isvc_ready(service_name,
                                  namespace=KFSERVING_TEST_NAMESPACE,
                                  timeout_seconds=720)
    except RuntimeError as e:
        # On timeout, log the underlying Knative service and the pods of the
        # InferenceService before re-raising, to make CI failures debuggable.
        logging.info(KFServing.api_instance.get_namespaced_custom_object(
            'serving.knative.dev', 'v1', KFSERVING_TEST_NAMESPACE,
            'services', service_name + '-predictor-default'))
        pods = KFServing.core_api.list_namespaced_pod(
            KFSERVING_TEST_NAMESPACE,
            label_selector='serving.kubeflow.org/inferenceservice={}'.format(
                service_name))
        for pod in pods.items:
            logging.info(pod)
        raise e

    # The MNIST sample should be classified as digit 2, and the LIME mask
    # should highlight a substantial fraction of the image.
    res = predict(service_name, './data/mnist_input.json')
    assert res['predictions'] == [[0.0, 0.0, 1.0, 0.0, 0.0,
                                   0.0, 0.0, 0.0, 0.0, 0.0]]
    mask = explain_aix(service_name, './data/mnist_input.json')
    percent_in_mask = np.count_nonzero(mask) / np.size(np.array(mask))
    assert percent_in_mask > 0.6
    KFServing.delete(service_name, KFSERVING_TEST_NAMESPACE)
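# Running these tests assumes a cluster with KFServing installed, the
# KFSERVING_TEST_NAMESPACE already created, and the ./data/*.json sample
# inputs present relative to the working directory; they are normally
# collected and run via pytest. The module name in the invocation below is
# an assumption, not prescribed by the source:
#
#     pytest -s test_explainers.py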