Example #1
def __init__(self, params: dict) -> None:
    super().__init__(params)
    aiplatform.init(
        project=self._params["gcp_project_id"],
        location=self._params["gcp_location"],
    )
    self.__endpoint = aiplatform.Endpoint(self._params["gcp_endpoint"])
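For context, a minimal construction sketch. The class name VertexEndpointWrapper and all parameter values are hypothetical; only the dict keys are taken from the constructor above.

params = {
    "gcp_project_id": "my-project",   # hypothetical project ID
    "gcp_location": "us-central1",    # hypothetical region
    "gcp_endpoint": "1234567890",     # hypothetical endpoint ID or resource name
}
wrapper = VertexEndpointWrapper(params)  # hypothetical class defining the __init__ above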
Example #2
from typing import Dict

from google.cloud import aiplatform


def explain_tabular_sample(project: str, location: str, endpoint_id: str,
                           instance_dict: Dict):

    aiplatform.init(project=project, location=location)

    endpoint = aiplatform.Endpoint(endpoint_id)

    response = endpoint.explain(instances=[instance_dict], parameters={})

    for explanation in response.explanations:
        print(" explanation")
        # Feature attributions.
        attributions = explanation.attributions
        for attribution in attributions:
            print("  attribution")
            print("   baseline_output_value:",
                  attribution.baseline_output_value)
            print("   instance_output_value:",
                  attribution.instance_output_value)
            print("   output_display_name:", attribution.output_display_name)
            print("   approximation_error:", attribution.approximation_error)
            print("   output_name:", attribution.output_name)
            for index in attribution.output_index:
                print("   output_index:", index)

    for prediction in response.predictions:
        print(prediction)
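A hedged invocation sketch for explain_tabular_sample; the identifiers and feature values are placeholders, and the deployed model must have explanation metadata configured for endpoint.explain() to succeed.

explain_tabular_sample(
    project="my-project",        # placeholder project ID
    location="us-central1",      # placeholder region
    endpoint_id="1234567890",    # placeholder endpoint ID
    instance_dict={"feature_1": 1.0, "feature_2": 2.0},  # placeholder features
)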
Example #3
    def test_list_models(self, get_endpoint_with_models_mock):
        aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)

        ept = aiplatform.Endpoint(_TEST_ID)
        my_models = ept.list_models()

        assert my_models == _TEST_DEPLOYED_MODELS
Example #4
from google.cloud import aiplatform


def endpoint_predict_sample(
    project: str, location: str, instances: list, endpoint_name: str
):
    aiplatform.init(project=project, location=location)

    endpoint = aiplatform.Endpoint(endpoint_name)

    prediction = endpoint.predict(instances=instances)
    print(prediction)
    return prediction
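A possible call, assuming a tabular model is already deployed; all identifiers and feature names are placeholders.

prediction = endpoint_predict_sample(
    project="my-project",
    location="us-central1",
    instances=[{"feature_1": 1.0, "feature_2": 2.0}],  # schema depends on the model
    endpoint_name="1234567890",
)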
Example #5
from google.cloud import aiplatform


def predict_text_classification_single_label_sample(
    project, location, endpoint_name, content
):
    aiplatform.init(project=project, location=location)

    endpoint = aiplatform.Endpoint(endpoint_name)

    response = endpoint.predict(instances=[{"content": content}], parameters={})

    for prediction_ in response.predictions:
        print(prediction_)
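A usage sketch with placeholder values; the endpoint is assumed to serve an AutoML single-label text classification model.

predict_text_classification_single_label_sample(
    project="my-project",
    location="us-central1",
    endpoint_name="1234567890",
    content="I loved this movie!",  # placeholder text to classify
)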
Example #6
 def __init__(
     self,
     endpoint: Union[str, aiplatform.Endpoint],
     input_types: "OrderedDict[str, lit_types.LitType]",  # noqa: F821
     output_types: "OrderedDict[str, lit_types.LitType]",  # noqa: F821
     model_id: Optional[str] = None,
 ):
     """Construct a VertexLitModel.
     Args:
         model:
             Required. The name of the Endpoint resource. Format:
             ``projects/{project}/locations/{location}/endpoints/{endpoint}``
         input_types:
             Required. An OrderedDict of string names matching the features of the model
             as the key, and the associated LitType of the feature.
         output_types:
             Required. An OrderedDict of string names matching the labels of the model
             as the key, and the associated LitType of the label.
         model_id:
             Optional. A string of the specific model in the endpoint to create the
             LIT model from. If this is not set, any usable model in the endpoint is
             used to create the LIT model.
     Raises:
         ValueError: If ``model_id`` is not found in the endpoint.
     """
     if isinstance(endpoint, str):
         self._endpoint = aiplatform.Endpoint(endpoint)
     else:
         self._endpoint = endpoint
     self._model_id = model_id
     self._input_types = input_types
     self._output_types = output_types
     # Check if the model with the model ID has explanation enabled
     if model_id:
         deployed_model = next(
             filter(
                 lambda model: model.id == model_id, self._endpoint.list_models()
             ),
             None,
         )
         if not deployed_model:
             raise ValueError(
                 "A model with id {model_id} was not found in the endpoint {endpoint}.".format(
                     model_id=model_id, endpoint=endpoint
                 )
             )
         self._explanation_enabled = bool(deployed_model.explanation_spec)
     # Check if all models in the endpoint have explanation enabled
     else:
         self._explanation_enabled = all(
             model.explanation_spec for model in self._endpoint.list_models()
         )
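A construction sketch, assuming the lit_nlp package is installed and the endpoint serves a two-feature regression model; the feature and label names and the endpoint resource name are hypothetical.

import collections

from lit_nlp.api import types as lit_types

input_types = collections.OrderedDict(
    [("feature_1", lit_types.Scalar()), ("feature_2", lit_types.Scalar())]
)
output_types = collections.OrderedDict(
    [("label", lit_types.RegressionScore())]
)

lit_model = VertexLitModel(
    endpoint="projects/123/locations/us-central1/endpoints/456",  # placeholder
    input_types=input_types,
    output_types=output_types,
)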
Example #7
    def test_delete_endpoint_with_force(self, sdk_undeploy_all_mock,
                                        delete_endpoint_mock, sync):

        ept = aiplatform.Endpoint(_TEST_ID)
        ept.delete(force=True, sync=sync)

        if not sync:
            ept.wait()

        # undeploy_all() should be called if force is set to True
        sdk_undeploy_all_mock.assert_called_once()

        delete_endpoint_mock.assert_called_once_with(name=_TEST_ENDPOINT_NAME)
Example #8
    def test_accessing_properties_with_no_resource_raises(self):

        my_endpoint = aiplatform.Endpoint(_TEST_ENDPOINT_NAME)

        my_endpoint._gca_resource = None

        with pytest.raises(RuntimeError) as e:
            my_endpoint.gca_resource
        e.match(regexp=r"Endpoint resource has not been created.")

        with pytest.raises(RuntimeError) as e:
            my_endpoint.network
        e.match(regexp=r"Endpoint resource has not been created.")
Example #9
from google.cloud import aiplatform


def predict_text_entity_extraction_sample(project, location, endpoint_id,
                                          content):

    aiplatform.init(project=project, location=location)

    endpoint = aiplatform.Endpoint(endpoint_id)

    response = endpoint.predict(
        instances=[{"content": content}], parameters={}
    )

    for prediction_ in response.predictions:
        print(prediction_)
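An invocation sketch with placeholder identifiers; the endpoint is assumed to serve an AutoML text entity extraction model.

predict_text_entity_extraction_sample(
    project="my-project",
    location="us-central1",
    endpoint_id="1234567890",
    content="Ada Lovelace joined Acme Corp in London.",  # placeholder text
)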
Example #10
    def test_delete_endpoint_without_force(self, sdk_undeploy_all_mock,
                                           delete_endpoint_mock, sync):
        aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)

        ept = aiplatform.Endpoint(_TEST_ID)
        ept.delete(sync=sync)

        if not sync:
            ept.wait()

        # undeploy_all() should not be called unless force is set to True
        sdk_undeploy_all_mock.assert_not_called()

        delete_endpoint_mock.assert_called_once_with(name=_TEST_ENDPOINT_NAME)
Example #11
from typing import Dict, List

from google.cloud import aiplatform


def predict_tabular_regression_sample(
    project: str,
    location: str,
    endpoint_name: str,
    instances: List[Dict],
):
    aiplatform.init(project=project, location=location)

    endpoint = aiplatform.Endpoint(endpoint_name)

    response = endpoint.predict(instances=instances)

    for prediction_ in response.predictions:
        print(prediction_)
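A hedged call sketch; the identifiers and instance schema are placeholders. AutoML tabular endpoints commonly expect string-encoded values, as the end-to-end test in Example #20 below also assumes when it casts ints to strings.

predict_tabular_regression_sample(
    project="my-project",
    location="us-central1",
    endpoint_name="1234567890",
    instances=[{"feature_1": "1.0", "feature_2": "2.0"}],  # placeholder schema
)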
Example #12
    def test_undeploy_all(self, sdk_private_undeploy_mock, sync):

        ept = aiplatform.Endpoint(_TEST_ID)
        ept.undeploy_all(sync=sync)

        if not sync:
            ept.wait()

        # undeploy_all() results in an undeploy() call for each deployed_model
        sdk_private_undeploy_mock.assert_has_calls(
            [
                mock.call(deployed_model_id=deployed_model.id, sync=sync)
                for deployed_model in _TEST_DEPLOYED_MODELS
            ],
            any_order=True,
        )
Example #13
  def _setUpVertexPredictionMocks(self):
    importlib.reload(initializer)
    importlib.reload(aiplatform)

    self._serving_container_image_uri = 'gcr.io/path/to/container'
    self._serving_path = os.path.join(self._output_data_dir, 'serving_path')
    self._endpoint_name = 'endpoint-name'
    self._endpoint_region = 'us-central1'
    self._deployed_model_id = 'model_id'

    self._mock_create_client = mock.Mock()
    initializer.global_config.create_client = self._mock_create_client
    self._mock_create_client.return_value = mock.Mock(
        spec=endpoint_service_client.EndpointServiceClient)

    self._mock_get_endpoint = mock.Mock()
    endpoint_service_client.EndpointServiceClient.get_endpoint = self._mock_get_endpoint
    self._mock_get_endpoint.return_value = endpoint.Endpoint(
        display_name=self._endpoint_name)

    aiplatform.init(
        project=self._project_id,
        location=None,
        credentials=mock.Mock(spec=auth_credentials.AnonymousCredentials()))

    self._mock_endpoint = aiplatform.Endpoint(
        endpoint_name='projects/{}/locations/us-central1/endpoints/1234'.format(
            self._project_id))

    self._mock_endpoint_create = mock.Mock()
    aiplatform.Endpoint.create = self._mock_endpoint_create
    self._mock_endpoint_create.return_value = self._mock_endpoint

    self._mock_endpoint_list = mock.Mock()
    aiplatform.Endpoint.list = self._mock_endpoint_list
    self._mock_endpoint_list.return_value = []

    self._mock_model_upload = mock.Mock()
    aiplatform.Model.upload = self._mock_model_upload

    self._mock_model_deploy = mock.Mock()
    self._mock_model_upload.return_value.deploy = self._mock_model_deploy

    self._ai_platform_serving_args_vertex = {
        'endpoint_name': self._endpoint_name,
        'project_id': self._project_id,
    }
Example #14
    def test_accessing_properties_with_no_resource_raises(self):
        """Ensure a descriptive RuntimeError is raised when the
        GAPIC object has not been populated"""

        my_endpoint = aiplatform.Endpoint(_TEST_ENDPOINT_NAME)

        # Create a gca_resource without `name` being populated
        my_endpoint._gca_resource = gca_endpoint.Endpoint(
            create_time=datetime.now())

        with pytest.raises(RuntimeError) as e:
            my_endpoint.gca_resource
        e.match(regexp=r"Endpoint resource has not been created.")

        with pytest.raises(RuntimeError) as e:
            my_endpoint.network
        e.match(regexp=r"Endpoint resource has not been created.")
Example #15
    def test_undeploy_all(self, sdk_private_undeploy_mock, sync):

        # Ensure mock traffic split deployed model IDs are same as expected IDs
        assert set(_TEST_LONG_TRAFFIC_SPLIT_SORTED_IDS) == set(
            _TEST_LONG_TRAFFIC_SPLIT.keys())

        ept = aiplatform.Endpoint(_TEST_ID)
        ept.undeploy_all(sync=sync)

        if not sync:
            ept.wait()

        # undeploy_all() results in an undeploy() call for each deployed_model
        # Models are undeployed in ascending order of traffic percentage
        sdk_private_undeploy_mock.assert_has_calls([
            mock.call(deployed_model_id=deployed_model_id, sync=sync)
            for deployed_model_id in _TEST_LONG_TRAFFIC_SPLIT_SORTED_IDS
        ])
Example #16
  def _setUpDeleteVertexModelMocks(self):
    importlib.reload(initializer)
    importlib.reload(aiplatform)

    self._endpoint_name = 'endpoint_name'
    self._deployed_model_id = 'model_id'

    self._mock_create_client = mock.Mock()
    initializer.global_config.create_client = self._mock_create_client
    self._mock_create_client.return_value = mock.Mock(
        spec=endpoint_service_client.EndpointServiceClient)

    self._mock_get_endpoint = mock.Mock()
    endpoint_service_client.EndpointServiceClient.get_endpoint = self._mock_get_endpoint
    self._mock_get_endpoint.return_value = endpoint.Endpoint(
        display_name=self._endpoint_name)

    aiplatform.init(
        project=self._project_id,
        location=None,
        credentials=mock.Mock(spec=auth_credentials.AnonymousCredentials()))

    self._mock_endpoint = aiplatform.Endpoint(
        endpoint_name='projects/{}/locations/us-central1/endpoints/1234'.format(
            self._project_id))

    self._mock_endpoint_list = mock.Mock()
    aiplatform.Endpoint.list = self._mock_endpoint_list
    self._mock_endpoint_list.return_value = [self._mock_endpoint]

    self._mock_model_delete = mock.Mock()
    self._mock_endpoint.undeploy = self._mock_model_delete

    self._mock_list_models = mock.Mock()
    self._mock_list_models.return_value = [
        endpoint.DeployedModel(
            display_name=self._model_name, id=self._deployed_model_id)
    ]
    self._mock_endpoint.list_models = self._mock_list_models

    self._ai_platform_serving_args_vertex = {
        'endpoint_name': self._endpoint_name,
        'project_id': self._project_id,
    }
Example #17
    def test_create_lit_model_from_endpoint_returns_model(
            self, feature_types, label_types, model_id):
        endpoint = aiplatform.Endpoint(_TEST_ENDPOINT_NAME)
        lit_model = create_lit_model_from_endpoint(endpoint, feature_types,
                                                   label_types, model_id)
        test_inputs = [
            {
                "feature_1": 1.0,
                "feature_2": 2.0
            },
        ]
        outputs = lit_model.predict_minibatch(test_inputs)

        assert lit_model.input_spec() == dict(feature_types)
        assert lit_model.output_spec() == dict(label_types)
        assert len(outputs) == 1
        for item in outputs:
            assert item.keys() == {"label"}
            assert len(item.values()) == 1
Example #18
from typing import Dict, List

from google.cloud import aiplatform


def predict_tabular_classification_sample(
    project: str,
    location: str,
    endpoint_name: str,
    instances: List[Dict],
):
    """
    Args:
        project: Your project ID or project number.
        location: Region where the Endpoint is located. For example, 'us-central1'.
        endpoint_name: A fully qualified endpoint name or endpoint ID. Example:
            "projects/123/locations/us-central1/endpoints/456", or "456" when
            project and location are initialized or passed.
        instances: A list of one or more instances (examples) to return a prediction for.
    """
    aiplatform.init(project=project, location=location)

    endpoint = aiplatform.Endpoint(endpoint_name)

    response = endpoint.predict(instances=instances)

    for prediction_ in response.predictions:
        print(prediction_)
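A call sketch using the fully qualified resource-name form described in the docstring; all values are placeholders.

predict_tabular_classification_sample(
    project="my-project",
    location="us-central1",
    endpoint_name="projects/123/locations/us-central1/endpoints/456",  # placeholder
    instances=[{"feature_1": "1.0", "feature_2": "2.0"}],  # placeholder schema
)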
Example #19
    def test_create_lit_model_from_endpoint_with_xai_returns_model(
            self, feature_types, label_types, model_id):
        endpoint = aiplatform.Endpoint(_TEST_ENDPOINT_NAME)
        lit_model = create_lit_model_from_endpoint(endpoint, feature_types,
                                                   label_types, model_id)
        test_inputs = [
            {
                "feature_1": 1.0,
                "feature_2": 2.0
            },
        ]
        outputs = lit_model.predict_minibatch(test_inputs)

        assert lit_model.input_spec() == dict(feature_types)
        assert lit_model.output_spec() == {
            **label_types,
            "feature_attribution": lit_types.FeatureSalience(signed=True),
        }
        assert len(outputs) == 1
        for item in outputs:
            assert item.keys() == {"label", "feature_attribution"}
            assert len(item.values()) == 2
Example #20
    def test_end_to_end_tabular(self, shared_state):
        """Build dataset, train a custom and AutoML model, deploy, and get predictions"""

        assert shared_state["bucket"]
        bucket = shared_state["bucket"]

        blob = bucket.blob(_BLOB_PATH)

        # Download the CSV file into memory and save it directly to the staging bucket
        with request.urlopen(_DATASET_SRC) as response:
            data = response.read()
            blob.upload_from_string(data)

        # Collection of resources generated by this test, to be deleted during teardown
        shared_state["resources"] = []

        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            staging_bucket=shared_state["staging_bucket_name"],
        )

        # Create and import a single managed dataset used by both training jobs

        dataset_gcs_source = f'gs://{shared_state["staging_bucket_name"]}/{_BLOB_PATH}'

        ds = aiplatform.TabularDataset.create(
            display_name=self._make_display_name("dataset"),
            gcs_source=[dataset_gcs_source],
            sync=False,
            create_request_timeout=180.0,
        )

        shared_state["resources"].extend([ds])

        # Define both training jobs

        custom_job = aiplatform.CustomTrainingJob(
            display_name=self._make_display_name("train-housing-custom"),
            script_path=_LOCAL_TRAINING_SCRIPT_PATH,
            container_uri="gcr.io/cloud-aiplatform/training/tf-cpu.2-2:latest",
            requirements=["gcsfs==0.7.1"],
            model_serving_container_image_uri=
            "gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-2:latest",
        )

        automl_job = aiplatform.AutoMLTabularTrainingJob(
            display_name=self._make_display_name("train-housing-automl"),
            optimization_prediction_type="regression",
            optimization_objective="minimize-rmse",
        )

        # Kick off both training jobs, AutoML job will take approx one hour to run

        custom_model = custom_job.run(
            ds,
            replica_count=1,
            model_display_name=self._make_display_name("custom-housing-model"),
            timeout=1234,
            restart_job_on_worker_restart=True,
            enable_web_access=True,
            sync=False,
            create_request_timeout=None,
        )

        automl_model = automl_job.run(
            dataset=ds,
            target_column="median_house_value",
            model_display_name=self._make_display_name("automl-housing-model"),
            sync=False,
        )

        shared_state["resources"].extend(
            [automl_job, automl_model, custom_job, custom_model])

        # Deploy both models after training completes
        custom_endpoint = custom_model.deploy(machine_type="n1-standard-4",
                                              sync=False)
        automl_endpoint = automl_model.deploy(machine_type="n1-standard-4",
                                              sync=False)
        shared_state["resources"].extend([automl_endpoint, custom_endpoint])

        custom_batch_prediction_job = custom_model.batch_predict(
            job_display_name=self._make_display_name("automl-housing-model"),
            instances_format="csv",
            machine_type="n1-standard-4",
            gcs_source=dataset_gcs_source,
            gcs_destination_prefix=
            f'gs://{shared_state["staging_bucket_name"]}/bp_results/',
            sync=False,
        )

        shared_state["resources"].append(custom_batch_prediction_job)

        in_progress_done_check = custom_job.done()
        custom_job.wait_for_resource_creation()

        automl_job.wait_for_resource_creation()
        custom_batch_prediction_job.wait_for_resource_creation()

        # Send online prediction with same instance to both deployed models
        # This sample is taken from an observation where median_house_value = 94600
        custom_endpoint.wait()

        # Check scheduling is correctly set
        assert (custom_job._gca_resource.training_task_inputs["scheduling"]
                ["timeout"] == "1234s")
        assert (custom_job._gca_resource.training_task_inputs["scheduling"]
                ["restartJobOnWorkerRestart"] is True)

        custom_prediction = custom_endpoint.predict([_INSTANCE], timeout=180.0)

        custom_batch_prediction_job.wait()

        automl_endpoint.wait()
        automl_prediction = automl_endpoint.predict(
            [{k: str(v)
              for k, v in _INSTANCE.items()}],  # Cast int values to strings
            timeout=180.0,
        )

        # Test lazy loading of Endpoint, check getter was never called after predict()
        custom_endpoint = aiplatform.Endpoint(custom_endpoint.resource_name)
        custom_endpoint.predict([_INSTANCE])

        completion_done_check = custom_job.done()
        assert custom_endpoint._skipped_getter_call()

        assert (custom_job.state ==
                gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED)
        assert (automl_job.state ==
                gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED)
        assert (custom_batch_prediction_job.state ==
                gca_job_state.JobState.JOB_STATE_SUCCEEDED)

        # Ensure a single prediction was returned
        assert len(custom_prediction.predictions) == 1
        assert len(automl_prediction.predictions) == 1

        # Ensure the models are remotely accurate
        try:
            automl_result = automl_prediction.predictions[0]["value"]
            custom_result = custom_prediction.predictions[0][0]
            assert 200000 > automl_result > 50000
            assert 200000 > custom_result > 50000
        except KeyError as e:
            raise RuntimeError("Unexpected prediction response structure") from e

        # Check done() method works correctly
        assert in_progress_done_check is False
        assert completion_done_check is True
Example #21
from google.cloud import aiplatform

endpoint = aiplatform.Endpoint(endpoint_name="ENDPOINT_STRING")

# A test example we'll send to our model for prediction
test_mpg = [
    1.4838871833555929, 1.8659883497083019, 2.234620276849616,
    1.0187816540094903, -2.530890710602246, -1.6046416850441676,
    -0.4651483719733302, -0.4952254087173721, 0.7746763768735953
]

response = endpoint.predict([test_mpg])

print('API response: ', response)

print('Predicted MPG: ', response.predictions[0][0])
Example #22
    def test_list_models(self, get_endpoint_with_models_mock):

        ept = aiplatform.Endpoint(_TEST_ID)
        my_models = ept.list_models()

        assert my_models == _TEST_DEPLOYED_MODELS