Example 1
from google.cloud import aiplatform_v1beta1


async def sample_update_model():
    # Create a client
    client = aiplatform_v1beta1.ModelServiceAsyncClient()

    # Initialize request argument(s)
    model = aiplatform_v1beta1.Model()
    model.display_name = "display_name_value"

    request = aiplatform_v1beta1.UpdateModelRequest(
        model=model,
    )

    # Make the request
    response = await client.update_model(request=request)

    # Handle the response
    print(response)
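
Because the sample is a coroutine, it has to be driven by an event loop. A minimal way to run it, assuming it is defined at module level as shown:

import asyncio

# Create an event loop, run the coroutine to completion, then close the loop.
asyncio.run(sample_update_model())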
Example 2
from google.cloud import aiplatform_v1beta1


async def sample_upload_model():
    # Create a client
    client = aiplatform_v1beta1.ModelServiceAsyncClient()

    # Initialize request argument(s)
    model = aiplatform_v1beta1.Model()
    model.display_name = "display_name_value"

    request = aiplatform_v1beta1.UploadModelRequest(
        parent="parent_value",
        model=model,
    )

    # Make the request; awaiting the async call returns the
    # long-running operation handle
    operation = await client.upload_model(request=request)

    print("Waiting for operation to complete...")

    response = await operation.result()

    # Handle the response
    print(response)
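
For reference, the same upload with the blocking client looks like the sketch below. The function name sample_upload_model_sync is ours, not part of the generated samples; operation.result() blocks the calling thread until the upload finishes.

def sample_upload_model_sync():
    # Synchronous client: no event loop required
    client = aiplatform_v1beta1.ModelServiceClient()

    model = aiplatform_v1beta1.Model()
    model.display_name = "display_name_value"

    request = aiplatform_v1beta1.UploadModelRequest(
        parent="parent_value",
        model=model,
    )

    # upload_model returns a google.api_core Operation; result() blocks
    # until the long-running upload completes
    operation = client.upload_model(request=request)
    print(operation.result())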
Example 3
This sample uploads a tabular model served from a custom container and attaches an explanation spec that uses XRAI feature attributions.

from google.cloud import aiplatform_v1beta1


def upload_model_explain_tabular_managed_container_sample(
    project: str,
    display_name: str,
    container_spec_image_uri: str,
    artifact_uri: str,
    input_tensor_name: str,
    output_tensor_name: str,
    feature_names: list,
    location: str = "us-central1",
    api_endpoint: str = "us-central1-aiplatform.googleapis.com",
    timeout: int = 300,
):
    # The AI Platform services require regional API endpoints.
    client_options = {"api_endpoint": api_endpoint}
    # Initialize client that will be used to create and send requests.
    # This client only needs to be created once, and can be reused for multiple requests.
    client = aiplatform_v1beta1.ModelServiceClient(
        client_options=client_options)

    # Container specification for deploying the model
    container_spec = {
        "image_uri": container_spec_image_uri,
        "command": [],
        "args": []
    }

    # The explainability method and corresponding parameters.
    # step_count is the number of steps used to approximate the XRAI
    # path integral; 1 is the minimum and trades accuracy for speed.
    parameters = aiplatform_v1beta1.ExplanationParameters(
        {"xrai_attribution": {"step_count": 1}}
    )

    # The input tensor for feature attribution to the output
    # For single input model, y = f(x), this will be the serving input layer.
    input_metadata = aiplatform_v1beta1.ExplanationMetadata.InputMetadata(
        {
            # The serving input tensor to attribute against
            "input_tensor_name": input_tensor_name,
            # Input is tabular data
            "modality": "numeric",
            # Assign feature names to the inputs for explanation
            "encoding": "BAG_OF_FEATURES",
            "index_feature_mapping": feature_names,
        }
    )

    # The output tensor to explain
    # For single output model, y = f(x), this will be the serving output layer.
    output_metadata = aiplatform_v1beta1.ExplanationMetadata.OutputMetadata(
        {"output_tensor_name": output_tensor_name})

    # Assemble the explanation metadata
    metadata = aiplatform_v1beta1.ExplanationMetadata(
        inputs={"features": input_metadata},
        outputs={"prediction": output_metadata})

    # Assemble the explanation specification
    explanation_spec = aiplatform_v1beta1.ExplanationSpec(
        parameters=parameters, metadata=metadata)

    model = aiplatform_v1beta1.Model(
        display_name=display_name,
        # The Cloud Storage location of the custom model
        artifact_uri=artifact_uri,
        explanation_spec=explanation_spec,
        container_spec=container_spec,
    )
    parent = f"projects/{project}/locations/{location}"
    response = client.upload_model(parent=parent, model=model)
    print("Long running operation:", response.operation.name)
    upload_model_response = response.result(timeout=timeout)
    print("upload_model_response:", upload_model_response)