# Example #1
def sample_create_hyperparameter_tuning_job():
    """Create a hyperparameter tuning job and print the created resource."""
    # Client for the Vertex AI job service.
    job_client = aiplatform_v1beta1.JobServiceClient()

    # Assemble the tuning-job resource field by field.
    tuning_job = aiplatform_v1beta1.HyperparameterTuningJob()
    tuning_job.display_name = "display_name_value"
    tuning_job.study_spec.metrics.metric_id = "metric_id_value"
    tuning_job.study_spec.metrics.goal = "MINIMIZE"
    tuning_job.study_spec.parameters.double_value_spec.min_value = 0.96
    tuning_job.study_spec.parameters.double_value_spec.max_value = 0.962
    tuning_job.study_spec.parameters.parameter_id = "parameter_id_value"
    tuning_job.max_trial_count = 1609
    tuning_job.parallel_trial_count = 2128
    tuning_job.trial_job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value"

    # Wrap the resource in a create request and send it.
    create_request = aiplatform_v1beta1.CreateHyperparameterTuningJobRequest(
        parent="parent_value",
        hyperparameter_tuning_job=tuning_job,
    )
    response = job_client.create_hyperparameter_tuning_job(request=create_request)

    # Show the newly created job.
    print(response)
def sample_cancel_custom_job():
    """Cancel a custom job by resource name."""
    # Client for the Vertex AI job service.
    job_client = aiplatform_v1beta1.JobServiceClient()

    # Build and send the cancel request; the RPC returns no payload.
    cancel_request = aiplatform_v1beta1.CancelCustomJobRequest(name="name_value")
    job_client.cancel_custom_job(request=cancel_request)
def sample_cancel_hyperparameter_tuning_job():
    """Cancel a hyperparameter tuning job by resource name."""
    # Client for the Vertex AI job service.
    job_client = aiplatform_v1beta1.JobServiceClient()

    # Build and send the cancel request; the RPC returns no payload.
    cancel_request = aiplatform_v1beta1.CancelHyperparameterTuningJobRequest(
        name="name_value")
    job_client.cancel_hyperparameter_tuning_job(request=cancel_request)
# Example #4
def sample_cancel_data_labeling_job():
    """Cancel a data labeling job by resource name."""
    # Client for the Vertex AI job service.
    job_client = aiplatform_v1beta1.JobServiceClient()

    # Build and send the cancel request; the RPC returns no payload.
    cancel_request = aiplatform_v1beta1.CancelDataLabelingJobRequest(
        name="name_value")
    job_client.cancel_data_labeling_job(request=cancel_request)
def sample_resume_model_deployment_monitoring_job():
    """Resume a paused model deployment monitoring job."""
    # Client for the Vertex AI job service.
    job_client = aiplatform_v1beta1.JobServiceClient()

    # Build and send the resume request; the RPC returns no payload.
    resume_request = aiplatform_v1beta1.ResumeModelDeploymentMonitoringJobRequest(
        name="name_value")
    job_client.resume_model_deployment_monitoring_job(request=resume_request)
def sample_get_custom_job():
    """Fetch a custom job by resource name and print it."""
    # Client for the Vertex AI job service.
    job_client = aiplatform_v1beta1.JobServiceClient()

    # Build the get request, send it, and print the returned job.
    get_request = aiplatform_v1beta1.GetCustomJobRequest(name="name_value")
    print(job_client.get_custom_job(request=get_request))
# Example #7
def sample_list_custom_jobs():
    """List all custom jobs under a parent and print each one."""
    # Client for the Vertex AI job service.
    job_client = aiplatform_v1beta1.JobServiceClient()

    # The list call returns a pager that transparently fetches pages.
    list_request = aiplatform_v1beta1.ListCustomJobsRequest(parent="parent_value")
    for job in job_client.list_custom_jobs(request=list_request):
        print(job)
def sample_get_hyperparameter_tuning_job():
    """Fetch a hyperparameter tuning job by resource name and print it."""
    # Client for the Vertex AI job service.
    job_client = aiplatform_v1beta1.JobServiceClient()

    # Build the get request, send it, and print the returned job.
    get_request = aiplatform_v1beta1.GetHyperparameterTuningJobRequest(
        name="name_value")
    print(job_client.get_hyperparameter_tuning_job(request=get_request))
# Example #9
def sample_get_model_deployment_monitoring_job():
    """Fetch a model deployment monitoring job by resource name and print it."""
    # Client for the Vertex AI job service.
    job_client = aiplatform_v1beta1.JobServiceClient()

    # Build the get request, send it, and print the returned job.
    get_request = aiplatform_v1beta1.GetModelDeploymentMonitoringJobRequest(
        name="name_value")
    print(job_client.get_model_deployment_monitoring_job(request=get_request))
def sample_search_model_deployment_monitoring_stats_anomalies():
    """Search monitoring stats anomalies for one deployed model and print them."""
    # Client for the Vertex AI job service.
    job_client = aiplatform_v1beta1.JobServiceClient()

    # Scope the search to a monitoring job and a specific deployed model.
    search_request = aiplatform_v1beta1.SearchModelDeploymentMonitoringStatsAnomaliesRequest(
        model_deployment_monitoring_job="model_deployment_monitoring_job_value",
        deployed_model_id="deployed_model_id_value",
    )

    # The search call returns a pager; print every result it yields.
    for anomalies in job_client.search_model_deployment_monitoring_stats_anomalies(
            request=search_request):
        print(anomalies)
# Example #11
def sample_delete_custom_job():
    """Delete a custom job and wait for the long-running operation to finish."""
    # Client for the Vertex AI job service.
    job_client = aiplatform_v1beta1.JobServiceClient()

    # Deletion is asynchronous: the call returns a long-running operation.
    delete_request = aiplatform_v1beta1.DeleteCustomJobRequest(name="name_value")
    operation = job_client.delete_custom_job(request=delete_request)

    print("Waiting for operation to complete...")

    # Block until the operation completes, then print its result.
    result = operation.result()
    print(result)
# Example #12
def sample_create_model_deployment_monitoring_job():
    """Create a model deployment monitoring job and print the created resource."""
    # Client for the Vertex AI job service.
    job_client = aiplatform_v1beta1.JobServiceClient()

    # Populate the monitoring-job resource.
    monitoring_job = aiplatform_v1beta1.ModelDeploymentMonitoringJob()
    monitoring_job.display_name = "display_name_value"
    monitoring_job.endpoint = "endpoint_value"

    # Wrap the resource in a create request, send it, and print the result.
    create_request = aiplatform_v1beta1.CreateModelDeploymentMonitoringJobRequest(
        parent="parent_value",
        model_deployment_monitoring_job=monitoring_job,
    )
    print(job_client.create_model_deployment_monitoring_job(request=create_request))
# Example #13
def sample_create_custom_job():
    """Create a custom job and print the created resource."""
    # Client for the Vertex AI job service.
    job_client = aiplatform_v1beta1.JobServiceClient()

    # Populate the custom-job resource.
    new_job = aiplatform_v1beta1.CustomJob()
    new_job.display_name = "display_name_value"
    new_job.job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value"

    # Wrap the resource in a create request, send it, and print the result.
    create_request = aiplatform_v1beta1.CreateCustomJobRequest(
        parent="parent_value",
        custom_job=new_job,
    )
    print(job_client.create_custom_job(request=create_request))
def create_batch_prediction_job_bigquery_sample(
    project: str,
    display_name: str,
    model_name: str,
    instances_format: str,
    bigquery_source_input_uri: str,
    predictions_format: str,
    bigquery_destination_output_uri: str,
    location: str = "us-central1",
    api_endpoint: str = "us-central1-aiplatform.googleapis.com",
):
    """Create a batch prediction job reading from and writing to BigQuery.

    The AI Platform services require regional API endpoints; the client is
    built for ``api_endpoint`` and can be reused for multiple requests.
    """
    job_client = aiplatform_v1beta1.JobServiceClient(
        client_options={"api_endpoint": api_endpoint})

    # No extra model parameters in this sample: pass an empty struct Value.
    empty_params = json_format.ParseDict({}, Value())

    job_spec = {
        "display_name": display_name,
        # Format: 'projects/{project}/locations/{location}/models/{model_id}'
        "model": model_name,
        "model_parameters": empty_params,
        "input_config": {
            "instances_format": instances_format,
            "bigquery_source": {"input_uri": bigquery_source_input_uri},
        },
        "output_config": {
            "predictions_format": predictions_format,
            "bigquery_destination": {"output_uri": bigquery_destination_output_uri},
        },
        # optional
        "generate_explanation": True,
    }

    response = job_client.create_batch_prediction_job(
        parent=f"projects/{project}/locations/{location}",
        batch_prediction_job=job_spec)
    print("response:", response)
def sample_update_model_deployment_monitoring_job():
    """Update a model deployment monitoring job and wait for the operation."""
    # Client for the Vertex AI job service.
    job_client = aiplatform_v1beta1.JobServiceClient()

    # Populate the fields to apply to the monitoring job.
    # NOTE(review): no update_mask is set here — the server's field-mask
    # behavior in that case should be confirmed against the API docs.
    monitoring_job = aiplatform_v1beta1.ModelDeploymentMonitoringJob()
    monitoring_job.display_name = "display_name_value"
    monitoring_job.endpoint = "endpoint_value"

    update_request = aiplatform_v1beta1.UpdateModelDeploymentMonitoringJobRequest(
        model_deployment_monitoring_job=monitoring_job)

    # The update is a long-running operation.
    operation = job_client.update_model_deployment_monitoring_job(request=update_request)

    print("Waiting for operation to complete...")

    # Block until the operation completes, then print its result.
    print(operation.result())
def sample_create_data_labeling_job():
    """Create a data labeling job and print the created resource."""
    # Client for the Vertex AI job service.
    job_client = aiplatform_v1beta1.JobServiceClient()

    # Populate the data-labeling-job resource field by field.
    labeling_job = aiplatform_v1beta1.DataLabelingJob()
    labeling_job.display_name = "display_name_value"
    labeling_job.datasets = ['datasets_value_1', 'datasets_value_2']
    labeling_job.labeler_count = 1375
    labeling_job.instruction_uri = "instruction_uri_value"
    labeling_job.inputs_schema_uri = "inputs_schema_uri_value"
    labeling_job.inputs.null_value = "NULL_VALUE"

    # Wrap the resource in a create request, send it, and print the result.
    create_request = aiplatform_v1beta1.CreateDataLabelingJobRequest(
        parent="parent_value",
        data_labeling_job=labeling_job,
    )
    print(job_client.create_data_labeling_job(request=create_request))
# Example #17
def create_batch_prediction_job_tabular_forecasting_sample(
    project: str,
    display_name: str,
    model_name: str,
    gcs_source_uri: str,
    gcs_destination_output_uri_prefix: str,
    predictions_format: str,
    location: str = "us-central1",
    api_endpoint: str = "us-central1-aiplatform.googleapis.com",
    instances_format: str = None,
):
    """Create a batch prediction job for a tabular forecasting model on GCS.

    Args:
        project: GCP project ID.
        display_name: Display name for the new batch prediction job.
        model_name: Full model resource name
            ('projects/{project}/locations/{location}/models/{model_id}').
        gcs_source_uri: GCS URI of the input instances.
        gcs_destination_output_uri_prefix: GCS prefix for the prediction output.
        predictions_format: Format of the prediction output (e.g. 'csv').
        location: Model/job region.
        api_endpoint: Regional API endpoint to send requests to.
        instances_format: Format of the input instances. Defaults to
            ``predictions_format``, which preserves the original behavior of
            this sample (it reused the output format for the input); pass it
            explicitly when input and output formats differ.
    """
    # The AI Platform services require regional API endpoints.
    client_options = {"api_endpoint": api_endpoint}
    # Initialize client that will be used to create and send requests.
    # This client only needs to be created once, and can be reused for multiple requests.
    client = aiplatform_v1beta1.JobServiceClient(client_options=client_options)
    # Backward-compatible default: fall back to the output format.
    if instances_format is None:
        instances_format = predictions_format
    batch_prediction_job = {
        "display_name": display_name,
        # Format: 'projects/{project}/locations/{location}/models/{model_id}'
        "model": model_name,
        "input_config": {
            "instances_format": instances_format,
            "gcs_source": {
                "uris": [gcs_source_uri]
            },
        },
        "output_config": {
            "predictions_format": predictions_format,
            "gcs_destination": {
                "output_uri_prefix": gcs_destination_output_uri_prefix
            },
        },
    }
    parent = f"projects/{project}/locations/{location}"
    response = client.create_batch_prediction_job(
        parent=parent, batch_prediction_job=batch_prediction_job)
    print("response:", response)
def sample_create_batch_prediction_job():
    """Create a batch prediction job with GCS input/output and print it."""
    # Client for the Vertex AI job service.
    job_client = aiplatform_v1beta1.JobServiceClient()

    # Populate the batch-prediction-job resource: GCS source and destination.
    prediction_job = aiplatform_v1beta1.BatchPredictionJob()
    prediction_job.display_name = "display_name_value"
    prediction_job.input_config.gcs_source.uris = ['uris_value_1', 'uris_value_2']
    prediction_job.input_config.instances_format = "instances_format_value"
    prediction_job.output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value"
    prediction_job.output_config.predictions_format = "predictions_format_value"

    # Wrap the resource in a create request, send it, and print the result.
    create_request = aiplatform_v1beta1.CreateBatchPredictionJobRequest(
        parent="parent_value",
        batch_prediction_job=prediction_job,
    )
    print(job_client.create_batch_prediction_job(request=create_request))
# Example #19
def execute_notebook(args):
    """Create a Notebooks execution and (optionally) block until it finishes.

    Reads from ``args``: project, location, execution_id, block_pipeline,
    fail_pipeline (plus whatever build_execution_template consumes).
    Returns a response built by build_response(); on any failure it calls
    handle_error(args.fail_pipeline, ...) and returns the error response.
    """

    # Tag all API traffic with the pipeline-components user agent.
    client_info = gapic_v1.client_info.ClientInfo(
        user_agent="google-cloud-pipeline-components", )

    # One client for the Notebooks API and one for the Vertex AI job service;
    # the latter uses a regional endpoint derived from args.location.
    client_notebooks = notebooks.NotebookServiceClient(client_info=client_info)
    client_vertexai_jobs = vertex_ai_beta.JobServiceClient(
        client_options={
            'api_endpoint': f'{args.location}-aiplatform.googleapis.com'
        },
        client_info=client_info)

    execution_parent = f'projects/{args.project}/locations/{args.location}'
    execution_fullname = f'{execution_parent}/executions/{args.execution_id}'
    execution_template = build_execution_template(args)
    gcp_resources = ''

    # Step 1: create the execution. The returned operation is not awaited;
    # progress is polled via get_execution() below instead.
    try:
        print('Try create_execution()...')
        execution_create_operation = client_notebooks.create_execution(
            parent=execution_parent,
            execution_id=args.execution_id,
            execution=execution_template)
        # Record the created resource so downstream pipeline steps can find it.
        gcp_resources = json.dumps({
            "resources": [{
                "resourceType":
                'type.googleapis.com/google.cloud.notebooks.v1.Execution',
                "resourceUri": execution_fullname
            }]
        })
    except Exception as e:
        response = build_response(error=f'create_execution() failed: {e}')
        handle_error(args.fail_pipeline, response)
        return response

    # Gets initial execution
    try:
        print('Try get_execution()...')
        execution = client_notebooks.get_execution(name=execution_fullname)
    except Exception as e:
        response = build_response(error=f'get_execution() failed: {e}')
        handle_error(args.fail_pipeline, response)
        return response

    # Fire-and-forget mode: report the current state without waiting.
    if not args.block_pipeline:
        print('Not blocking pipeline...')
        return build_response(
            state=Execution.State(execution.state).name,
            output_notebook_file=execution.output_notebook_file,
            gcp_resources=gcp_resources)

    # Waits for execution to finish.
    print('Blocking pipeline...')
    execution_state = ''
    execution_job_uri = ''
    # Poll every 30s until the execution reaches a completed state.
    while True:
        try:
            execution = client_notebooks.get_execution(name=execution_fullname)
            execution_state = getattr(execution, 'state', '')
            print(
                f'execution.state is {Execution.State(execution_state).name}')
        except Exception as e:
            response = build_response(
                error=f'get_execution() for blocking pipeline failed: {e}')
            handle_error(args.fail_pipeline, response)
            return response

        # Job URI is not available when state is INITIALIZING.
        if execution_state in _STATES_JOB_URI and not execution_job_uri:
            execution_job_uri = getattr(execution, 'job_uri', '')
            print(f'execution.job_uri is {execution_job_uri}')

        if execution_state in _STATES_COMPLETED:
            break
        time.sleep(30)

    # For some reason, execution.get and job.get might not return the same state
    # so we check if we can get the error message using the AI Plaform API.
    # It is only possible if there is job_uri available though.
    if execution_state in _STATES_ERROR:
        if execution_job_uri:
            # Poll the underlying Vertex AI custom job for a concrete error.
            while True:
                try:
                    custom_job = client_vertexai_jobs.get_custom_job(
                        name=execution_job_uri)
                except Exception as e:
                    response = build_response(
                        error=f'get_custom_job() failed: {e}')
                    handle_error(args.fail_pipeline, response)
                    return response

                custom_job_state = getattr(custom_job, 'state', None)
                if custom_job_state in _STATES_COMPLETED:
                    break
                time.sleep(30)

            # == to `if state in _JOB_ERROR_STATES`
            custom_job_error = getattr(custom_job, 'error', None)
            if custom_job_error:
                response = build_response(
                    error=
                    f'Error {custom_job_error.code}: {custom_job_error.message}'
                )
                # NOTE(review): earlier failure paths pass `response` directly
                # to handle_error, but here a `(None, response)` tuple is
                # passed — confirm which shape handle_error expects.
                handle_error(args.fail_pipeline, (None, response))
                return response

        # The job might be successful but we need to address that the execution
        # had a problem. The previous loop was in hope to find the error message,
        # we didn't have any so we return the execution state as the message.
        response = build_response(
            error=
            f'Execution finished with state: {Execution.State(execution_state).name}'
        )
        handle_error(args.fail_pipeline, (None, response))
        return response

    # Success: report the final state and output notebook location.
    return build_response(state=Execution.State(execution_state).name,
                          output_notebook_file=execution.output_notebook_file,
                          gcp_resources=gcp_resources)