Example 1
def teardown(shared_state):
    yield

    training_pipeline_id = shared_state["training_pipeline_name"].split("/")[-1]

    # Stop the training pipeline
    cancel_training_pipeline_sample.cancel_training_pipeline_sample(
        project=PROJECT_ID, training_pipeline_id=training_pipeline_id
    )

    client_options = {"api_endpoint": "us-central1-aiplatform.googleapis.com"}
    pipeline_client = aiplatform.gapic.PipelineServiceClient(
        client_options=client_options
    )

    # Wait for the training pipeline to reach the CANCELLED state
    helpers.wait_for_job_state(
        get_job_method=pipeline_client.get_training_pipeline,
        name=shared_state["training_pipeline_name"],
    )

    # Delete the training pipeline
    delete_training_pipeline_sample.delete_training_pipeline_sample(
        project=PROJECT_ID, training_pipeline_id=training_pipeline_id
    )
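
A note on the pattern: these teardown functions are generator-style pytest fixtures. The test runs at the yield, and everything below it runs afterwards, even when the test fails. A minimal sketch of how such a fixture and the shared_state dictionary it reads might be wired up in conftest.py (the decorator arguments and the shared_state fixture are assumptions, not code from these samples):

import pytest

@pytest.fixture()
def shared_state():
    # Dictionary the test fills with resource names so that the code
    # after the yield in the teardown fixture can clean them up.
    return {}

@pytest.fixture(autouse=True)
def teardown(shared_state):
    yield  # the test body runs here
    # Cleanup calls like the ones above go here; they run on test exit,
    # even when the test failed.
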
Example 2
def teardown(shared_state):
    yield

    assert "/" in shared_state["data_labeling_job_name"]

    data_labeling_job_id = shared_state["data_labeling_job_name"].split(
        "/")[-1]

    client_options = {"api_endpoint": API_ENDPOINT}
    client = aiplatform.gapic.JobServiceClient(client_options=client_options)

    name = client.data_labeling_job_path(
        project=PROJECT_ID,
        location=LOCATION,
        data_labeling_job=data_labeling_job_id)
    client.cancel_data_labeling_job(name=name)

    # Verify the data labeling job is cancelled, or time out after 400 seconds
    helpers.wait_for_job_state(get_job_method=client.get_data_labeling_job,
                               name=name,
                               timeout=400,
                               freq=10)

    # Delete the data labeling job
    response = client.delete_data_labeling_job(name=name)
    print("Delete LRO:", response.operation.name)
    delete_data_labeling_job_response = response.result(timeout=300)
    print("delete_data_labeling_job_response",
          delete_data_labeling_job_response)
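
All of these examples poll through helpers.wait_for_job_state, which is not shown on this page. A minimal sketch of what such a poller could look like, assuming the gapic job and pipeline objects expose a .state enum whose string form contains the expected state name (the helper in the actual samples repository may differ):

import time

def wait_for_job_state(get_job_method, name, expected_state="CANCELLED",
                       timeout=90, freq=1.5):
    # Poll the resource every `freq` seconds until its state matches,
    # failing loudly once the timeout elapses.
    deadline = time.time() + timeout
    while time.time() < deadline:
        job = get_job_method(name=name)
        if expected_state in str(job.state):
            return job
        time.sleep(freq)
    raise RuntimeError(
        f"{name} did not reach state {expected_state} within {timeout}s")
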
Example 3
def teardown(shared_state):
    yield

    hyperparameter_tuning_job_id = shared_state[
        "hyperparameter_tuning_job_name"].split("/")[-1]

    # Cancel the created hyperparameter tuning job
    cancel_hyperparameter_tuning_job_sample.cancel_hyperparameter_tuning_job_sample(
        project=PROJECT_ID,
        hyperparameter_tuning_job_id=hyperparameter_tuning_job_id)

    job_client = aiplatform.gapic.JobServiceClient(
        client_options={
            "api_endpoint": "us-central1-aiplatform.googleapis.com"
        })

    # Wait for the hyperparameter tuning job to reach the CANCELLED state
    helpers.wait_for_job_state(
        get_job_method=job_client.get_hyperparameter_tuning_job,
        name=shared_state["hyperparameter_tuning_job_name"],
    )

    # Delete the created hyperparameter tuning job
    delete_hyperparameter_tuning_job_sample.delete_hyperparameter_tuning_job_sample(
        project=PROJECT_ID,
        hyperparameter_tuning_job_id=hyperparameter_tuning_job_id)
Example 4
def test_create_training_pipeline_custom_training_managed_dataset_sample(
        capsys, shared_state, pipeline_client):
    create_training_pipeline_custom_training_managed_dataset_sample.create_training_pipeline_custom_training_managed_dataset_sample(
        project=PROJECT_ID,
        display_name=DISPLAY_NAME,
        model_display_name=MODEL_DISPLAY_NAME,
        dataset_id=DATASET_ID,
        annotation_schema_uri=ANNOTATION_SCHEMA_URI,
        training_container_spec_image_uri=TRAINING_CONTAINER_SPEC_IMAGE_URI,
        model_container_spec_image_uri=MODEL_CONTAINER_SPEC_IMAGE_URI,
        base_output_uri_prefix=BASE_OUTPUT_URI_PREFIX,
    )

    out, _ = capsys.readouterr()

    # Save the resource name of the newly created training pipeline
    shared_state["training_pipeline_name"] = helpers.get_name(out)

    # Poll until the pipeline succeeds because we want to test the model_upload step as well.
    helpers.wait_for_job_state(
        get_job_method=pipeline_client.get_training_pipeline,
        name=shared_state["training_pipeline_name"],
        expected_state="SUCCEEDED",
        timeout=1800,
        freq=20,
    )

    training_pipeline = pipeline_client.get_training_pipeline(
        name=shared_state["training_pipeline_name"])

    # Check that the model has indeed been uploaded.
    assert training_pipeline.model_to_upload.name != ""

    shared_state["model_name"] = training_pipeline.model_to_upload.name
Example 5
def teardown_data_labeling_job(capsys, shared_state, data_labeling_job_client):
    yield

    assert "/" in shared_state["data_labeling_job_name"]

    data_labeling_job_client.cancel_data_labeling_job(
        name=shared_state["data_labeling_job_name"])

    # Wait for the cancelled data labeling job to reach its terminal FAILED
    # state, or time out after 400 seconds
    helpers.wait_for_job_state(
        get_job_method=data_labeling_job_client.get_data_labeling_job,
        name=shared_state["data_labeling_job_name"],
        expected_state="FAILED",
        timeout=400,
        freq=10,
    )

    # Delete the data labeling job
    response = data_labeling_job_client.delete_data_labeling_job(
        name=shared_state["data_labeling_job_name"])
    print("Delete LRO:", response.operation.name)
    delete_data_labeling_job_response = response.result(timeout=300)
    print("delete_data_labeling_job_response",
          delete_data_labeling_job_response)

    out, _ = capsys.readouterr()
    assert "delete_data_labeling_job_response" in out
Example 6
def teardown(shared_state):
    yield

    assert "/" in shared_state["batch_prediction_job_name"]

    batch_prediction_job_id = shared_state["batch_prediction_job_name"].split(
        "/")[-1]

    # Stop the batch prediction job
    cancel_batch_prediction_job_sample.cancel_batch_prediction_job_sample(
        project=PROJECT_ID, batch_prediction_job_id=batch_prediction_job_id)

    job_client = aiplatform.gapic.JobServiceClient(
        client_options={
            "api_endpoint": "us-central1-aiplatform.googleapis.com"
        })

    # Wait for the batch prediction job to reach the CANCELLED state
    helpers.wait_for_job_state(
        get_job_method=job_client.get_batch_prediction_job,
        name=shared_state["batch_prediction_job_name"],
    )

    # Delete the batch prediction job
    delete_batch_prediction_job_sample.delete_batch_prediction_job_sample(
        project=PROJECT_ID, batch_prediction_job_id=batch_prediction_job_id)
Example 7
def teardown(shared_state, job_client):
    yield

    job_client.cancel_batch_prediction_job(name=shared_state["batch_prediction_job_name"])

    # Wait until the job reaches the CANCELLED state.
    helpers.wait_for_job_state(
        get_job_method=job_client.get_batch_prediction_job,
        name=shared_state["batch_prediction_job_name"],
    )

    job_client.delete_batch_prediction_job(name=shared_state["batch_prediction_job_name"])
Example 8
def teardown(shared_state, job_client):
    yield

    # Cancel the created custom job
    job_client.cancel_custom_job(name=shared_state["custom_job_name"])

    # Wait for the custom job to reach the CANCELLED state
    helpers.wait_for_job_state(
        get_job_method=job_client.get_custom_job,
        name=shared_state["custom_job_name"],
    )

    # Delete the created custom job
    job_client.delete_custom_job(name=shared_state["custom_job_name"])
Example 9
def test_ucaip_generated_cancel_training_pipeline_sample(
        capsys, shared_state, pipeline_client):
    # Run cancel pipeline sample
    training_pipeline_id = shared_state["training_pipeline_name"].split(
        "/")[-1]

    cancel_training_pipeline_sample.cancel_training_pipeline_sample(
        project=PROJECT_ID, training_pipeline_id=training_pipeline_id)

    # Wait for the training pipeline to reach the CANCELLED state; raise on timeout
    helpers.wait_for_job_state(
        get_job_method=pipeline_client.get_training_pipeline,
        name=shared_state["training_pipeline_name"],
    )
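
Worth noting: cancel_training_pipeline only requests cancellation, and the pipeline reaches CANCELLED asynchronously, which is why the test polls before returning. If a stronger check is wanted, the final state can be asserted once the poll returns; a sketch, assuming the PipelineState enum exposed by the gapic types:

from google.cloud import aiplatform

def assert_pipeline_cancelled(pipeline_client, name):
    # The poll above already returned, so one read should show CANCELLED.
    pipeline = pipeline_client.get_training_pipeline(name=name)
    assert pipeline.state == aiplatform.gapic.PipelineState.PIPELINE_STATE_CANCELLED
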
Example 10
def teardown_training_pipeline(shared_state, pipeline_client):
    yield

    pipeline_client.cancel_training_pipeline(
        name=shared_state["training_pipeline_name"])

    # Wait for the training pipeline to reach the CANCELLED state
    helpers.wait_for_job_state(
        get_job_method=pipeline_client.get_training_pipeline,
        name=shared_state["training_pipeline_name"],
    )

    # Delete the training pipeline
    pipeline_client.delete_training_pipeline(
        name=shared_state["training_pipeline_name"])
Example 11
def teardown_hyperparameter_tuning_job(shared_state, job_client):
    yield

    # Cancel the created hyperparameter tuning job
    job_client.cancel_hyperparameter_tuning_job(
        name=shared_state["hyperparameter_tuning_job_name"])

    # Wait for the hyperparameter tuning job to reach the CANCELLED state
    helpers.wait_for_job_state(
        get_job_method=job_client.get_hyperparameter_tuning_job,
        name=shared_state["hyperparameter_tuning_job_name"],
    )

    # Delete the created hyperparameter tuning job
    job_client.delete_hyperparameter_tuning_job(
        name=shared_state["hyperparameter_tuning_job_name"])
Example 12
def teardown(shared_state, job_client):
    yield

    # Stop the batch prediction job
    job_client.cancel_batch_prediction_job(
        name=shared_state["batch_prediction_job_name"])

    # Wait for the batch prediction job to reach the CANCELLED state
    helpers.wait_for_job_state(
        get_job_method=job_client.get_batch_prediction_job,
        name=shared_state["batch_prediction_job_name"],
    )

    # Delete the batch prediction job
    job_client.delete_batch_prediction_job(
        name=shared_state["batch_prediction_job_name"])
Example 13
def test_ucaip_generated_cancel_training_pipeline_sample(
        capsys, training_pipeline_id):
    # Run cancel pipeline sample
    cancel_training_pipeline_sample.cancel_training_pipeline_sample(
        project=PROJECT_ID, training_pipeline_id=training_pipeline_id)

    pipeline_client = aiplatform.gapic.PipelineServiceClient(
        client_options={
            "api_endpoint": "us-central1-aiplatform.googleapis.com"
        })

    # Wait for the training pipeline to reach the CANCELLED state; raise on timeout
    helpers.wait_for_job_state(
        get_job_method=pipeline_client.get_training_pipeline,
        name=pipeline_client.training_pipeline_path(
            project=PROJECT_ID,
            location=LOCATION,
            training_pipeline=training_pipeline_id,
        ),
    )
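
training_pipeline_path merely assembles the canonical Vertex AI resource name. For reference, the equivalent string built from the same module constants the example already uses:

name = (
    f"projects/{PROJECT_ID}/locations/{LOCATION}"
    f"/trainingPipelines/{training_pipeline_id}"
)
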
Example 14
def teardown_training_pipeline(shared_state, pipeline_client):
    yield

    try:
        pipeline_client.cancel_training_pipeline(
            name=shared_state["training_pipeline_name"])

        # Wait for the training pipeline to reach the CANCELLED state
        timeout = shared_state["cancel_batch_prediction_job_timeout"]
        helpers.wait_for_job_state(
            get_job_method=pipeline_client.get_training_pipeline,
            name=shared_state["training_pipeline_name"],
            timeout=timeout,
        )

    except exceptions.FailedPrecondition:
        pass  # Pipeline is already in a terminal state; skip straight to deletion

    finally:
        # Delete the training pipeline
        pipeline_client.delete_training_pipeline(
            name=shared_state["training_pipeline_name"])
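
The exceptions module caught in Example 14 comes from google-api-core: the gapic clients raise FailedPrecondition when an operation is rejected because the resource is in the wrong state. The import the example relies on:

from google.api_core import exceptions

# exceptions.FailedPrecondition is raised when cancellation is no longer
# possible, e.g. the pipeline already reached a terminal state.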