def test_task_not_lost():
    driver_task_id = utils.submit_job(app_url=utils.SPARK_EXAMPLES,
                                      app_args="1500",  # Long enough to examine the Executor's task info
                                      args=["--conf spark.cores.max=1",
                                            "--class org.apache.spark.examples.SparkPi"])

    # Wait until executor is running
    sdk_tasks.check_running(SPARK_PI_FW_NAME, 1, timeout_seconds=600)

    # Check Executor task ID - should end with 0, the first task.
    # If it's > 0, that means the first task was lost.
    assert sdk_tasks.get_task_ids(SPARK_PI_FW_NAME, '')[0].endswith('-0')

    # Check job output
    utils.check_job_output(driver_task_id, "Pi is roughly 3")

def test_task_not_lost():
    driver_task_id = utils.submit_job(
        app_url=utils.SPARK_EXAMPLES,
        app_args="1500",  # Long enough to examine the Executor's task info
        args=["--conf", "spark.cores.max=1",
              "--class", "org.apache.spark.examples.SparkPi"])

    # Wait until executor is running
    utils.wait_for_executors_running(SPARK_PI_FW_NAME, 1)

    # Check Executor task ID - should be 0, the first task.
    # If it's > 0, that means the first task was lost.
    executor_task = shakedown.get_service_tasks(SPARK_PI_FW_NAME)[0]
    assert executor_task['id'] == "0"

    # Check job output
    utils.check_job_output(driver_task_id, "Pi is roughly 3")
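
# The tests here wait on a wait_for_executors_running() helper whose body is
# not shown in this section. A minimal sketch of what such a helper could
# look like, assuming it simply polls shakedown.get_service_tasks() (already
# used by these tests) until the expected executor count is reported; the
# signature and timeout default are assumptions, not the actual utils code.
import time

import shakedown


def wait_for_executors_running(framework_name, num_executors, timeout_seconds=600):
    """Poll until `framework_name` reports at least `num_executors` tasks."""
    deadline = time.time() + timeout_seconds
    while time.time() < deadline:
        tasks = shakedown.get_service_tasks(framework_name)
        if len(tasks) >= num_executors:
            return
        time.sleep(5)  # Back off briefly between polls
    raise AssertionError(
        "{} executors for {} did not start within {}s".format(
            num_executors, framework_name, timeout_seconds))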

def test_executor_gpus_allocated():
    """
    Checks that the specified executor.gpus is allocated for each executor.
    """
    num_executors = 2
    executor_gpus = 1
    driver_task_id = _submit_gpu_app(num_executors=num_executors,
                                     executor_gpus=executor_gpus,
                                     gpus_max=num_executors * executor_gpus)

    # Wait until executors are running
    spark_utils.wait_for_executors_running(GPU_PI_APP_NAME, num_executors)

    # Check Executor gpus - should be 1.
    for i in range(num_executors):
        executor_task = shakedown.get_service_tasks(GPU_PI_APP_NAME)[i]
        assert executor_task['resources']['gpus'] == 1.0

    # Check job output
    spark_utils.check_job_output(driver_task_id, "Pi calculated with GPUs: 3.14")
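
# _submit_gpu_app() is not defined in this section. A plausible sketch,
# assuming it wraps utils.submit_job() the same way the non-GPU test does;
# GPU_PI_APP_URL, the GpuPiApp class name, and the GPU-related conf keys
# below are illustrative assumptions, not confirmed by the source.
def _submit_gpu_app(num_executors, executor_gpus, gpus_max):
    return utils.submit_job(
        app_url=GPU_PI_APP_URL,  # Hypothetical artifact URL for the GPU Pi app
        app_args=str(num_executors),
        args=["--conf", "spark.cores.max={}".format(num_executors),
              "--conf", "spark.mesos.gpus.max={}".format(gpus_max),
              "--conf", "spark.mesos.executor.gpus={}".format(executor_gpus),
              "--class", "GpuPiApp"])  # Class name is an assumption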