def test__restart_killed_job():
    """
    Test restarting a killed stateless job: all pods should come back
    RUNNING with new pod ids.
    """
    job = StatelessJob(job_file="test_stateless_job_spec_k8s.yaml")
    job.create()
    job.wait_for_state(goal_state="RUNNING")

    old_pod_infos = job.query_pods()

    job.stop()
    job.wait_for_state(goal_state="KILLED")

    job.restart(in_place=False)
    job.wait_for_all_pods_running()

    new_pod_infos = job.query_pods()
    assert_pod_id_changed(old_pod_infos, new_pod_infos)
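
# A minimal sketch of the pod-id assertion used above, assuming
# `assert_pod_id_changed` normally comes from the shared test helpers and
# that each pod info exposes its id via `status.pod_id.value` (both are
# assumptions here); the helper name below is hypothetical and shown only
# to illustrate what the test verifies.
def _assert_pod_id_changed_sketch(old_pod_infos, new_pod_infos):
    # Collect pod ids before and after the restart.
    old_pod_ids = {info.status.pod_id.value for info in old_pod_infos}
    new_pod_ids = {info.status.pod_id.value for info in new_pod_infos}
    # A restart with in_place=False should replace every pod,
    # so the two id sets should not overlap.
    assert not old_pod_ids & new_pod_ids, "expected every pod id to change"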
def test__get_job_update_details__filter_non_update_workflow(client):
    """
    test getJobUpdateDetails endpoint for filtering non-update workflows
    """
    req1 = get_job_update_request("test_dc_labrat_large_job.yaml")
    req1.settings.updateGroupSize = 10
    req2 = get_job_update_request("test_dc_labrat_large_job_diff_labels.yaml")
    req2.settings.updateGroupSize = 10

    # start a regular update
    job_key = start_job_update(
        client, req1, "start job update test/dc/labrat_large_job")

    # trigger an unexpected restart through peloton api
    jobs = list_jobs()
    assert len(jobs) == 1
    job = StatelessJob(job_id=jobs[0].job_id.value)
    job.restart(batch_size=10)
    job.wait_for_workflow_state(goal_state="SUCCEEDED")  # wait for restart

    # start a new update
    start_job_update(client, req2, "start job update test/dc/labrat_large_job")

    # verify getJobUpdateDetails response
    res = client.get_job_update_details(
        None, api.JobUpdateQuery(role=job_key.role))
    assert len(res.detailsList) == 2
    for i, detail in enumerate(res.detailsList):
        if i == 0:
            assert len(detail.update.instructions.initialState) > 0
            for initial in detail.update.instructions.initialState:
                assert initial.task.metadata, 'Expect metadata to be present'
        else:
            assert len(detail.update.instructions.initialState) == 0