def test_abort(self):
    """Aborting a started task kills the job process but leaves the worker alive.

    Dispatches a 3-second sleep job, waits until it is 'started', aborts it,
    then verifies: no result, RQ status 'failed', the job's OS process is gone,
    and the worker's OS process survives so it can take more work.
    """
    d = Dispatcher()
    job_ref_1 = d.dispatch_task(bg_jobs.test_sleep, args=(3, ))
    time.sleep(1.2)
    assert d.query_task(job_ref_1).status == 'started'

    workers = rq.Worker.all(connection=d._redis_conn)
    wk = [w for w in workers if w.state == 'busy']
    assert len(wk) == 1, "There must be precisely one busy worker"

    # The job records its own OS pid in its meta so we can check it was killed.
    job_pid = wk[0].get_current_job().meta['pid']
    d.abort_task(job_ref_1)
    time.sleep(0.1)
    j = d.query_task(job_ref_1)

    # There should be no result, cause it was cancelled
    assert j.result is None
    # RQ should identify the task as failed
    assert j.status == "failed"

    # Assert the JOB pid is gone (signal 0 probes existence without killing).
    with pytest.raises(OSError):
        os.kill(int(job_pid), 0)

    # Now assert the worker pid is still alive (so it can be assigned something else).
    # Fix: the original used `assert True, "..."` on the success path, which is a
    # no-op; the only meaningful check is that os.kill does NOT raise.
    worker_pid = wk[0].pid
    try:
        os.kill(int(worker_pid), 0)
    except OSError:
        pytest.fail("Worker process is killed")
def test_simple_dependent_job(self):
    """A job depending on a running job stays deferred until the parent finishes."""
    dispatcher = Dispatcher()
    parent_ref = dispatcher.dispatch_task(bg_jobs.test_sleep, args=(2, ))
    child_ref = dispatcher.dispatch_task(bg_jobs.test_exit_success,
                                         dependent_job=parent_ref)

    # While the parent is still sleeping, the child must be deferred.
    time.sleep(0.5)
    assert dispatcher.query_task(child_ref).status == 'deferred'

    # After the parent completes, both jobs should have finished.
    time.sleep(3)
    assert dispatcher.query_task(parent_ref).status == 'finished'
    assert dispatcher.query_task(child_ref).status == 'finished'

    parent_task = dispatcher.query_task(parent_ref)
    assert parent_task.meta.get('sample') == 'test_sleep metadata'
def test_fail_dependent_job(self):
    """If the parent job fails, its dependent job remains deferred."""
    dispatcher = Dispatcher()
    failing_ref = dispatcher.dispatch_task(bg_jobs.test_exit_fail)
    dependent_ref = dispatcher.dispatch_task(bg_jobs.test_exit_success,
                                             dependent_job=failing_ref)
    time.sleep(3)

    assert dispatcher.query_task(failing_ref).status == 'failed'
    assert dispatcher.query_task(dependent_ref).status == 'deferred'
def test_query_failed_tasks(self):
    """A failed job appears in failed_jobs (not finished_jobs) with its message."""
    dispatcher = Dispatcher()
    ref = dispatcher.dispatch_task(bg_jobs.test_exit_fail)
    time.sleep(1)

    failed_keys = [j.job_key for j in dispatcher.failed_jobs]
    finished_keys = [j.job_key for j in dispatcher.finished_jobs]
    assert ref in failed_keys
    assert ref not in finished_keys

    task = dispatcher.query_task(ref)
    assert task.failure_message == 'Exception: Intentional Exception from job `test_exit_fail`'
def test_failing_task(self):
    """Querying a job that raised reports status 'failed' and the exception text."""
    dispatcher = Dispatcher()
    ref = dispatcher.dispatch_task(bg_jobs.test_exit_fail)
    time.sleep(1)

    queried = dispatcher.query_task(ref)
    assert queried
    assert queried.status == 'failed'
    assert queried.failure_message == 'Exception: Intentional Exception from job `test_exit_fail`'
def _loader(self):
    """Populate this object's fields from the dispatcher's record of the task.

    Uses ``self.id`` as the job key and copies status, metadata (JSON-encoded),
    failure message, timestamps, and result onto this instance.
    """
    self.job_key = self.id
    task = Dispatcher().query_task(JobKey(self.job_key))

    self.status = task.status
    self.job_metadata = json.dumps(task.meta)
    self.failure_message = task.failure_message
    self.started_at = task.started_at
    self.finished_at = task.finished_at
    self.result = task.result
def test_simple_task(self):
    """A successful job finishes with result 0, no failure message, and a finish time."""
    dispatcher = Dispatcher()
    ref = dispatcher.dispatch_task(bg_jobs.test_exit_success)
    time.sleep(1)

    queried = dispatcher.query_task(ref)
    assert queried
    assert queried.status == 'finished'
    assert queried.result == 0
    assert queried.failure_message is None
    assert queried.finished_at is not None
def helper_resolve_image_status(self, labbook):
    """Helper to resolve the image status of a labbook

    Combines the Docker daemon's view (does the image exist?) with the
    dispatcher's record of background ``build_image`` jobs. Job-state checks
    are applied in order failed -> started -> queued, so a later match
    overrides an earlier status.

    Args:
        labbook: labbook whose background jobs are inspected via ``labbook.key``.
            NOTE(review): the Docker image name is derived from ``self.name`` /
            ``self.owner`` rather than from ``labbook`` — confirm these always
            refer to the same labbook.

    Returns:
        str: the ``value`` of the resolved ``ImageStatus`` member.
    """
    labbook_image_key = infer_docker_image_name(
        labbook_name=self.name,
        owner=self.owner,
        username=get_logged_in_username())

    dispatcher = Dispatcher()
    # All background jobs ever recorded for this labbook, fully resolved.
    lb_jobs = [
        dispatcher.query_task(j.job_key)
        for j in dispatcher.get_jobs_for_labbook(labbook.key)
    ]

    for j in lb_jobs:
        logger.debug("Current job for labbook: status {}, meta {}".format(
            j.status, j.meta))

    # First, check if image exists or not -- The first step of building an image untags any existing ones.
    # Therefore, we know that if one exists, there most likely is not one being built.
    try:
        client = get_docker_client()
        client.images.get(labbook_image_key)
        image_status = ImageStatus.EXISTS
    except (ImageNotFound, requests.exceptions.ConnectionError):
        # Missing image OR unreachable Docker daemon both read as "no image".
        image_status = ImageStatus.DOES_NOT_EXIST

    if any([
            j.status == 'failed' and j.meta.get('method') == 'build_image'
            for j in lb_jobs
    ]):
        logger.debug("Image status for {} is BUILD_FAILED".format(
            labbook.key))
        if image_status == ImageStatus.EXISTS:
            # The indication that there's a failed job is probably lingering from a while back, so don't
            # change the status to FAILED. Only do that if there is no Docker image.
            logger.debug(
                f'Got failed build_image for labbook {labbook.key}, but image exists.'
            )
        else:
            image_status = ImageStatus.BUILD_FAILED

    if any([
            j.status in ['started'] and j.meta.get('method') == 'build_image'
            for j in lb_jobs
    ]):
        logger.debug(
            f"Image status for {labbook.key} is BUILD_IN_PROGRESS")
        # build_image being in progress takes precedence over if image already exists (unlikely event).
        if image_status == ImageStatus.EXISTS:
            logger.warning(
                f'Got build_image for labbook {labbook.key}, but image exists.'
            )
        image_status = ImageStatus.BUILD_IN_PROGRESS

    if any([
            j.status in ['queued'] and j.meta.get('method') == 'build_image'
            for j in lb_jobs
    ]):
        logger.warning(
            f"build_image for {labbook.key} stuck in queued state")
        image_status = ImageStatus.BUILD_QUEUED

    return image_status.value
def test_export_and_import_lb(self, fixture_working_dir_env_repo_scoped):
    """Round-trip test: export a labbook via GraphQL, delete it on disk, re-import it.

    Spins up the real API server in a daemon subprocess, runs the export
    mutation through the test client, verifies the background export job
    produced an archive, then uploads that archive to the import endpoint.
    """
    api_server_proc = multiprocessing.Process(target=service.main, kwargs={'debug': False})
    api_server_proc.daemon = True
    api_server_proc.start()
    assert api_server_proc.is_alive()
    time.sleep(5)
    assert api_server_proc.is_alive()

    # Make and validate request
    assert api_server_proc.is_alive()
    lb_name = "mutation-export-import-unittest"
    im = InventoryManager(fixture_working_dir_env_repo_scoped[0])
    lb = im.create_labbook("default", "default", lb_name,
                           description="Import/Export Mutation Testing.")
    cm = ComponentManager(lb)
    cm.add_base(ENV_UNIT_TEST_REPO, 'ut-busybox', 0)
    assert api_server_proc.is_alive()

    export_query = """
    mutation export {
        exportLabbook(input: {
            owner: "default",
            labbookName: "%s"
        }) {
            jobKey
        }
    }
    """ % lb.name
    r = fixture_working_dir_env_repo_scoped[2].execute(export_query)
    pprint.pprint(r)

    # Sleep while the background job completes, and then delete new lb.
    time.sleep(5)
    d = Dispatcher()
    job_status = d.query_task(JobKey(r['data']['exportLabbook']['jobKey']))

    # Delete existing labbook in file system.
    shutil.rmtree(lb.root_dir)

    assert api_server_proc.is_alive()
    assert job_status.status == 'finished'
    assert not os.path.exists(lb.root_dir)
    assert os.path.exists(job_status.result)
    pprint.pprint(job_status.result)

    # Move the archive to /tmp, clearing any stale copy from a prior run.
    if os.path.exists(os.path.join('/tmp', os.path.basename(job_status.result))):
        os.remove(os.path.join('/tmp', os.path.basename(job_status.result)))
    new_path = shutil.move(job_status.result, '/tmp')

    # Now, import the labbook that was just exported.
    export_query = """
    mutation import {
        importLabbook(input: {
        }) {
            jobKey
        }
    }
    """
    qry = {"query": export_query}
    assert api_server_proc.is_alive()
    # Fix: the upload file handle was previously opened with a bare open()
    # and never closed; use a context manager so it is released.
    with open(new_path, 'rb') as archive_file:
        files = {'uploadFile': archive_file}
        r = requests.post('http://localhost:10001/labbook/', data=qry, files=files)
    time.sleep(0.5)
    pprint.pprint(r)
    # Fix: `'errors' not in r` iterated the Response's byte chunks, which can
    # never equal a str, so the assertion always passed. Inspect the decoded
    # GraphQL JSON payload instead.
    assert 'errors' not in r.json()
    time.sleep(2)