async def invoke(self, context):
    """Claim and process Taskcluster work.

    Args:
        context (scriptworker.context.Context): context of worker

    Returns:
        status code of build; None when no task was claimed or the worker
        was cancelled.
    """
    try:
        # NOTE: claim_work(...) might not be safely interruptible! See
        # https://bugzilla.mozilla.org/show_bug.cgi?id=1524069
        claimed = await self._run_cancellable(claim_work(context))
        task_defns = claimed.get("tasks", []) if claimed else []
        if not task_defns:
            # Nothing to do right now; back off before polling again.
            await self._run_cancellable(
                asyncio.sleep(context.config["poll_interval"]))
            return None

        # Assume only a single task, but should more than one fall through,
        # run them sequentially. A side effect is our return status will
        # be the status of the final task run.
        status = None
        for task_defn in task_defns:
            prepare_to_run_task(context, task_defn)
            reclaim_fut = context.event_loop.create_task(
                reclaim_task(context, context.task))
            try:
                status = await do_run_task(
                    context, self._run_cancellable,
                    self._to_cancellable_process)
                artifacts_paths = filepaths_in_dir(
                    context.config["artifact_dir"])
            except WorkerShutdownDuringTask:
                # On shutdown, upload only the log artifacts that actually
                # exist on disk.
                candidate_logs = [
                    os.path.join("public", "logs", log_name)
                    for log_name in ["chain_of_trust.log", "live_backing.log"]
                ]
                artifacts_paths = [
                    log_path for log_path in candidate_logs
                    if os.path.isfile(os.path.join(
                        context.config["artifact_dir"], log_path))
                ]
                status = STATUSES["worker-shutdown"]
            status = worst_level(status, await do_upload(context,
                                                         artifacts_paths))
            await complete_task(context, status)
            # The reclaim loop is only needed while the task runs.
            reclaim_fut.cancel()
            cleanup(context)
        return status
    except asyncio.CancelledError:
        return None
async def run_tasks(context, creds_key="credentials"):
    """Run any tasks returned by claimWork.

    Returns the integer status of the task that was run, or None if no
    task was run.

    args:
        context (scriptworker.context.Context): the scriptworker context.
        creds_key (str, optional): when reading the creds file, this dict key
            corresponds to the credentials value we want to use. Defaults to
            "credentials".

    Returns:
        int: status
        None: if no task run.

    """
    loop = asyncio.get_event_loop()
    tasks = await claim_work(context)
    status = None
    if not tasks or not tasks.get('tasks', []):
        # No work available; back off before the next poll.
        await asyncio.sleep(context.config['poll_interval'])
        return status

    # Assume only a single task, but should more than one fall through,
    # run them sequentially. A side effect is our return status will
    # be the status of the final task run.
    for task_defn in tasks.get('tasks', []):
        status = 0
        prepare_to_run_task(context, task_defn)
        # Keep a handle on the reclaim future so it can be stopped once the
        # task completes.
        reclaim_fut = loop.create_task(reclaim_task(context, context.task))
        try:
            if context.config['verify_chain_of_trust']:
                chain = ChainOfTrust(context, context.config['cot_job_type'])
                await verify_chain_of_trust(chain)
            status = await run_task(context)
            generate_cot(context)
        except ScriptWorkerException as e:
            status = worst_level(status, e.exit_code)
            log.error("Hit ScriptWorkerException: {}".format(e))
        try:
            await upload_artifacts(context)
        except ScriptWorkerException as e:
            status = worst_level(status, e.exit_code)
            log.error("Hit ScriptWorkerException: {}".format(e))
        except aiohttp.ClientError as e:
            status = worst_level(status, STATUSES['intermittent-task'])
            log.error("Hit aiohttp error: {}".format(e))
        await complete_task(context, status)
        # BUGFIX: cancel the background reclaim loop once the task is
        # complete; previously this future was never cancelled and kept
        # reclaiming a finished task. Matches the newer run_tasks variant.
        reclaim_fut.cancel()
        cleanup(context)
    return status
def test_prepare_to_run_task(context):
    """prepare_to_run_task() returns the claim info and persists it to disk."""
    original_claim = context.claim_task
    context.claim_task = None
    expected = {'taskId': 'taskId', 'runId': 'runId'}
    info_path = os.path.join(context.config['work_dir'],
                             'current_task_info.json')
    assert swtask.prepare_to_run_task(context, original_claim) == expected
    # The same info must have been written out as JSON.
    assert os.path.exists(info_path)
    with open(info_path) as fh:
        assert json.load(fh) == expected
def test_prepare_to_run_task(context):
    """prepare_to_run_task() returns the claim info and writes it to the work dir."""
    saved_claim = context.claim_task
    context.claim_task = None
    expected = {"taskId": "taskId", "runId": "runId"}
    result = swtask.prepare_to_run_task(context, saved_claim)
    assert result == expected
    # Verify the claim info was persisted as JSON alongside the work dir.
    path = os.path.join(context.config["work_dir"], "current_task_info.json")
    assert os.path.exists(path)
    with open(path) as fh:
        contents = json.load(fh)
    assert contents == expected
async def invoke(self, context):
    """Claim and process Taskcluster work.

    Args:
        context (scriptworker.context.Context): context of worker

    Returns:
        status code of build, or None if nothing was claimed or we were
        cancelled.
    """
    try:
        # NOTE: claim_work(...) might not be safely interruptible! See
        # https://bugzilla.mozilla.org/show_bug.cgi?id=1524069
        tasks = await self._run_cancellable(claim_work(context))
        if not tasks or not tasks.get('tasks', []):
            # Idle: sleep (cancellably) until the next poll window.
            await self._run_cancellable(
                asyncio.sleep(context.config['poll_interval']))
            return None

        # Assume only a single task, but should more than one fall through,
        # run them sequentially. A side effect is our return status will
        # be the status of the final task run.
        status = None
        for task_defn in tasks.get('tasks', []):
            prepare_to_run_task(context, task_defn)
            reclaim_fut = context.event_loop.create_task(
                reclaim_task(context, context.task))
            try:
                status = await do_run_task(
                    context,
                    self._run_cancellable,
                    self._to_cancellable_process,
                )
                artifacts_paths = filepaths_in_dir(
                    context.config['artifact_dir'])
            except WorkerShutdownDuringTask:
                # Upload only the shutdown logs that are present on disk.
                artifacts_paths = []
                for log_file in ('chain_of_trust.log', 'live_backing.log'):
                    rel_path = os.path.join('public', 'logs', log_file)
                    full_path = os.path.join(
                        context.config['artifact_dir'], rel_path)
                    if os.path.isfile(full_path):
                        artifacts_paths.append(rel_path)
                status = STATUSES['worker-shutdown']
            status = worst_level(
                status, await do_upload(context, artifacts_paths))
            await complete_task(context, status)
            # Stop reclaiming once the task has been completed.
            reclaim_fut.cancel()
            cleanup(context)
        return status
    except asyncio.CancelledError:
        return None
async def run_tasks(context, creds_key="credentials"):
    """Run any tasks returned by claimWork.

    Returns the integer status of the task that was run, or None if no task
    was run.

    args:
        context (scriptworker.context.Context): the scriptworker context.
        creds_key (str, optional): when reading the creds file, this dict key
            corresponds to the credentials value we want to use. Defaults to
            "credentials".

    Raises:
        Exception: on unexpected exception.

    Returns:
        int: exit status
        None: if no task run.

    """
    claimed = await claim_work(context)
    status = None
    task_defns = claimed.get('tasks', []) if claimed else []
    if not task_defns:
        # Nothing claimed; back off before polling again.
        await asyncio.sleep(context.config['poll_interval'])
        return status

    # Assume only a single task, but should more than one fall through,
    # run them sequentially. A side effect is our return status will
    # be the status of the final task run.
    for task_defn in task_defns:
        status = 0
        prepare_to_run_task(context, task_defn)
        reclaim_fut = context.event_loop.create_task(
            reclaim_task(context, context.task))
        status = await do_run_task(context)
        status = worst_level(status, await do_upload(context))
        await complete_task(context, status)
        # The reclaim loop is no longer needed once the task is complete.
        reclaim_fut.cancel()
        cleanup(context)
    return status