async def load_nightly_graph(dt=None, platform='linux-opt'):
    """Load the nightly task graph for a given date.

    Args:
        dt (datetime, optional): date of the nightly build; defaults to
            the current local time when omitted.
        platform (str): index route platform suffix, e.g. 'linux-opt'.

    Returns:
        dict: {'date': 'YYYY.MM.DD', 'graph': TaskGraph} for the found
        nightly, or None when the index entry has no taskId or the task
        definition carries no taskGroupId.
    """
    async with aiohttp.ClientSession() as session:
        index = Index(options=tc_options(), session=session)
        queue = Queue(options=tc_options(), session=session)
        if not dt:
            dt = datetime.now()
        datestr = dt.strftime("%Y.%m.%d")
        basestr = "gecko.v2.mozilla-central.nightly.{date}.latest.firefox.{platform}"
        found = await index.findTask(basestr.format(date=datestr, platform=platform))
        taskid = found.get('taskId')
        if not taskid:
            # Index entry without a taskId - nothing we can resolve, so
            # bail out instead of calling queue.task(None).
            return None
        taskdef = await queue.task(taskid)
        taskgroup = taskdef.get('taskGroupId')
        log.debug("Looking at {} for {}".format(taskgroup, datestr))
        if taskgroup:
            # TaskGraph uses an awaitable constructor in this codebase.
            return {'date': datestr, 'graph': await TaskGraph(taskgroup)}
        return None
async def __init__(self, json=None, task_id=None, queue=None):
    """Build from a task-definition payload, or fetch it via taskId.

    Exactly one of `json` / `task_id` must be supplied (both is fine);
    otherwise a ValueError is raised.
    """
    # The definition payload does not include its own taskId, so keep
    # whichever id the caller handed us.
    if task_id:
        self.task_id = task_id
    if json:
        # Accept either a {'task': {...}} envelope or the bare definition.
        self.def_json = json.get('task', json)
        return
    if not task_id:
        raise ValueError('No task definition or taskId provided')
    self.queue = queue or Queue(tc_options())
    await self._fetch_definition()
async def __init__(self, json=None, task_id=None, queue=None):
    """Build from a task-status payload, or fetch it via taskId.

    Raises ValueError when neither `json` nor `task_id` is supplied.
    """
    if task_id:
        self.task_id = task_id
    if json:
        # Unwrap a {'status': {...}} envelope; a bare status dict works too.
        status = json.get('status', json)
        self.status_json = status
        self.task_id = status['taskId']
        return
    if not task_id:
        raise ValueError('No task definition or taskId provided')
    self.queue = queue if queue else Queue(tc_options())
    await self._fetch_status()
async def __init__(self, json=None, task_id=None, queue=None):
    """Build from a combined payload, or fetch both halves via taskId.

    A `json` payload is expected to look like {'task': ..., 'status': ...};
    the taskId is read from the status half. Without `json`, a `task_id`
    is required and both the definition and status are fetched.
    """
    if json:
        # Combined payloads carry both halves; the id lives in the status.
        self.def_json = json.get('task')
        self.status_json = json.get('status')
        self.task_id = self.status_json['taskId']
        return
    if not task_id:
        raise ValueError('No task definition or taskId provided')
    self.task_id = task_id
    self.queue = queue or Queue(tc_options())
    if self.task_id:
        await self._fetch_definition()
        await self._fetch_status()
async def get_tc_run_artifacts(taskid, runid):
    """Return artifact metadata for one run of a task.

    Follows continuationToken paging until the listing is exhausted.
    Each artifact dict gains a '_name' key of the form
    '<taskid>/<runid>/<name>' so the matching S3 object is easy to find.
    """
    log.info('Fetching TC artifact info for %s/%s', taskid, runid)
    collected = []
    query = {}
    async with aiohttp.ClientSession() as session:
        queue = Queue(options=tc_options(), session=session)
        while True:
            page = await queue.listArtifacts(taskid, runid, query=query)
            for artifact in page['artifacts']:
                # Amend with task/run ids for later S3 lookup.
                artifact['_name'] = f'{taskid}/{runid}/{artifact["name"]}'
                collected.append(artifact)
            if 'continuationToken' not in page:
                break
            query['continuationToken'] = page['continuationToken']
    return collected
async def fetch_tasks(self, limit=None):
    """Populate self.tasklist with the tasks of this task group.

    Transparently follows continuationToken paging. When `limit` is
    given it caps the total number of tasks returned. Results are read
    from / written to the file cache when one is configured.
    """
    if self.cache_file and self._read_file_cache():
        return

    query = {}
    if limit:
        # A single API page tops out at 1000 entries.
        query['limit'] = min(limit, 1000)

    def need_more(count):
        """True while we still want more tasks."""
        return not limit or count < limit

    async with aiohttp.ClientSession() as session:
        queue = Queue(options=tc_options(), session=session)
        page = await queue.listTaskGroup(self.groupid, query=query)
        tasks = page.get('tasks', [])
        while need_more(len(tasks)) and page.get('continuationToken'):
            query['continuationToken'] = page.get('continuationToken')
            page = await queue.listTaskGroup(self.groupid, query=query)
            tasks.extend(page.get('tasks', []))
        if limit:
            tasks = tasks[:limit]
        self.tasklist = [Task(json=data) for data in tasks]
        if self.cache_file:
            self._write_file_cache()