def load_image_by_name(image_name, tag=None):
    """Load the docker image named *image_name*, optionally tagging it *tag*.

    The image task is located through the level-3 docker index, keyed by a
    hash of the image's build context under taskcluster/docker/.
    """
    context_dir = os.path.join(GECKO, 'taskcluster', 'docker', image_name)
    digest = docker.generate_context_hash(GECKO, context_dir, image_name)
    indexed_task = find_task_id(DOCKER_INDEX.format('level-3', image_name, digest))
    return load_image_by_task_id(indexed_task, tag)
def load_image_by_name(image_name, tag=None):
    """Load the docker image named *image_name*, optionally tagging it *tag*.

    The image task is found via the level-3 cached-index path for the
    docker-images cache, keyed by a hash of the image's build context.
    """
    context_dir = os.path.join(GECKO, 'taskcluster', 'docker', image_name)
    digest = docker.generate_context_hash(GECKO, context_dir, image_name)
    index_path = cached_index_path(
        trust_domain='gecko',
        level=3,
        cache_type='docker-images.v1',
        cache_name=image_name,
        digest=digest,
    )
    return load_image_by_task_id(find_task_id(index_path), tag)
def artifact_urls(self, tree, job, rev, download_symbols):
    """Return the artifact URLs produced for *job* at revision *rev* on *tree*.

    Raises KeyError for an unknown job, and ValueError when no matching
    task (or no matching artifacts) can be found in the index.
    """
    try:
        artifact_job = get_job_details(
            job, log=self._log, download_symbols=download_symbols)
    except KeyError:
        self.log(logging.INFO, 'artifact', {'job': job}, 'Unknown job {job}')
        raise KeyError("Unknown job")

    # Grab the second part of the repo name, which is generally how things
    # are indexed. Eg: 'integration/mozilla-inbound' is indexed as
    # 'mozilla-inbound'
    if '/' in tree:
        tree = tree.split('/')[1]

    namespace = 'gecko.v2.{tree}.revision.{rev}.{product}.{job}'.format(
        rev=rev,
        tree=tree,
        product=artifact_job.product,
        job=job,
    )
    self.log(logging.DEBUG, 'artifact', {'namespace': namespace},
             'Searching Taskcluster index with namespace: {namespace}')

    try:
        taskId = find_task_id(namespace)
    except Exception:
        # Not all revisions correspond to pushes that produce the job we
        # care about; and even those that do may not have completed yet.
        raise ValueError('Task for {namespace} does not exist (yet)!'.format(namespace=namespace))

    # We can easily extract the task ID from the URL. We can't easily
    # extract the build ID; we use the .ini files embedded in the
    # downloaded artifact for this. We could also use the uploaded
    # public/build/buildprops.json for this purpose.
    candidates = artifact_job.find_candidate_artifacts(list_artifacts(taskId))
    urls = [get_artifact_url(taskId, artifact_name)
            for artifact_name in candidates]

    if not urls:
        raise ValueError('Task for {namespace} existed, but no artifacts found!'.format(namespace=namespace))
    return urls
def load_parameters_file(filename, strict=True):
    """
    Load parameters from a path, url, decision task-id or project.

    Examples:
        task-id=fdtgsD5DQUmAQZEaGMvQ4Q
        project=mozilla-central
    """
    import urllib
    from taskgraph.util.taskcluster import get_artifact_url, find_task_id

    if not filename:
        return Parameters(strict=strict)

    try:
        # reading parameters from a local parameters.yml file
        f = open(filename)
    except IOError:
        # fetching parameters.yml using task task-id, project or supplied url
        task_id = None
        if filename.startswith("task-id="):
            task_id = filename.split("=")[1]
        elif filename.startswith("project="):
            # the project's most recent decision task publishes parameters.yml
            index = "gecko.v2.{}.latest.firefox.decision".format(filename.split("=")[1])
            task_id = find_task_id(index)

        if task_id:
            filename = get_artifact_url(task_id, 'public/parameters.yml')
        f = urllib.urlopen(filename)

    # Dispatch on the (possibly rewritten) filename's extension; the
    # artifact URL above ends in .yml, so it takes the YAML branch.
    # Fix: close the file handle / urlopen response in all cases — the
    # original leaked it.
    try:
        if filename.endswith('.yml'):
            return Parameters(strict=strict, **yaml.safe_load(f))
        elif filename.endswith('.json'):
            return Parameters(strict=strict, **json.load(f))
        else:
            raise TypeError("Parameters file `{}` is not JSON or YAML".format(filename))
    finally:
        f.close()
def find_decision_task(parameters):
    """Given the parameters for this action, find the taskId of the decision
    task"""
    index_path = 'gecko.v2.{}.pushlog-id.{}.decision'.format(
        parameters['project'],
        parameters['pushlog_id'],
    )
    return find_task_id(index_path)
def get_decision_task_id(project, push_id):
    """Look up the decision task's id for *push_id* on *project* via the index."""
    index_path = INDEX_TMPL.format(project, push_id)
    return find_task_id(index_path)
def get_nightly_graph():
    """Return the task id of the latest nightly-desktop decision task on
    mozilla-central."""
    index_path = "gecko.v2.mozilla-central.latest.taskgraph.decision-nightly-desktop"
    return find_task_id(index_path)
def find_decision_task(parameters, graph_config):
    """Given the parameters for this action, find the taskId of the decision
    task"""
    index_path = '{}.v2.{}.pushlog-id.{}.decision'.format(
        graph_config['trust-domain'],
        parameters['project'],
        parameters['pushlog_id'],
    )
    return find_task_id(index_path)