def test_retrigger_action(parameters, input, task_group_id, task_id, task):
    """Re-create a test task with a custom mach command derived from *input*.

    Clones the original task definition, refreshes its timestamps, replaces
    the mozharness test run with a CUSTOM_MACH_COMMAND built from the action
    input (flavor, e10s, log level, repeat options, prefs, path), and submits
    the new task to the queue.
    """
    # Deep copy: nested payload/env/extra dicts and the command list are
    # mutated below; a shallow copy.copy() would write those changes back
    # into the caller's `task`.
    new_task_definition = copy.deepcopy(task)

    # set new created, deadline, and expiry fields
    new_task_definition['created'] = current_json_time()
    new_task_definition['deadline'] = json_time_from_now('1d')
    new_task_definition['expires'] = json_time_from_now('30d')

    # reset artifact expiry
    for artifact in new_task_definition['payload'].get('artifacts', {}).values():
        artifact['expires'] = new_task_definition['expires']

    # don't want to run mozharness tests, want a custom mach command instead
    new_task_definition['payload']['command'] += ['--no-run-tests']
    custom_mach_command = [task['tags']['test-type']]

    # mochitests may specify a flavor
    if new_task_definition['payload']['env'].get('MOCHITEST_FLAVOR'):
        custom_mach_command += [
            '--keep-open=false',
            '-f',
            new_task_definition['payload']['env']['MOCHITEST_FLAVOR']
        ]

    # full command line for a custom run (ENABLE_E10S defaults to 'true')
    enable_e10s = json.loads(new_task_definition['payload']['env'].get(
        'ENABLE_E10S', 'true'))
    if not enable_e10s:
        custom_mach_command += ['--disable-e10s']

    custom_mach_command += ['--log-tbpl=-',
                            '--log-tbpl-level={}'.format(input['logLevel'])]
    if input.get('runUntilFail'):
        custom_mach_command += ['--run-until-failure']
    if input.get('repeat'):
        custom_mach_command += ['--repeat', str(input['repeat'])]

    # add any custom gecko preferences
    # (.items() instead of the Python-2-only .iteritems(); works on 2 and 3)
    for (key, val) in input.get('preferences', {}).items():
        custom_mach_command += ['--setpref', '{}={}'.format(key, val)]

    custom_mach_command += [input['path']]
    new_task_definition['payload']['env']['CUSTOM_MACH_COMMAND'] = ' '.join(
        custom_mach_command)

    # update environment
    new_task_definition['payload']['env'].update(input.get('environment', {}))

    # tweak the treeherder symbol
    new_task_definition['extra']['treeherder']['symbol'] += '-custom'

    # use the module logger consistently (was a stray root `logging.info`)
    logger.info("New task definition: %s", new_task_definition)

    # actually create the new task
    new_task_id = slugid()
    logger.info("Creating new mochitest task with id %s", new_task_id)
    session = requests.Session()
    create_task(session, new_task_id, 'test-retrigger', new_task_definition)
def get_action_yml(parameters):
    """Render taskcluster/taskgraph/action.yml with the given parameters.

    ``decision_task_id`` and ``task_labels`` are deliberately left as
    ``{{...}}`` placeholders, to be substituted when the action is invoked.
    """
    context = parameters.copy()
    context["decision_task_id"] = "{{decision_task_id}}"
    context["task_labels"] = "{{task_labels}}"
    context["from_now"] = json_time_from_now
    context["now"] = current_json_time()
    templates = Templates(os.path.join(GECKO, "taskcluster/taskgraph"))
    return templates.load('action.yml', context)
def get_action_yml(parameters):
    """Render taskcluster/taskgraph/action.yml with the given parameters.

    ``action`` and ``action_args`` stay as ``{{...}}`` placeholders for
    later substitution; ``project`` is pinned from *parameters*.
    """
    context = parameters.copy()
    context.update({
        "action": "{{action}}",
        "action_args": "{{action_args}}",
        "project": parameters["project"],
        "from_now": json_time_from_now,
        "now": current_json_time(),
    })
    loader = Templates(os.path.join(GECKO, "taskcluster/taskgraph"))
    return loader.load("action.yml", context)
def _create_task(session, task_id, label, task_def):
    """Create *task_def* under *task_id* via the queue service.

    Uses 'http://taskcluster/queue', which is proxied to the queue service
    with credentials appropriate to this job.

    Raises an HTTP error (via ``raise_for_status``) if the queue rejects
    the task.
    """
    # Resolve relative timestamps against a single "now"
    now = current_json_time(datetime_format=True)
    task_def = resolve_timestamps(now, task_def)

    logger.debug("Creating task with taskId {} for {}".format(task_id, label))
    res = session.put('http://taskcluster/queue/v1/task/{}'.format(task_id),
                      data=json.dumps(task_def))
    if res.status_code != 200:
        try:
            logger.error(res.json()['message'])
        except Exception:
            # Body wasn't JSON or lacked 'message'; log it raw instead.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
            logger.error(res.text)
        res.raise_for_status()
def create_task(session, task_id, label, task_def):
    """Create *task_def* under *task_id* via the queue service.

    Uses 'http://taskcluster/queue', which is proxied to the queue service
    with credentials appropriate to this job.

    Raises an HTTP error (via ``raise_for_status``) if the queue rejects
    the task.
    """
    # Resolve relative timestamps against a single "now"
    now = current_json_time(datetime_format=True)
    task_def = resolve_timestamps(now, task_def)

    logger.debug("Creating task with taskId {} for {}".format(task_id, label))
    res = session.put('http://taskcluster/queue/v1/task/{}'.format(task_id),
                      data=json.dumps(task_def))
    if res.status_code != 200:
        try:
            logger.error(res.json()['message'])
        except Exception:
            # Body wasn't JSON or lacked 'message'; log it raw instead.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
            logger.error(res.text)
        res.raise_for_status()
def get_action_yml(parameters):
    """Render taskcluster/taskgraph/action.yml with the given parameters.

    Derives the repo scope granted to the action task from
    ``head_repository``; only hg.mozilla.org repositories are accepted.

    Raises Exception if ``head_repository`` is not an hg.mozilla.org URL.
    """
    templates = Templates(os.path.join(GECKO, "taskcluster/taskgraph"))
    action_parameters = parameters.copy()
    # Dots must be escaped: this match feeds a scope grant, so the pattern
    # must not accept look-alike hosts such as 'hgxmozillaxorg'.
    match = re.match(r'https://(hg\.mozilla\.org)/(.*?)/?$',
                     action_parameters['head_repository'])
    if not match:
        raise Exception('Unrecognized head_repository')
    repo_scope = 'assume:repo:{}/{}:*'.format(match.group(1), match.group(2))
    action_parameters.update({
        "action": "{{action}}",
        "action_args": "{{action_args}}",
        "repo_scope": repo_scope,
        "from_now": json_time_from_now,
        "now": current_json_time()
    })
    return templates.load('action.yml', action_parameters)
def get_action_yml(parameters):
    """Render taskcluster/taskgraph/action.yml with the given parameters.

    Derives the repo scope granted to the action task from
    ``head_repository``; only hg.mozilla.org repositories are accepted.

    Raises Exception if ``head_repository`` is not an hg.mozilla.org URL.
    """
    templates = Templates(os.path.join(GECKO, "taskcluster/taskgraph"))
    action_parameters = parameters.copy()
    # Escaped dots: an unescaped '.' would let unrelated hosts match, and
    # this hostname ends up inside an assume:repo scope.
    match = re.match(r'https://(hg\.mozilla\.org)/(.*?)/?$',
                     action_parameters['head_repository'])
    if not match:
        raise Exception('Unrecognized head_repository')
    repo_scope = 'assume:repo:{}/{}:*'.format(
        match.group(1), match.group(2))
    action_parameters.update({
        "action": "{{action}}",
        "action_args": "{{action_args}}",
        "repo_scope": repo_scope,
        "from_now": json_time_from_now,
        "now": current_json_time()
    })
    return templates.load('action.yml', action_parameters)
def make_decision_task(params, root, symbol, arguments=()):
    """Generate a basic decision task, based on the root .taskcluster.yml.

    ``arguments`` defaults to an immutable tuple (was a mutable ``[]``
    default, which is shared across calls); it is only iterated, so any
    sequence callers passed before still works.

    Returns a ``(task_id, task)`` pair.
    """
    with open(os.path.join(root, '.taskcluster.yml'), 'rb') as f:
        taskcluster_yml = yaml.safe_load(f)

    push_info = find_hg_revision_push_info(params['repository_url'],
                                           params['head_rev'])

    # provide a similar JSON-e context to what mozilla-taskcluster provides:
    # https://docs.taskcluster.net/reference/integrations/mozilla-taskcluster/docs/taskcluster-yml
    # but with a different tasks_for and an extra `cron` section
    context = {
        'tasks_for': 'cron',
        'repository': {
            'url': params['repository_url'],
            'project': params['project'],
            'level': params['level'],
        },
        'push': {
            'revision': params['head_rev'],
            # remainder are fake values, but the decision task expects them anyway
            'pushlog_id': push_info['pushid'],
            'pushdate': push_info['pushdate'],
            'owner': 'cron',
        },
        'cron': {
            'task_id': os.environ.get('TASK_ID', '<cron task id>'),
            'job_name': params['job_name'],
            'job_symbol': symbol,
            # args are shell-quoted since they are given to `bash -c`
            'quoted_args': ' '.join(pipes.quote(a) for a in arguments),
        },
        'now': current_json_time(),
        'ownTaskId': slugid.nice(),
    }

    rendered = jsone.render(taskcluster_yml, context)
    if len(rendered['tasks']) != 1:
        raise Exception(
            "Expected .taskcluster.yml to only produce one cron task")
    task = rendered['tasks'][0]
    task_id = task.pop('taskId')
    return (task_id, task)
def set_expiration(task, relative_datestamp):
    """Force *task* to expire at ``relative_datestamp``.

    Rewrites the task's ``expires`` stamp, pulls in any deadline that lies
    beyond the try-expiration window, and stamps every declared artifact
    with the same expiry.
    """
    definition = task['task']
    definition['expires'] = {'relative-datestamp': relative_datestamp}

    if 'deadline' in definition:
        now = current_json_time(datetime_format=True)
        cap = json_time_from_now(input_str=TRY_EXPIRATION,
                                 now=now,
                                 datetime_format=True)
        deadline = json_time_from_now(
            input_str=definition['deadline']['relative-datestamp'],
            now=now,
            datetime_format=True)
        # Only pull deadlines in, never push them out.
        if deadline > cap:
            definition['deadline']['relative-datestamp'] = relative_datestamp

    try:
        artifacts = definition['payload']['artifacts']
    except KeyError:
        # No artifacts declared; nothing left to stamp.
        return

    # for docker-worker, artifacts is a dictionary
    # for generic-worker, artifacts is a list
    # for taskcluster-worker, it will depend on what we do in artifacts plugin
    entries = artifacts.values() if hasattr(artifacts, "values") else artifacts
    for entry in entries:
        entry['expires']['relative-datestamp'] = relative_datestamp
def create_task(session, task_id, label, task_def):
    """Create (or, in testing mode, just print) a task via the queue proxy.

    The PUT goes to 'http://taskcluster/queue', which is proxied to the
    queue service with credentials appropriate to this job.
    """
    # Resolve relative timestamps against a single "now".
    now = current_json_time(datetime_format=True)
    task_def = resolve_timestamps(now, task_def)

    if testing:
        # Dump the would-be task to stdout instead of creating it.
        json.dump([task_id, task_def], sys.stdout,
                  sort_keys=True, indent=4, separators=(',', ': '))
        # add a newline
        print("")
        return

    logger.debug("Creating task with taskId {} for {}".format(task_id, label))
    res = session.put('http://taskcluster/queue/v1/task/{}'.format(task_id),
                      data=json.dumps(task_def))
    if res.status_code == 200:
        return
    try:
        logger.error(res.json()['message'])
    except Exception:
        logger.error(res.text)
    res.raise_for_status()
def load_tasks(self, params):
    """Expand the legacy (testing/taskcluster) templates into a task graph.

    Loads the per-branch job flags, filters jobs by the files changed in
    the push, expands build / post-build / test templates, records the
    resulting graph on ``self``, and returns one ``Task`` per node.

    Fixes applied in review: ``message.fomrat`` typo (raised AttributeError
    instead of the intended ValueError), stray root-``logging`` call, and
    spurious format kwargs in the discard log line.
    """
    root = os.path.abspath(
        os.path.join(self.path, self.config['legacy_path']))
    project = params['project']
    # NOTE: message is ignored here; we always use DEFAULT_TRY, then filter the
    # resulting task graph later
    message = DEFAULT_TRY

    templates = Templates(root)

    job_path = os.path.join(root, 'tasks', 'branches', project, 'job_flags.yml')
    job_path = job_path if os.path.exists(job_path) else \
        os.path.join(root, DEFAULT_JOB_PATH)

    jobs = templates.load(job_path, {})

    job_graph, trigger_tests = parse_commit(message, jobs)

    cmdline_interactive = params.get('interactive', False)

    # Default to current time if querying the head rev fails
    pushdate = time.strftime('%Y%m%d%H%M%S', time.gmtime())
    vcs_info = query_vcs_info(params['head_repository'], params['head_rev'])
    changed_files = set()
    if vcs_info:
        pushdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(vcs_info.pushdate))
        logger.debug('{} commits influencing task scheduling:'.format(
            len(vcs_info.changesets)))
        for c in vcs_info.changesets:
            logger.debug("{cset} {desc}".format(
                cset=c['node'][0:12],
                desc=c['desc'].splitlines()[0].encode('ascii', 'ignore')))
            changed_files |= set(c['files'])

    # Template parameters used when expanding the graph
    parameters = dict(
        gaia_info().items() + {
            'index': 'index',
            'project': project,
            'pushlog_id': params.get('pushlog_id', 0),
            'docker_image': docker_image,
            'base_repository': params['base_repository'] or
            params['head_repository'],
            'head_repository': params['head_repository'],
            'head_ref': params['head_ref'] or params['head_rev'],
            'head_rev': params['head_rev'],
            'pushdate': pushdate,
            'pushtime': pushdate[8:],
            'year': pushdate[0:4],
            'month': pushdate[4:6],
            'day': pushdate[6:8],
            'owner': params['owner'],
            'level': params['level'],
            'from_now': json_time_from_now,
            'now': current_json_time(),
        }.items())

    routes_file = os.path.join(root, 'routes.json')
    with open(routes_file) as f:
        contents = json.load(f)
        json_routes = contents['routes']
        # TODO: Nightly and/or l10n routes

    # Task graph we are generating for taskcluster...
    graph = {
        'tasks': [],
        'scopes': set(),
    }

    for env in TREEHERDER_ROUTES:
        route = format_treeherder_route(TREEHERDER_ROUTES[env],
                                        parameters['project'],
                                        parameters['head_rev'],
                                        parameters['pushlog_id'])
        graph['scopes'].add("queue:route:{}".format(route))

    graph['metadata'] = {
        'source': '{repo}file/{rev}/testing/taskcluster/mach_commands.py'.format(
            repo=params['head_repository'], rev=params['head_rev']),
        'owner': params['owner'],
        # TODO: Add full mach commands to this example?
        'description': 'Task graph generated via ./mach taskcluster-graph',
        'name': 'task graph local'
    }

    # Filter the job graph according to conditions met by this invocation run.
    def should_run(task):
        # Old style build or test task that doesn't define conditions. Always runs.
        if 'when' not in task:
            return True

        when = task['when']

        # If the task defines file patterns and we have a set of changed
        # files to compare against, only run if a file pattern matches one
        # of the changed files.
        file_patterns = when.get('file_patterns', None)
        if file_patterns and changed_files:
            # Always consider changes to the task definition itself
            file_patterns.append(
                'testing/taskcluster/{task}'.format(task=task['task']))
            for pattern in file_patterns:
                for path in changed_files:
                    if mozpackmatch(path, pattern):
                        logger.debug(
                            'scheduling {task} because pattern {pattern} '
                            'matches {path}'.format(
                                task=task['task'],
                                pattern=pattern,
                                path=path,
                            ))
                        return True

            # No file patterns matched. Discard task.
            logger.debug(
                'discarding {task} because no relevant files changed'.format(
                    task=task['task']))
            return False

        return True

    job_graph = filter(should_run, job_graph)

    all_routes = {}

    for build in job_graph:
        logger.debug("loading build task {}".format(build['task']))
        interactive = cmdline_interactive or build["interactive"]
        build_parameters = merge_dicts(parameters,
                                       build['additional-parameters'])
        build_parameters['build_slugid'] = mklabel()
        build_parameters[
            'source'] = '{repo}file/{rev}/testing/taskcluster/{file}'.format(
                repo=params['head_repository'], rev=params['head_rev'],
                file=build['task'])
        build_task = templates.load(build['task'], build_parameters)

        # Copy build_* attributes to expose them to post-build tasks
        # as well as json routes and tests
        task_extra = build_task['task']['extra']
        build_parameters['build_name'] = task_extra['build_name']
        build_parameters['build_type'] = task_extra['build_type']
        build_parameters['build_product'] = task_extra['build_product']

        set_interactive_task(build_task, interactive)

        # try builds don't use cache
        if project == "try":
            remove_caches_from_task(build_task)
            set_expiration(build_task, json_time_from_now(TRY_EXPIRATION))

        decorate_task_treeherder_routes(build_task['task'],
                                        build_parameters['project'],
                                        build_parameters['head_rev'],
                                        build_parameters['pushlog_id'])
        decorate_task_json_routes(build_task['task'],
                                  json_routes,
                                  build_parameters)

        # Ensure each build graph is valid after construction.
        validate_build_task(build_task)
        attributes = build_task['attributes'] = {
            'kind': 'legacy',
            'legacy_kind': 'build'
        }
        if 'build_name' in build:
            attributes['build_platform'] = build['build_name']
        if 'build_type' in task_extra:
            attributes['build_type'] = {
                'dbg': 'debug'
            }.get(task_extra['build_type'], task_extra['build_type'])
        if build.get('is_job'):
            attributes['job'] = build['build_name']
            attributes['legacy_kind'] = 'job'
        graph['tasks'].append(build_task)

        for location in build_task['task']['extra'].get('locations', {}):
            build_parameters['{}_location'.format(location)] = build_task[
                'task']['extra']['locations'][location]

        for url in build_task['task']['extra'].get('url', {}):
            build_parameters['{}_url'.format(url)] = \
                build_task['task']['extra']['url'][url]

        define_task = DEFINE_TASK.format(build_task['task']['workerType'])

        for route in build_task['task'].get('routes', []):
            if route.startswith('index.gecko.v2') and route in all_routes:
                raise Exception(
                    "Error: route '%s' is in use by multiple tasks: '%s' and '%s'" % (
                        route,
                        build_task['task']['metadata']['name'],
                        all_routes[route],
                    ))
            all_routes[route] = build_task['task']['metadata']['name']

        graph['scopes'].add(define_task)
        graph['scopes'] |= set(build_task['task'].get('scopes', []))
        route_scopes = map(lambda route: 'queue:route:' + route,
                           build_task['task'].get('routes', []))
        graph['scopes'] |= set(route_scopes)

        # Treeherder symbol configuration for the graph required for each
        # build so tests know which platform they belong to.
        build_treeherder_config = build_task['task']['extra']['treeherder']

        if 'machine' not in build_treeherder_config:
            message = '({}), extra.treeherder.machine required for all builds'
            raise ValueError(message.format(build['task']))

        if 'build' not in build_treeherder_config:
            build_treeherder_config['build'] = \
                build_treeherder_config['machine']

        if 'collection' not in build_treeherder_config:
            build_treeherder_config['collection'] = {'opt': True}

        if len(build_treeherder_config['collection'].keys()) != 1:
            message = '({}), extra.treeherder.collection must contain one type'
            # was `message.fomrat(...)` — an AttributeError that masked
            # the intended ValueError
            raise ValueError(message.format(build['task']))

        for post_build in build['post-build']:
            # copy over the old parameters to update the template
            # TODO additional-parameters is currently not an option, only
            # enabled for build tasks
            post_parameters = merge_dicts(
                build_parameters,
                post_build.get('additional-parameters', {}))
            post_task = configure_dependent_task(post_build['task'],
                                                 post_parameters,
                                                 mklabel(),
                                                 templates,
                                                 build_treeherder_config)
            set_interactive_task(post_task, interactive)

            if project == "try":
                set_expiration(post_task, json_time_from_now(TRY_EXPIRATION))

            post_task['attributes'] = attributes.copy()
            post_task['attributes']['legacy_kind'] = 'post_build'
            post_task['attributes']['post_build'] = post_build['job_flag']
            graph['tasks'].append(post_task)

        for test in build['dependents']:
            test = test['allowed_build_tasks'][build['task']]
            # TODO additional-parameters is currently not an option, only
            # enabled for build tasks
            test_parameters = merge_dicts(
                build_parameters,
                test.get('additional-parameters', {}))
            # NOTE(review): this copy immediately discards the merged
            # additional-parameters above — confirm which is intended
            # before changing either line.
            test_parameters = copy.copy(build_parameters)
            test_definition = templates.load(test['task'], {})['task']
            chunk_config = test_definition['extra'].get('chunks', {})

            # Allow branch configs to override task level chunking...
            if 'chunks' in test:
                chunk_config['total'] = test['chunks']

            chunked = 'total' in chunk_config
            if chunked:
                test_parameters['total_chunks'] = chunk_config['total']

            if 'suite' in test_definition['extra']:
                suite_config = test_definition['extra']['suite']
                test_parameters['suite'] = suite_config['name']
                test_parameters['flavor'] = suite_config.get('flavor', '')

            for chunk in range(1, chunk_config.get('total', 1) + 1):
                if 'only_chunks' in test and chunked and \
                        chunk not in test['only_chunks']:
                    continue

                if chunked:
                    test_parameters['chunk'] = chunk

                test_task = configure_dependent_task(test['task'],
                                                     test_parameters,
                                                     mklabel(),
                                                     templates,
                                                     build_treeherder_config)
                set_interactive_task(test_task, interactive)

                decorate_task_treeherder_routes(
                    test_task['task'],
                    test_parameters['project'],
                    test_parameters['head_rev'],
                    test_parameters['pushlog_id'])

                if project == "try":
                    set_expiration(test_task, json_time_from_now(TRY_EXPIRATION))

                test_task['attributes'] = attributes.copy()
                test_task['attributes']['legacy_kind'] = 'unittest'
                test_task['attributes']['test_platform'] = attributes[
                    'build_platform']
                test_task['attributes']['unittest_try_name'] = test[
                    'unittest_try_name']
                for param, attr in [('suite', 'unittest_suite'),
                                    ('flavor', 'unittest_flavor'),
                                    ('chunk', 'test_chunk')]:
                    if param in test_parameters:
                        test_task['attributes'][attr] = str(
                            test_parameters[param])

                # This will schedule test jobs N times
                for i in range(0, trigger_tests):
                    graph['tasks'].append(test_task)
                    # If we're scheduling more tasks each have to be unique
                    test_task = copy.deepcopy(test_task)
                    test_task['taskId'] = mklabel()

                define_task = DEFINE_TASK.format(
                    test_task['task']['workerType'])

                graph['scopes'].add(define_task)
                graph['scopes'] |= set(test_task['task'].get('scopes', []))

    graph['scopes'] = sorted(graph['scopes'])

    # save the graph for later, when taskgraph asks for additional information
    # such as dependencies
    self.graph = graph
    self.tasks_by_label = {t['taskId']: t for t in self.graph['tasks']}

    # Convert to a dictionary of tasks. The process above has invented a
    # taskId for each task, and we use those as the *labels* for the tasks;
    # taskgraph will later assign them new taskIds.
    return [
        Task(self, t['taskId'], task=t['task'], attributes=t['attributes'])
        for t in self.graph['tasks']
    ]
def load_tasks(cls, kind, path, config, params, loaded_tasks):
    """Generate one docker-image build task per image in the kind config.

    Each task is index-routed by context hash under both mozilla-central
    and the current project, so an existing image with the same context
    can be reused instead of rebuilt.
    """
    # TODO: make this match the pushdate (get it from a parameter rather than vcs)
    pushdate = time.strftime('%Y%m%d%H%M%S', time.gmtime())

    base_parameters = {
        'pushlog_id': params.get('pushlog_id', 0),
        'pushdate': pushdate,
        'pushtime': pushdate[8:],
        'year': pushdate[0:4],
        'month': pushdate[4:6],
        'day': pushdate[6:8],
        'project': params['project'],
        'docker_image': docker_image,
        'base_repository': params['base_repository'] or params['head_repository'],
        'head_repository': params['head_repository'],
        'head_ref': params['head_ref'] or params['head_rev'],
        'head_rev': params['head_rev'],
        'owner': params['owner'],
        'level': params['level'],
        'from_now': json_time_from_now,
        'now': current_json_time(),
        'source': '{repo}file/{rev}/testing/taskcluster/tasks/image.yml'
        .format(repo=params['head_repository'], rev=params['head_rev']),
    }

    templates = Templates(path)
    tasks = []
    for image_name in config['images']:
        context_path = os.path.join('testing', 'docker', image_name)
        context_hash = generate_context_hash(context_path)

        image_parameters = dict(base_parameters,
                                context_hash=context_hash,
                                context_path=context_path,
                                artifact_path='public/image.tar',
                                image_name=image_name)

        image_artifact_path = \
            "public/decision_task/image_contexts/{}/context.tar.gz".format(image_name)
        if os.environ.get('TASK_ID'):
            # Decision task: create the context tarball and point at the
            # artifact it will be uploaded as.
            destination = os.path.join(
                os.environ['HOME'],
                "artifacts/decision_task/image_contexts/{}/context.tar.gz".format(image_name))
            image_parameters['context_url'] = ARTIFACT_URL.format(
                os.environ['TASK_ID'], image_artifact_path)
            cls.create_context_tar(context_path, destination, image_name)
        else:
            # skip context generation since this isn't a decision task
            # TODO: generate context tarballs using subdirectory clones in
            # the image-building task so we don't have to worry about this.
            image_parameters['context_url'] = 'file:///tmp/' + image_artifact_path

        image_task = templates.load('image.yml', image_parameters)

        # As an optimization, if the context hash exists for mozilla-central, that image
        # task ID will be used. The reasoning behind this is that eventually everything ends
        # up on mozilla-central at some point if most tasks use this as a common image
        # for a given context hash, a worker within Taskcluster does not need to contain
        # the same image per branch.
        index_paths = [
            'docker.images.v1.{}.{}.hash.{}'.format(
                project, image_name, context_hash)
            for project in ['mozilla-central', params['project']]
        ]

        tasks.append(cls(kind,
                         'build-docker-image-' + image_name,
                         task=image_task['task'],
                         attributes={'image_name': image_name},
                         index_paths=index_paths))

    return tasks
def load_tasks(self, params):
    """Generate one docker-image build task per configured image.

    Each task is index-routed by context hash under both mozilla-central
    and the current project, so an existing image with the same context
    can be reused instead of rebuilt.
    """
    # TODO: make this match the pushdate (get it from a parameter rather than vcs)
    pushdate = time.strftime('%Y%m%d%H%M%S', time.gmtime())

    common = {
        'pushlog_id': params.get('pushlog_id', 0),
        'pushdate': pushdate,
        'pushtime': pushdate[8:],
        'year': pushdate[0:4],
        'month': pushdate[4:6],
        'day': pushdate[6:8],
        'project': params['project'],
        'docker_image': docker_image,
        'base_repository': params['base_repository'] or params['head_repository'],
        'head_repository': params['head_repository'],
        'head_ref': params['head_ref'] or params['head_rev'],
        'head_rev': params['head_rev'],
        'owner': params['owner'],
        'level': params['level'],
        'from_now': json_time_from_now,
        'now': current_json_time(),
        'source': '{repo}file/{rev}/testing/taskcluster/tasks/image.yml'.format(
            repo=params['head_repository'], rev=params['head_rev']),
    }

    templates = Templates(self.path)
    result = []
    for image_name in self.config['images']:
        context_path = os.path.join('testing', 'docker', image_name)
        context_hash = generate_context_hash(context_path)

        image_params = dict(common)
        image_params['context_hash'] = context_hash
        image_params['context_path'] = context_path
        image_params['artifact_path'] = 'public/image.tar'
        image_params['image_name'] = image_name

        image_artifact_path = \
            "public/decision_task/image_contexts/{}/context.tar.gz".format(image_name)
        if os.environ.get('TASK_ID'):
            # Decision task: create the context tarball and point at the
            # artifact it will be uploaded as.
            destination = os.path.join(
                os.environ['HOME'],
                "artifacts/decision_task/image_contexts/{}/context.tar.gz".format(image_name))
            image_params['context_url'] = ARTIFACT_URL.format(
                os.environ['TASK_ID'], image_artifact_path)
            self.create_context_tar(context_path, destination, image_name)
        else:
            # skip context generation since this isn't a decision task
            # TODO: generate context tarballs using subdirectory clones in
            # the image-building task so we don't have to worry about this.
            image_params['context_url'] = 'file:///tmp/' + image_artifact_path

        image_task = templates.load('image.yml', image_params)

        # As an optimization, if the context hash exists for mozilla-central, that image
        # task ID will be used. The reasoning behind this is that eventually everything ends
        # up on mozilla-central at some point if most tasks use this as a common image
        # for a given context hash, a worker within Taskcluster does not need to contain
        # the same image per branch.
        index_paths = ['docker.images.v1.{}.{}.hash.{}'.format(
            project, image_name, context_hash)
            for project in ['mozilla-central', params['project']]]

        result.append(Task(self,
                           'build-docker-image-' + image_name,
                           task=image_task['task'],
                           attributes={'kind': self.name, 'image_name': image_name},
                           index_paths=index_paths))

    return result
def load_tasks(cls, kind, path, config, params, loaded_tasks):
    """Expand the legacy (testing/taskcluster) templates into a task graph.

    Classmethod variant: loads the per-branch job flags, filters jobs by
    the files changed in the push, expands build / post-build / test
    templates, and returns one ``cls(...)`` instance per graph node.

    Fixes applied in review: ``message.fomrat`` typo (raised AttributeError
    instead of the intended ValueError), stray root-``logging`` call, and
    spurious format kwargs in the discard log line.
    """
    root = os.path.abspath(os.path.join(path, config['legacy_path']))

    project = params['project']
    # NOTE: message is ignored here; we always use DEFAULT_TRY, then filter the
    # resulting task graph later
    message = DEFAULT_TRY

    templates = Templates(root)

    job_path = os.path.join(root, 'tasks', 'branches', project, 'job_flags.yml')
    job_path = job_path if os.path.exists(job_path) else \
        os.path.join(root, DEFAULT_JOB_PATH)

    jobs = templates.load(job_path, {})

    job_graph, trigger_tests = parse_commit(message, jobs)

    cmdline_interactive = params.get('interactive', False)

    # Default to current time if querying the head rev fails
    push_epoch = int(time.time())
    vcs_info = query_vcs_info(params['head_repository'], params['head_rev'])
    changed_files = set()
    if vcs_info:
        push_epoch = vcs_info.pushdate
        logger.debug(
            '{} commits influencing task scheduling:'.format(len(vcs_info.changesets)))
        for c in vcs_info.changesets:
            logger.debug("{cset} {desc}".format(
                cset=c['node'][0:12],
                desc=c['desc'].splitlines()[0].encode('ascii', 'ignore')))
            changed_files |= set(c['files'])

    pushdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(push_epoch))

    # Template parameters used when expanding the graph
    parameters = dict(gaia_info().items() + {
        'index': 'index',
        'project': project,
        'pushlog_id': params.get('pushlog_id', 0),
        'docker_image': docker_image,
        'base_repository': params['base_repository'] or params['head_repository'],
        'head_repository': params['head_repository'],
        'head_ref': params['head_ref'] or params['head_rev'],
        'head_rev': params['head_rev'],
        'pushdate': pushdate,
        'pushtime': pushdate[8:],
        'year': pushdate[0:4],
        'month': pushdate[4:6],
        'day': pushdate[6:8],
        'rank': push_epoch,
        'owner': params['owner'],
        'level': params['level'],
        'from_now': json_time_from_now,
        'now': current_json_time(),
    }.items())

    routes_file = os.path.join(root, 'routes.json')
    with open(routes_file) as f:
        contents = json.load(f)
        json_routes = contents['routes']
        # TODO: Nightly and/or l10n routes

    # Task graph we are generating for taskcluster...
    graph = {
        'tasks': [],
        'scopes': set(),
    }

    for env in TREEHERDER_ROUTES:
        route = format_treeherder_route(TREEHERDER_ROUTES[env],
                                        parameters['project'],
                                        parameters['head_rev'],
                                        parameters['pushlog_id'])
        graph['scopes'].add("queue:route:{}".format(route))

    graph['metadata'] = {
        'source': '{repo}file/{rev}/testing/taskcluster/mach_commands.py'.format(
            repo=params['head_repository'], rev=params['head_rev']),
        'owner': params['owner'],
        # TODO: Add full mach commands to this example?
        'description': 'Task graph generated via ./mach taskcluster-graph',
        'name': 'task graph local'
    }

    # Filter the job graph according to conditions met by this invocation run.
    def should_run(task):
        # Old style build or test task that doesn't define conditions. Always runs.
        if 'when' not in task:
            return True

        when = task['when']

        # If the task defines file patterns and we have a set of changed
        # files to compare against, only run if a file pattern matches one
        # of the changed files.
        file_patterns = when.get('file_patterns', None)
        if file_patterns and changed_files:
            # Always consider changes to the task definition itself
            file_patterns.append('testing/taskcluster/{task}'.format(task=task['task']))
            for pattern in file_patterns:
                for path in changed_files:
                    if mozpackmatch(path, pattern):
                        logger.debug('scheduling {task} because pattern {pattern} '
                                     'matches {path}'.format(
                                         task=task['task'],
                                         pattern=pattern,
                                         path=path,
                                     ))
                        return True

            # No file patterns matched. Discard task.
            logger.debug('discarding {task} because no relevant files changed'.format(
                task=task['task']))
            return False

        return True

    job_graph = filter(should_run, job_graph)

    all_routes = {}

    for build in job_graph:
        logger.debug("loading build task {}".format(build['task']))
        interactive = cmdline_interactive or build["interactive"]
        build_parameters = merge_dicts(parameters, build['additional-parameters'])
        build_parameters['build_slugid'] = mklabel()
        build_parameters['source'] = '{repo}file/{rev}/testing/taskcluster/{file}'.format(
            repo=params['head_repository'], rev=params['head_rev'], file=build['task'])
        build_task = templates.load(build['task'], build_parameters)

        # Copy build_* attributes to expose them to post-build tasks
        # as well as json routes and tests
        task_extra = build_task['task']['extra']
        build_parameters['build_name'] = task_extra['build_name']
        build_parameters['build_type'] = task_extra['build_type']
        build_parameters['build_product'] = task_extra['build_product']

        if 'treeherder' in task_extra:
            tier = task_extra['treeherder'].get('tier', 1)
            if tier != 1:
                # Only tier 1 jobs use the build time as rank. Everything
                # else gets rank 0 until it is promoted to tier 1.
                task_extra['index']['rank'] = 0

        set_interactive_task(build_task, interactive)

        # try builds don't use cache
        if project == "try":
            remove_caches_from_task(build_task)
            set_expiration(build_task, json_time_from_now(TRY_EXPIRATION))

        decorate_task_treeherder_routes(build_task['task'],
                                        build_parameters['project'],
                                        build_parameters['head_rev'],
                                        build_parameters['pushlog_id'])
        decorate_task_json_routes(build_task['task'],
                                  json_routes,
                                  build_parameters)

        # Ensure each build graph is valid after construction.
        validate_build_task(build_task)
        attributes = build_task['attributes'] = {'kind': 'legacy', 'legacy_kind': 'build'}
        if 'build_name' in build:
            attributes['build_platform'] = build['build_name']
        if 'build_type' in task_extra:
            attributes['build_type'] = {'dbg': 'debug'}.get(task_extra['build_type'],
                                                            task_extra['build_type'])
        if build.get('is_job'):
            attributes['job'] = build['build_name']
            attributes['legacy_kind'] = 'job'
        graph['tasks'].append(build_task)

        for location in build_task['task']['extra'].get('locations', {}):
            build_parameters['{}_location'.format(location)] = \
                build_task['task']['extra']['locations'][location]

        for url in build_task['task']['extra'].get('url', {}):
            build_parameters['{}_url'.format(url)] = \
                build_task['task']['extra']['url'][url]

        define_task = DEFINE_TASK.format(build_task['task']['workerType'])

        for route in build_task['task'].get('routes', []):
            if route.startswith('index.gecko.v2') and route in all_routes:
                raise Exception(
                    "Error: route '%s' is in use by multiple tasks: '%s' and '%s'" % (
                        route,
                        build_task['task']['metadata']['name'],
                        all_routes[route],
                    ))
            all_routes[route] = build_task['task']['metadata']['name']

        graph['scopes'].add(define_task)
        graph['scopes'] |= set(build_task['task'].get('scopes', []))
        route_scopes = map(
            lambda route: 'queue:route:' + route,
            build_task['task'].get('routes', [])
        )
        graph['scopes'] |= set(route_scopes)

        # Treeherder symbol configuration for the graph required for each
        # build so tests know which platform they belong to.
        build_treeherder_config = build_task['task']['extra']['treeherder']

        if 'machine' not in build_treeherder_config:
            message = '({}), extra.treeherder.machine required for all builds'
            raise ValueError(message.format(build['task']))

        if 'build' not in build_treeherder_config:
            build_treeherder_config['build'] = \
                build_treeherder_config['machine']

        if 'collection' not in build_treeherder_config:
            build_treeherder_config['collection'] = {'opt': True}

        if len(build_treeherder_config['collection'].keys()) != 1:
            message = '({}), extra.treeherder.collection must contain one type'
            # was `message.fomrat(...)` — an AttributeError that masked
            # the intended ValueError
            raise ValueError(message.format(build['task']))

        for post_build in build['post-build']:
            # copy over the old parameters to update the template
            # TODO additional-parameters is currently not an option, only
            # enabled for build tasks
            post_parameters = merge_dicts(build_parameters,
                                          post_build.get('additional-parameters', {}))
            post_task = configure_dependent_task(post_build['task'],
                                                 post_parameters,
                                                 mklabel(),
                                                 templates,
                                                 build_treeherder_config)
            set_interactive_task(post_task, interactive)

            if project == "try":
                set_expiration(post_task, json_time_from_now(TRY_EXPIRATION))

            post_task['attributes'] = attributes.copy()
            post_task['attributes']['legacy_kind'] = 'post_build'
            post_task['attributes']['post_build'] = post_build['job_flag']
            graph['tasks'].append(post_task)

        for test in build['dependents']:
            test = test['allowed_build_tasks'][build['task']]
            # TODO additional-parameters is currently not an option, only
            # enabled for build tasks
            test_parameters = merge_dicts(build_parameters,
                                          test.get('additional-parameters', {}))
            # NOTE(review): this copy immediately discards the merged
            # additional-parameters above — confirm which is intended
            # before changing either line.
            test_parameters = copy.copy(build_parameters)
            test_definition = templates.load(test['task'], {})['task']
            chunk_config = test_definition['extra'].get('chunks', {})

            # Allow branch configs to override task level chunking...
            if 'chunks' in test:
                chunk_config['total'] = test['chunks']

            chunked = 'total' in chunk_config
            if chunked:
                test_parameters['total_chunks'] = chunk_config['total']

            if 'suite' in test_definition['extra']:
                suite_config = test_definition['extra']['suite']
                test_parameters['suite'] = suite_config['name']
                test_parameters['flavor'] = suite_config.get('flavor', '')

            for chunk in range(1, chunk_config.get('total', 1) + 1):
                if 'only_chunks' in test and chunked and \
                        chunk not in test['only_chunks']:
                    continue

                if chunked:
                    test_parameters['chunk'] = chunk

                test_task = configure_dependent_task(test['task'],
                                                     test_parameters,
                                                     mklabel(),
                                                     templates,
                                                     build_treeherder_config)
                set_interactive_task(test_task, interactive)

                decorate_task_treeherder_routes(test_task['task'],
                                                test_parameters['project'],
                                                test_parameters['head_rev'],
                                                test_parameters['pushlog_id'])

                if project == "try":
                    set_expiration(test_task, json_time_from_now(TRY_EXPIRATION))

                test_task['attributes'] = attributes.copy()
                test_task['attributes']['legacy_kind'] = 'unittest'
                test_task['attributes']['test_platform'] = attributes['build_platform']
                test_task['attributes']['unittest_try_name'] = test['unittest_try_name']
                for param, attr in [
                        ('suite', 'unittest_suite'),
                        ('flavor', 'unittest_flavor'),
                        ('chunk', 'test_chunk')]:
                    if param in test_parameters:
                        test_task['attributes'][attr] = str(test_parameters[param])

                # This will schedule test jobs N times
                for i in range(0, trigger_tests):
                    graph['tasks'].append(test_task)
                    # If we're scheduling more tasks each have to be unique
                    test_task = copy.deepcopy(test_task)
                    test_task['taskId'] = mklabel()

                define_task = DEFINE_TASK.format(
                    test_task['task']['workerType']
                )

                graph['scopes'].add(define_task)
                graph['scopes'] |= set(test_task['task'].get('scopes', []))

    graph['scopes'] = sorted(graph['scopes'])

    # Convert to a dictionary of tasks. The process above has invented a
    # taskId for each task, and we use those as the *labels* for the tasks;
    # taskgraph will later assign them new taskIds.
    return [
        cls(kind, t['taskId'], task=t['task'], attributes=t['attributes'],
            task_dict=t)
        for t in graph['tasks']
    ]