def test_try_flags_not_specified(self):
    '''
    Try flags are optional, and if not provided, should cause an empty
    graph to be generated.
    '''
    commit = 'Bug XXX - test commit with no flags'
    jobs = {
        'flags': {
            'builds': ['linux', 'linux64'],
            'tests': ['web-platform-tests'],
        },
        'builds': {
            'linux': {
                'types': {
                    'opt': {'task': 'task/linux'},
                    'debug': {'task': 'task/linux-debug'},
                },
            },
        },
        'tests': {},
    }
    expected = []

    result, triggers = parse_commit(commit, jobs)
    self.assertEqual(expected, result)

def test_specific_chunks(self):
    '''
    This test covers specifying specific chunks for a given test suite.
    '''
    commit = 'try: -b o -p linux -u mochitest-1,mochitest-2 -t none'
    jobs = {
        'flags': {
            'builds': ['linux'],
            'tests': ['mochitest'],
        },
        'builds': {
            'linux': {
                'types': {
                    'opt': {'task': 'task/linux'},
                    'debug': {'task': 'task/linux-debug'},
                },
            },
        },
        'tests': {
            'mochitest': {
                'allowed_build_tasks': {
                    'task/linux': {
                        'task': 'task/mochitest',
                        'chunks': 5,
                    },
                },
            },
        },
    }
    expected = [{
        'task': 'task/linux',
        'dependents': [{
            'allowed_build_tasks': {
                'task/linux': {
                    'task': 'task/mochitest',
                    'chunks': 5,
                    'only_chunks': set([1, 2]),
                    'unittest_try_name': 'mochitest',
                },
            },
        }],
        'additional-parameters': {},
        'post-build': [],
        'build_name': 'linux',
        'build_type': 'opt',
        'interactive': False,
        'when': {},
    }]

    result, triggers = parse_commit(commit, jobs)
    self.assertEqual(expected, result)

def test_try_flag_in_middle_of_commit(self):
    '''
    The try command prefix may appear anywhere in the commit message;
    this test ensures that it works in some common cases.
    '''
    commit = 'Bug XXX - I like cheese try: -b o -p all -u none wootbar'
    jobs = {
        'flags': {
            'builds': ['linux', 'linux64'],
            'tests': ['web-platform-tests'],
        },
        'builds': {
            'linux': {
                'types': {
                    'opt': {'task': 'task/linux'},
                    'debug': {'task': 'task/linux-debug'},
                },
            },
        },
        'tests': {},
    }
    expected = [{
        'task': 'task/linux',
        'dependents': [],
        'additional-parameters': {},
        'build_name': 'linux',
        'build_type': 'opt',
        'interactive': False,
        'post-build': [],
        'when': {},
    }]

    result, triggers = parse_commit(commit, jobs)
    self.assertEqual(expected, result)

def test_commit_no_tests(self):
    '''
    This test covers the case of builds but no tests (-u none passed).
    '''
    commit = 'try: -b o -p linux -u none -t none'
    jobs = {
        'flags': {
            'builds': ['linux', 'linux64'],
            'tests': ['web-platform-tests'],
        },
        'builds': {
            'linux': {
                'types': {
                    'opt': {'task': 'task/linux'},
                    'debug': {'task': 'task/linux-debug'},
                },
            },
        },
        'tests': {},
    }
    expected = [{
        'task': 'task/linux',
        'dependents': [],
        'additional-parameters': {},
        'build_name': 'linux',
        'build_type': 'opt',
        'post-build': [],
        'interactive': False,
        'when': {},
    }]

    result, triggers = parse_commit(commit, jobs)
    self.assertEqual(expected, result)

def test_flag_aliasing(self):
    '''
    A flag alias should expand to the build flag it points at.
    '''
    commit = 'try: -b o -p magic-alias -u none -t none'
    jobs = {
        'flags': {
            'aliases': {
                'magic-alias': 'linux',
            },
            'builds': ['linux', 'linux64'],
            'tests': ['web-platform-tests'],
        },
        'builds': {
            'linux': {
                'types': {
                    'opt': {'task': 'task/linux'},
                    'debug': {'task': 'task/linux-debug'},
                },
            },
        },
        'tests': {},
    }
    expected = [{
        'task': 'task/linux',
        'dependents': [],
        'additional-parameters': {},
        'build_name': 'linux',
        'build_type': 'opt',
        'interactive': False,
        'post-build': [],
        'when': {},
    }]

    result, triggers = parse_commit(commit, jobs)
    self.assertEqual(expected, result)

def test_commit_all_builds_no_tests(self):
    '''
    This test covers the case of all builds (-p all) but no tests (-u none).
    '''
    commit = 'try: -b o -p all -u none -t none'
    jobs = {
        'flags': {
            'builds': ['linux', 'linux64'],
            'tests': ['web-platform-tests'],
        },
        'builds': {
            'linux': {
                'types': {
                    'opt': {'task': 'task/linux'},
                    'debug': {'task': 'task/linux-debug'},
                },
            },
        },
        'tests': {},
    }
    expected = [{
        'task': 'task/linux',
        'dependents': [],
        'post-build': [],
        'build_name': 'linux',
        'build_type': 'opt',
        'interactive': False,
        'additional-parameters': {},
        'when': {},
    }]

    result, triggers = parse_commit(commit, jobs)
    self.assertEqual(expected, result)

def load_tasks(cls, kind, path, config, params, loaded_tasks):
    root = os.path.abspath(os.path.join(path, config['legacy_path']))

    project = params['project']
    # NOTE: message is ignored here; we always use DEFAULT_TRY, then filter the
    # resulting task graph later
    message = DEFAULT_TRY

    templates = Templates(root)

    job_path = os.path.join(root, 'tasks', 'branches', project, 'job_flags.yml')
    job_path = job_path if os.path.exists(job_path) else \
        os.path.join(root, DEFAULT_JOB_PATH)

    jobs = templates.load(job_path, {})

    job_graph, trigger_tests = parse_commit(message, jobs)

    cmdline_interactive = params.get('interactive', False)

    # Default to current time if querying the head rev fails
    push_epoch = int(time.time())
    vcs_info = query_vcs_info(params['head_repository'], params['head_rev'])
    changed_files = set()
    if vcs_info:
        push_epoch = vcs_info.pushdate

        logger.debug('{} commits influencing task scheduling:'.format(
            len(vcs_info.changesets)))
        for c in vcs_info.changesets:
            logger.debug("{cset} {desc}".format(
                cset=c['node'][0:12],
                desc=c['desc'].splitlines()[0].encode('ascii', 'ignore')))
            changed_files |= set(c['files'])

    pushdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(push_epoch))

    # Template parameters used when expanding the graph
    parameters = dict(gaia_info().items() + {
        'index': 'index',
        'project': project,
        'pushlog_id': params.get('pushlog_id', 0),
        'docker_image': docker_image,
        'base_repository': params['base_repository'] or params['head_repository'],
        'head_repository': params['head_repository'],
        'head_ref': params['head_ref'] or params['head_rev'],
        'head_rev': params['head_rev'],
        'pushdate': pushdate,
        'pushtime': pushdate[8:],
        'year': pushdate[0:4],
        'month': pushdate[4:6],
        'day': pushdate[6:8],
        'rank': push_epoch,
        'owner': params['owner'],
        'level': params['level'],
    }.items())

    routes_file = os.path.join(root, 'routes.json')
    with open(routes_file) as f:
        contents = json.load(f)
        json_routes = contents['routes']
        # TODO: Nightly and/or l10n routes

    # Task graph we are generating for taskcluster...
    graph = {
        'tasks': [],
        'scopes': set(),
    }

    for env in TREEHERDER_ROUTES:
        route = format_treeherder_route(TREEHERDER_ROUTES[env],
                                        parameters['project'],
                                        parameters['head_rev'],
                                        parameters['pushlog_id'])
        graph['scopes'].add("queue:route:{}".format(route))

    graph['metadata'] = {
        'source': '{repo}file/{rev}/testing/taskcluster/mach_commands.py'.format(
            repo=params['head_repository'], rev=params['head_rev']),
        'owner': params['owner'],
        # TODO: Add full mach commands to this example?
        'description': 'Task graph generated via ./mach taskcluster-graph',
        'name': 'task graph local',
    }

    # Filter the job graph according to conditions met by this invocation run.
    def should_run(task):
        # Old style build or test task that doesn't define conditions. Always runs.
        if 'when' not in task:
            return True

        when = task['when']

        # If the task defines file patterns and we have a set of changed
        # files to compare against, only run if a file pattern matches one
        # of the changed files.
        file_patterns = when.get('file_patterns', None)
        if file_patterns and changed_files:
            # Always consider changes to the task definition itself
            file_patterns.append('testing/taskcluster/{task}'.format(task=task['task']))
            for pattern in file_patterns:
                for path in changed_files:
                    if mozpackmatch(path, pattern):
                        logger.debug('scheduling {task} because pattern {pattern} '
                                     'matches {path}'.format(
                                         task=task['task'],
                                         pattern=pattern,
                                         path=path,
                                     ))
                        return True

            # No file patterns matched. Discard task.
            logger.debug('discarding {task} because no relevant files changed'.format(
                task=task['task'], pattern=pattern, path=path))
            return False

        return True

    job_graph = filter(should_run, job_graph)

    all_routes = {}

    for build in job_graph:
        logging.debug("loading build task {}".format(build['task']))
        interactive = cmdline_interactive or build["interactive"]
        build_parameters = merge_dicts(parameters, build['additional-parameters'])
        build_parameters['build_slugid'] = mklabel()
        build_parameters['source'] = '{repo}file/{rev}/testing/taskcluster/{file}'.format(
            repo=params['head_repository'], rev=params['head_rev'], file=build['task'])
        build_task = templates.load(build['task'], build_parameters)

        # Copy build_* attributes to expose them to post-build tasks
        # as well as json routes and tests
        task_extra = build_task['task']['extra']
        build_parameters['build_name'] = task_extra['build_name']
        build_parameters['build_type'] = task_extra['build_type']
        build_parameters['build_product'] = task_extra['build_product']

        if 'treeherder' in task_extra:
            tier = task_extra['treeherder'].get('tier', 1)
            if tier != 1:
                # Only tier 1 jobs use the build time as rank. Everything
                # else gets rank 0 until it is promoted to tier 1.
                task_extra['index']['rank'] = 0

        set_interactive_task(build_task, interactive)

        # try builds don't use cache nor coalescing
        if project == "try":
            remove_caches_from_task(build_task)
            remove_coalescing_from_task(build_task)
            set_expiration(build_task, TRY_EXPIRATION)

        decorate_task_treeherder_routes(build_task['task'],
                                        build_parameters['project'],
                                        build_parameters['head_rev'],
                                        build_parameters['pushlog_id'])
        decorate_task_json_routes(build_task['task'],
                                  json_routes,
                                  build_parameters)

        # Ensure each build graph is valid after construction.
        validate_build_task(build_task)
        attributes = build_task['attributes'] = {
            'kind': 'legacy',
            'legacy_kind': 'build',
            'run_on_projects': ['all'],
        }
        if 'build_name' in build:
            attributes['build_platform'] = build['build_name']
        if 'build_type' in task_extra:
            attributes['build_type'] = {'dbg': 'debug'}.get(task_extra['build_type'],
                                                            task_extra['build_type'])
        if build.get('is_job'):
            attributes['job'] = build['build_name']
            attributes['legacy_kind'] = 'job'
        graph['tasks'].append(build_task)

        for location in build_task['task']['extra'].get('locations', {}):
            build_parameters['{}_location'.format(location)] = \
                build_task['task']['extra']['locations'][location]

        for url in build_task['task']['extra'].get('url', {}):
            build_parameters['{}_url'.format(url)] = \
                build_task['task']['extra']['url'][url]

        define_task = DEFINE_TASK.format(build_task['task']['workerType'])

        for route in build_task['task'].get('routes', []):
            if route.startswith('index.gecko.v2') and route in all_routes:
                raise Exception(
                    "Error: route '%s' is in use by multiple tasks: '%s' and '%s'" % (
                        route,
                        build_task['task']['metadata']['name'],
                        all_routes[route],
                    ))
            all_routes[route] = build_task['task']['metadata']['name']

        graph['scopes'].add(define_task)
        graph['scopes'] |= set(build_task['task'].get('scopes', []))
        route_scopes = map(lambda route: 'queue:route:' + route,
                           build_task['task'].get('routes', []))
        graph['scopes'] |= set(route_scopes)

        # Treeherder symbol configuration for the graph required for each
        # build so tests know which platform they belong to.
        build_treeherder_config = build_task['task']['extra']['treeherder']

        if 'machine' not in build_treeherder_config:
            message = '({}), extra.treeherder.machine required for all builds'
            raise ValueError(message.format(build['task']))

        if 'build' not in build_treeherder_config:
            build_treeherder_config['build'] = build_treeherder_config['machine']

        if 'collection' not in build_treeherder_config:
            build_treeherder_config['collection'] = {'opt': True}

        if len(build_treeherder_config['collection'].keys()) != 1:
            message = '({}), extra.treeherder.collection must contain one type'
            raise ValueError(message.format(build['task']))

        for post_build in build['post-build']:
            # copy over the old parameters to update the template
            # TODO additional-parameters is currently not an option, only
            # enabled for build tasks
            post_parameters = merge_dicts(build_parameters,
                                          post_build.get('additional-parameters', {}))
            post_task = configure_dependent_task(post_build['task'],
                                                 post_parameters,
                                                 mklabel(),
                                                 templates,
                                                 build_treeherder_config)
            set_interactive_task(post_task, interactive)

            if project == "try":
                set_expiration(post_task, TRY_EXPIRATION)

            post_task['attributes'] = attributes.copy()
            post_task['attributes']['legacy_kind'] = 'post_build'
            post_task['attributes']['post_build'] = post_build['job_flag']
            graph['tasks'].append(post_task)

    graph['scopes'] = sorted(graph['scopes'])

    # Convert to a list of task objects. The process above has invented a
    # taskId for each task, and we use those as the *labels* for the tasks;
    # taskgraph will later assign them new taskIds.
    return [
        cls(kind, t['taskId'], task=t['task'], attributes=t['attributes'], task_dict=t)
        for t in graph['tasks']
    ]

def test_commit_long_form(self):
    '''
    This tests the long form of the try flags.
    '''
    commit = \
        'try: --build od --platform linux,linux64 --unittests web-platform-tests --talos none'
    jobs = {
        'flags': {
            'builds': ['linux', 'linux64'],
            'tests': ['web-platform-tests'],
        },
        'builds': {
            'linux': {
                'types': {
                    'opt': {'task': 'task/linux'},
                    'debug': {'task': 'task/linux-debug'},
                },
            },
            'linux64': {
                'types': {
                    'opt': {'task': 'task/linux64'},
                    'debug': {'task': 'task/linux64-debug'},
                },
            },
        },
        'tests': {
            'web-platform-tests': {
                'allowed_build_tasks': {
                    'task/linux': {'task': 'task/web-platform-tests'},
                    'task/linux-debug': {'task': 'task/web-platform-tests'},
                    'task/linux64': {'task': 'task/web-platform-tests'},
                    'task/linux64-debug': {'task': 'task/web-platform-tests'},
                },
            },
        },
    }
    expected = [{
        'task': 'task/linux',
        'dependents': [{
            'allowed_build_tasks': {
                'task/linux': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
                'task/linux-debug': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
                'task/linux64': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
                'task/linux64-debug': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
            },
        }],
        'additional-parameters': {},
        'build_name': 'linux',
        'build_type': 'opt',
        'post-build': [],
        'interactive': False,
        'when': {},
    }, {
        'task': 'task/linux-debug',
        'dependents': [{
            'allowed_build_tasks': {
                'task/linux': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
                'task/linux-debug': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
                'task/linux64': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
                'task/linux64-debug': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
            },
        }],
        'additional-parameters': {},
        'build_name': 'linux',
        'build_type': 'debug',
        'post-build': [],
        'interactive': False,
        'when': {},
    }, {
        'task': 'task/linux64',
        'dependents': [{
            'allowed_build_tasks': {
                'task/linux': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
                'task/linux-debug': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
                'task/linux64': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
                'task/linux64-debug': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
            },
        }],
        'additional-parameters': {},
        'build_name': 'linux64',
        'build_type': 'opt',
        'post-build': [],
        'interactive': False,
        'when': {},
    }, {
        'task': 'task/linux64-debug',
        'dependents': [{
            'allowed_build_tasks': {
                'task/linux': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
                'task/linux-debug': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
                'task/linux64': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
                'task/linux64-debug': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
            },
        }],
        'additional-parameters': {},
        'build_name': 'linux64',
        'build_type': 'debug',
        'post-build': [],
        'interactive': False,
        'when': {},
    }]

    result, triggers = parse_commit(commit, jobs)
    self.assertEqual(sorted(expected), sorted(result))

def test_commit_with_builds_and_tests(self):
    '''
    This test covers the broad case of a commit which has both builds
    and tests without any exclusions or other fancy logic.
    '''
    commit = 'try: -b od -p linux,linux64 -u web-platform-tests -t none'
    jobs = {
        'flags': {
            'builds': ['linux', 'linux64'],
            'tests': ['web-platform-tests'],
        },
        'builds': {
            'linux': {
                'types': {
                    'opt': {'task': 'task/linux'},
                    'debug': {'task': 'task/linux-debug'},
                },
            },
            'linux64': {
                'types': {
                    'opt': {'task': 'task/linux64'},
                    'debug': {'task': 'task/linux64-debug'},
                },
            },
        },
        'tests': {
            'web-platform-tests': {
                'allowed_build_tasks': {
                    'task/linux': {'task': 'task/web-platform-tests'},
                    'task/linux-debug': {'task': 'task/web-platform-tests'},
                    'task/linux64': {'task': 'task/web-platform-tests'},
                    'task/linux64-debug': {'task': 'task/web-platform-tests'},
                },
            },
        },
    }
    expected = [{
        'task': 'task/linux64',
        'dependents': [{
            'allowed_build_tasks': {
                'task/linux': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
                'task/linux-debug': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
                'task/linux64': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
                'task/linux64-debug': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
            },
        }],
        'build_name': 'linux64',
        'build_type': 'opt',
        'interactive': False,
        'post-build': [],
        'when': {},
        'additional-parameters': {},
    }, {
        'task': 'task/linux64-debug',
        'dependents': [{
            'allowed_build_tasks': {
                'task/linux': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
                'task/linux-debug': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
                'task/linux64': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
                'task/linux64-debug': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
            },
        }],
        'build_name': 'linux64',
        'build_type': 'debug',
        'interactive': False,
        'post-build': [],
        'when': {},
        'additional-parameters': {},
    }, {
        'task': 'task/linux',
        'dependents': [{
            'allowed_build_tasks': {
                'task/linux': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
                'task/linux-debug': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
                'task/linux64': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
                'task/linux64-debug': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
            },
        }],
        'build_name': 'linux',
        'build_type': 'opt',
        'interactive': False,
        'post-build': [],
        'when': {},
        'additional-parameters': {},
    }, {
        'task': 'task/linux-debug',
        'dependents': [{
            'allowed_build_tasks': {
                'task/linux': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
                'task/linux-debug': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
                'task/linux64': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
                'task/linux64-debug': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
            },
        }],
        'build_name': 'linux',
        'build_type': 'debug',
        'interactive': False,
        'post-build': [],
        'when': {},
        'additional-parameters': {},
    }]

    result, triggers = parse_commit(commit, jobs)
    self.assertEqual(expected, result)

def test_some_test_tasks_restricted(self):
    '''
    This test covers the case where a test suite is only allowed to run
    against some of the requested build tasks.
    '''
    commit = 'try: -b do -p all -u all -t none'
    jobs = {
        'flags': {
            'builds': ['linux', 'linux64'],
            'tests': ['web-platform-tests'],
        },
        'builds': {
            'linux': {
                'types': {
                    'opt': {'task': 'task/linux'},
                    'debug': {'task': 'task/linux-debug'},
                },
            },
        },
        'tests': {
            'web-platform-tests': {
                'allowed_build_tasks': {
                    'task/linux': {
                        'task': 'task/web-platform-tests',
                    },
                },
            },
        },
    }
    expected = [{
        'task': 'task/linux-debug',
        'dependents': [],
        'additional-parameters': {},
        'post-build': [],
        'build_name': 'linux',
        'build_type': 'debug',
        'interactive': False,
        'when': {},
    }, {
        'task': 'task/linux',
        'dependents': [{
            'allowed_build_tasks': {
                'task/linux': {
                    'task': 'task/web-platform-tests',
                    'unittest_try_name': 'web-platform-tests',
                },
            },
        }],
        'additional-parameters': {},
        'post-build': [],
        'build_name': 'linux',
        'build_type': 'opt',
        'interactive': False,
        'when': {},
    }]

    result, triggers = parse_commit(commit, jobs)
    self.assertEqual(expected, result)

def test_specific_test_platforms_with_specific_platform(self):
    '''
    This test case covers the platform specific test exclusion options.
    '''
    commit = 'try: -b od -p win32 -u mochitest[windows] -t none'
    jobs = {
        'flags': {
            'builds': ['linux', 'win32'],
            'tests': ['web-platform-tests', 'mochitest'],
        },
        'builds': {
            'linux': {
                'types': {
                    'opt': {'task': 'task/linux'},
                    'debug': {'task': 'task/linux-debug'},
                },
            },
            'win32': {
                'platforms': ['windows'],
                'types': {
                    'opt': {'task': 'task/win32'},
                },
            },
        },
        'tests': {
            'web-platform-tests': {
                'allowed_build_tasks': {
                    'task/linux': {'task': 'task/web-platform-tests'},
                    'task/win32': {'task': 'task/web-platform-tests'},
                },
            },
            'mochitest': {
                'allowed_build_tasks': {
                    'task/linux-debug': {'task': 'task/mochitest'},
                    'task/win32': {'task': 'task/mochitest'},
                },
            },
        },
    }
    expected = [{
        'task': 'task/win32',
        'dependents': [{
            'allowed_build_tasks': {
                'task/linux-debug': {
                    'task': 'task/mochitest',
                    'unittest_try_name': 'mochitest',
                },
                'task/win32': {
                    'task': 'task/mochitest',
                    'unittest_try_name': 'mochitest',
                },
            },
        }],
        'additional-parameters': {},
        'post-build': [],
        'build_name': 'win32',
        'build_type': 'opt',
        'interactive': False,
        'when': {},
    }]

    result, triggers = parse_commit(commit, jobs)
    self.assertEqual(expected, result)