def check(params, file_patterns):
    """Determine whether any of the files changed in the indicated push to
    https://hg.mozilla.org match any of the given file patterns."""
    repository = params.get("head_repository")
    revision = params.get("head_rev")
    if not repository or not revision:
        logger.warning(
            "Missing `head_repository` or `head_rev` parameters; "
            "assuming all files have changed"
        )
        return True

    changed_files = get_changed_files(repository, revision)

    if "comm_head_repository" in params:
        comm_repository = params.get("comm_head_repository")
        comm_revision = params.get("comm_head_rev")
        if not comm_revision:
            logger.warning(
                "Missing `comm_head_rev` parameters; "
                "assuming all files have changed"
            )
            return True

        # comm-* files are tracked relative to the "comm" subdirectory.
        changed_files |= {
            join_path("comm", changed)
            for changed in get_changed_files(comm_repository, comm_revision)
        }

    # A single pattern matching a single changed file is enough.
    return any(
        mozpackmatch(path, pattern)
        for pattern in file_patterns
        for path in changed_files
    )
def should_run(task):
    """Return True if *task* should be scheduled for the current push."""
    # Old style build or test task that doesn't define conditions. Always runs.
    if 'when' not in task:
        return True

    # Command line override to not filter.
    if params['ignore_conditions']:
        return True

    when = task['when']

    # If the task defines file patterns and we have a set of changed
    # files to compare against, only run if a file pattern matches one
    # of the changed files.
    file_patterns = when.get('file_patterns', None)
    if not file_patterns or not changed_files:
        return True

    for pattern in file_patterns:
        # Lazily scan the changed files; stop at the first match.
        hit = next((path for path in changed_files
                    if mozpackmatch(path, pattern)), None)
        if hit is not None:
            sys.stderr.write(
                'scheduling %s because pattern %s '
                'matches %s\n' % (task['task'], pattern, hit))
            return True

    # No file patterns matched. Discard task.
    sys.stderr.write(
        'discarding %s because no relevant files changed\n' % task['task'])
    return False
def should_run(task):
    """Decide whether the given task definition applies to this push."""
    # Old style build or test task that doesn't define conditions. Always runs.
    if 'when' not in task:
        return True

    # Command line override to not filter.
    if params['ignore_conditions']:
        return True

    when = task['when']

    # If the task defines file patterns and we have a set of changed
    # files to compare against, only run if a file pattern matches one
    # of the changed files.
    file_patterns = when.get('file_patterns', None)
    if file_patterns and changed_files:
        for pattern in file_patterns:
            for path in changed_files:
                if not mozpackmatch(path, pattern):
                    continue
                sys.stderr.write('scheduling %s because pattern %s '
                                 'matches %s\n' % (task['task'], pattern,
                                                   path))
                return True

        # No file patterns matched. Discard task.
        sys.stderr.write('discarding %s because no relevant files changed\n' %
                         task['task'])
        return False

    return True
def should_run(task):
    """Determine whether *task* should be scheduled for this push.

    Returns True for old-style tasks that define no `when` conditions,
    and for tasks whose `file_patterns` (or the task definition itself)
    match at least one changed file.
    """
    # Old style build or test task that doesn't define conditions. Always runs.
    if 'when' not in task:
        return True

    when = task['when']

    # If the task defines file patterns and we have a set of changed
    # files to compare against, only run if a file pattern matches one
    # of the changed files.
    file_patterns = when.get('file_patterns', None)
    if file_patterns and changed_files:
        # Always consider changes to the task definition itself.
        # Build a new list rather than appending: appending would mutate
        # the list inside the task's own `when` dict, accumulating a
        # duplicate entry every time this function is evaluated.
        file_patterns = file_patterns + [
            'testing/taskcluster/{task}'.format(task=task['task'])]
        for pattern in file_patterns:
            for path in changed_files:
                if mozpackmatch(path, pattern):
                    logger.debug(
                        'scheduling {task} because pattern {pattern} '
                        'matches {path}'.format(
                            task=task['task'],
                            pattern=pattern,
                            path=path,
                        ))
                    return True

        # No file patterns matched. Discard task.
        # (The unused pattern/path format kwargs were dropped; the message
        # never referenced them.)
        logger.debug(
            'discarding {task} because no relevant files changed'.format(
                task=task['task']))
        return False

    return True
def should_run(task):
    """Decide whether *task* is relevant to the files changed in this push.

    Returns True when the task defines no `when` conditions or when one of
    its `file_patterns` (or the task definition file itself) matches a
    changed file; returns False otherwise.
    """
    # Old style build or test task that doesn't define conditions. Always runs.
    if 'when' not in task:
        return True

    when = task['when']

    # If the task defines file patterns and we have a set of changed
    # files to compare against, only run if a file pattern matches one
    # of the changed files.
    file_patterns = when.get('file_patterns', None)
    if file_patterns and changed_files:
        # Always consider changes to the task definition itself.  Copy the
        # list first so we don't append into the task's own `when` data;
        # mutating it would add a duplicate entry on every evaluation.
        file_patterns = list(file_patterns)
        file_patterns.append('testing/taskcluster/{task}'.format(task=task['task']))
        for pattern in file_patterns:
            for path in changed_files:
                if mozpackmatch(path, pattern):
                    logger.debug('scheduling {task} because pattern {pattern} '
                                 'matches {path}'.format(
                                     task=task['task'],
                                     pattern=pattern,
                                     path=path,
                                 ))
                    return True

        # No file patterns matched. Discard task.  (Dropped the unused
        # pattern/path kwargs the format string never referenced.)
        logger.debug('discarding {task} because no relevant files changed'.format(
            task=task['task']))
        return False

    return True
def check(params, file_patterns):
    """Determine whether any of the files changed in the indicated push to
    https://hg.mozilla.org match any of the given file patterns."""
    repository = params.get('head_repository')
    revision = params.get('head_rev')
    if not (repository and revision):
        logger.warning("Missing `head_repository` or `head_rev` parameters; "
                       "assuming all files have changed")
        return True

    changed_files = get_changed_files(repository, revision)

    if 'comm_head_repository' in params:
        repository = params.get('comm_head_repository')
        revision = params.get('comm_head_rev')
        if not revision:
            logger.warning("Missing `comm_head_rev` parameters; "
                           "assuming all files have changed")
            return True

        # Changed files from the comm repository live under comm/.
        comm_changes = get_changed_files(repository, revision)
        changed_files |= {join_path("comm", name) for name in comm_changes}

    # Schedule as soon as any pattern matches any changed file.
    return any(mozpackmatch(path, pattern)
               for pattern in file_patterns
               for path in changed_files)
def perfile_number_of_chunks(config, type):
    """Estimate how many chunks a test-verify task needs for this push.

    A rough estimate of how many chunks we need based on simple rules
    for determining what a test file is.  Returns 1 for task types that
    are not per-file (non test-verify).
    """
    # TODO: Make this flexible based on coverage vs verify || test type
    tests_per_chunk = 10.0
    if type.startswith('test-verify-wpt'):
        file_patterns = ['testing/web-platform/tests/**']
    elif type.startswith('test-verify-gpu'):
        file_patterns = ['**/*webgl*/**/test_*',
                         '**/dom/canvas/**/test_*',
                         '**/gfx/tests/**/test_*',
                         '**/devtools/canvasdebugger/**/browser_*',
                         '**/reftest*/**']
    elif type.startswith('test-verify'):
        file_patterns = ['**/test_*',
                         '**/browser_*',
                         '**/crashtest*/**',
                         'js/src/test/test/',
                         'js/src/test/non262/',
                         'js/src/test/test262/']
    else:
        # Previously `file_patterns` was left unbound here, so any
        # non test-verify task type raised NameError below.  One chunk is
        # the harmless "nothing per-file to schedule" answer (consistent
        # with the other revisions of this function in this file).
        return 1

    changed_files = files_changed.get_changed_files(
        config.params.get('head_repository'),
        config.params.get('head_rev'))

    test_count = 0
    for pattern in file_patterns:
        for path in changed_files:
            if mozpackmatch(path, pattern):
                gpu = False
                if type == 'test-verify-e10s':
                    # file_patterns for test-verify will pick up some gpu tests, lets ignore
                    # in the case of reftest, we will not have any in the regular case
                    gpu_dirs = ['dom/canvas', 'gfx/tests',
                                'devtools/canvasdebugger', 'webgl']
                    gpu = any(gdir in path for gdir in gpu_dirs)
                if not gpu:
                    test_count += 1

    chunks = test_count / tests_per_chunk
    return int(math.ceil(chunks))
def perfile_number_of_chunks(is_try, try_task_config, head_repository,
                             head_rev, type):
    """Estimate how many chunks a per-file (verify/coverage) task needs."""
    if taskgraph.fast and not is_try:
        # When iterating on taskgraph changes, the exact number of chunks that
        # test-verify runs usually isn't important, so skip it when going fast.
        return 3

    tests_per_chunk = 30.0 if type.startswith('test-coverage') else 10.0

    if type.startswith(('test-verify-wpt', 'test-coverage-wpt')):
        file_patterns = ['testing/web-platform/tests/**',
                         'testing/web-platform/mozilla/tests/**']
    elif type.startswith(('test-verify-gpu', 'test-coverage-gpu')):
        file_patterns = ['**/*webgl*/**/test_*',
                         '**/dom/canvas/**/test_*',
                         '**/gfx/tests/**/test_*',
                         '**/devtools/canvasdebugger/**/browser_*',
                         '**/reftest*/**']
    elif type.startswith(('test-verify', 'test-coverage')):
        file_patterns = ['**/test_*',
                         '**/browser_*',
                         '**/crashtest*/**',
                         'js/src/tests/test/**',
                         'js/src/tests/non262/**',
                         'js/src/tests/test262/**']
    else:
        # Returning 0 means no tests to run, this captures non test-verify tasks
        return 1

    specified_files = try_task_config.split(":") if try_task_config else []

    changed_files = set()
    if is_try:
        changed_files.update(files_changed.get_locally_changed_files(GECKO))
    else:
        changed_files.update(files_changed.get_changed_files(head_repository,
                                                             head_rev))
    changed_files.update(specified_files)

    gpu_dirs = ['dom/canvas', 'gfx/tests', 'devtools/canvasdebugger', 'webgl']
    test_count = 0
    for pattern in file_patterns:
        for path in changed_files:
            # TODO: consider running tests if a manifest changes
            if path.endswith(('.list', '.ini', '^headers^')):
                continue
            if not mozpackmatch(path, pattern):
                continue
            if type in ('test-verify-e10s', 'test-coverage-e10s'):
                # file_patterns for test-verify will pick up some gpu tests,
                # lets ignore in the case of reftest, we will not have any in
                # the regular case
                if any(gdir in path for gdir in gpu_dirs):
                    continue
            test_count += 1

    return int(math.ceil(test_count / tests_per_chunk))
def perfile_number_of_chunks(try_task_config, head_repository, head_rev, type):
    """Estimate how many chunks a per-file (verify/coverage) task needs."""
    tests_per_chunk = 30.0 if type.startswith('test-coverage') else 10.0

    if type.startswith(('test-verify-wpt', 'test-coverage-wpt')):
        file_patterns = ['testing/web-platform/tests/**',
                         'testing/web-platform/mozilla/tests/**']
    elif type.startswith(('test-verify-gpu', 'test-coverage-gpu')):
        file_patterns = ['**/*webgl*/**/test_*',
                         '**/dom/canvas/**/test_*',
                         '**/gfx/tests/**/test_*',
                         '**/devtools/canvasdebugger/**/browser_*',
                         '**/reftest*/**']
    elif type.startswith(('test-verify', 'test-coverage')):
        file_patterns = ['**/test_*',
                         '**/browser_*',
                         '**/crashtest*/**',
                         'js/src/test/test/**',
                         'js/src/test/non262/**',
                         'js/src/test/test262/**']
    else:
        # Returning 0 means no tests to run, this captures non test-verify tasks
        return 1

    specified_files = try_task_config.split(":") if try_task_config else []

    # Prefer locally outgoing changes when a repository checkout is available.
    changed_files = set()
    try:
        vcs = get_repository_object(GECKO)
        changed_files.update(vcs.get_outgoing_files('AM'))
    except InvalidRepoPath:
        vcs = None
    except CalledProcessError:
        return 0

    if not changed_files:
        changed_files.update(files_changed.get_changed_files(head_repository,
                                                             head_rev))

    changed_files.update(specified_files)

    gpu_dirs = ['dom/canvas', 'gfx/tests', 'devtools/canvasdebugger', 'webgl']
    test_count = 0
    for pattern in file_patterns:
        for path in changed_files:
            # TODO: consider running tests if a manifest changes
            if path.endswith(('.list', '.ini')):
                continue
            if not mozpackmatch(path, pattern):
                continue
            if type in ('test-verify-e10s', 'test-coverage-e10s'):
                # file_patterns for test-verify will pick up some gpu tests,
                # lets ignore in the case of reftest, we will not have any in
                # the regular case
                if any(gdir in path for gdir in gpu_dirs):
                    continue
            test_count += 1

    return int(math.ceil(test_count / tests_per_chunk))
def is_excluded(check_path, file_patterns):
    """Return True if *check_path* matches any pattern in *file_patterns*."""
    return any(mozpackmatch(check_path, pattern) for pattern in file_patterns)
def perfile_number_of_chunks(is_try, try_task_config, head_repository,
                             head_rev, type):
    """Estimate how many chunks a per-file (verify/coverage) task needs."""
    if gecko_taskgraph.fast and not is_try:
        # When iterating on taskgraph changes, the exact number of chunks that
        # test-verify runs usually isn't important, so skip it when going fast.
        return 3

    tests_per_chunk = 30.0 if type.startswith("test-coverage") else 10.0

    if type.startswith(("test-verify-wpt", "test-coverage-wpt")):
        file_patterns = [
            "testing/web-platform/tests/**",
            "testing/web-platform/mozilla/tests/**",
        ]
    elif type.startswith(("test-verify-gpu", "test-coverage-gpu")):
        file_patterns = [
            "**/*webgl*/**/test_*",
            "**/dom/canvas/**/test_*",
            "**/gfx/tests/**/test_*",
            "**/devtools/canvasdebugger/**/browser_*",
            "**/reftest*/**",
        ]
    elif type.startswith(("test-verify", "test-coverage")):
        file_patterns = [
            "**/test_*",
            "**/browser_*",
            "**/crashtest*/**",
            "js/src/tests/test/**",
            "js/src/tests/non262/**",
            "js/src/tests/test262/**",
        ]
    else:
        # Returning 0 means no tests to run, this captures non test-verify tasks
        return 1

    changed_files = set()
    if try_task_config:
        # try_task_config is a JSON mapping of suite -> list of paths.
        suite_to_paths = json.loads(try_task_config)
        changed_files.update(itertools.chain.from_iterable(
            suite_to_paths.values()))

    if is_try:
        changed_files.update(files_changed.get_locally_changed_files(GECKO))
    else:
        changed_files.update(
            files_changed.get_changed_files(head_repository, head_rev))

    gpu_dirs = ["dom/canvas", "gfx/tests", "devtools/canvasdebugger", "webgl"]
    test_count = 0
    for pattern in file_patterns:
        for path in changed_files:
            # TODO: consider running tests if a manifest changes
            if path.endswith((".list", ".ini", "^headers^")):
                continue
            if not mozpackmatch(path, pattern):
                continue
            if type in ("test-verify-e10s", "test-coverage-e10s"):
                # file_patterns for test-verify will pick up some gpu tests,
                # lets ignore in the case of reftest, we will not have any in
                # the regular case
                if any(gdir in path for gdir in gpu_dirs):
                    continue
            test_count += 1

    chunks = int(math.ceil(test_count / tests_per_chunk))
    # Never return 0 chunks on try, so that per-file tests can be pushed to try
    # with an explicit path, and also so "empty" runs can be checked on try.
    if is_try and chunks == 0:
        chunks = 1
    return chunks