def test_get_changed_files(self):
    """Get_changed_files correctly gets the list of changed files in a push.
    This tests against the production hg.mozilla.org so that it will detect
    any changes in the format of the returned data."""
    observed = files_changed.get_changed_files(
        PARAMS['head_repository'], PARAMS['head_rev'])
    self.assertEqual(sorted(observed), FILES_CHANGED)
def get_non_suite_changed_files(repository, revision):
    """
    Returns the list of changed files from comm- repository (no prefixing)
    with suite/** and editor/** files removed.
    """
    all_changed = get_changed_files(repository, revision)
    return {path for path in all_changed if not is_suite(path)}
def test_get_changed_files(self):
    """Get_changed_files correctly gets the list of changed files in a push.
    This tests against the production hg.mozilla.org so that it will detect
    any changes in the format of the returned data."""
    result = files_changed.get_changed_files(
        PARAMS['head_repository'], PARAMS['head_rev'])
    self.assertEqual(sorted(result), FILES_CHANGED)
def scheduled_by_push(self, repository, revision):
    """Return the set of SCHEDULES components for the files changed in a push."""
    changed = files_changed.get_changed_files(repository, revision)
    mbo = MozbuildObject.from_environment()
    # the decision task has a sparse checkout, so, mozbuild_reader will use
    # a MercurialRevisionFinder with revision '.', which should be the same
    # as `revision`; in other circumstances, it will use a default reader
    reader = mbo.mozbuild_reader(config_mode='empty')
    components = set()
    for _path, file_info in reader.files_info(changed).items():
        components.update(file_info['SCHEDULES'].components)
    return components
def perfile_number_of_chunks(config, type):
    """Roughly estimate the number of chunks needed for a per-file test task.

    Counts the changed files in the push that look like test files for the
    given task ``type`` and divides by a fixed tests-per-chunk budget.
    Returns at least the ceiling of that ratio as an int.
    """
    # A rough estimate of how many chunks we need based on simple rules
    # for determining what a test file is.
    # TODO: Make this flexible based on coverage vs verify || test type
    tests_per_chunk = 10.0
    if type.startswith('test-verify-wpt'):
        file_patterns = ['testing/web-platform/tests/**']
    elif type.startswith('test-verify-gpu'):
        file_patterns = ['**/*webgl*/**/test_*',
                         '**/dom/canvas/**/test_*',
                         '**/gfx/tests/**/test_*',
                         '**/devtools/canvasdebugger/**/browser_*',
                         '**/reftest*/**']
    elif type.startswith('test-verify'):
        file_patterns = ['**/test_*',
                         '**/browser_*',
                         '**/crashtest*/**',
                         'js/src/test/test/',
                         'js/src/test/non262/',
                         'js/src/test/test262/']
    else:
        # BUGFIX: previously, a non test-verify type fell through with
        # file_patterns unbound and the loop below raised UnboundLocalError.
        # Match the sibling implementations: one chunk means "nothing to split".
        return 1
    changed_files = files_changed.get_changed_files(
        config.params.get('head_repository'),
        config.params.get('head_rev'))
    test_count = 0
    for pattern in file_patterns:
        for path in changed_files:
            if mozpackmatch(path, pattern):
                gpu = False
                if type == 'test-verify-e10s':
                    # file_patterns for test-verify will pick up some gpu tests,
                    # lets ignore in the case of reftest, we will not have any
                    # in the regular case
                    gpu_dirs = ['dom/canvas', 'gfx/tests',
                                'devtools/canvasdebugger', 'webgl']
                    for gdir in gpu_dirs:
                        if len(path.split(gdir)) > 1:
                            gpu = True
                if not gpu:
                    test_count += 1
    chunks = test_count / tests_per_chunk
    return int(math.ceil(chunks))
def perfile_number_of_chunks(is_try, try_task_config, head_repository, head_rev, type):
    """Estimate the chunk count for per-file (test-verify / test-coverage) tasks.

    Counts changed files matching the test-file patterns for ``type`` and
    divides by a per-chunk budget, returning the ceiling as an int.
    """
    if taskgraph.fast and not is_try:
        # When iterating on taskgraph changes, the exact number of chunks that
        # test-verify runs usually isn't important, so skip it when going fast.
        return 3

    tests_per_chunk = 10.0
    if type.startswith('test-coverage'):
        tests_per_chunk = 30.0

    if type.startswith('test-verify-wpt') or type.startswith('test-coverage-wpt'):
        file_patterns = [
            'testing/web-platform/tests/**',
            'testing/web-platform/mozilla/tests/**',
        ]
    elif type.startswith('test-verify-gpu') or type.startswith('test-coverage-gpu'):
        file_patterns = [
            '**/*webgl*/**/test_*',
            '**/dom/canvas/**/test_*',
            '**/gfx/tests/**/test_*',
            '**/devtools/canvasdebugger/**/browser_*',
            '**/reftest*/**',
        ]
    elif type.startswith('test-verify') or type.startswith('test-coverage'):
        file_patterns = [
            '**/test_*',
            '**/browser_*',
            '**/crashtest*/**',
            'js/src/tests/test/**',
            'js/src/tests/non262/**',
            'js/src/tests/test262/**',
        ]
    else:
        # Returning 0 means no tests to run, this captures non test-verify tasks
        return 1

    changed_files = set()
    specified_files = []
    if try_task_config:
        specified_files = try_task_config.split(":")

    if is_try:
        changed_files.update(files_changed.get_locally_changed_files(GECKO))
    else:
        changed_files.update(files_changed.get_changed_files(head_repository, head_rev))
    changed_files.update(specified_files)

    test_count = 0
    for pattern in file_patterns:
        for path in changed_files:
            # TODO: consider running tests if a manifest changes
            if path.endswith('.list') or path.endswith('.ini'):
                continue
            if path.endswith('^headers^'):
                continue
            if not mozpackmatch(path, pattern):
                continue
            is_gpu_test = False
            if type in ('test-verify-e10s', 'test-coverage-e10s'):
                # file_patterns for test-verify will pick up some gpu tests, lets ignore
                # in the case of reftest, we will not have any in the regular case
                gpu_dirs = ['dom/canvas', 'gfx/tests', 'devtools/canvasdebugger', 'webgl']
                for gdir in gpu_dirs:
                    if len(path.split(gdir)) > 1:
                        is_gpu_test = True
            if not is_gpu_test:
                test_count += 1

    return int(math.ceil(test_count / tests_per_chunk))
def perfile_number_of_chunks(try_task_config, head_repository, head_rev, type):
    """Estimate the chunk count for per-file (test-verify / test-coverage) tasks.

    Prefers locally outgoing files from the GECKO checkout; falls back to the
    files changed in the push. Returns the ceiling of matched-tests divided by
    the per-chunk budget.
    """
    tests_per_chunk = 10.0
    if type.startswith('test-coverage'):
        tests_per_chunk = 30.0

    if type.startswith('test-verify-wpt') or type.startswith('test-coverage-wpt'):
        file_patterns = [
            'testing/web-platform/tests/**',
            'testing/web-platform/mozilla/tests/**',
        ]
    elif type.startswith('test-verify-gpu') or type.startswith('test-coverage-gpu'):
        file_patterns = [
            '**/*webgl*/**/test_*',
            '**/dom/canvas/**/test_*',
            '**/gfx/tests/**/test_*',
            '**/devtools/canvasdebugger/**/browser_*',
            '**/reftest*/**',
        ]
    elif type.startswith('test-verify') or type.startswith('test-coverage'):
        file_patterns = [
            '**/test_*',
            '**/browser_*',
            '**/crashtest*/**',
            'js/src/test/test/**',
            'js/src/test/non262/**',
            'js/src/test/test262/**',
        ]
    else:
        # Returning 0 means no tests to run, this captures non test-verify tasks
        return 1

    changed_files = set()
    specified_files = []
    if try_task_config:
        specified_files = try_task_config.split(":")

    try:
        vcs = get_repository_object(GECKO)
        changed_files.update(vcs.get_outgoing_files('AM'))
    except InvalidRepoPath:
        vcs = None
    except CalledProcessError:
        return 0

    if not changed_files:
        changed_files.update(files_changed.get_changed_files(head_repository, head_rev))
    changed_files.update(specified_files)

    test_count = 0
    for pattern in file_patterns:
        for path in changed_files:
            # TODO: consider running tests if a manifest changes
            if path.endswith('.list') or path.endswith('.ini'):
                continue
            if not mozpackmatch(path, pattern):
                continue
            is_gpu_test = False
            if type in ('test-verify-e10s', 'test-coverage-e10s'):
                # file_patterns for test-verify will pick up some gpu tests, lets ignore
                # in the case of reftest, we will not have any in the regular case
                gpu_dirs = ['dom/canvas', 'gfx/tests', 'devtools/canvasdebugger', 'webgl']
                for gdir in gpu_dirs:
                    if len(path.split(gdir)) > 1:
                        is_gpu_test = True
            if not is_gpu_test:
                test_count += 1

    return int(math.ceil(test_count / tests_per_chunk))
def get_changed_dirs(self, repo, rev):
    """Return the set of non-empty directory names containing changed files."""
    dirnames = (mozpath.dirname(path)
                for path in files_changed.get_changed_files(repo, rev))
    # Filter out empty directories (from files modified in the root).
    # Otherwise all tasks would be scheduled.
    return {d for d in dirnames if d}
def perfile_number_of_chunks(is_try, try_task_config, head_repository, head_rev, type):
    """Estimate the chunk count for per-file (test-verify / test-coverage) tasks.

    Combines paths specified in the try task config (a JSON mapping of suite
    to path lists) with the changed files for the push, counts those matching
    the test-file patterns for ``type``, and divides by a per-chunk budget.
    On try, always returns at least one chunk.
    """
    if taskgraph.fast and not is_try:
        # When iterating on taskgraph changes, the exact number of chunks that
        # test-verify runs usually isn't important, so skip it when going fast.
        return 3

    tests_per_chunk = 10.0
    if type.startswith("test-coverage"):
        tests_per_chunk = 30.0

    if type.startswith("test-verify-wpt") or type.startswith("test-coverage-wpt"):
        file_patterns = [
            "testing/web-platform/tests/**",
            "testing/web-platform/mozilla/tests/**",
        ]
    elif type.startswith("test-verify-gpu") or type.startswith("test-coverage-gpu"):
        file_patterns = [
            "**/*webgl*/**/test_*",
            "**/dom/canvas/**/test_*",
            "**/gfx/tests/**/test_*",
            "**/devtools/canvasdebugger/**/browser_*",
            "**/reftest*/**",
        ]
    elif type.startswith("test-verify") or type.startswith("test-coverage"):
        file_patterns = [
            "**/test_*",
            "**/browser_*",
            "**/crashtest*/**",
            "js/src/tests/test/**",
            "js/src/tests/non262/**",
            "js/src/tests/test262/**",
        ]
    else:
        # Returning 0 means no tests to run, this captures non test-verify tasks
        return 1

    changed_files = set()
    if try_task_config:
        suite_to_paths = json.loads(try_task_config)
        changed_files.update(itertools.chain.from_iterable(suite_to_paths.values()))

    if is_try:
        changed_files.update(files_changed.get_locally_changed_files(GECKO))
    else:
        changed_files.update(files_changed.get_changed_files(head_repository, head_rev))

    test_count = 0
    for pattern in file_patterns:
        for path in changed_files:
            # TODO: consider running tests if a manifest changes
            if path.endswith((".list", ".ini", "^headers^")):
                continue
            if not mozpackmatch(path, pattern):
                continue
            is_gpu_test = False
            if type in ("test-verify-e10s", "test-coverage-e10s"):
                # file_patterns for test-verify will pick up some gpu tests, lets ignore
                # in the case of reftest, we will not have any in the regular case
                gpu_dirs = ["dom/canvas", "gfx/tests", "devtools/canvasdebugger", "webgl"]
                for gdir in gpu_dirs:
                    if len(path.split(gdir)) > 1:
                        is_gpu_test = True
            if not is_gpu_test:
                test_count += 1

    chunks = int(math.ceil(test_count / tests_per_chunk))
    # Never return 0 chunks on try, so that per-file tests can be pushed to try with
    # an explicit path, and also so "empty" runs can be checked on try.
    if is_try and chunks == 0:
        chunks = 1
    return chunks