Example #1
    def artifacts(self, tree, job, artifact_job_class, rev):
        # Grab the second part of the repo name, which is generally how things
        # are indexed. Eg: 'integration/mozilla-inbound' is indexed as
        # 'mozilla-inbound'
        tree = tree.split('/')[1] if '/' in tree else tree

        # PGO builds are now known as "shippable" for all platforms but Android.
        # For macOS and linux32, shippable builds are equivalent to opt builds
        # and replace them on some trees. Additionally, we no longer produce
        # win64 opt builds on integration branches.
        if not job.startswith('android-'):
            if job.endswith('-pgo') or job in ('macosx64-opt', 'linux-opt',
                                               'win64-opt'):
                tree += '.shippable'
            if job.endswith('-pgo'):
                job = job.replace('-pgo', '-opt')

        namespace = '{trust_domain}.v2.{tree}.revision.{rev}.{product}.{job}'.format(
            trust_domain=artifact_job_class.trust_domain,
            rev=rev,
            tree=tree,
            product=artifact_job_class.product,
            job=job,
        )
        self.log(logging.INFO, 'artifact',
                 {'namespace': namespace},
                 'Searching Taskcluster index with namespace: {namespace}')
        try:
            taskId = find_task_id(namespace)
        except KeyError:
            # Not all revisions correspond to pushes that produce the job we
            # care about; and even those that do may not have completed yet.
            raise ValueError('Task for {namespace} does not exist (yet)!'.format(namespace=namespace))

        return taskId, list_artifacts(taskId)
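
For reference, here is a minimal sketch of the index namespace this method would build and look up. The tree/job/rev values are hypothetical, and the 'gecko' trust domain and 'firefox' product are used only as illustrative stand-ins for artifact_job_class.trust_domain and artifact_job_class.product.

# Hypothetical inputs; trust_domain and product normally come from
# artifact_job_class.
tree, job, rev = 'autoland', 'linux64-pgo', 'abcdef123456'
if not job.startswith('android-') and job.endswith('-pgo'):
    tree += '.shippable'
    job = job.replace('-pgo', '-opt')
namespace = 'gecko.v2.{tree}.revision.{rev}.firefox.{job}'.format(
    tree=tree, rev=rev, job=job)
# -> 'gecko.v2.autoland.shippable.revision.abcdef123456.firefox.linux64-opt'
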
Example #2
def get_failures(task_id):
    """Returns a dict containing properties containing a list of
    directories containing test failures and a separate list of
    individual test failures from the errorsummary.log artifact for
    the task.

    Calls the helper function munge_test_path to attempt to find an
    appropriate test path to pass to the task in
    MOZHARNESS_TEST_PATHS.  If no appropriate test path can be
    determined, nothing is returned.
    """
    re_test = re.compile(r'"test": "([^"]+)"')
    re_bad_test = re.compile(r'(Last test finished|'
                             r'Main app process exited normally|'
                             r'[(]SimpleTest/TestRunner.js[)]|'
                             r'remoteautomation.py|'
                             r'unknown test url|'
                             r'https?://localhost:\d+/\d+/\d+/.*[.]html)')
    re_extract_tests = [
        re.compile(r'(?:^[^:]+:)?(?:https?|file):[^ ]+/reftest/tests/([^ ]+)'),
        re.compile(r'(?:^[^:]+:)?(?:https?|file):[^:]+:[0-9]+/tests/([^ ]+)'),
        re.compile(r'xpcshell-[^ ]+\.ini:(.*)'),
    ]

    def munge_test_path(test_path):
        if re_bad_test.search(test_path):
            return None
        for r in re_extract_tests:
            m = r.match(test_path)
            if m:
                test_path = m.group(1)
                break
        return test_path

    dirs = set()
    tests = set()
    artifacts = list_artifacts(task_id)
    for artifact in artifacts:
        if 'name' in artifact and artifact['name'].endswith('errorsummary.log'):
            stream = get_artifact(task_id, artifact['name'])
            if stream:
                # Read all of the content from the stream and split out
                # the lines, since on macOS and Windows the first line
                # is empty.
                for line in stream.read().split('\n'):
                    line = line.strip()
                    match = re_test.search(line)
                    if match:
                        test_path = munge_test_path(match.group(1))
                        if test_path:
                            tests.add(test_path)
                            test_dir = os.path.dirname(test_path)
                            if test_dir:
                                dirs.add(test_dir)
    return {'dirs': sorted(dirs), 'tests': sorted(tests)}
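
As a quick illustration of the extraction step, the first pattern in re_extract_tests strips the test-harness URL prefix so that only the in-tree path is kept. The log line below is hypothetical.

import re

line = '"test": "http://10.0.2.2:8854/tests/reftest/tests/layout/reftests/bugs/123456-1.html"'
re_test = re.compile(r'"test": "([^"]+)"')
re_reftest = re.compile(r'(?:^[^:]+:)?(?:https?|file):[^ ]+/reftest/tests/([^ ]+)')
test_url = re_test.search(line).group(1)
print(re_reftest.match(test_url).group(1))
# -> layout/reftests/bugs/123456-1.html
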
Example #3
    def install_from_task(self, taskId, distdir):
        artifacts = list_artifacts(taskId)

        urls = []
        for artifact_name in self._artifact_job.find_candidate_artifacts(artifacts):
            # We can easily extract the task ID from the URL.  We can't easily
            # extract the build ID; we use the .ini files embedded in the
            # downloaded artifact for this.
            url = get_artifact_url(taskId, artifact_name)
            urls.append(url)
        if not urls:
            raise ValueError('Task {taskId} existed, but no artifacts found!'.format(taskId=taskId))
        for url in urls:
            if self.install_from_url(url, distdir):
                return 1
        return 0
Example #4
    def artifact_urls(self, tree, job, rev, download_symbols):
        try:
            artifact_job = get_job_details(job,
                                           log=self._log,
                                           download_symbols=download_symbols)
        except KeyError:
            self.log(logging.INFO, 'artifact', {'job': job},
                     'Unknown job {job}')
            raise KeyError("Unknown job")

        # Grab the second part of the repo name, which is generally how things
        # are indexed. Eg: 'integration/mozilla-inbound' is indexed as
        # 'mozilla-inbound'
        tree = tree.split('/')[1] if '/' in tree else tree

        namespace = 'gecko.v2.{tree}.revision.{rev}.{product}.{job}'.format(
            rev=rev,
            tree=tree,
            product=artifact_job.product,
            job=job,
        )
        self.log(logging.DEBUG, 'artifact', {'namespace': namespace},
                 'Searching Taskcluster index with namespace: {namespace}')
        try:
            taskId = find_task_id(namespace)
        except Exception:
            # Not all revisions correspond to pushes that produce the job we
            # care about; and even those that do may not have completed yet.
            raise ValueError(
                'Task for {namespace} does not exist (yet)!'.format(
                    namespace=namespace))

        artifacts = list_artifacts(taskId)

        urls = []
        for artifact_name in artifact_job.find_candidate_artifacts(artifacts):
            # We can easily extract the task ID from the URL.  We can't easily
            # extract the build ID; we use the .ini files embedded in the
            # downloaded artifact for this.  We could also use the uploaded
            # public/build/buildprops.json for this purpose.
            url = get_artifact_url(taskId, artifact_name)
            urls.append(url)
        if not urls:
            raise ValueError(
                'Task for {namespace} existed, but no artifacts found!'.format(
                    namespace=namespace))
        return urls
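
A hedged sketch of how a caller might handle the two failure modes this method raises (unknown job name vs. missing task or artifacts); artifact_cache and the argument values are hypothetical.

try:
    urls = artifact_cache.artifact_urls('integration/autoland', 'linux64-opt',
                                        'abcdef123456', download_symbols=False)
except KeyError:
    print('No artifact job definition is known for that job name.')
except ValueError as e:
    print(e)  # task not indexed (yet), or indexed but with no artifacts
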
Example #5
    def artifact_urls(self, tree, job, rev, download_symbols):
        try:
            artifact_job = get_job_details(job, log=self._log, download_symbols=download_symbols)
        except KeyError:
            self.log(logging.INFO, 'artifact', {'job': job},
                     'Unknown job {job}')
            raise KeyError("Unknown job")

        # Grab the second part of the repo name, which is generally how things
        # are indexed. Eg: 'integration/mozilla-inbound' is indexed as
        # 'mozilla-inbound'
        tree = tree.split('/')[1] if '/' in tree else tree

        namespace = 'gecko.v2.{tree}.revision.{rev}.{product}.{job}'.format(
            rev=rev,
            tree=tree,
            product=artifact_job.product,
            job=job,
        )
        self.log(logging.DEBUG, 'artifact',
                 {'namespace': namespace},
                 'Searching Taskcluster index with namespace: {namespace}')
        try:
            taskId = find_task_id(namespace)
        except Exception:
            # Not all revisions correspond to pushes that produce the job we
            # care about; and even those that do may not have completed yet.
            raise ValueError('Task for {namespace} does not exist (yet)!'.format(namespace=namespace))

        artifacts = list_artifacts(taskId)

        urls = []
        for artifact_name in artifact_job.find_candidate_artifacts(artifacts):
            # We can easily extract the task ID from the URL.  We can't easily
            # extract the build ID; we use the .ini files embedded in the
            # downloaded artifact for this.  We could also use the uploaded
            # public/build/buildprops.json for this purpose.
            url = get_artifact_url(taskId, artifact_name)
            urls.append(url)
        if not urls:
            raise ValueError('Task for {namespace} existed, but no artifacts found!'.format(namespace=namespace))
        return urls
Example #6
def get_failures(task_id):
    """Returns a dict containing properties containing a list of
    directories containing test failures and a separate list of
    individual test failures from the errorsummary.log artifact for
    the task.

    Calls the helper function munge_test_path to attempt to find an
    appropriate test path to pass to the task in
    MOZHARNESS_TEST_PATHS.  If no appropriate test path can be
    determined, nothing is returned.
    """
    re_bad_tests = [
        re.compile(r'Last test finished'),
        re.compile(r'LeakSanitizer'),
        re.compile(r'Main app process exited normally'),
        re.compile(r'ShutdownLeaks'),
        re.compile(r'[(]SimpleTest/TestRunner.js[)]'),
        re.compile(r'automation.py'),
        re.compile(r'https?://localhost:\d+/\d+/\d+/.*[.]html'),
        re.compile(r'jsreftest'),
        re.compile(r'leakcheck'),
        re.compile(r'mozrunner-startup'),
        re.compile(r'pid: '),
        re.compile(r'remoteautomation.py'),
        re.compile(r'unknown test url'),
    ]
    re_extract_tests = [
        re.compile(
            r'"test": "(?:[^:]+:)?(?:https?|file):[^ ]+/reftest/tests/([^ "]+)'
        ),
        re.compile(
            r'"test": "(?:[^:]+:)?(?:https?|file):[^:]+:[0-9]+/tests/([^ "]+)'
        ),
        re.compile(r'xpcshell-?[^ "]*\.ini:([^ "]+)'),
        re.compile(r'/tests/([^ "]+) - finished .*'),
        re.compile(r'"test": "([^ "]+)"'),
        re.compile(
            r'"message": "Error running command run_test with arguments '
            r'[(]<wptrunner[.]wpttest[.]TestharnessTest ([^>]+)>'),
        re.compile(r'"message": "TEST-[^ ]+ [|] ([^ "]+)[^|]*[|]')
    ]

    def munge_test_path(line):
        test_path = None
        for r in re_bad_tests:
            if r.search(line):
                return None
        for r in re_extract_tests:
            m = r.search(line)
            if m:
                test_path = m.group(1)
                break
        return test_path

    dirs = set()
    tests = set()
    artifacts = list_artifacts(task_id)
    for artifact in artifacts:
        if 'name' not in artifact or not artifact['name'].endswith(
                'errorsummary.log'):
            continue

        stream = get_artifact(task_id, artifact['name'])
        if not stream:
            continue

        # The number of tasks created is determined by the
        # `times` value and the number of distinct tests and
        # directories as: times * (1 + len(tests) + len(dirs)).
        # Since the maximum value of `times` specifiable in the
        # Treeherder UI is 100, the number of tasks created can
        # reach a very large value depending on the number of
        # unique tests.  During testing, it was found that 10
        # distinct tests were sufficient to cause the action task
        # to exceed the maxRunTime of 1800 seconds resulting in it
        # being aborted.  We limit the number of distinct tests
        # and thereby the number of distinct test directories to a
        # maximum of 5 to keep the action task from timing out.

        for line in stream.read().split('\n'):
            test_path = munge_test_path(line.strip())

            if test_path:
                tests.add(test_path)
                test_dir = os.path.dirname(test_path)
                if test_dir:
                    dirs.add(test_dir)

            if len(tests) > 4:
                break

    return {'dirs': sorted(dirs), 'tests': sorted(tests)}
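
The comment about task counts reduces to simple arithmetic: with the Treeherder maximum of times = 100 and the cap of 5 distinct tests (and therefore at most 5 distinct directories) enforced by the len(tests) > 4 check, the worst case is:

times = 100                   # maximum value selectable in the Treeherder UI
tasks = times * (1 + 5 + 5)   # times * (1 + len(tests) + len(dirs))
print(tasks)                  # -> 1100 tasks in the worst case
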
Example #7
def get_failures(task_id):
    """Returns a dict containing properties containing a list of
    directories containing test failures and a separate list of
    individual test failures from the errorsummary.log artifact for
    the task.

    Calls the helper function munge_test_path to attempt to find an
    appropriate test path to pass to the task in
    MOZHARNESS_TEST_PATHS.  If no appropriate test path can be
    determined, nothing is returned.
    """
    re_test = re.compile(r'"test": "([^"]+)"')
    re_bad_test = re.compile(r'(Last test finished|'
                             r'Main app process exited normally|'
                             r'[(]SimpleTest/TestRunner.js[)]|'
                             r'remoteautomation.py|'
                             r'unknown test url|'
                             r'https?://localhost:\d+/\d+/\d+/.*[.]html)')
    re_extract_tests = [
        re.compile(r'(?:^[^:]+:)?(?:https?|file):[^ ]+/reftest/tests/([^ ]+)'),
        re.compile(r'(?:^[^:]+:)?(?:https?|file):[^:]+:[0-9]+/tests/([^ ]+)'),
        re.compile(r'xpcshell-[^ ]+\.ini:(.*)'),
    ]

    def munge_test_path(test_path):
        if re_bad_test.search(test_path):
            return None
        for r in re_extract_tests:
            m = r.match(test_path)
            if m:
                test_path = m.group(1)
                break
        return test_path

    dirs = set()
    tests = set()
    artifacts = list_artifacts(task_id)
    for artifact in artifacts:
        if 'name' in artifact and artifact['name'].endswith(
                'errorsummary.log'):
            stream = get_artifact(task_id, artifact['name'])
            if stream:
                # Read all of the content from the stream and split out
                # the lines, since on macOS and Windows the first line
                # is empty.
                for line in stream.read().split('\n'):
                    if len(tests) > 4:
                        # The number of tasks created is determined by
                        # the `times` value and the number of distinct
                        # tests and directories as:
                        # times * (1 + len(tests) + len(dirs)).
                        # Since the maximum value of `times`
                        # specifiable in the Treeherder UI is 100, the
                        # number of tasks created can reach a very
                        # large value depending on the number of
                        # unique tests.  During testing, it was found
                        # that 10 distinct tests were sufficient to
                        # cause the action task to exceed the
                        # maxRunTime of 1800 seconds resulting in it
                        # being aborted.  We limit the number of
                        # distinct tests and thereby the number of
                        # distinct test directories to a maximum of 5
                        # to keep the action task from timing out.
                        break
                    line = line.strip()
                    match = re_test.search(line)
                    if match:
                        test_path = munge_test_path(match.group(1))
                        if test_path:
                            tests.add(test_path)
                            test_dir = os.path.dirname(test_path)
                            if test_dir:
                                dirs.add(test_dir)
            break
    return {'dirs': sorted(dirs), 'tests': sorted(tests)}
Example #8
def get_failures(task_id):
    """Returns a dict containing properties containing a list of
    directories containing test failures and a separate list of
    individual test failures from the errorsummary.log artifact for
    the task.

    Calls the helper function munge_test_path to attempt to find an
    appropriate test path to pass to the task in
    MOZHARNESS_TEST_PATHS.  If no appropriate test path can be
    determined, nothing is returned.
    """

    def re_compile_list(*lst):
        # Ideally we'd just use rb"" literals and avoid the encode, but
        # this file needs to be importable in python2 for now.
        return [re.compile(s.encode("utf-8")) for s in lst]

    re_bad_tests = re_compile_list(
        r"Last test finished",
        r"LeakSanitizer",
        r"Main app process exited normally",
        r"ShutdownLeaks",
        r"[(]SimpleTest/TestRunner.js[)]",
        r"automation.py",
        r"https?://localhost:\d+/\d+/\d+/.*[.]html",
        r"jsreftest",
        r"leakcheck",
        r"mozrunner-startup",
        r"pid: ",
        r"RemoteProcessMonitor",
        r"unknown test url",
    )
    re_extract_tests = re_compile_list(
        r'"test": "(?:[^:]+:)?(?:https?|file):[^ ]+/reftest/tests/([^ "]+)',
        r'"test": "(?:[^:]+:)?(?:https?|file):[^:]+:[0-9]+/tests/([^ "]+)',
        r'xpcshell-?[^ "]*\.ini:([^ "]+)',
        r'/tests/([^ "]+) - finished .*',
        r'"test": "([^ "]+)"',
        r'"message": "Error running command run_test with arguments '
        r"[(]<wptrunner[.]wpttest[.]TestharnessTest ([^>]+)>",
        r'"message": "TEST-[^ ]+ [|] ([^ "]+)[^|]*[|]',
    )

    def munge_test_path(line):
        test_path = None
        for r in re_bad_tests:
            if r.search(line):
                return None
        for r in re_extract_tests:
            m = r.search(line)
            if m:
                test_path = m.group(1)
                break
        return test_path

    dirs = set()
    tests = set()
    artifacts = list_artifacts(task_id)
    for artifact in artifacts:
        if "name" not in artifact or not artifact["name"].endswith("errorsummary.log"):
            continue

        stream = get_artifact(task_id, artifact["name"])
        if not stream:
            continue

        # The number of tasks created is determined by the
        # `times` value and the number of distinct tests and
        # directories as: times * (1 + len(tests) + len(dirs)).
        # Since the maximum value of `times` specifiable in the
        # Treeherder UI is 100, the number of tasks created can
        # reach a very large value depending on the number of
        # unique tests.  During testing, it was found that 10
        # distinct tests were sufficient to cause the action task
        # to exceed the maxRunTime of 1800 seconds resulting in it
        # being aborted.  We limit the number of distinct tests
        # and thereby the number of distinct test directories to a
        # maximum of 5 to keep the action task from timing out.

        # We handle the stream as raw bytes because it may contain invalid
        # UTF-8 characters in portions other than those containing the error
        # messages we're looking for.
        for line in stream.read().split(b"\n"):
            test_path = munge_test_path(line.strip())

            if test_path:
                tests.add(test_path.decode("utf-8"))
                test_dir = os.path.dirname(test_path)
                if test_dir:
                    dirs.add(test_dir.decode("utf-8"))

            if len(tests) > 4:
                break

    return {"dirs": sorted(dirs), "tests": sorted(tests)}