Example #1
    def __init__(self, config):
        self.populate_logger()

        self.config = config

        mozinfo.find_and_update_from_json(config.topobjdir)

        # Python 2.6 doesn't allow unicode keys to be used for keyword
        # arguments. This gross hack works around the problem until we
        # rid ourselves of 2.6.
        self.info = {}
        for k, v in mozinfo.info.items():
            if isinstance(k, unicode):
                k = k.encode('ascii')
            self.info[k] = v

        self._libs = OrderedDefaultDict(list)
        self._binaries = OrderedDict()
        self._linkage = []
        self._static_linking_shared = set()

        # Keep track of external paths (third party build systems), starting
        # from what we run a subconfigure in. We'll eliminate some directories
        # as we traverse them with moz.build (e.g. js/src).
        subconfigures = os.path.join(self.config.topobjdir, 'subconfigures')
        paths = []
        if os.path.exists(subconfigures):
            paths = open(subconfigures).read().splitlines()
        self._external_paths = set(mozpath.normsep(d) for d in paths)
        # Add security/nss manually, since it doesn't have a subconfigure.
        self._external_paths.add('security/nss')
Example #2
def get_default_debugger_name(search=DebuggerSearch.OnlyFirst):
    '''
    Get the debugger name for the default debugger on current platform.

    :param search: If specified, stops looking for the debugger if the
     default one is not found (|DebuggerSearch.OnlyFirst|) or keeps
     looking for other compatible debuggers (|DebuggerSearch.KeepLooking|).
    '''

    mozinfo.find_and_update_from_json()
    os = mozinfo.info['os']

    # Find out which debuggers are preferred for use on this platform.
    debuggerPriorities = _DEBUGGER_PRIORITIES[
        os if os in _DEBUGGER_PRIORITIES else 'unknown']

    # Finally get the debugger information.
    for debuggerName in debuggerPriorities:
        debuggerPath = get_debugger_path(debuggerName)
        if debuggerPath:
            return debuggerName
        elif search != DebuggerSearch.KeepLooking:
            return None

    return None
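A minimal standalone sketch of the no-argument pattern above, assuming only that mozinfo is importable: find_and_update_from_json() merges a discoverable mozinfo.json (if any) into mozinfo.info before the platform key is read, and quietly leaves the runtime-detected defaults in place when nothing is found (see Example #31).

import mozinfo

# Merge build metadata from mozinfo.json if one can be located; otherwise
# mozinfo.info keeps its runtime-detected values and the call returns None.
mozinfo.find_and_update_from_json()

platform = mozinfo.info['os']   # e.g. 'linux', 'win', 'mac'
print("debugger lookup would key off platform:", platform)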
Example #3
    def __init__(self, config):
        self.populate_logger()

        self.config = config

        mozinfo.find_and_update_from_json(config.topobjdir)

        # Python 2.6 doesn't allow unicode keys to be used for keyword
        # arguments. This gross hack works around the problem until we
        # rid ourselves of 2.6.
        self.info = {}
        for k, v in mozinfo.info.items():
            if isinstance(k, unicode):
                k = k.encode('ascii')
            self.info[k] = v

        self._libs = OrderedDefaultDict(list)
        self._binaries = OrderedDict()
        self._linkage = []
        self._static_linking_shared = set()

        # Keep track of external paths (third party build systems), starting
        # from what we run a subconfigure in. We'll eliminate some directories
        # as we traverse them with moz.build (e.g. js/src).
        subconfigures = os.path.join(self.config.topobjdir, 'subconfigures')
        paths = []
        if os.path.exists(subconfigures):
            paths = open(subconfigures).read().splitlines()
        self._external_paths = set(mozpath.normsep(d) for d in paths)
        # Add security/nss manually, since it doesn't have a subconfigure.
        self._external_paths.add('security/nss')
Example #4
def setup_argument_parser():
    import mozinfo
    import reftestcommandline

    global parser
    mozinfo.find_and_update_from_json(here)
    parser = reftestcommandline.DesktopArgumentsParser()
    return parser
Example #5
def update_mozinfo():
    """walk up directories to find mozinfo.json update the info"""
    path = SCRIPT_DIR
    dirs = set()
    while path != os.path.expanduser('~'):
        if path in dirs:
            break
        dirs.add(path)
        path = os.path.split(path)[0]
    mozinfo.find_and_update_from_json(*dirs)
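The walk-up helpers in this and the following examples collect every ancestor of a starting directory (stopping at the home directory) and hand the whole set to a single call, which loads whichever candidate actually contains a mozinfo.json. A rough standalone equivalent, with an illustrative starting path and a root-directory stop condition instead of the SCRIPT_DIR/home-directory pair used above:

import os

import mozinfo

start = os.path.dirname(os.path.abspath(__file__))
candidates = []
while start != os.path.dirname(start):   # stop at the filesystem root
    candidates.append(start)
    start = os.path.dirname(start)

# Returns the path of the mozinfo.json that was loaded, or None if no
# candidate directory contained one.
found = mozinfo.find_and_update_from_json(*candidates)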
Example #6
def update_mozinfo():
    """walk up directories to find mozinfo.json update the info"""
    path = os.path.abspath(os.path.realpath(os.path.dirname(__file__)))
    dirs = set()
    while path != os.path.expanduser('~'):
        if path in dirs:
            break
        dirs.add(path)
        path = os.path.split(path)[0]
    mozinfo.find_and_update_from_json(*dirs)
Example #7
def update_mozinfo():
    """walk up directories to find mozinfo.json update the info"""
    path = SCRIPT_DIR
    dirs = set()
    while path != os.path.expanduser('~'):
        if path in dirs:
            break
        dirs.add(path)
        path = os.path.split(path)[0]
    mozinfo.find_and_update_from_json(*dirs)
Example #8
def update_mozinfo():
    """walk up directories to find mozinfo.json update the info"""
    path = os.path.abspath(os.path.realpath(os.path.dirname(__file__)))
    dirs = set()
    while path != os.path.expanduser('~'):
        if path in dirs:
            break
        dirs.add(path)
        path = os.path.split(path)[0]
    mozinfo.find_and_update_from_json(*dirs)
Example #9
def setup_mochitest_argument_parser():
    import mozinfo
    mozinfo.find_and_update_from_json(here)
    app = 'generic'
    if mozinfo.info.get('buildapp') == 'mobile/android':
        app = 'android'

    from mochitest_options import MochitestArgumentParser
    global parser
    parser = MochitestArgumentParser(app=app)
    return parser
Example #10
def setup_argument_parser():
    import mozinfo
    import reftestcommandline

    global parser
    mozinfo.find_and_update_from_json(os.path.dirname(here))
    if mozinfo.info.get('buildapp') == 'mobile/android':
        parser = reftestcommandline.RemoteArgumentsParser()
    else:
        parser = reftestcommandline.DesktopArgumentsParser()
    return parser
Example #11
def setup_argument_parser():
    import mozinfo
    import reftestcommandline

    global parser
    mozinfo.find_and_update_from_json(here)
    if mozinfo.info.get('buildapp') == 'mobile/android':
        parser = reftestcommandline.RemoteArgumentsParser()
    else:
        parser = reftestcommandline.DesktopArgumentsParser()
    return parser
Example #12
def setup_argument_parser():
    import mozinfo
    mozinfo.find_and_update_from_json(here)
    global parser
    if mozinfo.info.get('buildapp') == 'mobile/android':
        import remotegtests
        parser = remotegtests.remoteGtestOptions()
    else:
        import rungtests
        parser = rungtests.gtestOptions()
    return parser
Example #13
def setup_argument_parser():
    import mozinfo
    mozinfo.find_and_update_from_json(here)
    app = 'generic'
    if mozinfo.info.get('buildapp') == 'mobile/android':
        app = 'android'

    from mochitest_options import MochitestArgumentParser
    global parser
    parser = MochitestArgumentParser(app=app)
    return parser
Example #14
    def __init__(self):
        os.chdir(SCRIPT_DIR)
        path = SCRIPT_DIR
        dirs = []
        while path != os.path.expanduser('~'):
            if path in dirs:
                break
            dirs.append(path)
            path = os.path.split(path)[0]

        mozinfo.find_and_update_from_json(*dirs)
Example #15
  def __init__(self):
    os.chdir(SCRIPT_DIR)
    path = SCRIPT_DIR
    dirs = []
    while path != os.path.expanduser('~'):
        if path in dirs:
            break
        dirs.append(path)
        path = os.path.split(path)[0]

    mozinfo.find_and_update_from_json(*dirs)
Example #16
    def _update_mozinfo(self, metadata_root):
        """Add extra build information from a mozinfo.json file in a parent
        directory"""
        path = metadata_root
        dirs = set()
        while path != os.path.expanduser('~'):
            if path in dirs:
                break
            dirs.add(str(path))
            path = os.path.split(path)[0]

        mozinfo.find_and_update_from_json(*dirs)
Example #17
  def update_mozinfo(self):
    """walk up directories to find mozinfo.json update the info"""
    # TODO: This should go in a more generic place, e.g. mozinfo

    path = SCRIPT_DIRECTORY
    dirs = set()
    while path != os.path.expanduser('~'):
        if path in dirs:
            break
        dirs.add(path)
        path = os.path.split(path)[0]
    mozinfo.find_and_update_from_json(*dirs)
Example #18
    def find_tests_for_verification(self, action, success=None):
        """
           For each file modified on this push, determine if the modified file
           is a test, by searching test manifests. Populate self.verify_suites
           with test files, organized by suite.

           This depends on test manifests, so can only run after test zips have
           been downloaded and extracted.
        """

        if self.config.get('verify') != True:
            return

        repository = os.environ.get("GECKO_HEAD_REPOSITORY")
        revision = os.environ.get("GECKO_HEAD_REV")
        if not repository or not revision:
            self.warning("unable to verify tests: no repo or revision!")
            return []

        def get_automationrelevance():
            response = self.load_json_url(url)
            return response

        dirs = self.query_abs_dirs()
        mozinfo.find_and_update_from_json(dirs['abs_test_install_dir'])
        e10s = self.config.get('e10s', False)
        mozinfo.update({"e10s": e10s})
        headless = self.config.get('headless', False)
        mozinfo.update({"headless": headless})
        # FIXME(emilio): Need to update test expectations.
        mozinfo.update({'stylo': True})
        mozinfo.update({'verify': True})
        self.info("Verification using mozinfo: %s" % str(mozinfo.info))

        # determine which files were changed on this push
        url = '%s/json-automationrelevance/%s' % (repository.rstrip('/'),
                                                  revision)
        contents = self.retry(get_automationrelevance,
                              attempts=2,
                              sleeptime=10)
        changed_files = set()
        for c in contents['changesets']:
            self.info(" {cset} {desc}".format(
                cset=c['node'][0:12],
                desc=c['desc'].splitlines()[0].encode('ascii', 'ignore')))
            changed_files |= set(c['files'])

        if self.config.get('verify_category') == "web-platform":
            self._find_wpt_tests(dirs, changed_files)
        else:
            self._find_misc_tests(dirs, changed_files)

        self.verify_downloaded = True
Example #19
    def update_mozinfo(self):
        """walk up directories to find mozinfo.json update the info"""
        # TODO: This should go in a more generic place, e.g. mozinfo

        path = SCRIPT_DIRECTORY
        dirs = set()
        while path != os.path.expanduser('~'):
            if path in dirs:
                break
            dirs.add(path)
            path = os.path.split(path)[0]
        mozinfo.find_and_update_from_json(*dirs)
Example #20
def setup_mochitest_argument_parser():
    import mozinfo

    mozinfo.find_and_update_from_json(here)
    app = "generic"
    if mozinfo.info.get("buildapp") == "mobile/android":
        app = "android"

    from mochitest_options import MochitestArgumentParser

    global parser
    parser = MochitestArgumentParser(app=app)
    add_global_arguments(parser)
    return parser
Example #21
 def test_find_and_update_file_suppress_exception(self):
     """Test that mozinfo.find_and_update_from_json suppresses
     an IOError exception if a False boolean value is
     provided as the only argument.
     """
     self.assertEqual(
         mozinfo.find_and_update_from_json(raise_exception=False), None)
Example #22
 def test_find_and_update_file(self):
     """Test that mozinfo.find_and_update_from_json can
     find mozinfo.json in a directory passed to it."""
     j = os.path.join(self.tempdir, "mozinfo.json")
     with open(j, "w") as f:
         f.write(json.dumps({"foo": "abcdefg"}))
     self.assertEqual(mozinfo.find_and_update_from_json(self.tempdir), j)
     self.assertEqual(mozinfo.info["foo"], "abcdefg")
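The same behaviour can be exercised outside a test class; a short sketch (the temporary directory and key name are arbitrary) showing that the call returns the path of the file it loaded and folds its keys into mozinfo.info:

import json
import os
import tempfile

import mozinfo

tmpdir = tempfile.mkdtemp()
path = os.path.join(tmpdir, "mozinfo.json")
with open(path, "w") as f:
    json.dump({"foo": "abcdefg"}, f)

loaded = mozinfo.find_and_update_from_json(tmpdir)
assert loaded == path
assert mozinfo.info["foo"] == "abcdefg"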
Example #23
    def __init__(self, config):
        self.populate_logger()

        self.config = config

        mozinfo.find_and_update_from_json(config.topobjdir)

        # Python 2.6 doesn't allow unicode keys to be used for keyword
        # arguments. This gross hack works around the problem until we
        # rid ourselves of 2.6.
        self.info = {}
        for k, v in mozinfo.info.items():
            if isinstance(k, unicode):
                k = k.encode("ascii")
            self.info[k] = v

        self._libs = {}
        self._final_libs = []
Example #24
    def __init__(self, config):
        self.populate_logger()

        self.config = config

        mozinfo.find_and_update_from_json(config.topobjdir)

        # Python 2.6 doesn't allow unicode keys to be used for keyword
        # arguments. This gross hack works around the problem until we
        # rid ourselves of 2.6.
        self.info = {}
        for k, v in mozinfo.info.items():
            if isinstance(k, unicode):
                k = k.encode('ascii')
            self.info[k] = v

        self._libs = {}
        self._final_libs = []
Example #25
def update_mozinfo(path=None):
    """Walk up directories to find mozinfo.json and update the info."""
    path = path or here
    dirs = set()
    while path != os.path.expanduser('~'):
        if path in dirs:
            break
        dirs.add(path)
        path = os.path.split(path)[0]

    return mozinfo.find_and_update_from_json(*dirs)
Example #26
def update_mozinfo(path=None):
    """Walk up directories to find mozinfo.json and update the info."""
    path = path or here
    dirs = set()
    while path != os.path.expanduser('~'):
        if path in dirs:
            break
        dirs.add(path)
        path = os.path.split(path)[0]

    return mozinfo.find_and_update_from_json(*dirs)
Example #27
    def __init__(self, config):
        self.populate_logger()

        self.config = config

        mozinfo.find_and_update_from_json(config.topobjdir)

        # Python 2.6 doesn't allow unicode keys to be used for keyword
        # arguments. This gross hack works around the problem until we
        # rid ourselves of 2.6.
        self.info = {}
        for k, v in mozinfo.info.items():
            if isinstance(k, unicode):
                k = k.encode('ascii')
            self.info[k] = v

        self._libs = OrderedDefaultDict(list)
        self._binaries = OrderedDict()
        self._linkage = []
        self._static_linking_shared = set()
Example #28
 def test_find_and_update_file_mozbuild(self):
     """Test that mozinfo.find_and_update_from_json can
     find mozinfo.json using the mozbuild module."""
     j = os.path.join(self.tempdir, "mozinfo.json")
     with open(j, "w") as f:
         f.write(json.dumps({"foo": "123456"}))
     m = mock.MagicMock()
     # Mock the value of MozbuildObject.from_environment().topobjdir.
     m.MozbuildObject.from_environment.return_value.topobjdir = self.tempdir
     with mock.patch.dict(sys.modules, {"mozbuild": m, "mozbuild.base": m}):
         self.assertEqual(mozinfo.find_and_update_from_json(), j)
     self.assertEqual(mozinfo.info["foo"], "123456")
Example #29
def get_default_debugger_name(search=DebuggerSearch.OnlyFirst):
    '''
    Get the debugger name for the default debugger on current platform.

    :param search: If specified, stops looking for the debugger if the
     default one is not found (|DebuggerSearch.OnlyFirst|) or keeps
     looking for other compatible debuggers (|DebuggerSearch.KeepLooking|).
    '''

    mozinfo.find_and_update_from_json()
    os = mozinfo.info['os']

    # Find out which debuggers are preferred for use on this platform.
    debuggerPriorities = _DEBUGGER_PRIORITIES[os if os in _DEBUGGER_PRIORITIES else 'unknown']

    # Finally get the debugger information.
    for debuggerName in debuggerPriorities:
        debuggerPath = find_executable(debuggerName)
        if debuggerPath:
            return debuggerName
        elif search != DebuggerSearch.KeepLooking:
            return None

    return None
Example #30
    def _query_cmd(self, test_types):
        if not self.binary_path:
            self.fatal("Binary path could not be determined")
            # And exit

        c = self.config
        run_file_name = "runtests.py"

        dirs = self.query_abs_dirs()
        abs_app_dir = self.query_abs_app_dir()
        str_format_values = {
            'binary_path': self.binary_path,
            'test_path': dirs["abs_wpttest_dir"],
            'test_install_path': dirs["abs_test_install_dir"],
            'abs_app_dir': abs_app_dir,
            'abs_work_dir': dirs["abs_work_dir"],
            'xre_path': self.xre_path,
        }

        cmd = [self.query_python_path('python'), '-u']
        cmd.append(os.path.join(dirs["abs_wpttest_dir"], run_file_name))

        mozinfo.find_and_update_from_json(dirs['abs_test_install_dir'])

        cmd += [
            "--log-raw=-",
            "--log-raw=%s" %
            os.path.join(dirs["abs_blob_upload_dir"], "wpt_raw.log"),
            "--log-wptreport=%s" %
            os.path.join(dirs["abs_blob_upload_dir"], "wptreport.json"),
            "--log-errorsummary=%s" %
            os.path.join(dirs["abs_blob_upload_dir"], "wpt_errorsummary.log"),
            "--binary=%s" % self.binary_path,
            "--symbols-path=%s" % self.symbols_path,
            "--stackwalk-binary=%s" % self.query_minidump_stackwalk(),
            "--stackfix-dir=%s" %
            os.path.join(dirs["abs_test_install_dir"], "bin"),
            "--run-by-dir=%i" % (3 if not mozinfo.info["asan"] else 0),
            "--no-pause-after-test",
            "--instrument-to-file=%s" %
            os.path.join(dirs["abs_blob_upload_dir"], "wpt_instruments.txt")
        ]

        if (self.is_android or "wdspec" in test_types
                or "fission.autostart=true" in c['extra_prefs']):
            processes = 1
        else:
            processes = 2
        cmd.append("--processes=%s" % processes)

        if self.is_android:
            cmd += [
                "--device-serial=%s" % self.device_serial,
                "--package-name=%s" % self.query_package_name()
            ]

        if mozinfo.info["os"] == "win" and mozinfo.info["os_version"] == "6.1":
            # On Windows 7 --install-fonts fails, so fall back to a Firefox-specific codepath
            self._install_fonts()
        else:
            cmd += ["--install-fonts"]

        for test_type in test_types:
            cmd.append("--test-type=%s" % test_type)

        if c['extra_prefs']:
            cmd.extend(['--setpref={}'.format(p) for p in c['extra_prefs']])

        if not c["e10s"]:
            cmd.append("--disable-e10s")
        if c["enable_webrender"]:
            cmd.append("--enable-webrender")

        if c["skip_timeout"]:
            cmd.append("--skip-timeout")

        for implementation_status in c["skip_implementation_status"]:
            cmd.append("--skip-implementation-status=%s" %
                       implementation_status)

        test_paths = set()
        if not (self.verify_enabled or self.per_test_coverage):
            mozharness_test_paths = json.loads(
                os.environ.get('MOZHARNESS_TEST_PATHS', '""'))
            if mozharness_test_paths:
                keys = ([
                    'web-platform-tests-%s' % test_type
                    for test_type in test_types
                ] + ['web-platform-tests'])
                for key in keys:
                    paths = mozharness_test_paths.get(key, [])
                    for path in paths:
                        if not path.startswith("/"):
                            # Assume this is a filesystem path rather than a test id
                            path = os.path.relpath(path,
                                                   'testing/web-platform')
                            if ".." in path:
                                self.fatal("Invalid WPT path: {}".format(path))
                            path = os.path.join(dirs["abs_wpttest_dir"], path)
                        test_paths.add(path)
            else:
                for opt in ["total_chunks", "this_chunk"]:
                    val = c.get(opt)
                    if val:
                        cmd.append("--%s=%s" % (opt.replace("_", "-"), val))

        options = list(c.get("options", []))

        if "wdspec" in test_types:
            geckodriver_path = self._query_geckodriver()
            if not geckodriver_path or not os.path.isfile(geckodriver_path):
                self.fatal("Unable to find geckodriver binary "
                           "in common test package: %s" %
                           str(geckodriver_path))
            cmd.append("--webdriver-binary=%s" % geckodriver_path)
            cmd.append("--webdriver-arg=-vv")  # enable trace logs

        test_type_suite = {
            "testharness": "web-platform-tests",
            "crashtest": "web-platform-tests-crashtest",
            "reftest": "web-platform-tests-reftest",
            "wdspec": "web-platform-tests-wdspec",
        }
        for test_type in test_types:
            try_options, try_tests = self.try_args(test_type_suite[test_type])

            cmd.extend(
                self.query_options(options,
                                   try_options,
                                   str_format_values=str_format_values))
            cmd.extend(
                self.query_tests_args(try_tests,
                                      str_format_values=str_format_values))
        if "include" in c and c["include"]:
            cmd.append("--include=%s" % c["include"])

        cmd.extend(test_paths)

        return cmd
Example #31
 def test_find_and_update_file_no_argument(self):
     """Test that mozinfo.find_and_update_from_json no-ops on not being
     given any arguments.
     """
     self.assertEqual(mozinfo.find_and_update_from_json(), None)
Example #32
#

from __future__ import with_statement
import sys, os, unittest, tempfile, shutil
import mozinfo

from StringIO import StringIO

from mozlog import structured
from mozbuild.base import MozbuildObject
os.environ.pop('MOZ_OBJDIR', None)
build_obj = MozbuildObject.from_environment()

from runxpcshelltests import XPCShellTests

mozinfo.find_and_update_from_json()

objdir = build_obj.topobjdir.encode("utf-8")

if mozinfo.isMac:
    from buildconfig import substs
    xpcshellBin = os.path.join(objdir, "dist", substs['MOZ_MACBUNDLE_NAME'],
                               "Contents", "MacOS", "xpcshell")
else:
    xpcshellBin = os.path.join(objdir, "dist", "bin", "xpcshell")
    if sys.platform == "win32":
        xpcshellBin += ".exe"

TEST_PASS_STRING = "TEST-PASS"
TEST_FAIL_STRING = "TEST-UNEXPECTED-FAIL"
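Example #32 also leans on the convenience booleans that mozinfo exports alongside the info dict (isMac here, with isWin and isLinux as siblings) to pick a platform-specific binary layout. A small illustrative sketch with placeholder paths rather than the real xpcshell locations:

import os

import mozinfo

mozinfo.find_and_update_from_json()

if mozinfo.isMac:
    binary = os.path.join("dist", "MyApp.app", "Contents", "MacOS", "myapp")
elif mozinfo.isWin:
    binary = os.path.join("dist", "bin", "myapp.exe")
else:
    binary = os.path.join("dist", "bin", "myapp")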
Example #33
    def _query_cmd(self, test_types):
        if not self.binary_path:
            self.fatal("Binary path could not be determined")
            # And exit

        c = self.config
        run_file_name = "runtests.py"

        dirs = self.query_abs_dirs()
        abs_app_dir = self.query_abs_app_dir()
        str_format_values = {
            'binary_path': self.binary_path,
            'test_path': dirs["abs_wpttest_dir"],
            'test_install_path': dirs["abs_test_install_dir"],
            'abs_app_dir': abs_app_dir,
            'abs_work_dir': dirs["abs_work_dir"],
            'xre_path': self.xre_path,
        }

        cmd = [self.query_python_path('python'), '-u']
        cmd.append(os.path.join(dirs["abs_wpttest_dir"], run_file_name))

        # Make sure that the logging directory exists
        if self.mkdir_p(dirs["abs_blob_upload_dir"]) == -1:
            self.fatal("Could not create blobber upload directory")
            # Exit

        mozinfo.find_and_update_from_json(dirs['abs_test_install_dir'])

        cmd += [
            "--log-raw=-",
            "--log-raw=%s" %
            os.path.join(dirs["abs_blob_upload_dir"], "wpt_raw.log"),
            "--log-wptreport=%s" %
            os.path.join(dirs["abs_blob_upload_dir"], "wptreport.json"),
            "--log-errorsummary=%s" %
            os.path.join(dirs["abs_blob_upload_dir"], "wpt_errorsummary.log"),
            "--binary=%s" % self.binary_path,
            "--symbols-path=%s" % self.query_symbols_url(),
            "--stackwalk-binary=%s" % self.query_minidump_stackwalk(),
            "--stackfix-dir=%s" %
            os.path.join(dirs["abs_test_install_dir"], "bin"),
            "--run-by-dir=%i" % (3 if not mozinfo.info["asan"] else 0),
            "--no-pause-after-test"
        ]

        if self.is_android:
            cmd += ["--device-serial=%s" % self.device_serial]
            cmd += ["--package-name=%s" % self.query_package_name()]

        if mozinfo.info["os"] == "win" and mozinfo.info["os_version"] == "6.1":
            # On Windows 7 --install-fonts fails, so fall back to a Firefox-specific codepath
            self._install_fonts()
        else:
            cmd += ["--install-fonts"]

        for test_type in test_types:
            cmd.append("--test-type=%s" % test_type)

        if c['extra_prefs']:
            cmd.extend(['--setpref={}'.format(p) for p in c['extra_prefs']])

        if not c["e10s"]:
            cmd.append("--disable-e10s")

        if c["single_stylo_traversal"]:
            cmd.append("--stylo-threads=1")
        else:
            cmd.append("--stylo-threads=4")

        if not (self.verify_enabled or self.per_test_coverage):
            test_paths = json.loads(
                os.environ.get('MOZHARNESS_TEST_PATHS', '""'))
            if test_paths and 'web-platform-tests' in test_paths:
                relpaths = [
                    os.path.relpath(p, 'testing/web-platform')
                    for p in test_paths['web-platform-tests']
                ]
                paths = [
                    os.path.join(dirs["abs_wpttest_dir"], relpath)
                    for relpath in relpaths
                ]
                cmd.extend(paths)
            else:
                for opt in ["total_chunks", "this_chunk"]:
                    val = c.get(opt)
                    if val:
                        cmd.append("--%s=%s" % (opt.replace("_", "-"), val))

        options = list(c.get("options", []))

        if "wdspec" in test_types:
            geckodriver_path = self._query_geckodriver()
            if not geckodriver_path or not os.path.isfile(geckodriver_path):
                self.fatal("Unable to find geckodriver binary "
                           "in common test package: %s" %
                           str(geckodriver_path))
            cmd.append("--webdriver-binary=%s" % geckodriver_path)
            cmd.append("--webdriver-arg=-vv")  # enable trace logs

        test_type_suite = {
            "testharness": "web-platform-tests",
            "reftest": "web-platform-tests-reftests",
            "wdspec": "web-platform-tests-wdspec",
        }
        for test_type in test_types:
            try_options, try_tests = self.try_args(test_type_suite[test_type])

            cmd.extend(
                self.query_options(options,
                                   try_options,
                                   str_format_values=str_format_values))
            cmd.extend(
                self.query_tests_args(try_tests,
                                      str_format_values=str_format_values))

        return cmd
Example #34
 def test_find_and_update_file_raise_exception(self):
     """Test that mozinfo.find_and_update_from_json raises
     an IOError when exceptions are unsuppressed.
     """
     with self.assertRaises(IOError):
         mozinfo.find_and_update_from_json(raise_exception=True)
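Taken together with Examples #21 and #31, this test pins down the error-handling contract: when no mozinfo.json can be located, omitting the flag or passing raise_exception=False quietly yields None, while raise_exception=True turns the same failure into an IOError. A compact sketch of both modes, assuming no mozinfo.json is discoverable from the current environment:

import mozinfo

result = mozinfo.find_and_update_from_json(raise_exception=False)
print(result)   # None when nothing was found (Examples #21 and #31)

try:
    mozinfo.find_and_update_from_json(raise_exception=True)
except IOError:
    print("no mozinfo.json could be located")   # behaviour tested above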
Example #35
    def find_modified_tests(self):
        """
           For each file modified on this push, determine if the modified file
           is a test, by searching test manifests. Populate self.suites
           with test files, organized by suite.

           This depends on test manifests, so can only run after test zips have
           been downloaded and extracted.
        """
        repository = os.environ.get("GECKO_HEAD_REPOSITORY")
        revision = os.environ.get("GECKO_HEAD_REV")
        if not repository or not revision:
            self.warning(
                "unable to run tests in per-test mode: no repo or revision!")
            return []

        def get_automationrelevance():
            response = self.load_json_url(url)
            return response

        dirs = self.query_abs_dirs()
        mozinfo.find_and_update_from_json(dirs['abs_test_install_dir'])
        e10s = self.config.get('e10s', False)
        mozinfo.update({"e10s": e10s})
        headless = self.config.get('headless', False)
        mozinfo.update({"headless": headless})
        # FIXME(emilio): Need to update test expectations.
        mozinfo.update({'stylo': True})
        mozinfo.update({'verify': True})
        self.info("Per-test run using mozinfo: %s" % str(mozinfo.info))

        # determine which files were changed on this push
        url = '%s/json-automationrelevance/%s' % (repository.rstrip('/'),
                                                  revision)
        contents = self.retry(get_automationrelevance,
                              attempts=2,
                              sleeptime=10)
        changed_files = set()
        for c in contents['changesets']:
            self.info(" {cset} {desc}".format(
                cset=c['node'][0:12],
                desc=c['desc'].splitlines()[0].encode('ascii', 'ignore')))
            changed_files |= set(c['files'])

        if self.config.get('per_test_category') == "web-platform":
            self._find_wpt_tests(dirs, changed_files)
        elif self.config.get('gpu_required') == True:
            self._find_misc_tests(dirs, changed_files, gpu=True)
        else:
            self._find_misc_tests(dirs, changed_files)

        # per test mode run specific tests from any given test suite
        # _find_*_tests organizes tests to run into suites so we can
        # run each suite at a time

        # chunk files
        total_tests = sum([len(self.suites[x]) for x in self.suites])

        files_per_chunk = total_tests / float(
            self.config.get('total_chunks', 1))
        files_per_chunk = int(math.ceil(files_per_chunk))

        chunk_number = int(self.config.get('this_chunk', 1))
        suites = {}
        start = (chunk_number - 1) * files_per_chunk
        end = (chunk_number * files_per_chunk)
        current = -1
        for suite in self.suites:
            for test in self.suites[suite]:
                current += 1
                if current >= start and current < end:
                    if suite not in suites:
                        suites[suite] = []
                    suites[suite].append(test)
            if current >= end:
                break

        self.suites = suites
        self.tests_downloaded = True
Example #36
    def find_tests_for_verification(self, action, success=None):
        """
           For each file modified on this push, determine if the modified file
           is a test, by searching test manifests. Populate self.verify_suites
           with test files, organized by suite.

           This depends on test manifests, so can only run after test zips have
           been downloaded and extracted.
        """

        if self.config.get('verify') != True:
            return

        repository = os.environ.get("GECKO_HEAD_REPOSITORY")
        revision = os.environ.get("GECKO_HEAD_REV")
        if not repository or not revision:
            self.warning("unable to verify tests: no repo or revision!")
            return []

        def get_automationrelevance():
            response = self.load_json_url(url)
            return response

        dirs = self.query_abs_dirs()
        mozinfo.find_and_update_from_json(dirs['abs_test_install_dir'])
        if self.config.get('e10s') == True:
            mozinfo.update({"e10s": True})
            # Additional mozinfo properties like "headless" and "coverage" are
            # also normally updated dynamically in the harness, but neither of
            # these apply to the test-verify task.

        manifests = [
            (os.path.join(dirs['abs_mochitest_dir'], 'tests',
                          'mochitest.ini'), 'plain'),
            (os.path.join(dirs['abs_mochitest_dir'], 'chrome',
                          'chrome.ini'), 'chrome'),
            (os.path.join(dirs['abs_mochitest_dir'], 'browser',
                          'browser-chrome.ini'), 'browser-chrome'),
            (os.path.join(dirs['abs_mochitest_dir'], 'a11y',
                          'a11y.ini'), 'a11y'),
            (os.path.join(dirs['abs_xpcshell_dir'], 'tests',
                          'xpcshell.ini'), 'xpcshell'),
        ]
        tests_by_path = {}
        for (path, suite) in manifests:
            if os.path.exists(path):
                man = TestManifest([path], strict=False)
                active = man.active_tests(exists=False,
                                          disabled=False,
                                          filters=[],
                                          **mozinfo.info)
                tests_by_path.update(
                    {t['relpath']: (suite, t.get('subsuite'))
                     for t in active})
                self.info("Verification updated with manifest %s" % path)

        ref_manifests = [
            (os.path.join(dirs['abs_reftest_dir'], 'tests', 'layout',
                          'reftests', 'reftest.list'), 'reftest'),
            (os.path.join(dirs['abs_reftest_dir'], 'tests', 'testing',
                          'crashtest', 'crashtests.list'), 'crashtest'),
            # TODO (os.path.join(dirs['abs_test_install_dir'], 'jsreftest', 'tests', 'jstests.list'), 'jstestbrowser'),
        ]
        sys.path.append(dirs['abs_reftest_dir'])
        import manifest
        self.reftest_test_dir = os.path.join(dirs['abs_reftest_dir'], 'tests')
        for (path, suite) in ref_manifests:
            if os.path.exists(path):
                man = manifest.ReftestManifest()
                man.load(path)
                tests_by_path.update({
                    os.path.relpath(t, self.reftest_test_dir): (suite, None)
                    for t in man.files
                })
                self.info("Verification updated with manifest %s" % path)

        # determine which files were changed on this push
        url = '%s/json-automationrelevance/%s' % (repository.rstrip('/'),
                                                  revision)
        contents = self.retry(get_automationrelevance,
                              attempts=2,
                              sleeptime=10)
        changed_files = set()
        for c in contents['changesets']:
            self.info(" {cset} {desc}".format(
                cset=c['node'][0:12],
                desc=c['desc'].splitlines()[0].encode('ascii', 'ignore')))
            changed_files |= set(c['files'])

        # for each changed file, determine if it is a test file, and what suite it is in
        for file in changed_files:
            # manifest paths use os.sep (like backslash on Windows) but
            # automation-relevance uses posixpath.sep
            file = file.replace(posixpath.sep, os.sep)
            entry = tests_by_path.get(file)
            if entry:
                self.info("Verification found test %s" % file)
                subsuite_mapping = {
                    ('browser-chrome', 'clipboard'):
                    'browser-chrome-clipboard',
                    ('chrome', 'clipboard'): 'chrome-clipboard',
                    ('plain', 'clipboard'): 'plain-clipboard',
                    ('browser-chrome', 'devtools'):
                    'mochitest-devtools-chrome',
                    ('browser-chrome', 'gpu'): 'browser-chrome-gpu',
                    ('chrome', 'gpu'): 'chrome-gpu',
                    ('plain', 'gpu'): 'plain-gpu',
                    ('plain', 'media'): 'mochitest-media',
                    ('plain', 'webgl'): 'mochitest-gl',
                }
                if entry in subsuite_mapping:
                    suite = subsuite_mapping[entry]
                else:
                    suite = entry[0]
                suite_files = self.verify_suites.get(suite)
                if not suite_files:
                    suite_files = []
                suite_files.append(file)
                self.verify_suites[suite] = suite_files
        self.verify_downloaded = True
Example #37
 def __init__(self):
   os.chdir(SCRIPT_DIR)
   mozinfo.find_and_update_from_json(SCRIPT_DIR)
Example #38
#

from __future__ import with_statement
import sys, os, unittest, tempfile, shutil
import mozinfo

from StringIO import StringIO

from mozlog import structured
from mozbuild.base import MozbuildObject
os.environ.pop('MOZ_OBJDIR', None)
build_obj = MozbuildObject.from_environment()

from runxpcshelltests import XPCShellTests

mozinfo.find_and_update_from_json()

objdir = build_obj.topobjdir.encode("utf-8")

if mozinfo.isMac:
  from buildconfig import substs
  xpcshellBin = os.path.join(objdir, "dist", substs['MOZ_MACBUNDLE_NAME'], "Contents", "MacOS", "xpcshell")
else:
  xpcshellBin = os.path.join(objdir, "dist", "bin", "xpcshell")
  if sys.platform == "win32":
    xpcshellBin += ".exe"

TEST_PASS_STRING = "TEST-PASS"
TEST_FAIL_STRING = "TEST-UNEXPECTED-FAIL"

SIMPLE_PASSING_TEST = "function run_test() { do_check_true(true); }"
Example #39
    def _query_cmd(self, test_types):
        if not self.binary_path:
            self.fatal("Binary path could not be determined")
            # And exit

        c = self.config
        run_file_name = "runtests.py"

        dirs = self.query_abs_dirs()
        abs_app_dir = self.query_abs_app_dir()
        str_format_values = {
            "binary_path": self.binary_path,
            "test_path": dirs["abs_wpttest_dir"],
            "test_install_path": dirs["abs_test_install_dir"],
            "abs_app_dir": abs_app_dir,
            "abs_work_dir": dirs["abs_work_dir"],
            "xre_path": self.xre_path,
        }

        cmd = [self.query_python_path("python"), "-u"]
        cmd.append(os.path.join(dirs["abs_wpttest_dir"], run_file_name))

        mozinfo.find_and_update_from_json(dirs["abs_test_install_dir"])

        raw_log_file, error_summary_file = self.get_indexed_logs(
            dirs["abs_blob_upload_dir"], "wpt"
        )

        cmd += [
            "--log-raw=-",
            "--log-raw=%s" % raw_log_file,
            "--log-wptreport=%s"
            % os.path.join(dirs["abs_blob_upload_dir"], "wptreport.json"),
            "--log-errorsummary=%s" % error_summary_file,
            "--binary=%s" % self.binary_path,
            "--symbols-path=%s" % self.symbols_path,
            "--stackwalk-binary=%s" % self.query_minidump_stackwalk(),
            "--stackfix-dir=%s" % os.path.join(dirs["abs_test_install_dir"], "bin"),
            "--no-pause-after-test",
            "--instrument-to-file=%s"
            % os.path.join(dirs["abs_blob_upload_dir"], "wpt_instruments.txt"),
            "--specialpowers-path=%s"
            % os.path.join(
                dirs["abs_test_extensions_dir"], "*****@*****.**"
            ),
        ]

        is_windows_7 = (
            mozinfo.info["os"] == "win" and mozinfo.info["os_version"] == "6.1"
        )

        if (
            self.is_android
            or mozinfo.info["tsan"]
            or "wdspec" in test_types
            or "fission.autostart=true" in c["extra_prefs"]
            or
            # Bug 1392106 - skia error 0x80070005: Access is denied.
            is_windows_7
            and mozinfo.info["debug"]
        ):
            processes = 1
        else:
            processes = 2
        cmd.append("--processes=%s" % processes)

        if self.is_android:
            cmd += [
                "--device-serial=%s" % self.device_serial,
                "--package-name=%s" % self.query_package_name(),
            ]

        if is_windows_7:
            # On Windows 7 --install-fonts fails, so fall back to a Firefox-specific codepath
            self._install_fonts()
        else:
            cmd += ["--install-fonts"]

        for test_type in test_types:
            cmd.append("--test-type=%s" % test_type)

        if c["extra_prefs"]:
            cmd.extend(["--setpref={}".format(p) for p in c["extra_prefs"]])

        if not c["e10s"]:
            cmd.append("--disable-e10s")

        if c["enable_webrender"]:
            cmd.append("--enable-webrender")

        if c["skip_timeout"]:
            cmd.append("--skip-timeout")

        for implementation_status in c["skip_implementation_status"]:
            cmd.append("--skip-implementation-status=%s" % implementation_status)

        # Bug 1643177 - reduce timeout multiplier for web-platform-tests backlog
        if c["backlog"]:
            cmd.append("--timeout-multiplier=0.25")

        test_paths = set()
        if not (self.verify_enabled or self.per_test_coverage):
            mozharness_test_paths = json.loads(
                os.environ.get("MOZHARNESS_TEST_PATHS", '""')
            )
            if mozharness_test_paths:
                path = os.path.join(dirs["abs_fetches_dir"], "wpt_tests_by_group.json")

                if not os.path.exists(path):
                    self.critical("Unable to locate web-platform-test groups file.")

                cmd.append("--test-groups={}".format(path))

                for key in mozharness_test_paths.keys():
                    paths = mozharness_test_paths.get(key, [])
                    for path in paths:
                        if not path.startswith("/"):
                            # Assume this is a filesystem path rather than a test id
                            path = os.path.relpath(path, "testing/web-platform")
                            if ".." in path:
                                self.fatal("Invalid WPT path: {}".format(path))
                            path = os.path.join(dirs["abs_wpttest_dir"], path)
                        test_paths.add(path)
            else:
                # As per WPT harness, the --run-by-dir flag is incompatible with
                # the --test-groups flag.
                cmd.append("--run-by-dir=%i" % (3 if not mozinfo.info["asan"] else 0))
                for opt in ["total_chunks", "this_chunk"]:
                    val = c.get(opt)
                    if val:
                        cmd.append("--%s=%s" % (opt.replace("_", "-"), val))

        options = list(c.get("options", []))

        if "wdspec" in test_types:
            geckodriver_path = self._query_geckodriver()
            if not geckodriver_path or not os.path.isfile(geckodriver_path):
                self.fatal(
                    "Unable to find geckodriver binary "
                    "in common test package: %s" % str(geckodriver_path)
                )
            cmd.append("--webdriver-binary=%s" % geckodriver_path)
            cmd.append("--webdriver-arg=-vv")  # enable trace logs

        test_type_suite = {
            "testharness": "web-platform-tests",
            "crashtest": "web-platform-tests-crashtest",
            "print-reftest": "web-platform-tests-print-reftest",
            "reftest": "web-platform-tests-reftest",
            "wdspec": "web-platform-tests-wdspec",
        }
        for test_type in test_types:
            try_options, try_tests = self.try_args(test_type_suite[test_type])

            cmd.extend(
                self.query_options(
                    options, try_options, str_format_values=str_format_values
                )
            )
            cmd.extend(
                self.query_tests_args(try_tests, str_format_values=str_format_values)
            )
        if "include" in c and c["include"]:
            cmd.append("--include=%s" % c["include"])

        cmd.extend(test_paths)

        return cmd
Example #40
    def find_modified_tests(self):
        """
        For each file modified on this push, determine if the modified file
        is a test, by searching test manifests. Populate self.suites
        with test files, organized by suite.

        This depends on test manifests, so can only run after test zips have
        been downloaded and extracted.
        """
        repository = os.environ.get("GECKO_HEAD_REPOSITORY")
        revision = os.environ.get("GECKO_HEAD_REV")
        if not repository or not revision:
            self.warning(
                "unable to run tests in per-test mode: no repo or revision!")
            self.suites = {}
            self.tests_downloaded = True
            return

        def get_automationrelevance():
            response = self.load_json_url(url)
            return response

        dirs = self.query_abs_dirs()
        mozinfo.find_and_update_from_json(dirs["abs_test_install_dir"])
        e10s = self.config.get("e10s", False)
        mozinfo.update({"e10s": e10s})
        is_fission = "fission.autostart=true" in self.config.get(
            "extra_prefs", [])
        mozinfo.update({"fission": is_fission})
        headless = self.config.get("headless", False)
        mozinfo.update({"headless": headless})
        if mozinfo.info["buildapp"] == "mobile/android":
            # extra android mozinfo normally comes from device queries, but this
            # code may run before the device is ready, so rely on configuration
            mozinfo.update({
                "android_version":
                str(self.config.get("android_version", 24))
            })
            mozinfo.update({"is_fennec": self.config.get("is_fennec", False)})
            mozinfo.update(
                {"is_emulator": self.config.get("is_emulator", True)})
        mozinfo.update({"verify": True})
        self.info("Per-test run using mozinfo: %s" % str(mozinfo.info))

        # determine which files were changed on this push
        changed_files = set()
        url = "%s/json-automationrelevance/%s" % (repository.rstrip("/"),
                                                  revision)
        contents = self.retry(get_automationrelevance,
                              attempts=2,
                              sleeptime=10)
        for c in contents["changesets"]:
            self.info(" {cset} {desc}".format(
                cset=c["node"][0:12],
                desc=c["desc"].splitlines()[0].encode("ascii", "ignore"),
            ))
            changed_files |= set(c["files"])
        changed_files = list(changed_files)

        # check specified test paths, as from 'mach try ... <path>'
        if os.environ.get("MOZHARNESS_TEST_PATHS", None) is not None:
            suite_to_paths = json.loads(os.environ["MOZHARNESS_TEST_PATHS"])
            specified_paths = itertools.chain.from_iterable(
                suite_to_paths.values())
            specified_paths = list(specified_paths)
            # filter the list of changed files to those found under the
            # specified path(s)
            changed_and_specified = set()
            for changed in changed_files:
                for specified in specified_paths:
                    if changed.startswith(specified):
                        changed_and_specified.add(changed)
                        break
            if changed_and_specified:
                changed_files = changed_and_specified
            else:
                # if specified paths do not match changed files, assume the
                # specified paths are explicitly requested tests
                changed_files = set()
                changed_files.update(specified_paths)
            self.info(
                "Per-test run found explicit request in MOZHARNESS_TEST_PATHS:"
            )
            self.info(str(changed_files))

        if self.config.get("per_test_category") == "web-platform":
            self._find_wpt_tests(dirs, changed_files)
        elif self.config.get("gpu_required", False) is not False:
            self._find_misc_tests(dirs, changed_files, gpu=True)
        else:
            self._find_misc_tests(dirs, changed_files)

        # per test mode run specific tests from any given test suite
        # _find_*_tests organizes tests to run into suites so we can
        # run each suite at a time

        # chunk files
        total_tests = sum([len(self.suites[x]) for x in self.suites])

        if total_tests == 0:
            self.warning("No tests to verify.")
            self.suites = {}
            self.tests_downloaded = True
            return

        files_per_chunk = total_tests / float(
            self.config.get("total_chunks", 1))
        files_per_chunk = int(math.ceil(files_per_chunk))

        chunk_number = int(self.config.get("this_chunk", 1))
        suites = {}
        start = (chunk_number - 1) * files_per_chunk
        end = chunk_number * files_per_chunk
        current = -1
        for suite in self.suites:
            for test in self.suites[suite]:
                current += 1
                if current >= start and current < end:
                    if suite not in suites:
                        suites[suite] = []
                    suites[suite].append(test)
            if current >= end:
                break

        self.suites = suites
        self.tests_downloaded = True
Example #41
    def find_modified_tests(self):
        """
           For each file modified on this push, determine if the modified file
           is a test, by searching test manifests. Populate self.suites
           with test files, organized by suite.

           This depends on test manifests, so can only run after test zips have
           been downloaded and extracted.
        """
        repository = os.environ.get("GECKO_HEAD_REPOSITORY")
        revision = os.environ.get("GECKO_HEAD_REV")
        if not repository or not revision:
            self.warning(
                "unable to run tests in per-test mode: no repo or revision!")
            return []

        def get_automationrelevance():
            response = self.load_json_url(url)
            return response

        dirs = self.query_abs_dirs()
        mozinfo.find_and_update_from_json(dirs['abs_test_install_dir'])
        e10s = self.config.get('e10s', False)
        mozinfo.update({"e10s": e10s})
        headless = self.config.get('headless', False)
        mozinfo.update({"headless": headless})
        if mozinfo.info['buildapp'] == 'mobile/android':
            # extra android mozinfo normally comes from device queries, but this
            # code may run before the device is ready, so rely on configuration
            mozinfo.update(
                {'android_version': self.config.get('android_version', 18)})
            mozinfo.update({'is_fennec': self.config.get('is_fennec', True)})
            mozinfo.update(
                {'is_emulator': self.config.get('is_emulator', True)})
        mozinfo.update({'verify': True})
        self.info("Per-test run using mozinfo: %s" % str(mozinfo.info))

        changed_files = set()
        if os.environ.get('MOZHARNESS_TEST_PATHS', None) is not None:
            suite_to_paths = json.loads(os.environ['MOZHARNESS_TEST_PATHS'])
            specified_files = itertools.chain.from_iterable(
                suite_to_paths.values())
            changed_files.update(specified_files)
            self.info(
                "Per-test run found explicit request in MOZHARNESS_TEST_PATHS:"
            )
            self.info(str(changed_files))
        else:
            # determine which files were changed on this push
            url = '%s/json-automationrelevance/%s' % (repository.rstrip('/'),
                                                      revision)
            contents = self.retry(get_automationrelevance,
                                  attempts=2,
                                  sleeptime=10)
            for c in contents['changesets']:
                self.info(" {cset} {desc}".format(
                    cset=c['node'][0:12],
                    desc=c['desc'].splitlines()[0].encode('ascii', 'ignore')))
                changed_files |= set(c['files'])

        if self.config.get('per_test_category') == "web-platform":
            self._find_wpt_tests(dirs, changed_files)
        elif self.config.get('gpu_required', False) is not False:
            self._find_misc_tests(dirs, changed_files, gpu=True)
        else:
            self._find_misc_tests(dirs, changed_files)

        # per test mode run specific tests from any given test suite
        # _find_*_tests organizes tests to run into suites so we can
        # run each suite at a time

        # chunk files
        total_tests = sum([len(self.suites[x]) for x in self.suites])

        files_per_chunk = total_tests / float(
            self.config.get('total_chunks', 1))
        files_per_chunk = int(math.ceil(files_per_chunk))

        chunk_number = int(self.config.get('this_chunk', 1))
        suites = {}
        start = (chunk_number - 1) * files_per_chunk
        end = (chunk_number * files_per_chunk)
        current = -1
        for suite in self.suites:
            for test in self.suites[suite]:
                current += 1
                if current >= start and current < end:
                    if suite not in suites:
                        suites[suite] = []
                    suites[suite].append(test)
            if current >= end:
                break

        self.suites = suites
        self.tests_downloaded = True
Example #42
    def _query_cmd(self, test_types):
        if not self.binary_path:
            self.fatal("Binary path could not be determined")
            # And exit

        c = self.config
        dirs = self.query_abs_dirs()
        abs_app_dir = self.query_abs_app_dir()
        run_file_name = "runtests.py"

        cmd = [self.query_python_path('python'), '-u']
        cmd.append(os.path.join(dirs["abs_wpttest_dir"], run_file_name))

        # Make sure that the logging directory exists
        if self.mkdir_p(dirs["abs_blob_upload_dir"]) == -1:
            self.fatal("Could not create blobber upload directory")
            # Exit

        mozinfo.find_and_update_from_json(dirs['abs_test_install_dir'])

        cmd += ["--log-raw=-",
                "--log-raw=%s" % os.path.join(dirs["abs_blob_upload_dir"],
                                              "wpt_raw.log"),
                "--log-wptreport=%s" % os.path.join(dirs["abs_blob_upload_dir"],
                                                    "wptreport.json"),
                "--log-errorsummary=%s" % os.path.join(dirs["abs_blob_upload_dir"],
                                                       "wpt_errorsummary.log"),
                "--binary=%s" % self.binary_path,
                "--symbols-path=%s" % self.query_symbols_url(),
                "--stackwalk-binary=%s" % self.query_minidump_stackwalk(),
                "--stackfix-dir=%s" % os.path.join(dirs["abs_test_install_dir"], "bin"),
                "--run-by-dir=%i" % (3 if not mozinfo.info["asan"] else 0),
                "--no-pause-after-test"]

        if not sys.platform.startswith("linux"):
            cmd += ["--exclude=css"]

        for test_type in test_types:
            cmd.append("--test-type=%s" % test_type)

        if not c["e10s"]:
            cmd.append("--disable-e10s")

        if c["single_stylo_traversal"]:
            cmd.append("--stylo-threads=1")
        else:
            cmd.append("--stylo-threads=4")

        if not (self.verify_enabled or self.per_test_coverage):
            if os.environ.get('MOZHARNESS_TEST_PATHS'):
                prefix = 'testing/web-platform'
                paths = os.environ['MOZHARNESS_TEST_PATHS'].split(':')
                paths = [os.path.join(dirs["abs_wpttest_dir"], os.path.relpath(p, prefix))
                         for p in paths if p.startswith(prefix)]
                cmd.extend(paths)
            else:
                for opt in ["total_chunks", "this_chunk"]:
                    val = c.get(opt)
                    if val:
                        cmd.append("--%s=%s" % (opt.replace("_", "-"), val))

        if "wdspec" in test_types:
            geckodriver_path = self._query_geckodriver()
            if not geckodriver_path or not os.path.isfile(geckodriver_path):
                self.fatal("Unable to find geckodriver binary "
                           "in common test package: %s" % str(geckodriver_path))
            cmd.append("--webdriver-binary=%s" % geckodriver_path)
            cmd.append("--webdriver-arg=-vv")  # enable trace logs

        options = list(c.get("options", []))

        str_format_values = {
            'binary_path': self.binary_path,
            'test_path': dirs["abs_wpttest_dir"],
            'test_install_path': dirs["abs_test_install_dir"],
            'abs_app_dir': abs_app_dir,
            'abs_work_dir': dirs["abs_work_dir"]
        }

        test_type_suite = {
            "testharness": "web-platform-tests",
            "reftest": "web-platform-tests-reftests",
            "wdspec": "web-platform-tests-wdspec",
        }
        for test_type in test_types:
            try_options, try_tests = self.try_args(test_type_suite[test_type])

            cmd.extend(self.query_options(options,
                                          try_options,
                                          str_format_values=str_format_values))
            cmd.extend(self.query_tests_args(try_tests,
                                             str_format_values=str_format_values))

        return cmd
Example #43
def main():
    parser = ArgumentParser()
    parser.add_argument('output_json', help='Output JSON file')
    parser.add_argument('buildhub_json', help='Output buildhub JSON file')
    parser.add_argument('output_txt', help='Output text file')
    # TODO: Move package-name.mk variables into moz.configure.
    parser.add_argument('pkg_platform', help='Package platform identifier')
    parser.add_argument('--package', help='Path to application package file')
    parser.add_argument('--installer',
                        help='Path to application installer file')
    args = parser.parse_args()
    mozinfo.find_and_update_from_json()

    important_substitutions = [
        'target_alias',
        'target_cpu',
        'target_os',
        'target_vendor',
        'host_alias',
        'host_cpu',
        'host_os',
        'host_vendor',
        'MOZ_UPDATE_CHANNEL',
        'MOZ_APP_VENDOR',
        'MOZ_APP_NAME',
        'MOZ_APP_VERSION',
        'MOZ_APP_MAXVERSION',
        'MOZ_APP_ID',
        'CC',
        'CXX',
        'AS',
        'MOZ_SOURCE_REPO',
    ]

    all_key_value_pairs = {
        x.lower(): buildconfig.substs[x]
        for x in important_substitutions
    }
    build_id = os.environ['MOZ_BUILD_DATE']
    all_key_value_pairs.update({
        'buildid':
        build_id,
        'moz_source_stamp':
        buildconfig.substs['MOZ_SOURCE_CHANGESET'],
        'moz_pkg_platform':
        args.pkg_platform,
    })

    with open(args.output_json, 'wb') as f:
        json.dump(all_key_value_pairs, f, indent=2, sort_keys=True)
        f.write('\n')

    with open(args.buildhub_json, 'wb') as f:
        if args.installer and os.path.exists(args.installer):
            package = args.installer
        else:
            package = args.package
        build_time = datetime.datetime.strptime(build_id, '%Y%m%d%H%M%S')
        st = os.stat(package)
        mtime = datetime.datetime.fromtimestamp(st.st_mtime)
        s = buildconfig.substs
        record = {
            'build': {
                'id': build_id,
                'date': build_time.isoformat() + 'Z',
                'as': s['AS'],
                'cc': s['CC'],
                'cxx': s['CXX'],
                'host': s['host_alias'],
                'target': s['target_alias'],
            },
            'source': {
                'product': s['MOZ_APP_NAME'],
                'repository': s['MOZ_SOURCE_REPO'],
                'tree': os.environ['MH_BRANCH'],
                'revision': s['MOZ_SOURCE_CHANGESET'],
            },
            'target': {
                'platform': args.pkg_platform,
                'os': mozinfo.info['os'],
                # This would be easier if the locale was specified at configure time.
                'locale': os.environ.get('AB_CD', 'en-US'),
                'version': s['MOZ_APP_VERSION_DISPLAY']
                or s['MOZ_APP_VERSION'],
                'channel': s['MOZ_UPDATE_CHANNEL'],
            },
            'download': {
                # The release pipeline will update these keys.
                'url': os.path.basename(package),
                'mimetype': 'application/octet-stream',
                'date': mtime.isoformat() + 'Z',
                'size': st.st_size,
            }
        }
        json.dump(record, f, indent=2, sort_keys=True)
        f.write('\n')

    with open(args.output_txt, 'wb') as f:
        f.write('buildID={}\n'.format(build_id))