Example #1
def buildCommandLine(test):
    """build firefox command line options for tp tests"""

    # sanity check pageloader values
    # mandatory options: tpmanifest, tpcycles
    if test['tpcycles'] not in range(1, 1000):
        raise TalosError('pageloader cycles must be int 1 to 1,000')
    if test.get('tpdelay') and test['tpdelay'] not in range(1, 10000):
        raise TalosError('pageloader delay must be int 1 to 10,000')
    if 'tpmanifest' not in test:
        raise TalosError("tpmanifest not found in test: %s" % test)

    # build pageloader command from options
    url = ['-tp', test['tpmanifest']]
    CLI_bool_options = [
        'tpchrome', 'tpmozafterpaint', 'tpdisable_e10s', 'tpnoisy', 'rss',
        'tprender', 'tploadnocache', 'tpscrolltest'
    ]
    CLI_options = ['tpcycles', 'tppagecycles', 'tpdelay', 'tptimeout']
    for key in CLI_bool_options:
        if test.get(key):
            url.append('-%s' % key)
    for key in CLI_options:
        value = test.get(key)
        if value:
            url.extend(['-%s' % key, str(value)])

    # XXX we should actually return the list, but since we abuse the url
    # as a command line flag to pass to firefox all over the place,
    # we will just make a string for now
    return ' '.join(url)
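
A minimal usage sketch (the test dict below is hypothetical; only the buildCommandLine function above and TalosError are assumed to be in scope):

test = {
    'tpmanifest': '/path/to/tp5n.manifest',  # hypothetical manifest path
    'tpcycles': 1,
    'tppagecycles': 25,
    'tpchrome': True,
}
buildCommandLine(test)
# -> '-tp /path/to/tp5n.manifest -tpchrome -tpcycles 1 -tppagecycles 25'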
Example #2
def set_tp_preferences(test, browser_config):
    # sanity check pageloader values
    # mandatory options: tpmanifest, tpcycles
    if test['tpcycles'] not in range(1, 1000):
        raise TalosError('pageloader cycles must be int 1 to 1,000')
    if 'tpmanifest' not in test:
        raise TalosError("tpmanifest not found in test: %s" % test)

    # if profiling is on, override tppagecycles to prevent test hanging
    if test['gecko_profile']:
        LOG.info("Gecko profiling is enabled so talos is reducing the number "
                 "of cycles, please disregard reported numbers")
        for cycle_var in ['tppagecycles', 'tpcycles', 'cycles']:
            if test[cycle_var] > 2:
                test[cycle_var] = 2

    CLI_bool_options = ['tpchrome', 'tpmozafterpaint', 'tploadnocache', 'tpscrolltest', 'fnbpaint']
    CLI_options = ['tpcycles', 'tppagecycles', 'tptimeout', 'tpmanifest']
    for key in CLI_bool_options:
        if key in test:
            _pref_name = "talos.%s" % key
            test['preferences'][_pref_name] = test.get(key)

    for key in CLI_options:
        value = test.get(key)
        if value:
            _pref_name = "talos.%s" % key
            test['preferences'][_pref_name] = value
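
A rough sketch of how the preferences end up on the test object (the test dict is hypothetical; browser_config is not consulted in this code path):

test = {
    'tpmanifest': '/path/to/tp5n.manifest',  # hypothetical manifest path
    'tpcycles': 1,
    'tppagecycles': 25,
    'tpchrome': True,
    'gecko_profile': False,
    'preferences': {},
}
set_tp_preferences(test, browser_config={})
# test['preferences'] now holds, e.g.:
# {'talos.tpchrome': True, 'talos.tpcycles': 1,
#  'talos.tppagecycles': 25, 'talos.tpmanifest': '/path/to/tp5n.manifest'}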
Example #3
    def _run_profile(self):
        command_args = utils.GenerateBrowserCommandLine(
            self.browser_config["browser_path"],
            self.browser_config["extra_args"], self.profile_dir,
            self.browser_config["init_url"])

        def browser_log(line):
            LOG.process_output(browser.pid, line)

        browser = ProcessHandler(command_args,
                                 env=self.env,
                                 processOutputLine=browser_log)
        browser.run()
        LOG.process_start(browser.pid, ' '.join(command_args))
        try:
            exit_code = browser.wait()
        except KeyboardInterrupt:
            browser.kill()
            raise

        LOG.process_exit(browser.pid, exit_code)
        results_raw = '\n'.join(browser.output)

        if not self.PROFILE_REGEX.search(results_raw):
            LOG.info("Could not find %s in browser output" %
                     self.PROFILE_REGEX.pattern)
            LOG.info("Raw results:%s" % results_raw)
            raise TalosError("browser failed to close after being initialized")
Example #4
    def _updateCounterPathsForChildProcesses(self, counter):
        # Create a counter path for each instance of the child process that
        # is running.  If any of these paths are not in our counter list,
        # add them to our counter query and append them to the counter list,
        # so that we'll begin tracking their statistics.  We don't need to
        # worry about removing invalid paths from the list, as
        # getCounterValue() will generate a value of 0 for those.
        hq = self.registeredCounters[counter][0]
        oldCounterListLength = len(self.registeredCounters[counter][1])

        pdh.PdhEnumObjectsA(None, None, 0, 1, 0, True)

        expandedPaths = _getExpandedCounterPaths(self.childProcess, counter)
        if not expandedPaths:
            return
        for expandedPath in expandedPaths:
            alreadyInCounterList = False
            for singleCounter in self.registeredCounters[counter][1]:
                if expandedPath == singleCounter[1]:
                    alreadyInCounterList = True
            if not alreadyInCounterList:
                try:
                    newhc = HANDLE()
                    if pdh.PdhAddCounterA(hq, expandedPath, 0,
                                          byref(newhc)) != 0:
                        raise TalosError(
                            "Could not add expanded win32 counter %s" %
                            expandedPath)
                    self.registeredCounters[counter][1].append(
                        (newhc, expandedPath))
                except Exception:
                    continue

        if oldCounterListLength != len(self.registeredCounters[counter][1]):
            pdh.PdhCollectQueryData(hq)
Example #5
    def _run_profile(self):
        runner_cls = mozrunner.runners.get(
            mozinfo.info.get(
                'appname',
                'firefox'),
            mozrunner.Runner)
        args = [self.browser_config["extra_args"], self.browser_config["init_url"]]
        runner = runner_cls(profile=self.profile_dir,
                            binary=self.browser_config["browser_path"],
                            cmdargs=args,
                            env=self.env,
                            process_class=ProcessHandlerMixin,
                            process_args={})

        runner.start(outputTimeout=30)
        proc = runner.process_handler
        LOG.process_start(proc.pid, "%s %s" % (self.browser_config["browser_path"],
                                               ' '.join(args)))

        try:
            exit_code = proc.wait()
        except Exception:
            proc.kill()
            raise TalosError("Browser Failed to close properly during warmup")

        LOG.process_exit(proc.pid, exit_code)
Example #6
    def _run_profile(self):
        command_args = utils.GenerateBrowserCommandLine(
            self.browser_config["browser_path"],
            self.browser_config["extra_args"], self.profile_dir,
            self.browser_config["init_url"])

        def browser_log(line):
            logging.debug('BROWSER_OUTPUT: %s', line)

        browser = ProcessHandler(command_args,
                                 env=self.env,
                                 processOutputLine=browser_log)
        browser.run()
        try:
            browser.wait()
        except KeyboardInterrupt:
            browser.kill()
            raise

        results_raw = '\n'.join(browser.output)

        if not self.PROFILE_REGEX.search(results_raw):
            logging.info("Could not find %s in browser output",
                         self.PROFILE_REGEX.pattern)
            logging.info("Raw results:%s", results_raw)
            raise TalosError("browser failed to close after being initialized")
Example #7
def set_tp_preferences(test, browser_config):
    # sanity check pageloader values
    # mandatory options: tpmanifest, tpcycles
    if test["tpcycles"] not in six.moves.range(1, 1000):
        raise TalosError("pageloader cycles must be int 1 to 1,000")
    if "tpmanifest" not in test:
        raise TalosError("tpmanifest not found in test: %s" % test)

    # if profiling is on, override tppagecycles to prevent test hanging
    if test["gecko_profile"]:
        LOG.info(
            "Gecko profiling is enabled so talos is reducing the number "
            "of cycles, please disregard reported numbers"
        )
        for cycle_var in ["tppagecycles", "tpcycles", "cycles"]:
            if test[cycle_var] > 2:
                test[cycle_var] = 2

    CLI_bool_options = [
        "tpchrome",
        "tphero",
        "tpmozafterpaint",
        "tploadnocache",
        "tpscrolltest",
        "fnbpaint",
        "pdfpaint",
        "a11y",
    ]
    CLI_options = ["tpcycles", "tppagecycles", "tptimeout", "tpmanifest"]
    for key in CLI_bool_options:
        _pref_name = "talos.%s" % key
        if key in test:
            test["preferences"][_pref_name] = test.get(key)
        else:
            # current test doesn't use this setting, remove it from our prefs
            if _pref_name in test["preferences"]:
                del test["preferences"][_pref_name]

    for key in CLI_options:
        value = test.get(key)
        _pref_name = "talos.%s" % key
        if value:
            test["preferences"][_pref_name] = value
        else:
            # current test doesn't use this setting, remove it from our prefs
            if _pref_name in test["preferences"]:
                del test["preferences"][_pref_name]
Example #8
    def _addCounter(self, processName, counterType, counterName):
        pCounterPathElements = _PDH_COUNTER_PATH_ELEMENTS_A(
            LPSTR(None),
            LPSTR(counterType),
            LPSTR(processName),
            LPSTR(None),
            DWORD(-1),
            LPSTR(counterName),
        )

        pcchbufferSize = DWORD(0)

        # On the first call we only ask for the required buffer size, so we
        # can allocate a string big enough to hold the full counter path
        if (pdh.PdhMakeCounterPathA(
                pointer(pCounterPathElements),
                LPCSTR(0),
                pointer(pcchbufferSize),
                DWORD(0),
        ) != _PDH_MORE_DATA):
            raise TalosError(
                "Could not create counter path for counter %s for %s" %
                (counterName, processName))

        szFullPathBuffer = LPCSTR("\0" * pcchbufferSize.value)
        # Then we call again to fill in the actual counter path
        if (pdh.PdhMakeCounterPathA(
                pointer(pCounterPathElements),
                szFullPathBuffer,
                pointer(pcchbufferSize),
                DWORD(0),
        ) != 0):
            raise TalosError(
                "Could not create counter path for counter %s for %s" %
                (counterName, processName))

        path = szFullPathBuffer.value

        hq = HANDLE()
        if pdh.PdhOpenQuery(None, None, byref(hq)) != 0:
            raise TalosError("Could not open win32 counter query")

        hc = HANDLE()
        if pdh.PdhAddCounterA(hq, path, 0, byref(hc)) != 0:
            raise TalosError("Could not add win32 counter %s" % path)

        self.registeredCounters[counterName] = [hq, [(hc, path)]]
Example #9
def buildCommandLine(test):
    """build firefox command line options for tp tests"""

    # sanity check pageloader values
    # mandatory options: tpmanifest, tpcycles
    if test['tpcycles'] not in range(1, 1000):
        raise TalosError('pageloader cycles must be int 1 to 1,000')
    if test.get('tpdelay') and test['tpdelay'] not in range(1, 10000):
        raise TalosError('pageloader delay must be int 1 to 10,000')
    if 'tpmanifest' not in test:
        raise TalosError("tpmanifest not found in test: %s" % test)

    # if profiling is on, override tppagecycles to prevent test hanging
    if test['gecko_profile']:
        LOG.info("Gecko profiling is enabled so talos is reducing the number "
                 "of cycles, please disregard reported numbers")
        for cycle_var in ['tppagecycles', 'tpcycles', 'cycles']:
            if test[cycle_var] > 2:
                test[cycle_var] = 2

    # build pageloader command from options
    url = ['-tp', test['tpmanifest']]
    CLI_bool_options = [
        'tpchrome', 'tpmozafterpaint', 'tpdisable_e10s', 'tpnoisy', 'rss',
        'tprender', 'tploadnocache', 'tpscrolltest', 'fnbpaint'
    ]
    CLI_options = ['tpcycles', 'tppagecycles', 'tpdelay', 'tptimeout']
    for key in CLI_bool_options:
        if test.get(key):
            url.append('-%s' % key)

    for key in CLI_options:
        value = test.get(key)
        if value:
            url.extend(['-%s' % key, str(value)])

    # XXX we should actually return the list, but since we abuse the url
    # as a command line flag to pass to firefox all over the place,
    # we will just make a string for now
    return ' '.join(url)
Example #10
def _get_counter_prefix():
    if platform.system() == "Linux":
        return "linux"
    elif platform.system() in ("Windows", "Microsoft"):
        if "6.1" in platform.version():  # w7
            return "w7"
        elif "6.2" in platform.version():  # w8
            return "w8"
        # Bug 1264325 - FIXME: with python 2.7.11: reports win8 instead of 8.1
        elif "6.3" in platform.version():
            return "w8"
        # Bug 1264325 - FIXME: with python 2.7.11: reports win8 instead of 10
        elif "10.0" in platform.version():
            return "w8"
        else:
            raise TalosError("unsupported windows version")
    elif platform.system() == "Darwin":
        return "mac"
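
A sketch of how this prefix is typically consumed (test_config here is a hypothetical test definition dict; Example #15 below uses the same pattern):

prefix = _get_counter_prefix()  # e.g. 'linux', 'w7', 'w8' or 'mac'
counters = test_config.get('%s_counters' % prefix, [])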
Example #11
def _get_counter_prefix():
    if platform.system() == "Linux":
        return 'linux'
    elif platform.system() in ("Windows", "Microsoft"):
        if '6.1' in platform.version():  # w7
            return 'w7'
        elif '6.2' in platform.version():  # w8
            return 'w8'
        # Bug 1264325 - FIXME: with python 2.7.11: reports win8 instead of 8.1
        elif '6.3' in platform.version():
            return 'w8'
        # Bug 1264325 - FIXME: with python 2.7.11: reports win8 instead of 10
        elif '10.0' in platform.version():
            return 'w8'
        else:
            raise TalosError('unsupported windows version')
    elif platform.system() == "Darwin":
        return 'mac'
Example #12
def find_debugger_info(debug, debugger, debugger_args):
    debuggerInfo = None
    if debug or debugger or debugger_args:
        import mozdebug

        if not debugger:
            # No debugger name was provided. Look for the default ones on
            # the current OS.
            debugger = mozdebug.get_default_debugger_name(mozdebug.DebuggerSearch.KeepLooking)

        debuggerInfo = None
        if debugger:
            debuggerInfo = mozdebug.get_debugger_info(debugger, debugger_args)

        if debuggerInfo is None:
            raise TalosError('Could not find a suitable debugger in your PATH.')

    return debuggerInfo
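
A brief usage sketch (assuming mozdebug is installed and a debugger such as gdb or lldb is on PATH):

# let mozdebug pick the platform's default debugger
info = find_debugger_info(debug=True, debugger=None, debugger_args=None)

# when all three arguments are falsy, no debugger info is built
assert find_debugger_info(False, None, None) is None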
Example #13
def run_browser(command,
                minidump_dir,
                timeout=None,
                on_started=None,
                debug=None,
                debugger=None,
                debugger_args=None,
                **kwargs):
    """
    Run the browser using the given `command`.

    After the browser prints __endTimestamp, we give it up to
    `wait_for_quit_timeout` (20) seconds to quit and kill it if it's still
    alive at that point.

    Note that this method ensures that the process is killed at
    the end. If this is not possible, an exception will be raised.

    :param command: the command (as a list of strings) to run the browser
    :param minidump_dir: a path where to extract minidumps in case the
                         browser hangs. This has to be the same value
                         used in `mozcrash.check_for_crashes`.
    :param timeout: if specified, timeout to wait for the browser before
                    we raise a :class:`TalosError`
    :param on_started: a callback that can be used to do things just after
                       the browser has been started. The callback must take
                       one argument, which is the psutil.Process instance
    :param kwargs: additional keyword arguments for the :class:`ProcessHandler`
                   instance

    Returns a ProcessContext instance, with available output and pid used.
    """

    debugger_info = find_debugger_info(debug, debugger, debugger_args)
    if debugger_info is not None:
        return run_in_debug_mode(command,
                                 debugger_info,
                                 on_started=on_started,
                                 env=kwargs.get('env'))

    is_launcher = sys.platform.startswith(
        'win') and '-wait-for-browser' in command
    context = ProcessContext(is_launcher)
    first_time = int(time.time()) * 1000
    wait_for_quit_timeout = 20
    event = Event()
    reader = Reader(event)

    LOG.info("Using env: %s" % pprint.pformat(kwargs['env']))

    kwargs['storeOutput'] = False
    kwargs['processOutputLine'] = reader
    kwargs['onFinish'] = event.set
    proc = ProcessHandler(command, **kwargs)
    reader.proc = proc
    proc.run()

    LOG.process_start(proc.pid, ' '.join(command))
    try:
        context.process = psutil.Process(proc.pid)
        if on_started:
            on_started(context.process)
        # wait until we see __endTimestamp in the proc output,
        # or the browser just terminated - or we hit a timeout
        if not event.wait(timeout):
            LOG.info("Timeout waiting for test completion; killing browser...")
            # try to extract the minidump stack if the browser hangs
            kill_and_get_minidump(context, minidump_dir)
            raise TalosError("timeout")
        if reader.got_end_timestamp:
            for i in range(1, wait_for_quit_timeout):
                if proc.wait(1) is not None:
                    break
            if proc.poll() is None:
                LOG.info(
                    "Browser shutdown timed out after {0} seconds, killing"
                    " process.".format(wait_for_quit_timeout))
                kill_and_get_minidump(context, minidump_dir)
                raise TalosError(
                    "Browser shutdown timed out after {0} seconds, killed"
                    " process.".format(wait_for_quit_timeout))
        elif reader.got_timeout:
            raise TalosError('TIMEOUT: %s' % reader.timeout_message)
        elif reader.got_error:
            raise TalosError("unexpected error")
    finally:
        # this also handles KeyboardInterrupt
        # make sure the process is really terminated as early as possible
        return_code = None
        try:
            return_code = context.kill_process()
            if return_code is None:
                return_code = proc.wait(1)
        except Exception:
            # Maybe killed by kill_and_get_minidump(), maybe ended?
            LOG.info("Unable to kill process")
            LOG.info(traceback.format_exc())

    reader.output.append(
        "__startBeforeLaunchTimestamp%d__endBeforeLaunchTimestamp" %
        first_time)
    reader.output.append(
        "__startAfterTerminationTimestamp%d__endAfterTerminationTimestamp" %
        (int(time.time()) * 1000))

    if return_code is not None:
        LOG.process_exit(proc.pid, return_code)
    else:
        LOG.debug("Unable to detect exit code of the process %s." % proc.pid)
    context.output = reader.output
    return context
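
A hedged usage sketch (paths and URL are hypothetical; env must be supplied because kwargs['env'] is read unconditionally above, and the returned ProcessContext exposes pid and output per the docstring):

import os

pcontext = run_browser(
    ['/path/to/firefox', '-profile', '/path/to/profile', 'http://localhost/page.html'],
    minidump_dir='/path/to/profile/minidumps',
    timeout=3600,
    env=os.environ.copy(),
)
print(pcontext.pid)
print('\n'.join(pcontext.output))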
Example #14
def run_tests(config, browser_config):
    """Runs the talos tests on the given configuration and generates a report."""
    # get the test data
    tests = config["tests"]
    tests = useBaseTestDefaults(config.get("basetest", {}), tests)
    paths = ["profile_path", "tpmanifest", "extensions", "setup", "cleanup"]

    for test in tests:
        # Check for profile_path, tpmanifest and interpolate based on Talos
        # root https://bugzilla.mozilla.org/show_bug.cgi?id=727711
        # Build command line from config
        for path in paths:
            if test.get(path):
                if path == "extensions":
                    for _index, _ext in enumerate(test["extensions"]):
                        test["extensions"][_index] = utils.interpolate(_ext)
                else:
                    test[path] = utils.interpolate(test[path])
        if test.get("tpmanifest"):
            test["tpmanifest"] = os.path.normpath(
                "file:/%s" %
                (six.moves.urllib.parse.quote(test["tpmanifest"], "/\\t:\\")))
            test["preferences"]["talos.tpmanifest"] = test["tpmanifest"]

        # if using firstNonBlankPaint, set test preference for it
        # so that the browser pref will be turned on (in ffsetup)
        if test.get("fnbpaint", False):
            LOG.info(
                "Test is using firstNonBlankPaint, browser pref will be turned on"
            )
            test["preferences"][
                "dom.performance.time_to_non_blank_paint.enabled"] = True

        test["setup"] = utils.interpolate(test["setup"])
        test["cleanup"] = utils.interpolate(test["cleanup"])

        if not test.get("profile", False):
            test["profile"] = config.get("profile")

    if mozinfo.os == "win":
        browser_config["extra_args"] = ["-wait-for-browser", "-no-deelevate"]
    else:
        browser_config["extra_args"] = []

    # pass --no-remote to the firefox launch if --develop is specified;
    # we do that to allow the user to have another firefox instance
    # running locally
    if browser_config["develop"]:
        browser_config["extra_args"].append("--no-remote")

    # Pass subtests filter argument via a preference
    if browser_config["subtests"]:
        browser_config["preferences"]["talos.subtests"] = browser_config[
            "subtests"]

    if browser_config.get("enable_fission", False):
        browser_config["preferences"]["fission.autostart"] = True
        browser_config["preferences"][
            "dom.serviceWorkers.parent_intercept"] = True

    browser_config["preferences"]["network.proxy.type"] = 2
    browser_config["preferences"]["network.proxy.autoconfig_url"] = (
        """data:text/plain,
function FindProxyForURL(url, host) {
  if (url.startsWith('http')) {
   return 'PROXY %s';
  }
  return 'DIRECT';
}""" % browser_config["webserver"])

    # If --code-coverage files are expected, set a flag in browser config so
    # ffsetup knows that it needs to delete any ccov files resulting from
    # browser initialization.
    # NOTE: This is only supported in production; local setup of ccov folders
    # and data collection is not supported yet, so running with the
    # --code-coverage flag locally is rejected below.
    if config.get("code_coverage", False):
        if browser_config["develop"]:
            raise TalosError("Aborting: talos --code-coverage flag is only "
                             "supported in production")
        else:
            browser_config["code_coverage"] = True

    # set defaults
    testdate = config.get("testdate", "")

    # get the process name from the path to the browser
    if not browser_config["process"]:
        browser_config["process"] = os.path.basename(
            browser_config["browser_path"])

    # fix paths to substitute
    # `os.path.dirname(os.path.abspath(__file__))` for ${talos}
    # https://bugzilla.mozilla.org/show_bug.cgi?id=705809
    browser_config["extensions"] = [
        utils.interpolate(i) for i in browser_config["extensions"]
    ]
    browser_config["bcontroller_config"] = utils.interpolate(
        browser_config["bcontroller_config"])

    # normalize browser path to work across platforms
    browser_config["browser_path"] = os.path.normpath(
        browser_config["browser_path"])

    binary = browser_config["browser_path"]
    version_info = mozversion.get_version(binary=binary)
    browser_config["browser_name"] = version_info["application_name"]
    browser_config["browser_version"] = version_info["application_version"]
    browser_config["buildid"] = version_info["application_buildid"]
    try:
        browser_config["repository"] = version_info["application_repository"]
        browser_config["sourcestamp"] = version_info["application_changeset"]
    except KeyError:
        if not browser_config["develop"]:
            print("Abort: unable to find changeset or repository: %s" %
                  version_info)
            sys.exit(1)
        else:
            browser_config["repository"] = "develop"
            browser_config["sourcestamp"] = "develop"

    # get test date in seconds since epoch
    if testdate:
        date = int(
            time.mktime(time.strptime(testdate, "%a, %d %b %Y %H:%M:%S GMT")))
    else:
        date = int(time.time())
    LOG.debug("using testdate: %d" % date)
    LOG.debug("actual date: %d" % int(time.time()))

    # results container
    talos_results = TalosResults()

    # results links
    if not browser_config["develop"] and not config["gecko_profile"]:
        results_urls = dict(
            # another hack; datazilla stands for Perfherder
            # and does not require a url, but a non-empty dict is required...
            output_urls=["local.json"], )
    else:
        # local mode, output to files
        results_urls = dict(output_urls=[os.path.abspath("local.json")])

    httpd = setup_webserver(browser_config["webserver"])
    httpd.start()

    # legacy still required for perfherder data
    talos_results.add_extra_option("e10s")
    talos_results.add_extra_option("stylo")

    # measuring the difference at a certain thread level
    if config.get("stylothreads", 0) > 0:
        talos_results.add_extra_option("%s_thread" % config["stylothreads"])

    if config["gecko_profile"]:
        talos_results.add_extra_option("gecko-profile")

    # differentiate fission vs non-fission results in perfherder
    if browser_config.get("enable_fission", False):
        talos_results.add_extra_option("fission")

    # differentiate webrender from non-webrender results
    if browser_config["preferences"].get("gfx.webrender.software", False):
        talos_results.add_extra_option("webrender-sw")
    elif browser_config.get("enable_webrender", False):
        talos_results.add_extra_option("webrender")

    # differentiate webgl from webgl-ipc results
    if browser_config["preferences"].get("webgl.out-of-process", False):
        talos_results.add_extra_option("webgl-ipc")

    testname = None

    # run the tests
    timer = utils.Timer()
    LOG.suite_start(tests=[test["name"] for test in tests])
    try:
        for test in tests:
            testname = test["name"]
            LOG.test_start(testname)

            if not test.get("url"):
                # set browser prefs for pageloader test settings (doesn't use cmd line args / url)
                test["url"] = None
                set_tp_preferences(test, browser_config)

            mytest = TTest()

            # some tests like ts_paint return multiple results in a single iteration
            if test.get("firstpaint", False) or test.get("userready", None):
                # we need a 'testeventmap' to tell us which tests each event should map to
                multi_value_result = None
                separate_results_list = []

                test_event_map = test.get("testeventmap", None)
                if test_event_map is None:
                    raise TalosError("Need 'testeventmap' in test.py for %s" %
                                     test.get("name"))

                # run the test
                multi_value_result = mytest.runTest(browser_config, test)
                if multi_value_result is None:
                    raise TalosError("Abort: no results returned for %s" %
                                     test.get("name"))

                # parse out the multi-value results, and 'fake it' to appear like separate tests
                separate_results_list = convert_to_separate_test_results(
                    multi_value_result, test_event_map)

                # now we have three separate test results, store them
                for test_result in separate_results_list:
                    talos_results.add(test_result)

            # some tests like bloom_basic run two separate tests and then compare those values
            # we want the results in perfherder to only be the actual difference between those
            # and store the base and reference test replicates in results.json for upload
            elif test.get("base_vs_ref", False):
                # run the test, results will be reported for each page like two tests in the suite
                base_and_reference_results = mytest.runTest(
                    browser_config, test)
                # now compare each test, and create a new test object for the comparison
                talos_results.add(
                    make_comparison_result(base_and_reference_results))
            else:
                # just expecting regular test - one result value per iteration
                talos_results.add(mytest.runTest(browser_config, test))
            LOG.test_end(testname, status="OK")

    except TalosRegression as exc:
        LOG.error("Detected a regression for %s" % testname)
        # by returning 1, we report an orange to buildbot
        # http://docs.buildbot.net/latest/developer/results.html
        LOG.test_end(testname,
                     status="FAIL",
                     message=str(exc),
                     stack=traceback.format_exc())
        return 1
    except Exception as exc:
        # NOTE: if we get into this condition, talos has an internal
        # problem and cannot continue
        #       this will prevent future tests from running
        LOG.test_end(testname,
                     status="ERROR",
                     message=str(exc),
                     stack=traceback.format_exc())
        # indicate a failure to buildbot, turn the job red
        return 2
    finally:
        LOG.suite_end()
        httpd.stop()

    LOG.info("Completed test suite (%s)" % timer.elapsed())

    # output results
    if results_urls and not browser_config["no_upload_results"]:
        talos_results.output(results_urls)
        if browser_config["develop"] or config["gecko_profile"]:
            print("Thanks for running Talos locally. Results are in %s" %
                  (results_urls["output_urls"]))

    # when running talos locally with gecko profiling on, use the view-gecko-profile
    # tool to automatically load the latest gecko profile in profiler.firefox.com
    if config["gecko_profile"] and browser_config["develop"]:
        if os.environ.get("DISABLE_PROFILE_LAUNCH", "0") == "1":
            LOG.info(
                "Not launching profiler.firefox.com because DISABLE_PROFILE_LAUNCH=1"
            )
        else:
            view_gecko_profile_from_talos()

    # we will stop running tests on a failed test, or we will return 0 for
    # green
    return 0
Example #15
    def _runTest(self, browser_config, test_config, setup):
        minidump_dir = os.path.join(setup.profile_dir, 'minidumps')
        counters = test_config.get('%s_counters' % self._get_counter_prefix(),
                                   [])
        resolution = test_config['resolution']

        # add the mainthread_io to the environment variable, as defined
        # in test.py configs
        here = os.path.dirname(os.path.realpath(__file__))
        if test_config['mainthread']:
            mainthread_io = os.path.join(here, "mainthread_io.log")
            setup.env['MOZ_MAIN_THREAD_IO_LOG'] = mainthread_io

        if browser_config['disable_stylo']:
            if browser_config['stylothreads']:
                raise TalosError(
                    "--disable-stylo conflicts with --stylo-threads")
            if browser_config['enable_stylo']:
                raise TalosError(
                    "--disable-stylo conflicts with --enable-stylo")

        # As we transition to Stylo, we need to set env vars and output data properly
        if browser_config['enable_stylo']:
            setup.env['STYLO_FORCE_ENABLED'] = '1'
        if browser_config['disable_stylo']:
            setup.env['STYLO_FORCE_DISABLED'] = '1'

        # During the Stylo transition, measure different number of threads
        if browser_config.get('stylothreads', 0) > 0:
            setup.env['STYLO_THREADS'] = str(browser_config['stylothreads'])

        test_config['url'] = utils.interpolate(
            test_config['url'],
            profile=setup.profile_dir,
            firefox=browser_config['browser_path'])

        # setup global (cross-cycle) counters:
        # shutdown, responsiveness
        global_counters = {}
        if browser_config.get('xperf_path'):
            for c in test_config.get('xperf_counters', []):
                global_counters[c] = []

        if test_config['shutdown']:
            global_counters['shutdown'] = []
        if test_config.get('responsiveness') and \
           platform.system() != "Darwin":
            # ignore osx for now as per bug 1245793
            setup.env['MOZ_INSTRUMENT_EVENT_LOOP'] = '1'
            setup.env['MOZ_INSTRUMENT_EVENT_LOOP_THRESHOLD'] = '20'
            setup.env['MOZ_INSTRUMENT_EVENT_LOOP_INTERVAL'] = '10'
            global_counters['responsiveness'] = []

        setup.env['JSGC_DISABLE_POISONING'] = '1'
        setup.env['MOZ_DISABLE_NONLOCAL_CONNECTIONS'] = '1'

        # if using mitmproxy we must allow access to 'external' sites
        if browser_config.get('mitmproxy', False):
            LOG.info(
                "Using mitmproxy so setting MOZ_DISABLE_NONLOCAL_CONNECTIONS to 0"
            )
            setup.env['MOZ_DISABLE_NONLOCAL_CONNECTIONS'] = '0'

        # instantiate an object to hold test results
        test_results = results.TestResults(test_config, global_counters,
                                           browser_config.get('framework'))

        for i in range(test_config['cycles']):
            LOG.info("Running cycle %d/%d for %s test..." %
                     (i + 1, test_config['cycles'], test_config['name']))

            # remove the browser error file
            mozfile.remove(browser_config['error_filename'])

            # reinstall any file whose stability we need to ensure across
            # the cycles
            if test_config.get('reinstall', ''):
                for keep in test_config['reinstall']:
                    origin = os.path.join(test_config['profile_path'], keep)
                    dest = os.path.join(setup.profile_dir, keep)
                    LOG.debug("Reinstalling %s on top of %s" % (origin, dest))
                    shutil.copy(origin, dest)

            # Run the test
            timeout = test_config.get('timeout', 7200)  # 2 hours default
            if setup.gecko_profile:
                # When profiling, give the browser some extra time
                # to dump the profile.
                timeout += 5 * 60

            command_args = utils.GenerateBrowserCommandLine(
                browser_config["browser_path"],
                browser_config["extra_args"],
                setup.profile_dir,
                test_config['url'],
                profiling_info=(setup.gecko_profile.profiling_info
                                if setup.gecko_profile else None))

            mainthread_error_count = 0
            if test_config['setup']:
                # Generate bcontroller.json for xperf
                talosconfig.generateTalosConfig(command_args, browser_config,
                                                test_config)
                subprocess.call(['python'] + test_config['setup'].split(), )

            counter_management = None
            if counters:
                counter_management = CounterManagement(
                    browser_config['process'], counters, resolution)

            try:
                pcontext = run_browser(
                    command_args,
                    minidump_dir,
                    timeout=timeout,
                    env=setup.env,
                    # start collecting counters as soon as possible
                    on_started=(counter_management.start
                                if counter_management else None),
                )
            except:
                self.check_for_crashes(browser_config, minidump_dir,
                                       test_config['name'])
                raise
            finally:
                if counter_management:
                    counter_management.stop()

            if test_config['mainthread']:
                rawlog = os.path.join(here, "mainthread_io.log")
                if os.path.exists(rawlog):
                    processedlog = \
                        os.path.join(here, 'mainthread_io.json')
                    xre_path = \
                        os.path.dirname(browser_config['browser_path'])
                    mtio_py = os.path.join(here, 'mainthreadio.py')
                    command = [
                        'python', mtio_py, rawlog, processedlog, xre_path
                    ]
                    mtio = subprocess.Popen(command,
                                            env=os.environ.copy(),
                                            stdout=subprocess.PIPE)
                    output, stderr = mtio.communicate()
                    for line in output.split('\n'):
                        if line.strip() == "":
                            continue

                        print(line)
                        mainthread_error_count += 1
                    mozfile.remove(rawlog)

            if test_config['cleanup']:
                # HACK: add the pid to support xperf where we require
                # the pid in post processing
                talosconfig.generateTalosConfig(command_args,
                                                browser_config,
                                                test_config,
                                                pid=pcontext.pid)
                subprocess.call([sys.executable] +
                                test_config['cleanup'].split())

            # For startup tests, we launch the browser multiple times
            # with the same profile
            for fname in ('sessionstore.js', '.parentlock',
                          'sessionstore.bak'):
                mozfile.remove(os.path.join(setup.profile_dir, fname))

            # check for xperf errors
            if os.path.exists(browser_config['error_filename']) or \
               mainthread_error_count > 0:
                raise TalosRegression(
                    "Talos has found a regression, if you have questions"
                    " ask for help in irc on #perf")

            # add the results from the browser output
            test_results.add('\n'.join(pcontext.output),
                             counter_results=(counter_management.results()
                                              if counter_management else None))

            if setup.gecko_profile:
                setup.gecko_profile.symbolicate(i)

            self.check_for_crashes(browser_config, minidump_dir,
                                   test_config['name'])

        # include global (cross-cycle) counters
        test_results.all_counter_results.extend([{
            key: value
        } for key, value in global_counters.items()])
        for c in test_results.all_counter_results:
            for key, value in c.items():
                LOG.debug("COUNTER %r: %s" % (key, value))

        # return results
        return test_results
Example #16
class TTest(object):

    if platform.system() == "Linux":
        platform_type = 'linux_'
    elif platform.system() in ("Windows", "Microsoft"):
        if '5.1' in platform.version():  # winxp
            platform_type = 'win_'
        elif '6.1' in platform.version():  # w7
            platform_type = 'w7_'
        elif '6.2' in platform.version():  # w8
            platform_type = 'w8_'
        else:
            raise TalosError('unsupported windows version')
    elif platform.system() == "Darwin":
        platform_type = 'mac_'

    def check_for_crashes(self, browser_config, profile_dir, test_name):
        # check for minidumps
        minidumpdir = os.path.join(profile_dir, 'minidumps')
        found = mozcrash.check_for_crashes(minidumpdir,
                                           browser_config['symbols_path'],
                                           test_name=test_name)
        mozfile.remove(minidumpdir)

        if found:
            raise TalosCrash("Found crashes after test run, terminating test")

    def runTest(self, browser_config, test_config):
        """
            Runs a url-based test on the browser as specified in the
            browser_config dictionary

        Args:
            browser_config:  Dictionary of configuration options for the
                             browser (paths, prefs, etc)
            test_config   :  Dictionary of configuration for the given
                             test (url, cycles, counters, etc)

        """

        logging.debug("operating with platform_type : %s", self.platform_type)
        with FFSetup(browser_config, test_config) as setup:
            return self._runTest(browser_config, test_config, setup)

    def _runTest(self, browser_config, test_config, setup):
        counters = test_config.get(self.platform_type + 'counters', [])
        resolution = test_config['resolution']

        # add the mainthread_io to the environment variable, as defined
        # in test.py configs
        here = os.path.dirname(os.path.realpath(__file__))
        if test_config['mainthread']:
            mainthread_io = os.path.join(here, "mainthread_io.log")
            setup.env['MOZ_MAIN_THREAD_IO_LOG'] = mainthread_io

        test_config['url'] = utils.interpolate(
            test_config['url'],
            profile=setup.profile_dir,
            firefox=browser_config['browser_path'])

        # setup global (cross-cycle) counters:
        # shutdown, responsiveness
        global_counters = {}
        if browser_config.get('xperf_path'):
            for c in test_config.get('xperf_counters', []):
                global_counters[c] = []

        if test_config['shutdown']:
            global_counters['shutdown'] = []
        if test_config.get('responsiveness') and \
                platform.system() != "Linux":
            # ignore responsiveness tests on linux until we fix
            # Bug 710296
            setup.env['MOZ_INSTRUMENT_EVENT_LOOP'] = '1'
            setup.env['MOZ_INSTRUMENT_EVENT_LOOP_THRESHOLD'] = '20'
            setup.env['MOZ_INSTRUMENT_EVENT_LOOP_INTERVAL'] = '10'
            global_counters['responsiveness'] = []

        # instantiate an object to hold test results
        test_results = results.TestResults(test_config, global_counters)

        for i in range(test_config['cycles']):
            logging.info("Running cycle %d/%d for %s test...", i + 1,
                         test_config['cycles'], test_config['name'])

            # remove the browser error file
            mozfile.remove(browser_config['error_filename'])

            # reinstall any file whose stability we need to ensure across
            # the cycles
            if test_config.get('reinstall', ''):
                for keep in test_config['reinstall']:
                    origin = os.path.join(test_config['profile_path'], keep)
                    dest = os.path.join(setup.profile_dir, keep)
                    logging.debug("Reinstalling %s on top of %s", origin, dest)
                    shutil.copy(origin, dest)

            # Run the test
            timeout = test_config.get('timeout', 7200)  # 2 hours default
            if setup.sps_profile:
                # When profiling, give the browser some extra time
                # to dump the profile.
                timeout += 5 * 60

            command_args = utils.GenerateBrowserCommandLine(
                browser_config["browser_path"],
                browser_config["extra_args"],
                setup.profile_dir,
                test_config['url'],
                profiling_info=(setup.sps_profile.profiling_info
                                if setup.sps_profile else None))

            mainthread_error_count = 0
            if test_config['setup']:
                # Generate bcontroller.json for xperf
                talosconfig.generateTalosConfig(command_args, browser_config,
                                                test_config)
                subprocess.call(['python'] + test_config['setup'].split(), )

            mm_httpd = None

            if test_config['name'] == 'media_tests':
                from startup_test.media import media_manager
                mm_httpd = media_manager.run_server(
                    os.path.dirname(os.path.realpath(__file__)))

            counter_management = None
            if counters:
                counter_management = CounterManagement(
                    browser_config['process'], counters, resolution)

            try:
                pcontext = run_browser(
                    command_args,
                    timeout=timeout,
                    env=setup.env,
                    # start collecting counters as soon as possible
                    on_started=(counter_management.start
                                if counter_management else None),
                )
            finally:
                if counter_management:
                    counter_management.stop()
                if mm_httpd:
                    mm_httpd.stop()

            if test_config['mainthread']:
                rawlog = os.path.join(here, "mainthread_io.log")
                if os.path.exists(rawlog):
                    processedlog = \
                        os.path.join(here, 'mainthread_io.json')
                    xre_path = \
                        os.path.dirname(browser_config['browser_path'])
                    mtio_py = os.path.join(here, 'mainthreadio.py')
                    command = [
                        'python', mtio_py, rawlog, processedlog, xre_path
                    ]
                    mtio = subprocess.Popen(command,
                                            env=os.environ.copy(),
                                            stdout=subprocess.PIPE)
                    output, stderr = mtio.communicate()
                    for line in output.split('\n'):
                        if line.strip() == "":
                            continue

                        print(line)
                        mainthread_error_count += 1
                    mozfile.remove(rawlog)

            if test_config['cleanup']:
                # HACK: add the pid to support xperf where we require
                # the pid in post processing
                talosconfig.generateTalosConfig(command_args,
                                                browser_config,
                                                test_config,
                                                pid=pcontext.pid)
                subprocess.call([sys.executable] +
                                test_config['cleanup'].split())

            # For startup tests, we launch the browser multiple times
            # with the same profile
            for fname in ('sessionstore.js', '.parentlock',
                          'sessionstore.bak'):
                mozfile.remove(os.path.join(setup.profile_dir, fname))

            # check for xperf errors
            if os.path.exists(browser_config['error_filename']) or \
               mainthread_error_count > 0:
                raise TalosRegression(
                    "Talos has found a regression, if you have questions"
                    " ask for help in irc on #perf")

            # add the results from the browser output
            test_results.add('\n'.join(pcontext.output),
                             counter_results=(counter_management.results()
                                              if counter_management else None))

            if setup.sps_profile:
                setup.sps_profile.symbolicate(i)

            self.check_for_crashes(browser_config, setup.profile_dir,
                                   test_config['name'])

        # include global (cross-cycle) counters
        test_results.all_counter_results.extend([{
            key: value
        } for key, value in global_counters.items()])
        for c in test_results.all_counter_results:
            for key, value in c.items():
                logging.debug("COUNTER %r: %s", key, value)

        # return results
        return test_results
Example #17
def run_tests(config, browser_config):
    """Runs the talos tests on the given configuration and generates a report.
    """
    # get the test data
    tests = config['tests']
    tests = useBaseTestDefaults(config.get('basetest', {}), tests)
    paths = ['profile_path', 'tpmanifest', 'extensions', 'setup', 'cleanup']

    for test in tests:
        # Check for profile_path, tpmanifest and interpolate based on Talos
        # root https://bugzilla.mozilla.org/show_bug.cgi?id=727711
        # Build command line from config
        for path in paths:
            if test.get(path):
                if path == 'extensions':
                    for _index, _ext in enumerate(test['extensions']):
                        test['extensions'][_index] = utils.interpolate(_ext)
                else:
                    test[path] = utils.interpolate(test[path])
        if test.get('tpmanifest'):
            test['tpmanifest'] = \
                os.path.normpath('file:/%s' % (urllib.quote(test['tpmanifest'],
                                               '/\\t:\\')))
            test['preferences']['talos.tpmanifest'] = test['tpmanifest']

        # if using firstNonBlankPaint, set test preference for it
        # so that the browser pref will be turned on (in ffsetup)
        if test.get('fnbpaint', False):
            LOG.info(
                "Test is using firstNonBlankPaint, browser pref will be turned on"
            )
            test['preferences'][
                'dom.performance.time_to_non_blank_paint.enabled'] = True

        test['setup'] = utils.interpolate(test['setup'])
        test['cleanup'] = utils.interpolate(test['cleanup'])

        if not test.get('profile', False):
            test['profile'] = config.get('profile')

    if mozinfo.os == 'win':
        browser_config['extra_args'] = ['-wait-for-browser', '-no-deelevate']
    else:
        browser_config['extra_args'] = []

    # pass --no-remote to the firefox launch if --develop is specified;
    # we do that to allow the user to have another firefox instance
    # running locally
    if browser_config['develop']:
        browser_config['extra_args'].append('--no-remote')

    # Pass subtests filter argument via a preference
    if browser_config['subtests']:
        browser_config['preferences']['talos.subtests'] = browser_config[
            'subtests']

    # If --code-coverage files are expected, set a flag in browser config so
    # ffsetup knows that it needs to delete any ccov files resulting from
    # browser initialization.
    # NOTE: This is only supported in production; local setup of ccov folders
    # and data collection is not supported yet, so running with the
    # --code-coverage flag locally is rejected below.
    if config.get('code_coverage', False):
        if browser_config['develop']:
            raise TalosError('Aborting: talos --code-coverage flag is only '
                             'supported in production')
        else:
            browser_config['code_coverage'] = True

    # set defaults
    testdate = config.get('testdate', '')

    # get the process name from the path to the browser
    if not browser_config['process']:
        browser_config['process'] = \
            os.path.basename(browser_config['browser_path'])

    # fix paths to substitute
    # `os.path.dirname(os.path.abspath(__file__))` for ${talos}
    # https://bugzilla.mozilla.org/show_bug.cgi?id=705809
    browser_config['extensions'] = [
        utils.interpolate(i) for i in browser_config['extensions']
    ]
    browser_config['bcontroller_config'] = \
        utils.interpolate(browser_config['bcontroller_config'])

    # normalize browser path to work across platforms
    browser_config['browser_path'] = \
        os.path.normpath(browser_config['browser_path'])

    binary = browser_config["browser_path"]
    version_info = mozversion.get_version(binary=binary)
    browser_config['browser_name'] = version_info['application_name']
    browser_config['browser_version'] = version_info['application_version']
    browser_config['buildid'] = version_info['application_buildid']
    try:
        browser_config['repository'] = version_info['application_repository']
        browser_config['sourcestamp'] = version_info['application_changeset']
    except KeyError:
        if not browser_config['develop']:
            print("Abort: unable to find changeset or repository: %s" %
                  version_info)
            sys.exit(1)
        else:
            browser_config['repository'] = 'develop'
            browser_config['sourcestamp'] = 'develop'

    # get test date in seconds since epoch
    if testdate:
        date = int(
            time.mktime(time.strptime(testdate, '%a, %d %b %Y %H:%M:%S GMT')))
    else:
        date = int(time.time())
    LOG.debug("using testdate: %d" % date)
    LOG.debug("actual date: %d" % int(time.time()))

    # results container
    talos_results = TalosResults()

    # results links
    if not browser_config['develop'] and not config['gecko_profile']:
        results_urls = dict(
            # another hack; datazilla stands for Perfherder
            # and does not require a url, but a non-empty dict is required...
            output_urls=['local.json'], )
    else:
        # local mode, output to files
        results_urls = dict(output_urls=[os.path.abspath('local.json')])

    httpd = setup_webserver(browser_config['webserver'])
    httpd.start()

    # legacy still required for perfherder data
    talos_results.add_extra_option('e10s')
    talos_results.add_extra_option('stylo')

    # measuring the difference at a certain thread level
    if config.get('stylothreads', 0) > 0:
        talos_results.add_extra_option('%s_thread' % config['stylothreads'])

    if config['gecko_profile']:
        talos_results.add_extra_option('geckoProfile')

    # some tests use mitmproxy to playback pages
    mitmproxy_recordings_list = config.get('mitmproxy', False)
    if mitmproxy_recordings_list is not False:
        # needed so can tell talos ttest to allow external connections
        browser_config['mitmproxy'] = True

        # start mitmproxy playback; this also generates the CA certificate
        mitmdump_path = config.get('mitmdumpPath', False)
        if mitmdump_path is False:
            # cannot continue, need path for mitmdump playback tool
            raise TalosError(
                'Aborting: mitmdumpPath not provided on cmd line but is required'
            )

        mitmproxy_recording_path = os.path.join(here, 'mitmproxy')
        mitmproxy_proc = mitmproxy.start_mitmproxy_playback(
            mitmdump_path, mitmproxy_recording_path,
            mitmproxy_recordings_list.split(), browser_config['browser_path'])

        # install the generated CA certificate into Firefox
        # mitmproxy cert setup needs path to mozharness install; mozharness has set this
        mitmproxy.install_mitmproxy_cert(mitmproxy_proc,
                                         browser_config['browser_path'])

    testname = None

    # run the tests
    timer = utils.Timer()
    LOG.suite_start(tests=[test['name'] for test in tests])
    try:
        for test in tests:
            testname = test['name']
            LOG.test_start(testname)

            if not test.get('url'):
                # set browser prefs for pageloader test settings (doesn't use cmd line args / url)
                test['url'] = None
                set_tp_preferences(test, browser_config)

            mytest = TTest()

            # some tests like ts_paint return multiple results in a single iteration
            if test.get('firstpaint', False) or test.get('userready', None):
                # we need a 'testeventmap' to tell us which tests each event should map to
                multi_value_result = None
                separate_results_list = []

                test_event_map = test.get('testeventmap', None)
                if test_event_map is None:
                    raise TalosError("Need 'testeventmap' in test.py for %s" %
                                     test.get('name'))

                # run the test
                multi_value_result = mytest.runTest(browser_config, test)
                if multi_value_result is None:
                    raise TalosError("Abort: no results returned for %s" %
                                     test.get('name'))

                # parse out the multi-value results, and 'fake it' to appear like separate tests
                separate_results_list = convert_to_separate_test_results(
                    multi_value_result, test_event_map)

                # now we have three separate test results, store them
                for test_result in separate_results_list:
                    talos_results.add(test_result)

            # some tests like bloom_basic run two separate tests and then compare those values
            # we want the results in perfherder to only be the actual difference between those
            # and store the base and reference test replicates in results.json for upload
            elif test.get('base_vs_ref', False):
                # run the test, results will be reported for each page like two tests in the suite
                base_and_reference_results = mytest.runTest(
                    browser_config, test)
                # now compare each test, and create a new test object for the comparison
                talos_results.add(
                    make_comparison_result(base_and_reference_results))
            else:
                # just expecting regular test - one result value per iteration
                talos_results.add(mytest.runTest(browser_config, test))
            LOG.test_end(testname, status='OK')

    except TalosRegression as exc:
        LOG.error("Detected a regression for %s" % testname)
        # by returning 1, we report an orange to buildbot
        # http://docs.buildbot.net/latest/developer/results.html
        LOG.test_end(testname,
                     status='FAIL',
                     message=str(exc),
                     stack=traceback.format_exc())
        return 1
    except Exception as exc:
        # NOTE: if we get into this condition, talos has an internal
        # problem and cannot continue
        #       this will prevent future tests from running
        LOG.test_end(testname,
                     status='ERROR',
                     message=str(exc),
                     stack=traceback.format_exc())
        # indicate a failure to buildbot, turn the job red
        return 2
    finally:
        LOG.suite_end()
        httpd.stop()

    LOG.info("Completed test suite (%s)" % timer.elapsed())

    # if mitmproxy was used for page playback, stop it
    if mitmproxy_recordings_list is not False:
        mitmproxy.stop_mitmproxy_playback(mitmproxy_proc)

    # output results
    if results_urls and not browser_config['no_upload_results']:
        talos_results.output(results_urls)
        if browser_config['develop'] or config['gecko_profile']:
            print("Thanks for running Talos locally. Results are in %s" %
                  (results_urls['output_urls']))

    # when running talos locally with gecko profiling on, use the view-gecko-profile
    # tool to automatically load the latest gecko profile in perf-html.io
    if config['gecko_profile'] and browser_config['develop']:
        if os.environ.get('DISABLE_PROFILE_LAUNCH', '0') == '1':
            LOG.info(
                "Not launching perf-html.io because DISABLE_PROFILE_LAUNCH=1")
        else:
            view_gecko_profile(config['browser_path'])

    # we will stop running tests on a failed test, or we will return 0 for
    # green
    return 0