Example #1
    def _runTest(self, browser_config, test_config, setup):
        minidump_dir = os.path.join(setup.profile_dir, 'minidumps')
        counters = test_config.get(self.platform_type + 'counters', [])
        resolution = test_config['resolution']

        # expose the mainthread_io log path via an environment variable,
        # as defined in the test.py configs
        here = os.path.dirname(os.path.realpath(__file__))
        if test_config['mainthread']:
            mainthread_io = os.path.join(here, "mainthread_io.log")
            setup.env['MOZ_MAIN_THREAD_IO_LOG'] = mainthread_io

        test_config['url'] = utils.interpolate(
            test_config['url'],
            profile=setup.profile_dir,
            firefox=browser_config['browser_path'])

        # setup global (cross-cycle) counters:
        # shutdown, responsiveness
        global_counters = {}
        if browser_config.get('xperf_path'):
            for c in test_config.get('xperf_counters', []):
                global_counters[c] = []

        if test_config['shutdown']:
            global_counters['shutdown'] = []
        if test_config.get('responsiveness') and \
                platform.system() != "Linux":
            # ignore responsiveness tests on linux until we fix
            # Bug 710296
            setup.env['MOZ_INSTRUMENT_EVENT_LOOP'] = '1'
            setup.env['MOZ_INSTRUMENT_EVENT_LOOP_THRESHOLD'] = '20'
            setup.env['MOZ_INSTRUMENT_EVENT_LOOP_INTERVAL'] = '10'
            global_counters['responsiveness'] = []

        # instantiate an object to hold test results
        test_results = results.TestResults(test_config, global_counters)

        for i in range(test_config['cycles']):
            LOG.info("Running cycle %d/%d for %s test..." %
                     (i + 1, test_config['cycles'], test_config['name']))

            # remove the browser error file
            mozfile.remove(browser_config['error_filename'])

            # reinstall any file whose stability we need to ensure across
            # the cycles
            if test_config.get('reinstall', ''):
                for keep in test_config['reinstall']:
                    origin = os.path.join(test_config['profile_path'], keep)
                    dest = os.path.join(setup.profile_dir, keep)
                    LOG.debug("Reinstalling %s on top of %s" % (origin, dest))
                    shutil.copy(origin, dest)

            # Run the test
            timeout = test_config.get('timeout', 7200)  # 2 hours default
            if setup.sps_profile:
                # When profiling, give the browser some extra time
                # to dump the profile.
                timeout += 5 * 60

            command_args = utils.GenerateBrowserCommandLine(
                browser_config["browser_path"],
                browser_config["extra_args"],
                setup.profile_dir,
                test_config['url'],
                profiling_info=(setup.sps_profile.profiling_info
                                if setup.sps_profile else None))

            mainthread_error_count = 0
            if test_config['setup']:
                # Generate bcontroller.json for xperf
                talosconfig.generateTalosConfig(command_args, browser_config,
                                                test_config)
            subprocess.call(['python'] + test_config['setup'].split())

            mm_httpd = None

            if test_config['name'] == 'media_tests':
                from startup_test.media import media_manager
                mm_httpd = media_manager.run_server(
                    os.path.dirname(os.path.realpath(__file__)))

            counter_management = None
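            # if any counters are configured for this platform, sample them
            # from the browser process at the requested resolution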
            if counters:
                counter_management = CounterManagement(
                    browser_config['process'], counters, resolution)

            try:
                pcontext = run_browser(
                    command_args,
                    minidump_dir,
                    timeout=timeout,
                    env=setup.env,
                    # start collecting counters as soon as possible
                    on_started=(counter_management.start
                                if counter_management else None),
                )
            finally:
                if counter_management:
                    counter_management.stop()
                if mm_httpd:
                    mm_httpd.stop()

            if test_config['mainthread']:
                rawlog = os.path.join(here, "mainthread_io.log")
                if os.path.exists(rawlog):
                    processedlog = \
                        os.path.join(here, 'mainthread_io.json')
                    xre_path = \
                        os.path.dirname(browser_config['browser_path'])
                    mtio_py = os.path.join(here, 'mainthreadio.py')
                    command = [
                        'python', mtio_py, rawlog, processedlog, xre_path
                    ]
                    mtio = subprocess.Popen(command,
                                            env=os.environ.copy(),
                                            stdout=subprocess.PIPE)
                    output, stderr = mtio.communicate()
                    for line in output.split('\n'):
                        if line.strip() == "":
                            continue

                        print(line)
                        mainthread_error_count += 1
                    mozfile.remove(rawlog)

            if test_config['cleanup']:
                # HACK: add the pid to support xperf where we require
                # the pid in post processing
                talosconfig.generateTalosConfig(command_args,
                                                browser_config,
                                                test_config,
                                                pid=pcontext.pid)
                subprocess.call([sys.executable] +
                                test_config['cleanup'].split())

            # For startup tests, we launch the browser multiple times
            # with the same profile
            for fname in ('sessionstore.js', '.parentlock',
                          'sessionstore.bak'):
                mozfile.remove(os.path.join(setup.profile_dir, fname))

            # check for xperf errors
            if os.path.exists(browser_config['error_filename']) or \
               mainthread_error_count > 0:
                raise TalosRegression(
                    "Talos has found a regression, if you have questions"
                    " ask for help in irc on #perf")

            # add the results from the browser output
            test_results.add('\n'.join(pcontext.output),
                             counter_results=(counter_management.results()
                                              if counter_management else None))

            if setup.sps_profile:
                setup.sps_profile.symbolicate(i)

            self.check_for_crashes(browser_config, minidump_dir,
                                   test_config['name'])

        # include global (cross-cycle) counters
        test_results.all_counter_results.extend([{
            key: value
        } for key, value in global_counters.items()])
        for c in test_results.all_counter_results:
            for key, value in c.items():
                LOG.debug("COUNTER %r: %s" % (key, value))

        # return results
        return test_results
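
The core per-cycle pattern in this first version is to start counter collection the moment the browser is launched (via on_started) and to stop it in a finally block, so a hung or crashed run cannot leave the sampler running. Below is a minimal, self-contained sketch of that pattern; CounterSampler and run_workload are hypothetical stand-ins, not the Talos CounterManagement or run_browser APIs.

import threading
import time


class CounterSampler(object):
    """Hypothetical stand-in for CounterManagement: samples a callable
    at a fixed resolution on a background thread."""

    def __init__(self, sample_fn, resolution=1.0):
        self._sample_fn = sample_fn
        self._resolution = resolution
        self._samples = []
        self._stop = threading.Event()
        self._thread = threading.Thread(target=self._loop)

    def _loop(self):
        while not self._stop.is_set():
            self._samples.append(self._sample_fn())
            self._stop.wait(self._resolution)

    def start(self):
        self._thread.start()

    def stop(self):
        self._stop.set()
        if self._thread.is_alive():
            self._thread.join()

    def results(self):
        return list(self._samples)


def run_workload(on_started=None):
    # stand-in for run_browser(): signal start, then do the "work"
    if on_started:
        on_started()
    time.sleep(2)
    return "fake browser output"


sampler = CounterSampler(time.time, resolution=0.5)
try:
    output = run_workload(on_started=sampler.start)
finally:
    # mirror _runTest: always stop the sampler, even if the run raised
    sampler.stop()
print("%d samples collected" % len(sampler.results()))
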
Example #2
    def _runTest(self, browser_config, test_config, setup):
        minidump_dir = os.path.join(setup.profile_dir, 'minidumps')
        counters = test_config.get('%s_counters' % self._get_counter_prefix(),
                                   [])
        resolution = test_config['resolution']

        # expose the mainthread_io log path via an environment variable,
        # as defined in the test.py configs
        here = os.path.dirname(os.path.realpath(__file__))
        if test_config['mainthread']:
            mainthread_io = os.path.join(here, 'mainthread_io.log')
            setup.env['MOZ_MAIN_THREAD_IO_LOG'] = mainthread_io

        # Stylo is on by default
        setup.env['STYLO_FORCE_ENABLED'] = '1'

        # During the Stylo transition, measure with different numbers of threads
        if browser_config.get('stylothreads', 0) > 0:
            setup.env['STYLO_THREADS'] = str(browser_config['stylothreads'])

        # set url if there is one (i.e. receiving a test page, not a manifest/pageloader test)
        if test_config.get('url', None) is not None:
            test_config['url'] = utils.interpolate(
                test_config['url'],
                profile=setup.profile_dir,
                firefox=browser_config['browser_path'])
        else:
            setup.env['MOZ_USE_PAGELOADER'] = '1'

        # setup global (cross-cycle) responsiveness counters
        global_counters = {}
        if browser_config.get('xperf_path'):
            for c in test_config.get('xperf_counters', []):
                global_counters[c] = []

        if test_config.get('responsiveness') and \
           platform.system() != 'Darwin':
            # ignore osx for now as per bug 1245793
            setup.env['MOZ_INSTRUMENT_EVENT_LOOP'] = '1'
            setup.env['MOZ_INSTRUMENT_EVENT_LOOP_THRESHOLD'] = '20'
            setup.env['MOZ_INSTRUMENT_EVENT_LOOP_INTERVAL'] = '10'
            global_counters['responsiveness'] = []

        setup.env['JSGC_DISABLE_POISONING'] = '1'
        setup.env['MOZ_DISABLE_NONLOCAL_CONNECTIONS'] = '1'

        # instantiate an object to hold test results
        test_results = results.TestResults(test_config, global_counters,
                                           browser_config.get('framework'))

        for i in range(test_config['cycles']):
            time.sleep(0.25)
            LOG.info('Running cycle %d/%d for %s test...' %
                     (i + 1, test_config['cycles'], test_config['name']))

            # remove the browser error file
            mozfile.remove(browser_config['error_filename'])

            # individual tests can have different frameworks
            # TODO: ensure that we don't run >1 test with custom frameworks
            if test_config.get('perfherder_framework', None) is not None:
                test_results.framework = test_config['perfherder_framework']

            # reinstall any file whose stability we need to ensure across
            # the cycles
            if test_config.get('reinstall', ''):
                for keep in test_config['reinstall']:
                    origin = os.path.join(test_config['profile_path'], keep)
                    dest = os.path.join(setup.profile_dir, keep)
                    LOG.debug('Reinstalling %s on top of %s' % (origin, dest))
                    shutil.copy(origin, dest)

            # Run the test
            timeout = test_config.get('timeout', 7200)  # 2 hours default
            if setup.gecko_profile:
                # When profiling, give the browser some extra time
                # to dump the profile.
                timeout += 5 * 60
                # store profiling info for pageloader; too late to add it as browser pref
                setup.env["TPPROFILINGINFO"] = json.dumps(
                    setup.gecko_profile.profiling_info)

            command_args = utils.GenerateBrowserCommandLine(
                browser_config['browser_path'],
                browser_config['extra_args'],
                setup.profile_dir,
                test_config['url'],
                profiling_info=(setup.gecko_profile.profiling_info
                                if setup.gecko_profile else None))

            mainthread_error_count = 0
            if test_config['setup']:
                # Generate bcontroller.json for xperf
                talosconfig.generateTalosConfig(command_args, browser_config,
                                                test_config)
            subprocess.call(['python'] + test_config['setup'].split())

            counter_management = None
            if counters:
                counter_management = CounterManagement(
                    browser_config['process'], counters, resolution)

            try:
                pcontext = run_browser(
                    command_args,
                    minidump_dir,
                    timeout=timeout,
                    env=setup.env,
                    # start collecting counters as soon as possible
                    on_started=(counter_management.start
                                if counter_management else None),
                    debug=browser_config['debug'],
                    debugger=browser_config['debugger'],
                    debugger_args=browser_config['debugger_args'])
            except Exception:
                self.check_for_crashes(browser_config, minidump_dir,
                                       test_config['name'])
                raise
            finally:
                if counter_management:
                    counter_management.stop()

            if test_config['mainthread']:
                rawlog = os.path.join(here, 'mainthread_io.log')
                if os.path.exists(rawlog):
                    processedlog = \
                        os.path.join(here, 'mainthread_io.json')
                    xre_path = \
                        os.path.dirname(browser_config['browser_path'])
                    mtio_py = os.path.join(here, 'mainthreadio.py')
                    command = [
                        'python', mtio_py, rawlog, processedlog, xre_path
                    ]
                    mtio = subprocess.Popen(command,
                                            env=os.environ.copy(),
                                            stdout=subprocess.PIPE)
                    output, stderr = mtio.communicate()
                    for line in output.split('\n'):
                        if line.strip() == '':
                            continue

                        print(line)
                        mainthread_error_count += 1
                    mozfile.remove(rawlog)

            if test_config['cleanup']:
                # HACK: add the pid to support xperf where we require
                # the pid in post processing
                talosconfig.generateTalosConfig(command_args,
                                                browser_config,
                                                test_config,
                                                pid=pcontext.pid)
                subprocess.call([sys.executable] +
                                test_config['cleanup'].split())

            # For startup tests, we launch the browser multiple times
            # with the same profile
            for fname in ('sessionstore.js', '.parentlock',
                          'sessionstore.bak'):
                mozfile.remove(os.path.join(setup.profile_dir, fname))

            # check for xperf errors
            if os.path.exists(browser_config['error_filename']) or \
               mainthread_error_count > 0:
                raise TalosRegression(
                    'Talos has found a regression, if you have questions'
                    ' ask for help in irc on #perf')

            # add the results from the browser output
            if not run_in_debug_mode(browser_config):
                test_results.add(
                    '\n'.join(pcontext.output),
                    counter_results=(counter_management.results()
                                     if counter_management else None))

            if setup.gecko_profile:
                setup.gecko_profile.symbolicate(i)

            self.check_for_crashes(browser_config, minidump_dir,
                                   test_config['name'])

        # include global (cross-cycle) counters
        test_results.all_counter_results.extend([{
            key: value
        } for key, value in global_counters.items()])
        for c in test_results.all_counter_results:
            for key, value in c.items():
                LOG.debug('COUNTER %r: %s' % (key, value))

        # if running against a code-coverage instrumented build, move the
        # produced gcda files to a folder where they will be collected later
        if browser_config.get('code_coverage', False):
            setup.collect_or_clean_ccov()

        # return results
        return test_results
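
Compared to Example #1, this version mainly changes how the child-process environment is primed before launch: Stylo is forced on, STYLO_THREADS is set when requested, MOZ_USE_PAGELOADER is used when no direct URL is given, and non-local connections are disabled. The sketch below isolates that conditional-environment pattern with a plain subprocess call in place of run_browser; the config keys mirror the ones above, but the build_test_env helper itself is hypothetical.

import os
import subprocess
import sys


def build_test_env(config):
    """Hypothetical helper: derive the child-process environment from config."""
    env = os.environ.copy()      # keep PATH etc. from the parent
    env['STYLO_FORCE_ENABLED'] = '1'
    if config.get('stylothreads', 0) > 0:
        env['STYLO_THREADS'] = str(config['stylothreads'])
    if config.get('url') is None:
        # no direct test page, so let the pageloader drive the pages
        env['MOZ_USE_PAGELOADER'] = '1'
    env['MOZ_DISABLE_NONLOCAL_CONNECTIONS'] = '1'
    return env


env = build_test_env({'stylothreads': 4, 'url': None})
subprocess.call(
    [sys.executable, '-c',
     'import os; print(os.environ.get("STYLO_THREADS"))'],
    env=env)
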
Example #3
    def _runTest(self, browser_config, test_config, setup):
        minidump_dir = os.path.join(setup.profile_dir, 'minidumps')
        counters = test_config.get('%s_counters' % self._get_counter_prefix(),
                                   [])
        resolution = test_config['resolution']

        # expose the mainthread_io log path via an environment variable,
        # as defined in the test.py configs
        here = os.path.dirname(os.path.realpath(__file__))
        if test_config['mainthread']:
            mainthread_io = os.path.join(here, "mainthread_io.log")
            setup.env['MOZ_MAIN_THREAD_IO_LOG'] = mainthread_io

        if browser_config['disable_stylo']:
            if browser_config['stylothreads']:
                raise TalosError(
                    "--disable-stylo conflicts with --stylo-threads")
            if browser_config['enable_stylo']:
                raise TalosError(
                    "--disable-stylo conflicts with --enable-stylo")

        # As we transition to Stylo, we need to set env vars and output data properly
        if browser_config['enable_stylo']:
            setup.env['STYLO_FORCE_ENABLED'] = '1'
        if browser_config['disable_stylo']:
            setup.env['STYLO_FORCE_DISABLED'] = '1'

        # During the Stylo transition, measure with different numbers of threads
        if browser_config.get('stylothreads', 0) > 0:
            setup.env['STYLO_THREADS'] = str(browser_config['stylothreads'])

        test_config['url'] = utils.interpolate(
            test_config['url'],
            profile=setup.profile_dir,
            firefox=browser_config['browser_path'])

        # setup global (cross-cycle) counters:
        # shutdown, responsiveness
        global_counters = {}
        if browser_config.get('xperf_path'):
            for c in test_config.get('xperf_counters', []):
                global_counters[c] = []

        if test_config['shutdown']:
            global_counters['shutdown'] = []
        if test_config.get('responsiveness') and \
           platform.system() != "Darwin":
            # ignore osx for now as per bug 1245793
            setup.env['MOZ_INSTRUMENT_EVENT_LOOP'] = '1'
            setup.env['MOZ_INSTRUMENT_EVENT_LOOP_THRESHOLD'] = '20'
            setup.env['MOZ_INSTRUMENT_EVENT_LOOP_INTERVAL'] = '10'
            global_counters['responsiveness'] = []

        setup.env['JSGC_DISABLE_POISONING'] = '1'
        setup.env['MOZ_DISABLE_NONLOCAL_CONNECTIONS'] = '1'

        # if using mitmproxy we must allow access to 'external' sites
        if browser_config.get('mitmproxy', False):
            LOG.info(
                "Using mitmproxy so setting MOZ_DISABLE_NONLOCAL_CONNECTIONS to 0"
            )
            setup.env['MOZ_DISABLE_NONLOCAL_CONNECTIONS'] = '0'

        # instantiate an object to hold test results
        test_results = results.TestResults(test_config, global_counters,
                                           browser_config.get('framework'))

        for i in range(test_config['cycles']):
            LOG.info("Running cycle %d/%d for %s test..." %
                     (i + 1, test_config['cycles'], test_config['name']))

            # remove the browser error file
            mozfile.remove(browser_config['error_filename'])

            # reinstall any file whose stability we need to ensure across
            # the cycles
            if test_config.get('reinstall', ''):
                for keep in test_config['reinstall']:
                    origin = os.path.join(test_config['profile_path'], keep)
                    dest = os.path.join(setup.profile_dir, keep)
                    LOG.debug("Reinstalling %s on top of %s" % (origin, dest))
                    shutil.copy(origin, dest)

            # Run the test
            timeout = test_config.get('timeout', 7200)  # 2 hours default
            if setup.gecko_profile:
                # When profiling, give the browser some extra time
                # to dump the profile.
                timeout += 5 * 60

            command_args = utils.GenerateBrowserCommandLine(
                browser_config["browser_path"],
                browser_config["extra_args"],
                setup.profile_dir,
                test_config['url'],
                profiling_info=(setup.gecko_profile.profiling_info
                                if setup.gecko_profile else None))

            mainthread_error_count = 0
            if test_config['setup']:
                # Generate bcontroller.json for xperf
                talosconfig.generateTalosConfig(command_args, browser_config,
                                                test_config)
            subprocess.call(['python'] + test_config['setup'].split())

            counter_management = None
            if counters:
                counter_management = CounterManagement(
                    browser_config['process'], counters, resolution)

            try:
                pcontext = run_browser(
                    command_args,
                    minidump_dir,
                    timeout=timeout,
                    env=setup.env,
                    # start collecting counters as soon as possible
                    on_started=(counter_management.start
                                if counter_management else None),
                )
            except Exception:
                self.check_for_crashes(browser_config, minidump_dir,
                                       test_config['name'])
                raise
            finally:
                if counter_management:
                    counter_management.stop()

            if test_config['mainthread']:
                rawlog = os.path.join(here, "mainthread_io.log")
                if os.path.exists(rawlog):
                    processedlog = \
                        os.path.join(here, 'mainthread_io.json')
                    xre_path = \
                        os.path.dirname(browser_config['browser_path'])
                    mtio_py = os.path.join(here, 'mainthreadio.py')
                    command = [
                        'python', mtio_py, rawlog, processedlog, xre_path
                    ]
                    mtio = subprocess.Popen(command,
                                            env=os.environ.copy(),
                                            stdout=subprocess.PIPE)
                    output, stderr = mtio.communicate()
                    for line in output.split('\n'):
                        if line.strip() == "":
                            continue

                        print(line)
                        mainthread_error_count += 1
                    mozfile.remove(rawlog)

            if test_config['cleanup']:
                # HACK: add the pid to support xperf where we require
                # the pid in post processing
                talosconfig.generateTalosConfig(command_args,
                                                browser_config,
                                                test_config,
                                                pid=pcontext.pid)
                subprocess.call([sys.executable] +
                                test_config['cleanup'].split())

            # For startup tests, we launch the browser multiple times
            # with the same profile
            for fname in ('sessionstore.js', '.parentlock',
                          'sessionstore.bak'):
                mozfile.remove(os.path.join(setup.profile_dir, fname))

            # check for xperf errors
            if os.path.exists(browser_config['error_filename']) or \
               mainthread_error_count > 0:
                raise TalosRegression(
                    "Talos has found a regression, if you have questions"
                    " ask for help in irc on #perf")

            # add the results from the browser output
            test_results.add('\n'.join(pcontext.output),
                             counter_results=(counter_management.results()
                                              if counter_management else None))

            if setup.gecko_profile:
                setup.gecko_profile.symbolicate(i)

            self.check_for_crashes(browser_config, minidump_dir,
                                   test_config['name'])

        # include global (cross-cycle) counters
        test_results.all_counter_results.extend([{
            key: value
        } for key, value in global_counters.items()])
        for c in test_results.all_counter_results:
            for key, value in c.items():
                LOG.debug("COUNTER %r: %s" % (key, value))

        # return results
        return test_results
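
The main addition in Example #3 is the fail-fast check that --disable-stylo cannot be combined with --stylo-threads or --enable-stylo, raised before any cycle starts. A standalone sketch of that validation pattern follows, with ValueError standing in for TalosError and a plain dict for the parsed options.

def validate_stylo_options(options):
    """Reject contradictory Stylo settings before any test cycle runs."""
    if options.get('disable_stylo'):
        if options.get('stylothreads'):
            raise ValueError("--disable-stylo conflicts with --stylo-threads")
        if options.get('enable_stylo'):
            raise ValueError("--disable-stylo conflicts with --enable-stylo")


# contradictory options fail immediately instead of part-way through a run
try:
    validate_stylo_options({'disable_stylo': True, 'enable_stylo': True})
except ValueError as exc:
    print("rejected: %s" % exc)
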
Example #4
    def _runTest(self, browser_config, test_config, setup):
        minidump_dir = os.path.join(setup.profile_dir, "minidumps")
        counters = test_config.get("%s_counters" % self._get_counter_prefix(),
                                   [])
        resolution = test_config["resolution"]

        # expose the mainthread_io log path via an environment variable,
        # as defined in the test.py configs
        here = os.path.dirname(os.path.realpath(__file__))
        if test_config["mainthread"]:
            mainthread_io = os.path.join(here, "mainthread_io.log")
            setup.env["MOZ_MAIN_THREAD_IO_LOG"] = mainthread_io

        # Stylo is on by default
        setup.env["STYLO_FORCE_ENABLED"] = "1"

        # During the Stylo transition, measure with different numbers of threads
        if browser_config.get("stylothreads", 0) > 0:
            setup.env["STYLO_THREADS"] = str(browser_config["stylothreads"])

        if browser_config["enable_webrender"]:
            setup.env["MOZ_WEBRENDER"] = "1"
            setup.env["MOZ_ACCELERATED"] = "1"
        else:
            setup.env["MOZ_WEBRENDER"] = "0"

        # set url if there is one (i.e. receiving a test page, not a manifest/pageloader test)
        if test_config.get("url", None) is not None:
            test_config["url"] = utils.interpolate(
                test_config["url"],
                profile=setup.profile_dir,
                firefox=browser_config["browser_path"],
            )
        else:
            setup.env["MOZ_USE_PAGELOADER"] = "1"

        # setup global (cross-cycle) responsiveness counters
        global_counters = {}
        if browser_config.get("xperf_path"):
            for c in test_config.get("xperf_counters", []):
                global_counters[c] = []

        if test_config.get("responsiveness") and platform.system() != "Darwin":
            # ignore osx for now as per bug 1245793
            setup.env["MOZ_INSTRUMENT_EVENT_LOOP"] = "1"
            setup.env["MOZ_INSTRUMENT_EVENT_LOOP_THRESHOLD"] = "20"
            setup.env["MOZ_INSTRUMENT_EVENT_LOOP_INTERVAL"] = "10"
            global_counters["responsiveness"] = []

        setup.env["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1"

        # instantiate an object to hold test results
        test_results = results.TestResults(test_config, global_counters,
                                           browser_config.get("framework"))

        for i in six.moves.range(test_config["cycles"]):
            time.sleep(0.25)
            LOG.info("Running cycle %d/%d for %s test..." %
                     (i + 1, test_config["cycles"], test_config["name"]))

            # remove the browser error file
            mozfile.remove(browser_config["error_filename"])

            # individual tests can have different frameworks
            # TODO: ensure that we don't run >1 test with custom frameworks
            if test_config.get("perfherder_framework", None) is not None:
                test_results.framework = test_config["perfherder_framework"]

            # reinstall any file whose stability we need to ensure across
            # the cycles
            if test_config.get("reinstall", ""):
                for keep in test_config["reinstall"]:
                    origin = os.path.join(test_config["profile_path"], keep)
                    dest = os.path.join(setup.profile_dir, keep)
                    LOG.debug("Reinstalling %s on top of %s" % (origin, dest))
                    shutil.copy(origin, dest)

            # Run the test
            timeout = test_config.get("timeout", 600)  # 10 minutes default
            if setup.gecko_profile:
                # When profiling, give the browser some extra time
                # to dump the profile.
                timeout += 5 * 60
                # store profiling info for pageloader; too late to add it as browser pref
                setup.env["TPPROFILINGINFO"] = json.dumps(
                    setup.gecko_profile.profiling_info)

            command_args = utils.GenerateBrowserCommandLine(
                browser_config["browser_path"],
                browser_config["extra_args"],
                setup.profile_dir,
                test_config["url"],
                profiling_info=(setup.gecko_profile.profiling_info
                                if setup.gecko_profile else None),
            )

            mainthread_error_count = 0
            if test_config["setup"]:
                # Generate bcontroller.json for xperf
                talosconfig.generateTalosConfig(command_args, browser_config,
                                                test_config)
                subprocess.call(["python"] + test_config["setup"].split(), )

            counter_management = None
            if counters:
                counter_management = CounterManagement(
                    browser_config["process"], counters, resolution)

            try:
                pcontext = run_browser(
                    command_args,
                    minidump_dir,
                    timeout=timeout,
                    env=setup.env,
                    # start collecting counters as soon as possible
                    on_started=(counter_management.start
                                if counter_management else None),
                    debug=browser_config["debug"],
                    debugger=browser_config["debugger"],
                    debugger_args=browser_config["debugger_args"],
                )
            except Exception:
                self.check_for_crashes(browser_config, minidump_dir,
                                       test_config["name"])
                raise
            finally:
                if counter_management:
                    counter_management.stop()

            try:
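                # post-process this cycle's run; the finally clause below
                # guarantees the crash check still happens if anything in
                # this block raises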
                if test_config["mainthread"]:
                    rawlog = os.path.join(here, "mainthread_io.log")
                    if os.path.exists(rawlog):
                        processedlog = os.path.join(here, "mainthread_io.json")
                        xre_path = os.path.dirname(
                            browser_config["browser_path"])
                        mtio_py = os.path.join(here, "mainthreadio.py")
                        command = [
                            "python", mtio_py, rawlog, processedlog, xre_path
                        ]
                        mtio = subprocess.Popen(command,
                                                env=os.environ.copy(),
                                                stdout=subprocess.PIPE)
                        output, stderr = mtio.communicate()
                        for line in output.split(b"\n"):
                            if line.strip() == b"":
                                continue

                            print(line)
                            mainthread_error_count += 1
                        mozfile.remove(rawlog)

                if test_config["cleanup"]:
                    # HACK: add the pid to support xperf where we require
                    # the pid in post processing
                    talosconfig.generateTalosConfig(command_args,
                                                    browser_config,
                                                    test_config,
                                                    pid=pcontext.pid)
                    subprocess.call([sys.executable] +
                                    test_config["cleanup"].split())

                # For startup tests, we launch the browser multiple times
                # with the same profile
                for fname in ("sessionstore.js", ".parentlock",
                              "sessionstore.bak"):
                    mozfile.remove(os.path.join(setup.profile_dir, fname))

                # check for xperf errors
                if (os.path.exists(browser_config["error_filename"])
                        or mainthread_error_count > 0):
                    raise TalosRegression(
                        "Talos has found a regression, if you have questions"
                        " ask for help in irc on #perf")

                # add the results from the browser output
                if not run_in_debug_mode(browser_config):
                    test_results.add(
                        "\n".join(pcontext.output),
                        counter_results=(counter_management.results()
                                         if counter_management else None),
                    )

                if setup.gecko_profile:
                    setup.gecko_profile.symbolicate(i)

            finally:
                self.check_for_crashes(browser_config, minidump_dir,
                                       test_config["name"])

        # include global (cross-cycle) counters
        test_results.all_counter_results.extend([{
            key: value
        } for key, value in global_counters.items()])
        for c in test_results.all_counter_results:
            for key, value in c.items():
                LOG.debug("COUNTER %r: %s" % (key, value))

        # if running against a code-coverage instrumented build, move the
        # produced gcda files to a folder where they will be collected later
        if browser_config.get("code_coverage", False):
            setup.collect_or_clean_ccov()

        # return results
        return test_results
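
Example #4 is Python-3 aware: Popen.communicate() returns bytes, so the mainthread_io output is split and compared with bytes literals (b"\n", b""). The sketch below reproduces just that bytes-handling loop against a stand-in command; it is not the real mainthreadio.py invocation.

import subprocess
import sys

# stand-in child process that writes a few lines (one of them blank) to stdout
proc = subprocess.Popen(
    [sys.executable, '-c', 'print("line one"); print(""); print("line two")'],
    stdout=subprocess.PIPE)
output, _ = proc.communicate()

count = 0
# on Python 3, output is bytes, so split/compare with bytes literals,
# just as Example #4 does with b"\n" and b""
for line in output.split(b"\n"):
    if line.strip() == b"":
        continue
    print(line.decode("utf-8", "replace"))
    count += 1

print("non-empty lines: %d" % count)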