Example #1
    def test_errors(self):
        # Tests if errors are correctly raised.

        # Test that a KeyError is raised when xperf_path is missing
        browser_config_copy = browser_config.copy()
        test_config_copy = test_config.copy()
        del browser_config_copy['xperf_path']
        talosconfig.generateTalosConfig(command_args, browser_config_copy,
                                        test_config_copy)
        yaml = YAML()
        content = yaml.read(browser_config['bcontroller_config'])

        with self.assertRaises(KeyError):
            self.validate(
                content['xperf_path'],
                "C:/Program Files/Microsoft Windows Performance Toolkit/xperf.exe"
            )

        # Test that accessing testname raises a KeyError when xperf_path is missing
        with self.assertRaises(KeyError):
            self.validate(content['testname'], "tp5n")

        # Test that a KeyError is raised when xperf_providers is missing
        browser_config_copy = browser_config.copy()
        test_config_copy = test_config.copy()
        del test_config_copy['xperf_providers']
        talosconfig.generateTalosConfig(command_args, browser_config_copy,
                                        test_config_copy)
        yaml = YAML()
        content = yaml.read(browser_config['bcontroller_config'])

        # Test that accessing xperf_providers raises a KeyError
        with self.assertRaises(KeyError):
            self.validate(content['xperf_providers'], [
                'PROC_THREAD', 'LOADER', 'HARD_FAULTS', 'FILENAME', 'FILE_IO',
                'FILE_IO_INIT'
            ])

        # Test that accessing xperf_user_providers raises a KeyError when xperf_providers is missing
        with self.assertRaises(KeyError):
            self.validate(
                content['xperf_user_providers'],
                ['Mozilla Generic Provider', 'Microsoft-Windows-TCPIP'])

        # Test that accessing xperf_stackwalk raises a KeyError when xperf_providers is missing
        with self.assertRaises(KeyError):
            self.validate(content['xperf_stackwalk'], [
                'FileCreate', 'FileRead', 'FileWrite', 'FileFlush', 'FileClose'
            ])

        # Test that accessing processID raises a KeyError when xperf_providers is missing
        with self.assertRaises(KeyError):
            self.validate(content['processID'], "None")

        # Test that accessing approot raises a KeyError when xperf_providers is missing
        with self.assertRaises(KeyError):
            self.validate(content['approot'], "test/path/to")
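
The test_errors excerpts above and below call two helpers that are defined elsewhere in the test module and are not shown here: a validate assertion wrapper and a YAML reader for the generated bcontroller file. The following is only a minimal sketch of what they are assumed to look like; the helper names match the calls in the excerpts, but the implementations and the TestCase class name are placeholders, not the actual Talos code.

    import unittest


    class YAML(object):
        # Assumed minimal reader: turn a flat "key: value" config file into a dict.
        # The real reader may parse lists and numbers differently.
        def read(self, filename):
            content = {}
            with open(filename) as f:
                for line in f:
                    if ':' not in line:
                        continue
                    key, _, value = line.partition(':')
                    content[key.strip()] = value.strip()
            return content


    class TestTalosConfig(unittest.TestCase):
        # Assumed helper, used as self.validate(actual, expected) throughout the tests.
        def validate(self, actual, expected):
            self.assertEqual(actual, expected)
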
Example #2
    def test_errors(self):
        # Tests if errors are correctly raised.

        # Test that a KeyError is raised when xperf_path is missing
        browser_config_copy = browser_config.copy()
        test_config_copy = test_config.copy()
        del browser_config_copy['xperf_path']
        talosconfig.generateTalosConfig(command_args, browser_config_copy, test_config_copy)
        yaml = YAML()
        content = yaml.read(browser_config['bcontroller_config'])

        with self.assertRaises(KeyError):
            self.validate(content['xperf_path'],
                          "C:/Program Files/Microsoft Windows Performance Toolkit/xperf.exe")

        # Test that accessing testname raises a KeyError when xperf_path is missing
        with self.assertRaises(KeyError):
            self.validate(content['testname'], "tp5n")

        # Test that a KeyError is raised when xperf_providers is missing
        browser_config_copy = browser_config.copy()
        test_config_copy = test_config.copy()
        del test_config_copy['xperf_providers']
        talosconfig.generateTalosConfig(command_args, browser_config_copy, test_config_copy)
        yaml = YAML()
        content = yaml.read(browser_config['bcontroller_config'])

        # Test that accessing xperf_providers raises a KeyError
        with self.assertRaises(KeyError):
            self.validate(content['xperf_providers'], ['PROC_THREAD', 'LOADER', 'HARD_FAULTS',
                                                       'FILENAME', 'FILE_IO', 'FILE_IO_INIT'])

        # Test that accessing xperf_user_providers raises a KeyError when xperf_providers is missing
        with self.assertRaises(KeyError):
            self.validate(content['xperf_user_providers'],
                          ['Mozilla Generic Provider', 'Microsoft-Windows-TCPIP'])

        # Test that accessing xperf_stackwalk raises a KeyError when xperf_providers is missing
        with self.assertRaises(KeyError):
            self.validate(content['xperf_stackwalk'],
                          ['FileCreate', 'FileRead', 'FileWrite', 'FileFlush', 'FileClose'])

        # Test that accessing processID raises a KeyError when xperf_providers is missing
        with self.assertRaises(KeyError):
            self.validate(content['processID'], "None")

        # Test that accessing approot raises a KeyError when xperf_providers is missing
        with self.assertRaises(KeyError):
            self.validate(content['approot'], "test/path/to")
Example #3
    def test_errors(self):
        # Tests if errors are correctly raised.

        # Test that a KeyError is raised when xperf_path is missing
        browser_config_copy = browser_config.copy()
        test_config_copy = test_config.copy()
        del browser_config_copy["xperf_path"]
        talosconfig.generateTalosConfig(command_args, browser_config_copy, test_config_copy)
        yaml = YAML()
        content = yaml.read(browser_config["bcontroller_config"])

        with self.assertRaises(KeyError):
            self.validate(content["xperf_path"], "C:/Program Files/Microsoft Windows Performance Toolkit/xperf.exe")

        # Test that accessing testname raises a KeyError when xperf_path is missing
        with self.assertRaises(KeyError):
            self.validate(content["testname"], "tp5n")

        # Test that a KeyError is raised when xperf_providers is missing
        browser_config_copy = browser_config.copy()
        test_config_copy = test_config.copy()
        del test_config_copy["xperf_providers"]
        talosconfig.generateTalosConfig(command_args, browser_config_copy, test_config_copy)
        yaml = YAML()
        content = yaml.read(browser_config["bcontroller_config"])

        # Test that accessing xperf_providers raises a KeyError
        with self.assertRaises(KeyError):
            self.validate(
                content["xperf_providers"],
                ["PROC_THREAD", "LOADER", "HARD_FAULTS", "FILENAME", "FILE_IO", "FILE_IO_INIT"],
            )

        # Test that accessing xperf_user_providers raises a KeyError when xperf_providers is missing
        with self.assertRaises(KeyError):
            self.validate(content["xperf_user_providers"], ["Mozilla Generic Provider", "Microsoft-Windows-TCPIP"])

        # Test that accessing xperf_stackwalk raises a KeyError when xperf_providers is missing
        with self.assertRaises(KeyError):
            self.validate(content["xperf_stackwalk"], ["FileCreate", "FileRead", "FileWrite", "FileFlush", "FileClose"])

        # Test that accessing processID raises a KeyError when xperf_providers is missing
        with self.assertRaises(KeyError):
            self.validate(content["processID"], "None")

        # Test that accessing approot raises a KeyError when xperf_providers is missing
        with self.assertRaises(KeyError):
            self.validate(content["approot"], "test/path/to")
Example #4
    def test_talosconfig(self):
        # Simulate a call to generateTalosConfig in talosconfig.py and verify that the generated output is correct.

        browser_config_copy = browser_config.copy()
        test_config_copy = test_config.copy()
        test = talosconfig.generateTalosConfig(command_args, browser_config_copy, test_config_copy)

        # Ensure the output written to the YAML file matches the expected values.
        yaml = YAML()
        content = yaml.read(browser_config['bcontroller_config'])
        self.validate(content['command'],"test/path/to/firefox -profile pathtoprofile -tp pathtotpmanifest -tpchrome -tpmozafterpaint -tpnoisy -rss -tpcycles 1 -tppagecycles 1")
        self.validate(content['child_process'],"plugin-container")
        self.validate(content['process'],"firefox.exe")
        self.validate(content['browser_wait'],5)
        self.validate(content['test_timeout'],1200)
        self.validate(content['browser_log'],"browser_output.txt")
        self.validate(content['browser_path'],"test/path/to/firefox")
        self.validate(content['error_filename'],"pathtoerrorfile")
        self.validate(content['xperf_path'],"C:/Program Files/Microsoft Windows Performance Toolkit/xperf.exe")
        self.validate(content['buildid'],20131205075310L)
        self.validate(content['sourcestamp'],"39faf812aaec")
        self.validate(content['repository'],"http://hg.mozilla.org/releases/mozilla-release")
        self.validate(content['title'],"qm-pxp01")
        self.validate(content['testname'],"tp5n")
        self.validate(content['xperf_providers'],['PROC_THREAD', 'LOADER', 'HARD_FAULTS', 'FILENAME', 'FILE_IO', 'FILE_IO_INIT'])
        self.validate(content['xperf_user_providers'],['Mozilla Generic Provider', 'Microsoft-Windows-TCPIP'])
        self.validate(content['xperf_stackwalk'],['FileCreate', 'FileRead', 'FileWrite', 'FileFlush', 'FileClose'])
        self.validate(content['processID'],"None")
        self.validate(content['approot'],"test/path/to")
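
The long run of validate calls in the test_talosconfig excerpt above pins down the full set of expected bcontroller values. Purely as a sketch of an alternative layout, the same checks could be driven from a single expected-value table; the dict below only collects the literals already asserted above, and the helper function is hypothetical, not part of the Talos tests.

    # Hypothetical table-driven form of the assertions above.
    EXPECTED_BCONTROLLER = {
        'command': "test/path/to/firefox -profile pathtoprofile -tp pathtotpmanifest"
                   " -tpchrome -tpmozafterpaint -tpnoisy -rss -tpcycles 1 -tppagecycles 1",
        'child_process': "plugin-container",
        'process': "firefox.exe",
        'browser_wait': 5,
        'test_timeout': 1200,
        'browser_log': "browser_output.txt",
        'browser_path': "test/path/to/firefox",
        'error_filename': "pathtoerrorfile",
        'xperf_path': "C:/Program Files/Microsoft Windows Performance Toolkit/xperf.exe",
        'buildid': 20131205075310,
        'sourcestamp': "39faf812aaec",
        'repository': "http://hg.mozilla.org/releases/mozilla-release",
        'title': "qm-pxp01",
        'testname': "tp5n",
        'xperf_providers': ['PROC_THREAD', 'LOADER', 'HARD_FAULTS', 'FILENAME', 'FILE_IO', 'FILE_IO_INIT'],
        'xperf_user_providers': ['Mozilla Generic Provider', 'Microsoft-Windows-TCPIP'],
        'xperf_stackwalk': ['FileCreate', 'FileRead', 'FileWrite', 'FileFlush', 'FileClose'],
        'processID': "None",
        'approot': "test/path/to",
    }

    def check_bcontroller(testcase, content):
        # Compare every generated value against the expected table in one loop.
        for key, expected in EXPECTED_BCONTROLLER.items():
            testcase.validate(content[key], expected)
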
Example #5
    def test_talosconfig(self):
        # Simulate a call to generateTalosConfig in talosconfig.py and verify that the generated output is correct.

        browser_config_copy = browser_config.copy()
        test_config_copy = test_config.copy()
        test = talosconfig.generateTalosConfig(command_args, browser_config_copy, test_config_copy)

        # Ensure the output written to the YAML file matches the expected values.
        yaml = YAML()
        content = yaml.read(browser_config["bcontroller_config"])
        self.validate(
            content["command"],
            "test/path/to/firefox -profile pathtoprofile -tp pathtotpmanifest -tpchrome -tpmozafterpaint -tpnoisy -rss -tpcycles 1 -tppagecycles 1",
        )
        self.validate(content["child_process"], "plugin-container")
        self.validate(content["process"], "firefox.exe")
        self.validate(content["browser_wait"], 5)
        self.validate(content["test_timeout"], 1200)
        self.validate(content["browser_log"], "browser_output.txt")
        self.validate(content["browser_path"], "test/path/to/firefox")
        self.validate(content["error_filename"], "pathtoerrorfile")
        self.validate(content["xperf_path"], "C:/Program Files/Microsoft Windows Performance Toolkit/xperf.exe")
        self.validate(content["buildid"], 20131205075310L)
        self.validate(content["sourcestamp"], "39faf812aaec")
        self.validate(content["repository"], "http://hg.mozilla.org/releases/mozilla-release")
        self.validate(content["title"], "qm-pxp01")
        self.validate(content["testname"], "tp5n")
        self.validate(
            content["xperf_providers"], ["PROC_THREAD", "LOADER", "HARD_FAULTS", "FILENAME", "FILE_IO", "FILE_IO_INIT"]
        )
        self.validate(content["xperf_user_providers"], ["Mozilla Generic Provider", "Microsoft-Windows-TCPIP"])
        self.validate(content["xperf_stackwalk"], ["FileCreate", "FileRead", "FileWrite", "FileFlush", "FileClose"])
        self.validate(content["processID"], "None")
        self.validate(content["approot"], "test/path/to")
Example #6
    def test_talosconfig(self):
        # Simulate a call to generateTalosConfig in talosconfig.py and verify that the generated output is correct.

        browser_config_copy = browser_config.copy()
        test_config_copy = test_config.copy()
        test = talosconfig.generateTalosConfig(command_args, browser_config_copy, test_config_copy)

        # Ensure the output written to the YAML file matches the expected values.
        yaml = YAML()
        content = yaml.read(browser_config['bcontroller_config'])
        self.validate(content['command'],"test/path/to/firefox -profile pathtoprofile -tp pathtotpmanifest -tpchrome -tpmozafterpaint -tpnoisy -rss -tpcycles 1 -tppagecycles 1")
        self.validate(content['child_process'],"firefox-webcontent")
        self.validate(content['process'],"firefox.exe")
        self.validate(content['browser_wait'],5)
        self.validate(content['test_timeout'],1200)
        self.validate(content['browser_log'],"browser_output.txt")
        self.validate(content['browser_path'],"test/path/to/firefox")
        self.validate(content['error_filename'],"pathtoerrorfile")
        self.validate(content['xperf_path'],"C:/Program Files/Microsoft Windows Performance Toolkit/xperf.exe")
        self.validate(content['buildid'],20131205075310L)
        self.validate(content['sourcestamp'],"39faf812aaec")
        self.validate(content['repository'],"http://hg.mozilla.org/releases/mozilla-release")
        self.validate(content['title'],"qm-pxp01")
        self.validate(content['testname'],"tp5n")
        self.validate(content['xperf_providers'],['PROC_THREAD', 'LOADER', 'HARD_FAULTS', 'FILENAME', 'FILE_IO', 'FILE_IO_INIT'])
        self.validate(content['xperf_user_providers'],['Mozilla Generic Provider', 'Microsoft-Windows-TCPIP'])
        self.validate(content['xperf_stackwalk'],['FileCreate', 'FileRead', 'FileWrite', 'FileFlush', 'FileClose'])
        self.validate(content['processID'],"None")
        self.validate(content['approot'],"test/path/to")
Example #7
    def _runTest(self, browser_config, test_config, setup):
        minidump_dir = os.path.join(setup.profile_dir, 'minidumps')
        counters = test_config.get('%s_counters' % self._get_counter_prefix(),
                                   [])
        resolution = test_config['resolution']

        # add the mainthread_io to the environment variable, as defined
        # in test.py configs
        here = os.path.dirname(os.path.realpath(__file__))
        if test_config['mainthread']:
            mainthread_io = os.path.join(here, 'mainthread_io.log')
            setup.env['MOZ_MAIN_THREAD_IO_LOG'] = mainthread_io

        # Stylo is on by default
        setup.env['STYLO_FORCE_ENABLED'] = '1'

        # During the Stylo transition, measure with a different number of threads
        if browser_config.get('stylothreads', 0) > 0:
            setup.env['STYLO_THREADS'] = str(browser_config['stylothreads'])

        # set url if there is one (i.e. receiving a test page, not a manifest/pageloader test)
        if test_config.get('url', None) is not None:
            test_config['url'] = utils.interpolate(
                test_config['url'],
                profile=setup.profile_dir,
                firefox=browser_config['browser_path'])
        else:
            setup.env['MOZ_USE_PAGELOADER'] = '1'

        # setup global (cross-cycle) responsiveness counters
        global_counters = {}
        if browser_config.get('xperf_path'):
            for c in test_config.get('xperf_counters', []):
                global_counters[c] = []

        if test_config.get('responsiveness') and \
           platform.system() != 'Darwin':
            # ignore osx for now as per bug 1245793
            setup.env['MOZ_INSTRUMENT_EVENT_LOOP'] = '1'
            setup.env['MOZ_INSTRUMENT_EVENT_LOOP_THRESHOLD'] = '20'
            setup.env['MOZ_INSTRUMENT_EVENT_LOOP_INTERVAL'] = '10'
            global_counters['responsiveness'] = []

        setup.env['JSGC_DISABLE_POISONING'] = '1'
        setup.env['MOZ_DISABLE_NONLOCAL_CONNECTIONS'] = '1'

        # instantiate an object to hold test results
        test_results = results.TestResults(test_config, global_counters,
                                           browser_config.get('framework'))

        for i in range(test_config['cycles']):
            time.sleep(0.25)
            LOG.info('Running cycle %d/%d for %s test...' %
                     (i + 1, test_config['cycles'], test_config['name']))

            # remove the browser error file
            mozfile.remove(browser_config['error_filename'])

            # individual tests can have different frameworks
            # TODO: ensure that we don't run >1 test with custom frameworks
            if test_config.get('perfherder_framework', None) is not None:
                test_results.framework = test_config['perfherder_framework']

            # reinstall any file whose stability we need to ensure across
            # the cycles
            if test_config.get('reinstall', ''):
                for keep in test_config['reinstall']:
                    origin = os.path.join(test_config['profile_path'], keep)
                    dest = os.path.join(setup.profile_dir, keep)
                    LOG.debug('Reinstalling %s on top of %s' % (origin, dest))
                    shutil.copy(origin, dest)

            # Run the test
            timeout = test_config.get('timeout', 7200)  # 2 hours default
            if setup.gecko_profile:
                # When profiling, give the browser some extra time
                # to dump the profile.
                timeout += 5 * 60
                # store profiling info for pageloader; too late to add it as browser pref
                setup.env["TPPROFILINGINFO"] = json.dumps(
                    setup.gecko_profile.profiling_info)

            command_args = utils.GenerateBrowserCommandLine(
                browser_config['browser_path'],
                browser_config['extra_args'],
                setup.profile_dir,
                test_config['url'],
                profiling_info=(setup.gecko_profile.profiling_info
                                if setup.gecko_profile else None))

            mainthread_error_count = 0
            if test_config['setup']:
                # Generate bcontroller.json for xperf
                talosconfig.generateTalosConfig(command_args, browser_config,
                                                test_config)
                subprocess.call(['python'] + test_config['setup'].split(), )

            counter_management = None
            if counters:
                counter_management = CounterManagement(
                    browser_config['process'], counters, resolution)

            try:
                pcontext = run_browser(
                    command_args,
                    minidump_dir,
                    timeout=timeout,
                    env=setup.env,
                    # start collecting counters as soon as possible
                    on_started=(counter_management.start
                                if counter_management else None),
                    debug=browser_config['debug'],
                    debugger=browser_config['debugger'],
                    debugger_args=browser_config['debugger_args'])
            except Exception:
                self.check_for_crashes(browser_config, minidump_dir,
                                       test_config['name'])
                raise
            finally:
                if counter_management:
                    counter_management.stop()

            if test_config['mainthread']:
                rawlog = os.path.join(here, 'mainthread_io.log')
                if os.path.exists(rawlog):
                    processedlog = \
                        os.path.join(here, 'mainthread_io.json')
                    xre_path = \
                        os.path.dirname(browser_config['browser_path'])
                    mtio_py = os.path.join(here, 'mainthreadio.py')
                    command = [
                        'python', mtio_py, rawlog, processedlog, xre_path
                    ]
                    mtio = subprocess.Popen(command,
                                            env=os.environ.copy(),
                                            stdout=subprocess.PIPE)
                    output, stderr = mtio.communicate()
                    for line in output.split('\n'):
                        if line.strip() == '':
                            continue

                        print(line)
                        mainthread_error_count += 1
                    mozfile.remove(rawlog)

            if test_config['cleanup']:
                # HACK: add the pid to support xperf where we require
                # the pid in post processing
                talosconfig.generateTalosConfig(command_args,
                                                browser_config,
                                                test_config,
                                                pid=pcontext.pid)
                subprocess.call([sys.executable] +
                                test_config['cleanup'].split())

            # For startup tests, we launch the browser multiple times
            # with the same profile
            for fname in ('sessionstore.js', '.parentlock',
                          'sessionstore.bak'):
                mozfile.remove(os.path.join(setup.profile_dir, fname))

            # check for xperf errors
            if os.path.exists(browser_config['error_filename']) or \
               mainthread_error_count > 0:
                raise TalosRegression(
                    'Talos has found a regression, if you have questions'
                    ' ask for help in irc on #perf')

            # add the results from the browser output
            if not run_in_debug_mode(browser_config):
                test_results.add(
                    '\n'.join(pcontext.output),
                    counter_results=(counter_management.results()
                                     if counter_management else None))

            if setup.gecko_profile:
                setup.gecko_profile.symbolicate(i)

            self.check_for_crashes(browser_config, minidump_dir,
                                   test_config['name'])

        # include global (cross-cycle) counters
        test_results.all_counter_results.extend([{
            key: value
        } for key, value in global_counters.items()])
        for c in test_results.all_counter_results:
            for key, value in c.items():
                LOG.debug('COUNTER %r: %s' % (key, value))

        # if running against a code-coverage instrumented build, move the
        # produced gcda files to a folder where they will be collected later
        if browser_config.get('code_coverage', False):
            setup.collect_or_clean_ccov()

        # return results
        return test_results
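
For orientation, the keys that _runTest reads directly from its two config dicts can be read off the code above. The fixtures below are hypothetical placeholders illustrating the expected shape only; real Talos browser and test configs carry many more fields, and all values here are made up.

    # Hypothetical minimal fixtures, assembled from the keys accessed in _runTest above.
    browser_config = {
        'browser_path': 'test/path/to/firefox',
        'extra_args': [],
        'process': 'firefox.exe',
        'error_filename': 'pathtoerrorfile',
        'debug': False,
        'debugger': None,
        'debugger_args': None,
        'xperf_path': None,        # path to xperf.exe when xperf tracing is enabled; falsy disables xperf counters
        'framework': 'talos',
    }

    test_config = {
        'name': 'tp5n',
        'cycles': 1,
        'resolution': 1,
        'mainthread': False,
        'url': None,               # None selects the manifest/pageloader path
        'setup': None,             # shell-style command string, or falsy to skip
        'cleanup': None,
    }
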
Example #8
    def _runTest(self, browser_config, test_config, setup):
        minidump_dir = os.path.join(setup.profile_dir, "minidumps")
        counters = test_config.get("%s_counters" % self._get_counter_prefix(),
                                   [])
        resolution = test_config["resolution"]

        # add the mainthread_io to the environment variable, as defined
        # in test.py configs
        here = os.path.dirname(os.path.realpath(__file__))
        if test_config["mainthread"]:
            mainthread_io = os.path.join(here, "mainthread_io.log")
            setup.env["MOZ_MAIN_THREAD_IO_LOG"] = mainthread_io

        # Stylo is on by default
        setup.env["STYLO_FORCE_ENABLED"] = "1"

        # During the Stylo transition, measure with a different number of threads
        if browser_config.get("stylothreads", 0) > 0:
            setup.env["STYLO_THREADS"] = str(browser_config["stylothreads"])

        if browser_config["enable_webrender"]:
            setup.env["MOZ_WEBRENDER"] = "1"
            setup.env["MOZ_ACCELERATED"] = "1"
        else:
            setup.env["MOZ_WEBRENDER"] = "0"

        # set url if there is one (i.e. receiving a test page, not a manifest/pageloader test)
        if test_config.get("url", None) is not None:
            test_config["url"] = utils.interpolate(
                test_config["url"],
                profile=setup.profile_dir,
                firefox=browser_config["browser_path"],
            )
        else:
            setup.env["MOZ_USE_PAGELOADER"] = "1"

        # setup global (cross-cycle) responsiveness counters
        global_counters = {}
        if browser_config.get("xperf_path"):
            for c in test_config.get("xperf_counters", []):
                global_counters[c] = []

        if test_config.get("responsiveness") and platform.system() != "Darwin":
            # ignore osx for now as per bug 1245793
            setup.env["MOZ_INSTRUMENT_EVENT_LOOP"] = "1"
            setup.env["MOZ_INSTRUMENT_EVENT_LOOP_THRESHOLD"] = "20"
            setup.env["MOZ_INSTRUMENT_EVENT_LOOP_INTERVAL"] = "10"
            global_counters["responsiveness"] = []

        setup.env["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1"

        # instantiate an object to hold test results
        test_results = results.TestResults(test_config, global_counters,
                                           browser_config.get("framework"))

        for i in six.moves.range(test_config["cycles"]):
            time.sleep(0.25)
            LOG.info("Running cycle %d/%d for %s test..." %
                     (i + 1, test_config["cycles"], test_config["name"]))

            # remove the browser error file
            mozfile.remove(browser_config["error_filename"])

            # individual tests can have different frameworks
            # TODO: ensure that we don't run >1 test with custom frameworks
            if test_config.get("perfherder_framework", None) is not None:
                test_results.framework = test_config["perfherder_framework"]

            # reinstall any file whose stability we need to ensure across
            # the cycles
            if test_config.get("reinstall", ""):
                for keep in test_config["reinstall"]:
                    origin = os.path.join(test_config["profile_path"], keep)
                    dest = os.path.join(setup.profile_dir, keep)
                    LOG.debug("Reinstalling %s on top of %s" % (origin, dest))
                    shutil.copy(origin, dest)

            # Run the test
            timeout = test_config.get("timeout", 600)  # 10 minutes default
            if setup.gecko_profile:
                # When profiling, give the browser some extra time
                # to dump the profile.
                timeout += 5 * 60
                # store profiling info for pageloader; too late to add it as browser pref
                setup.env["TPPROFILINGINFO"] = json.dumps(
                    setup.gecko_profile.profiling_info)

            command_args = utils.GenerateBrowserCommandLine(
                browser_config["browser_path"],
                browser_config["extra_args"],
                setup.profile_dir,
                test_config["url"],
                profiling_info=(setup.gecko_profile.profiling_info
                                if setup.gecko_profile else None),
            )

            mainthread_error_count = 0
            if test_config["setup"]:
                # Generate bcontroller.json for xperf
                talosconfig.generateTalosConfig(command_args, browser_config,
                                                test_config)
                subprocess.call(["python"] + test_config["setup"].split(), )

            counter_management = None
            if counters:
                counter_management = CounterManagement(
                    browser_config["process"], counters, resolution)

            try:
                pcontext = run_browser(
                    command_args,
                    minidump_dir,
                    timeout=timeout,
                    env=setup.env,
                    # start collecting counters as soon as possible
                    on_started=(counter_management.start
                                if counter_management else None),
                    debug=browser_config["debug"],
                    debugger=browser_config["debugger"],
                    debugger_args=browser_config["debugger_args"],
                )
            except Exception:
                self.check_for_crashes(browser_config, minidump_dir,
                                       test_config["name"])
                raise
            finally:
                if counter_management:
                    counter_management.stop()

            try:
                if test_config["mainthread"]:
                    rawlog = os.path.join(here, "mainthread_io.log")
                    if os.path.exists(rawlog):
                        processedlog = os.path.join(here, "mainthread_io.json")
                        xre_path = os.path.dirname(
                            browser_config["browser_path"])
                        mtio_py = os.path.join(here, "mainthreadio.py")
                        command = [
                            "python", mtio_py, rawlog, processedlog, xre_path
                        ]
                        mtio = subprocess.Popen(command,
                                                env=os.environ.copy(),
                                                stdout=subprocess.PIPE)
                        output, stderr = mtio.communicate()
                        for line in output.split(b"\n"):
                            if line.strip() == b"":
                                continue

                            print(line)
                            mainthread_error_count += 1
                        mozfile.remove(rawlog)

                if test_config["cleanup"]:
                    # HACK: add the pid to support xperf where we require
                    # the pid in post processing
                    talosconfig.generateTalosConfig(command_args,
                                                    browser_config,
                                                    test_config,
                                                    pid=pcontext.pid)
                    subprocess.call([sys.executable] +
                                    test_config["cleanup"].split())

                # For startup tests, we launch the browser multiple times
                # with the same profile
                for fname in ("sessionstore.js", ".parentlock",
                              "sessionstore.bak"):
                    mozfile.remove(os.path.join(setup.profile_dir, fname))

                # check for xperf errors
                if (os.path.exists(browser_config["error_filename"])
                        or mainthread_error_count > 0):
                    raise TalosRegression(
                        "Talos has found a regression, if you have questions"
                        " ask for help in irc on #perf")

                # add the results from the browser output
                if not run_in_debug_mode(browser_config):
                    test_results.add(
                        "\n".join(pcontext.output),
                        counter_results=(counter_management.results()
                                         if counter_management else None),
                    )

                if setup.gecko_profile:
                    setup.gecko_profile.symbolicate(i)

            finally:
                self.check_for_crashes(browser_config, minidump_dir,
                                       test_config["name"])

        # include global (cross-cycle) counters
        test_results.all_counter_results.extend([{
            key: value
        } for key, value in global_counters.items()])
        for c in test_results.all_counter_results:
            for key, value in c.items():
                LOG.debug("COUNTER %r: %s" % (key, value))

        # if running against a code-coverage instrumented build, move the
        # produced gcda files to a folder where they will be collected later
        if browser_config.get("code_coverage", False):
            setup.collect_or_clean_ccov()

        # return results
        return test_results
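
Both _runTest variants depend on helpers imported from elsewhere in Talos (run_browser, CounterManagement, results.TestResults, mozfile, and the TalosRegression exception), none of which appear in these excerpts. As a sketch only, TalosRegression is presumably a plain Exception subclass along these lines; the actual definition lives in the Talos source and may differ.

    class TalosRegression(Exception):
        # Assumed sketch: raised above when the xperf error file exists or
        # main-thread I/O errors were counted during a cycle.
        pass
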