def run_tests(config, browser_config):
    """Runs the talos tests on the given configuration and generates a report.

    Mutates both ``config``-derived test dicts and ``browser_config`` in
    place (interpolated paths, preferences, version/build metadata), starts
    a local webserver for the duration of the run, and runs each test via
    ``TTest.runTest``.

    Returns a buildbot-style status code:
      0 - all tests ran green
      1 - a TalosRegression was detected (orange)
      2 - an internal talos error occurred (red)
    May also call ``sys.exit(1)`` if changeset/repository info is missing
    outside of --develop mode.

    NOTE(review): this file contains a second, older definition of
    run_tests later on; being defined later, that one shadows this one at
    import time — confirm which definition is intended to survive.
    """
    # get the test data
    tests = config["tests"]
    tests = useBaseTestDefaults(config.get("basetest", {}), tests)
    paths = ["profile_path", "tpmanifest", "extensions", "setup", "cleanup"]
    for test in tests:
        # Check for profile_path, tpmanifest and interpolate based on Talos
        # root https://bugzilla.mozilla.org/show_bug.cgi?id=727711
        # Build command line from config
        for path in paths:
            if test.get(path):
                if path == "extensions":
                    for _index, _ext in enumerate(test["extensions"]):
                        test["extensions"][_index] = utils.interpolate(_ext)
                else:
                    test[path] = utils.interpolate(test[path])
        if test.get("tpmanifest"):
            # Encode the manifest path as a file: URL and mirror it into the
            # browser preference read by the pageloader.
            test["tpmanifest"] = os.path.normpath(
                "file:/%s" % (six.moves.urllib.parse.quote(test["tpmanifest"], "/\\t:\\")))
            test["preferences"]["talos.tpmanifest"] = test["tpmanifest"]

        # if using firstNonBlankPaint, set test preference for it
        # so that the browser pref will be turned on (in ffsetup)
        if test.get("fnbpaint", False):
            LOG.info(
                "Test is using firstNonBlankPaint, browser pref will be turned on"
            )
            test["preferences"][
                "dom.performance.time_to_non_blank_paint.enabled"] = True

        test["setup"] = utils.interpolate(test["setup"])
        test["cleanup"] = utils.interpolate(test["cleanup"])

        if not test.get("profile", False):
            test["profile"] = config.get("profile")

    # Windows needs extra launch flags; other platforms launch bare.
    if mozinfo.os == "win":
        browser_config["extra_args"] = ["-wait-for-browser", "-no-deelevate"]
    else:
        browser_config["extra_args"] = []

    # pass --no-remote to firefox launch, if --develop is specified
    # we do that to allow locally the user to have another running firefox
    # instance
    if browser_config["develop"]:
        browser_config["extra_args"].append("--no-remote")

    # Pass subtests filter argument via a preference
    if browser_config["subtests"]:
        browser_config["preferences"]["talos.subtests"] = browser_config[
            "subtests"]

    # Fission runs need a proxy autoconfig so http(s) traffic is routed to
    # the local talos webserver.
    if browser_config.get("enable_fission", False):
        browser_config["preferences"]["fission.autostart"] = True
        browser_config["preferences"][
            "dom.serviceWorkers.parent_intercept"] = True
        browser_config["preferences"]["network.proxy.type"] = 2
        browser_config["preferences"]["network.proxy.autoconfig_url"] = (
            """data:text/plain, function FindProxyForURL(url, host) { if (url.startsWith('http')) { return 'PROXY %s'; } return 'DIRECT'; }""" % browser_config["webserver"])

    # If --code-coverage files are expected, set flag in browser config so ffsetup knows
    # that it needs to delete any ccov files resulting from browser initialization
    # NOTE: This is only supported in production; local setup of ccov folders and
    # data collection not supported yet, so if attempting to run with --code-coverage
    # flag locally, that is not supported yet
    if config.get("code_coverage", False):
        if browser_config["develop"]:
            raise TalosError("Aborting: talos --code-coverage flag is only "
                             "supported in production")
        else:
            browser_config["code_coverage"] = True

    # set defaults
    testdate = config.get("testdate", "")

    # get the process name from the path to the browser
    if not browser_config["process"]:
        browser_config["process"] = os.path.basename(
            browser_config["browser_path"])

    # fix paths to substitute
    # `os.path.dirname(os.path.abspath(__file__))` for ${talos}
    # https://bugzilla.mozilla.org/show_bug.cgi?id=705809
    browser_config["extensions"] = [
        utils.interpolate(i) for i in browser_config["extensions"]
    ]
    browser_config["bcontroller_config"] = utils.interpolate(
        browser_config["bcontroller_config"])

    # normalize browser path to work across platforms
    browser_config["browser_path"] = os.path.normpath(
        browser_config["browser_path"])

    # Pull name/version/buildid (and, when available, repo/changeset) out of
    # the binary itself.
    binary = browser_config["browser_path"]
    version_info = mozversion.get_version(binary=binary)
    browser_config["browser_name"] = version_info["application_name"]
    browser_config["browser_version"] = version_info["application_version"]
    browser_config["buildid"] = version_info["application_buildid"]
    try:
        browser_config["repository"] = version_info["application_repository"]
        browser_config["sourcestamp"] = version_info["application_changeset"]
    except KeyError:
        # Missing repo/changeset is fatal in automation but tolerated with
        # placeholder values in --develop mode.
        if not browser_config["develop"]:
            print("Abort: unable to find changeset or repository: %s" %
                  version_info)
            sys.exit(1)
        else:
            browser_config["repository"] = "develop"
            browser_config["sourcestamp"] = "develop"

    # get test date in seconds since epoch
    # NOTE(review): `date` is only used in the debug log below; it is not
    # otherwise consumed in this function.
    if testdate:
        date = int(
            time.mktime(time.strptime(testdate, "%a, %d %b %Y %H:%M:%S GMT")))
    else:
        date = int(time.time())
    LOG.debug("using testdate: %d" % date)
    LOG.debug("actual date: %d" % int(time.time()))

    # results container
    talos_results = TalosResults()

    # results links
    if not browser_config["develop"] and not config["gecko_profile"]:
        results_urls = dict(
            # another hack; datazilla stands for Perfherder
            # and do not require url, but a non empty dict is required...
            output_urls=["local.json"],
        )
    else:
        # local mode, output to files
        results_urls = dict(output_urls=[os.path.abspath("local.json")])

    # Webserver must be up before any test runs; stopped in the finally
    # block below.
    httpd = setup_webserver(browser_config["webserver"])
    httpd.start()

    # legacy still required for perfherder data
    talos_results.add_extra_option("e10s")
    talos_results.add_extra_option("stylo")

    # measuring the difference of a a certain thread level
    if config.get("stylothreads", 0) > 0:
        talos_results.add_extra_option("%s_thread" % config["stylothreads"])

    if config["gecko_profile"]:
        talos_results.add_extra_option("gecko-profile")

    # differentiate fission vs non-fission results in perfherder
    if browser_config.get("enable_fission", False):
        talos_results.add_extra_option("fission")

    # differentiate webrender from non-webrender results
    if browser_config["preferences"].get("gfx.webrender.software", False):
        talos_results.add_extra_option("webrender-sw")
    elif browser_config.get("enable_webrender", False):
        talos_results.add_extra_option("webrender")

    # differentiate webgl from webgl-ipc results
    if browser_config["preferences"].get("webgl.out-of-process", False):
        talos_results.add_extra_option("webgl-ipc")

    testname = None

    # run the tests
    timer = utils.Timer()
    LOG.suite_start(tests=[test["name"] for test in tests])
    try:
        for test in tests:
            testname = test["name"]
            LOG.test_start(testname)

            if not test.get("url"):
                # set browser prefs for pageloader test setings (doesn't use cmd line args / url)
                test["url"] = None
                set_tp_preferences(test, browser_config)

            mytest = TTest()

            # some tests like ts_paint return multiple results in a single iteration
            if test.get("firstpaint", False) or test.get("userready", None):
                # we need a 'testeventmap' to tell us which tests each event should map to
                multi_value_result = None
                separate_results_list = []

                test_event_map = test.get("testeventmap", None)
                if test_event_map is None:
                    raise TalosError("Need 'testeventmap' in test.py for %s" %
                                     test.get("name"))

                # run the test
                multi_value_result = mytest.runTest(browser_config, test)
                if multi_value_result is None:
                    raise TalosError("Abort: no results returned for %s" %
                                     test.get("name"))

                # parse out the multi-value results, and 'fake it' to appear like separate tests
                separate_results_list = convert_to_separate_test_results(
                    multi_value_result, test_event_map)

                # now we have three separate test results, store them
                for test_result in separate_results_list:
                    talos_results.add(test_result)

            # some tests like bloom_basic run two separate tests and then compare those values
            # we want the results in perfherder to only be the actual difference between those
            # and store the base and reference test replicates in results.json for upload
            elif test.get("base_vs_ref", False):
                # run the test, results will be reported for each page like two tests in the suite
                base_and_reference_results = mytest.runTest(
                    browser_config, test)
                # now compare each test, and create a new test object for the comparison
                talos_results.add(
                    make_comparison_result(base_and_reference_results))
            else:
                # just expecting regular test - one result value per iteration
                talos_results.add(mytest.runTest(browser_config, test))
            LOG.test_end(testname, status="OK")
    except TalosRegression as exc:
        LOG.error("Detected a regression for %s" % testname)
        # by returning 1, we report an orange to buildbot
        # http://docs.buildbot.net/latest/developer/results.html
        LOG.test_end(testname,
                     status="FAIL",
                     message=str(exc),
                     stack=traceback.format_exc())
        return 1
    except Exception as exc:
        # NOTE: if we get into this condition, talos has an internal
        # problem and cannot continue
        # this will prevent future tests from running
        LOG.test_end(testname,
                     status="ERROR",
                     message=str(exc),
                     stack=traceback.format_exc())
        # indicate a failure to buildbot, turn the job red
        return 2
    finally:
        # Always emit suite_end and tear the webserver down, even on the
        # early-return error paths above.
        LOG.suite_end()
        httpd.stop()

    LOG.info("Completed test suite (%s)" % timer.elapsed())

    # output results
    if results_urls and not browser_config["no_upload_results"]:
        talos_results.output(results_urls)
        if browser_config["develop"] or config["gecko_profile"]:
            print("Thanks for running Talos locally. Results are in %s" %
                  (results_urls["output_urls"]))

    # when running talos locally with gecko profiling on, use the view-gecko-profile
    # tool to automatically load the latest gecko profile in profiler.firefox.com
    if config["gecko_profile"] and browser_config["develop"]:
        if os.environ.get("DISABLE_PROFILE_LAUNCH", "0") == "1":
            LOG.info(
                "Not launching profiler.firefox.com because DISABLE_PROFILE_LAUNCH=1"
            )
        else:
            view_gecko_profile_from_talos()

    # we will stop running tests on a failed test, or we will return 0 for
    # green
    return 0
def run_tests(config, browser_config):
    """Runs the talos tests on the given configuration and generates a report.

    Older variant of run_tests: supports mitmproxy page playback, reports
    the 'geckoProfile' extra option, and uses Python-2-era ``urllib.quote``.

    Mutates the test dicts and ``browser_config`` in place, starts a local
    webserver for the run, and returns a buildbot-style status code
    (0 green, 1 regression/orange, 2 internal error/red); may call
    ``sys.exit(1)`` when changeset/repository info is missing outside
    --develop mode.

    NOTE(review): this file also contains a newer run_tests defined earlier;
    since this definition appears later it is the one in effect — confirm
    the duplication is intentional.
    """
    # get the test data
    tests = config['tests']
    tests = useBaseTestDefaults(config.get('basetest', {}), tests)
    paths = ['profile_path', 'tpmanifest', 'extensions', 'setup', 'cleanup']
    for test in tests:
        # Check for profile_path, tpmanifest and interpolate based on Talos
        # root https://bugzilla.mozilla.org/show_bug.cgi?id=727711
        # Build command line from config
        for path in paths:
            if test.get(path):
                if path == 'extensions':
                    for _index, _ext in enumerate(test['extensions']):
                        test['extensions'][_index] = utils.interpolate(_ext)
                else:
                    test[path] = utils.interpolate(test[path])
        if test.get('tpmanifest'):
            # Encode the manifest path as a file: URL and mirror it into the
            # browser preference read by the pageloader.
            # NOTE(review): urllib.quote is Python 2 only — under Python 3
            # this would need urllib.parse.quote.
            test['tpmanifest'] = \
                os.path.normpath('file:/%s' % (urllib.quote(test['tpmanifest'],
                                                            '/\\t:\\')))
            test['preferences']['talos.tpmanifest'] = test['tpmanifest']

        # if using firstNonBlankPaint, set test preference for it
        # so that the browser pref will be turned on (in ffsetup)
        if test.get('fnbpaint', False):
            LOG.info(
                "Test is using firstNonBlankPaint, browser pref will be turned on"
            )
            test['preferences'][
                'dom.performance.time_to_non_blank_paint.enabled'] = True

        test['setup'] = utils.interpolate(test['setup'])
        test['cleanup'] = utils.interpolate(test['cleanup'])

        if not test.get('profile', False):
            test['profile'] = config.get('profile')

    # Windows needs extra launch flags; other platforms launch bare.
    if mozinfo.os == 'win':
        browser_config['extra_args'] = ['-wait-for-browser', '-no-deelevate']
    else:
        browser_config['extra_args'] = []

    # pass --no-remote to firefox launch, if --develop is specified
    # we do that to allow locally the user to have another running firefox
    # instance
    if browser_config['develop']:
        browser_config['extra_args'].append('--no-remote')

    # Pass subtests filter argument via a preference
    if browser_config['subtests']:
        browser_config['preferences']['talos.subtests'] = browser_config[
            'subtests']

    # If --code-coverage files are expected, set flag in browser config so ffsetup knows
    # that it needs to delete any ccov files resulting from browser initialization
    # NOTE: This is only supported in production; local setup of ccov folders and
    # data collection not supported yet, so if attempting to run with --code-coverage
    # flag locally, that is not supported yet
    if config.get('code_coverage', False):
        if browser_config['develop']:
            raise TalosError('Aborting: talos --code-coverage flag is only '
                             'supported in production')
        else:
            browser_config['code_coverage'] = True

    # set defaults
    testdate = config.get('testdate', '')

    # get the process name from the path to the browser
    if not browser_config['process']:
        browser_config['process'] = \
            os.path.basename(browser_config['browser_path'])

    # fix paths to substitute
    # `os.path.dirname(os.path.abspath(__file__))` for ${talos}
    # https://bugzilla.mozilla.org/show_bug.cgi?id=705809
    browser_config['extensions'] = [
        utils.interpolate(i) for i in browser_config['extensions']
    ]
    browser_config['bcontroller_config'] = \
        utils.interpolate(browser_config['bcontroller_config'])

    # normalize browser path to work across platforms
    browser_config['browser_path'] = \
        os.path.normpath(browser_config['browser_path'])

    # Pull name/version/buildid (and, when available, repo/changeset) out of
    # the binary itself.
    binary = browser_config["browser_path"]
    version_info = mozversion.get_version(binary=binary)
    browser_config['browser_name'] = version_info['application_name']
    browser_config['browser_version'] = version_info['application_version']
    browser_config['buildid'] = version_info['application_buildid']
    try:
        browser_config['repository'] = version_info['application_repository']
        browser_config['sourcestamp'] = version_info['application_changeset']
    except KeyError:
        # Missing repo/changeset is fatal in automation but tolerated with
        # placeholder values in --develop mode.
        if not browser_config['develop']:
            print("Abort: unable to find changeset or repository: %s" %
                  version_info)
            sys.exit(1)
        else:
            browser_config['repository'] = 'develop'
            browser_config['sourcestamp'] = 'develop'

    # get test date in seconds since epoch
    # NOTE(review): `date` is only used in the debug log below; it is not
    # otherwise consumed in this function.
    if testdate:
        date = int(
            time.mktime(time.strptime(testdate, '%a, %d %b %Y %H:%M:%S GMT')))
    else:
        date = int(time.time())
    LOG.debug("using testdate: %d" % date)
    LOG.debug("actual date: %d" % int(time.time()))

    # results container
    talos_results = TalosResults()

    # results links
    if not browser_config['develop'] and not config['gecko_profile']:
        results_urls = dict(
            # another hack; datazilla stands for Perfherder
            # and do not require url, but a non empty dict is required...
            output_urls=['local.json'],
        )
    else:
        # local mode, output to files
        results_urls = dict(output_urls=[os.path.abspath('local.json')])

    # Webserver must be up before any test runs; stopped in the finally
    # block below.
    httpd = setup_webserver(browser_config['webserver'])
    httpd.start()

    # legacy still required for perfherder data
    talos_results.add_extra_option('e10s')
    talos_results.add_extra_option('stylo')

    # measuring the difference of a a certain thread level
    if config.get('stylothreads', 0) > 0:
        talos_results.add_extra_option('%s_thread' % config['stylothreads'])

    if config['gecko_profile']:
        talos_results.add_extra_option('geckoProfile')

    # some tests use mitmproxy to playback pages
    mitmproxy_recordings_list = config.get('mitmproxy', False)
    if mitmproxy_recordings_list is not False:
        # needed so can tell talos ttest to allow external connections
        browser_config['mitmproxy'] = True

        # start mitmproxy playback; this also generates the CA certificate
        mitmdump_path = config.get('mitmdumpPath', False)
        if mitmdump_path is False:
            # cannot continue, need path for mitmdump playback tool
            raise TalosError(
                'Aborting: mitmdumpPath not provided on cmd line but is required'
            )

        mitmproxy_recording_path = os.path.join(here, 'mitmproxy')
        mitmproxy_proc = mitmproxy.start_mitmproxy_playback(
            mitmdump_path, mitmproxy_recording_path,
            mitmproxy_recordings_list.split(), browser_config['browser_path'])

        # install the generated CA certificate into Firefox
        # mitmproxy cert setup needs path to mozharness install; mozharness has set this
        mitmproxy.install_mitmproxy_cert(mitmproxy_proc,
                                         browser_config['browser_path'])

    testname = None

    # run the tests
    timer = utils.Timer()
    LOG.suite_start(tests=[test['name'] for test in tests])
    try:
        for test in tests:
            testname = test['name']
            LOG.test_start(testname)

            if not test.get('url'):
                # set browser prefs for pageloader test setings (doesn't use cmd line args / url)
                test['url'] = None
                set_tp_preferences(test, browser_config)

            mytest = TTest()

            # some tests like ts_paint return multiple results in a single iteration
            if test.get('firstpaint', False) or test.get('userready', None):
                # we need a 'testeventmap' to tell us which tests each event should map to
                multi_value_result = None
                separate_results_list = []

                test_event_map = test.get('testeventmap', None)
                if test_event_map is None:
                    raise TalosError("Need 'testeventmap' in test.py for %s" %
                                     test.get('name'))

                # run the test
                multi_value_result = mytest.runTest(browser_config, test)
                if multi_value_result is None:
                    raise TalosError("Abort: no results returned for %s" %
                                     test.get('name'))

                # parse out the multi-value results, and 'fake it' to appear like separate tests
                separate_results_list = convert_to_separate_test_results(
                    multi_value_result, test_event_map)

                # now we have three separate test results, store them
                for test_result in separate_results_list:
                    talos_results.add(test_result)

            # some tests like bloom_basic run two separate tests and then compare those values
            # we want the results in perfherder to only be the actual difference between those
            # and store the base and reference test replicates in results.json for upload
            elif test.get('base_vs_ref', False):
                # run the test, results will be reported for each page like two tests in the suite
                base_and_reference_results = mytest.runTest(
                    browser_config, test)
                # now compare each test, and create a new test object for the comparison
                talos_results.add(
                    make_comparison_result(base_and_reference_results))
            else:
                # just expecting regular test - one result value per iteration
                talos_results.add(mytest.runTest(browser_config, test))
            LOG.test_end(testname, status='OK')
    except TalosRegression as exc:
        LOG.error("Detected a regression for %s" % testname)
        # by returning 1, we report an orange to buildbot
        # http://docs.buildbot.net/latest/developer/results.html
        LOG.test_end(testname,
                     status='FAIL',
                     message=str(exc),
                     stack=traceback.format_exc())
        return 1
    except Exception as exc:
        # NOTE: if we get into this condition, talos has an internal
        # problem and cannot continue
        # this will prevent future tests from running
        LOG.test_end(testname,
                     status='ERROR',
                     message=str(exc),
                     stack=traceback.format_exc())
        # indicate a failure to buildbot, turn the job red
        return 2
    finally:
        # Always emit suite_end and tear the webserver down, even on the
        # early-return error paths above.
        LOG.suite_end()
        httpd.stop()

    LOG.info("Completed test suite (%s)" % timer.elapsed())

    # if mitmproxy was used for page playback, stop it
    if mitmproxy_recordings_list is not False:
        mitmproxy.stop_mitmproxy_playback(mitmproxy_proc)

    # output results
    if results_urls and not browser_config['no_upload_results']:
        talos_results.output(results_urls)
        if browser_config['develop'] or config['gecko_profile']:
            print("Thanks for running Talos locally. Results are in %s" %
                  (results_urls['output_urls']))

    # when running talos locally with gecko profiling on, use the view-gecko-profile
    # tool to automatically load the latest gecko profile in perf-html.io
    if config['gecko_profile'] and browser_config['develop']:
        if os.environ.get('DISABLE_PROFILE_LAUNCH', '0') == '1':
            LOG.info(
                "Not launching perf-html.io because DISABLE_PROFILE_LAUNCH=1")
        else:
            view_gecko_profile(config['browser_path'])

    # we will stop running tests on a failed test, or we will return 0 for
    # green
    return 0
def test_timer(self):
    """Timer.elapsed() formats a 3-second interval as 'HH:MM:SS'."""
    timer = utils.Timer()
    # Rewind the recorded start time by three seconds instead of sleeping,
    # so the test runs instantly while elapsed() observes a 3 s interval.
    timer._start_time -= 3  # remove three seconds for the test
    # Use assertEqual: the assertEquals alias is deprecated and was removed
    # in Python 3.12.
    self.assertEqual(timer.elapsed(), '00:00:03')