def get_test(config, global_overrides, counters, test_instance):
    mozAfterPaint = getattr(test_instance, 'tpmozafterpaint', None)
    test_instance.update(**global_overrides)

    # update original value of mozAfterPaint, this could be 'false',
    # so check for None
    if mozAfterPaint is not None:
        test_instance.tpmozafterpaint = mozAfterPaint

    # fix up url
    url = getattr(test_instance, 'url', None)
    if url:
        test_instance.url = utils.interpolate(convert_url(config, url))

    # fix up tpmanifest
    tpmanifest = getattr(test_instance, 'tpmanifest', None)
    if tpmanifest:
        test_instance.tpmanifest = \
            build_manifest(config, utils.interpolate(tpmanifest))

    # add any counters
    if counters:
        keys = ('linux_counters', 'mac_counters',
                'win_counters', 'w7_counters', 'xperf_counters')
        for key in keys:
            if key not in test_instance.keys:
                # only populate attributes that will be output
                continue
            if not isinstance(getattr(test_instance, key, None), list):
                setattr(test_instance, key, [])
            _counters = getattr(test_instance, key)
            _counters.extend(
                [counter for counter in counters if counter not in _counters])
    return dict(test_instance.items())
def get_test(config, global_overrides, counters, test_instance):
    mozAfterPaint = getattr(test_instance, "tpmozafterpaint", None)
    hero = getattr(test_instance, "tphero", None)
    firstPaint = getattr(test_instance, "firstpaint", None)
    userReady = getattr(test_instance, "userready", None)
    firstNonBlankPaint = getattr(test_instance, "fnbpaint", None)
    pdfPaint = getattr(test_instance, "pdfpaint", None)

    test_instance.update(**global_overrides)

    # update original value of mozAfterPaint, this could be 'false',
    # so check for None
    if mozAfterPaint is not None:
        test_instance.tpmozafterpaint = mozAfterPaint
    if firstNonBlankPaint is not None:
        test_instance.fnbpaint = firstNonBlankPaint
    if firstPaint is not None:
        test_instance.firstpaint = firstPaint
    if userReady is not None:
        test_instance.userready = userReady
    if hero is not None:
        test_instance.tphero = hero
    if pdfPaint is not None:
        test_instance.pdfpaint = pdfPaint

    # fix up url
    url = getattr(test_instance, "url", None)
    if url:
        test_instance.url = utils.interpolate(convert_url(config, url))

    # fix up tpmanifest
    tpmanifest = getattr(test_instance, "tpmanifest", None)
    if tpmanifest:
        is_multidomain = getattr(test_instance, "multidomain", False)
        test_instance.tpmanifest = build_manifest(
            config, is_multidomain, utils.interpolate(tpmanifest)
        )

    # add any counters
    if counters:
        keys = (
            "linux_counters",
            "mac_counters",
            "win_counters",
            "w7_counters",
            "xperf_counters",
        )
        for key in keys:
            if key not in test_instance.keys:
                # only populate attributes that will be output
                continue
            if not isinstance(getattr(test_instance, key, None), list):
                setattr(test_instance, key, [])
            _counters = getattr(test_instance, key)
            _counters.extend(
                [counter for counter in counters if counter not in _counters]
            )
    return dict(test_instance.items())
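# --- Illustrative sketch, not part of the original talos sources ---
# The get_test() variants above treat test_instance as a duck-typed object:
# they read attributes with getattr(), apply overrides via update(**kw),
# consult a class-level `keys` sequence to know which attributes will be
# emitted, and finally call items() to produce the returned dict. The
# stand-in below is hypothetical; it only makes that assumed contract
# explicit and is not the real talos Test class.
class FakeTest(object):
    keys = ['url', 'tpmanifest', 'tpmozafterpaint', 'linux_counters']

    tpmozafterpaint = False
    url = 'http://localhost/page.html'
    tpmanifest = None
    linux_counters = None

    def update(self, **overrides):
        # global_overrides are applied directly onto the instance
        self.__dict__.update(overrides)

    def items(self):
        # only emit keys that currently have a value, mirroring how the
        # resulting dict is consumed downstream
        return [(k, getattr(self, k)) for k in self.keys
                if getattr(self, k) is not None]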
def _init_profile(self):
    preferences = dict(self.browser_config['preferences'])
    if self.test_config.get('preferences'):
        test_prefs = dict([
            (i, utils.parse_pref(j))
            for i, j in self.test_config['preferences'].items()
        ])
        preferences.update(test_prefs)

    # interpolate webserver value in prefs
    webserver = self.browser_config['webserver']
    if '://' not in webserver:
        webserver = 'http://' + webserver
    for name, value in preferences.items():
        if type(value) is str:
            value = utils.interpolate(value, webserver=webserver)
            preferences[name] = value

    extensions = self.browser_config['extensions'][:]
    if self.test_config.get('extensions'):
        extensions.append(self.test_config['extensions'])

    # downloading a profile instead of using the empty one
    if self.test_config['profile'] is not None:
        path = heavy.download_profile(self.test_config['profile'])
        self.test_config['profile_path'] = path

    profile = Profile.clone(
        os.path.normpath(self.test_config['profile_path']),
        self.profile_dir,
        restore=False)
    profile.set_preferences(preferences)

    # installing addons
    profile.addon_manager.install_addons(extensions)

    # installing webextensions
    webextensions = self.test_config.get('webextensions', None)
    if isinstance(webextensions, basestring):
        webextensions = [webextensions]
    if webextensions is not None:
        for webext in webextensions:
            filename = utils.interpolate(webext)
            if mozinfo.os == 'win':
                filename = filename.replace('/', '\\')
            if not filename.endswith('.xpi'):
                continue
            if not os.path.exists(filename):
                continue
            profile.addon_manager.install_from_path(filename)
def _init_profile(self):
    preferences = dict(self.browser_config['preferences'])
    if self.test_config.get('preferences'):
        test_prefs = dict(
            [(i, utils.parse_pref(j))
             for i, j in self.test_config['preferences'].items()]
        )
        preferences.update(test_prefs)

    # interpolate webserver value in prefs
    webserver = self.browser_config['webserver']
    if '://' not in webserver:
        webserver = 'http://' + webserver
    for name, value in preferences.items():
        if type(value) is str:
            value = utils.interpolate(value, webserver=webserver)
            preferences[name] = value

    extensions = self.browser_config['extensions'][:]
    if self.test_config.get('extensions'):
        extensions.append(self.test_config['extensions'])

    profile = Profile.clone(
        os.path.normpath(self.test_config['profile_path']),
        self.profile_dir,
        restore=False)
    profile.set_preferences(preferences)
    profile.addon_manager.install_addons(extensions)
def _init_profile(self):
    preferences = dict(self.browser_config['preferences'])
    if self.test_config.get('preferences'):
        test_prefs = dict([
            (i, utils.parse_pref(j))
            for i, j in self.test_config['preferences'].items()
        ])
        preferences.update(test_prefs)

    # interpolate webserver value in prefs
    webserver = self.browser_config['webserver']
    if '://' not in webserver:
        webserver = 'http://' + webserver
    for name, value in preferences.items():
        if type(value) is str:
            value = utils.interpolate(value, webserver=webserver)
            preferences[name] = value

    extensions = self.browser_config['extensions'][:]
    if self.test_config.get('extensions'):
        extensions.append(self.test_config['extensions'])
    if self.browser_config['develop'] or \
            self.browser_config['branch_name'] == 'Try':
        extensions = [os.path.dirname(i) for i in extensions]

    profile = Profile.clone(
        os.path.normpath(self.test_config['profile_path']),
        self.profile_dir,
        restore=False)
    profile.set_preferences(preferences)
    profile.addon_manager.install_addons(extensions)
def get_test(config, global_overrides, counters, test_instance):
    mozAfterPaint = getattr(test_instance, "tpmozafterpaint", None)

    # add test_name_extension to config
    if mozAfterPaint:
        test_instance.test_name_extension = "_paint"

    test_instance.update(**global_overrides)

    # update original value of mozAfterPaint, this could be 'false',
    # so check for None
    if mozAfterPaint is not None:
        test_instance.tpmozafterpaint = mozAfterPaint

    # fix up url
    url = getattr(test_instance, "url", None)
    if url:
        test_instance.url = utils.interpolate(convert_url(config, url))

    # fix up tpmanifest
    tpmanifest = getattr(test_instance, "tpmanifest", None)
    if tpmanifest and config.get("develop"):
        test_instance.tpmanifest = build_manifest(
            config, utils.interpolate(tpmanifest))

    # add any counters
    if counters:
        keys = ("linux_counters", "mac_counters",
                "win_counters", "w7_counters", "xperf_counters")
        for key in keys:
            if key not in test_instance.keys:
                # only populate attributes that will be output
                continue
            if not isinstance(getattr(test_instance, key, None), list):
                setattr(test_instance, key, [])
            _counters = getattr(test_instance, key)
            _counters.extend(
                [counter for counter in counters if counter not in _counters])
    return dict(test_instance.items())
def test_interpolate_custom_placeholders(self):
    self.assertEquals(
        utils.interpolate('${talos} ${foo} abc', foo='bar', unused=1),
        utils.here + ' bar abc')
def test_interpolate_talos_is_always_defined(self):
    self.assertEquals(utils.interpolate('${talos}'), utils.here)
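# --- Illustrative sketch, not part of the original talos sources ---
# The two tests above pin down the observable behaviour of utils.interpolate:
# '${talos}' always expands to the talos directory (utils.here), extra
# keyword arguments fill the remaining ${...} placeholders, and unused
# keywords are ignored. A minimal helper consistent with those tests could
# be built on string.Template; the real implementation may differ.
import os
from string import Template

here = os.path.dirname(os.path.abspath(__file__))


def interpolate(template, **kwargs):
    kwargs.setdefault('talos', here)
    # safe_substitute ignores extra keyword arguments and leaves unknown
    # placeholders untouched, matching what the tests expect
    return Template(template).safe_substitute(**kwargs)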
def _init_profile(self):
    preferences = dict(self.browser_config['preferences'])
    if self.test_config.get('preferences'):
        test_prefs = dict(
            [(i, utils.parse_pref(j))
             for i, j in self.test_config['preferences'].items()]
        )
        preferences.update(test_prefs)

    # interpolate webserver value in prefs
    webserver = self.browser_config['webserver']
    if '://' not in webserver:
        webserver = 'http://' + webserver
    for name, value in preferences.items():
        if type(value) is str:
            value = utils.interpolate(value, webserver=webserver)
            preferences[name] = value

    extensions = self.browser_config['extensions'][:]
    if self.test_config.get('extensions'):
        extensions.append(self.test_config['extensions'])

    # downloading a profile instead of using the empty one
    if self.test_config['profile'] is not None:
        path = heavy.download_profile(self.test_config['profile'])
        self.test_config['profile_path'] = path

    profile_path = os.path.normpath(self.test_config['profile_path'])
    LOG.info("Cloning profile located at %s" % profile_path)

    def _feedback(directory, content):
        # Called by shutil.copytree on each visited directory.
        # Used here to display info.
        #
        # Returns the items that should be ignored by
        # shutil.copytree when copying the tree, so always returns
        # an empty list.
        sub = directory.split(profile_path)[-1].lstrip("/")
        if sub:
            LOG.info("=> %s" % sub)
        return []

    profile = Profile.clone(profile_path,
                            self.profile_dir,
                            ignore=_feedback,
                            restore=False)
    profile.set_preferences(preferences)

    # installing addons
    LOG.info("Installing Add-ons")
    profile.addon_manager.install_addons(extensions)

    # installing webextensions
    webextensions = self.test_config.get('webextensions', None)
    if isinstance(webextensions, basestring):
        webextensions = [webextensions]
    if webextensions is not None:
        LOG.info("Installing Webextensions")
        for webext in webextensions:
            filename = utils.interpolate(webext)
            if mozinfo.os == 'win':
                filename = filename.replace('/', '\\')
            if not filename.endswith('.xpi'):
                continue
            if not os.path.exists(filename):
                continue
            profile.addon_manager.install_from_path(filename)
def _init_profile(self):
    preferences = dict(self.browser_config['preferences'])
    if self.test_config.get('preferences'):
        test_prefs = dict(
            [(i, utils.parse_pref(j))
             for i, j in self.test_config['preferences'].items()]
        )
        preferences.update(test_prefs)

    # interpolate webserver value in prefs
    webserver = self.browser_config['webserver']
    if '://' not in webserver:
        webserver = 'http://' + webserver
    for name, value in preferences.items():
        if type(value) is str:
            value = utils.interpolate(value, webserver=webserver)
            preferences[name] = value

    extensions = self.browser_config['extensions'][:]
    if self.test_config.get('extensions'):
        extensions.extend(self.test_config['extensions'])

    # downloading a profile instead of using the empty one
    if self.test_config['profile'] is not None:
        path = heavy.download_profile(self.test_config['profile'])
        self.test_config['profile_path'] = path

    profile_path = os.path.normpath(self.test_config['profile_path'])
    LOG.info("Cloning profile located at %s" % profile_path)

    def _feedback(directory, content):
        # Called by shutil.copytree on each visited directory.
        # Used here to display info.
        #
        # Returns the items that should be ignored by
        # shutil.copytree when copying the tree, so always returns
        # an empty list.
        sub = directory.split(profile_path)[-1].lstrip("/")
        if sub:
            LOG.info("=> %s" % sub)
        return []

    profile = Profile.clone(profile_path,
                            self.profile_dir,
                            ignore=_feedback,
                            restore=False)
    profile.set_preferences(preferences)

    # installing addons
    LOG.info("Installing Add-ons:")
    LOG.info(extensions)
    profile.addon_manager.install_addons(extensions)

    # installing webextensions
    webextensions = self.test_config.get('webextensions', None)
    if isinstance(webextensions, basestring):
        webextensions = [webextensions]
    if webextensions is not None:
        LOG.info("Installing Webextensions:")
        for webext in webextensions:
            filename = utils.interpolate(webext)
            if mozinfo.os == 'win':
                filename = filename.replace('/', '\\')
            if not filename.endswith('.xpi'):
                continue
            if not os.path.exists(filename):
                continue
            LOG.info(filename)
            profile.addon_manager.install_from_path(filename)
def run_tests(config, browser_config):
    """Runs the talos tests on the given configuration and generates a report.
    """
    # get the test data
    tests = config['tests']
    tests = useBaseTestDefaults(config.get('basetest', {}), tests)
    paths = ['profile_path', 'tpmanifest', 'extensions', 'setup', 'cleanup']
    for test in tests:
        # Check for profile_path, tpmanifest and interpolate based on Talos
        # root https://bugzilla.mozilla.org/show_bug.cgi?id=727711
        # Build command line from config
        for path in paths:
            if test.get(path):
                if path == 'extensions':
                    for _index, _ext in enumerate(test['extensions']):
                        test['extensions'][_index] = utils.interpolate(_ext)
                else:
                    test[path] = utils.interpolate(test[path])
        if test.get('tpmanifest'):
            test['tpmanifest'] = \
                os.path.normpath('file:/%s' % (
                    urllib.quote(test['tpmanifest'], '/\\t:\\')))
            test['preferences']['talos.tpmanifest'] = test['tpmanifest']

        # if using firstNonBlankPaint, set test preference for it
        # so that the browser pref will be turned on (in ffsetup)
        if test.get('fnbpaint', False):
            LOG.info(
                "Test is using firstNonBlankPaint, browser pref will be turned on"
            )
            test['preferences'][
                'dom.performance.time_to_non_blank_paint.enabled'] = True

        test['setup'] = utils.interpolate(test['setup'])
        test['cleanup'] = utils.interpolate(test['cleanup'])

        if not test.get('profile', False):
            test['profile'] = config.get('profile')

    if mozinfo.os == 'win':
        browser_config['extra_args'] = ['-wait-for-browser', '-no-deelevate']
    else:
        browser_config['extra_args'] = []

    # pass --no-remote to firefox launch, if --develop is specified
    # we do that to allow locally the user to have another running firefox
    # instance
    if browser_config['develop']:
        browser_config['extra_args'].append('--no-remote')

    # Pass subtests filter argument via a preference
    if browser_config['subtests']:
        browser_config['preferences']['talos.subtests'] = \
            browser_config['subtests']

    # If --code-coverage files are expected, set flag in browser config so
    # ffsetup knows that it needs to delete any ccov files resulting from
    # browser initialization
    # NOTE: This is only supported in production; local setup of ccov folders
    # and data collection not supported yet, so if attempting to run with
    # --code-coverage flag locally, that is not supported yet
    if config.get('code_coverage', False):
        if browser_config['develop']:
            raise TalosError('Aborting: talos --code-coverage flag is only '
                             'supported in production')
        else:
            browser_config['code_coverage'] = True

    # set defaults
    testdate = config.get('testdate', '')

    # get the process name from the path to the browser
    if not browser_config['process']:
        browser_config['process'] = \
            os.path.basename(browser_config['browser_path'])

    # fix paths to substitute
    # `os.path.dirname(os.path.abspath(__file__))` for ${talos}
    # https://bugzilla.mozilla.org/show_bug.cgi?id=705809
    browser_config['extensions'] = [
        utils.interpolate(i) for i in browser_config['extensions']
    ]
    browser_config['bcontroller_config'] = \
        utils.interpolate(browser_config['bcontroller_config'])

    # normalize browser path to work across platforms
    browser_config['browser_path'] = \
        os.path.normpath(browser_config['browser_path'])

    binary = browser_config["browser_path"]
    version_info = mozversion.get_version(binary=binary)
    browser_config['browser_name'] = version_info['application_name']
    browser_config['browser_version'] = version_info['application_version']
    browser_config['buildid'] = version_info['application_buildid']
    try:
        browser_config['repository'] = version_info['application_repository']
        browser_config['sourcestamp'] = version_info['application_changeset']
    except KeyError:
        if not browser_config['develop']:
            print("Abort: unable to find changeset or repository: %s" %
                  version_info)
            sys.exit(1)
        else:
            browser_config['repository'] = 'develop'
            browser_config['sourcestamp'] = 'develop'

    # get test date in seconds since epoch
    if testdate:
        date = int(
            time.mktime(time.strptime(testdate, '%a, %d %b %Y %H:%M:%S GMT')))
    else:
        date = int(time.time())
    LOG.debug("using testdate: %d" % date)
    LOG.debug("actual date: %d" % int(time.time()))

    # results container
    talos_results = TalosResults()

    # results links
    if not browser_config['develop'] and not config['gecko_profile']:
        results_urls = dict(
            # another hack; datazilla stands for Perfherder
            # and do not require url, but a non empty dict is required...
            output_urls=['local.json'],
        )
    else:
        # local mode, output to files
        results_urls = dict(output_urls=[os.path.abspath('local.json')])

    httpd = setup_webserver(browser_config['webserver'])
    httpd.start()

    # legacy still required for perfherder data
    talos_results.add_extra_option('e10s')
    talos_results.add_extra_option('stylo')

    # measuring the difference of a certain thread level
    if config.get('stylothreads', 0) > 0:
        talos_results.add_extra_option('%s_thread' % config['stylothreads'])

    if config['gecko_profile']:
        talos_results.add_extra_option('geckoProfile')

    # some tests use mitmproxy to playback pages
    mitmproxy_recordings_list = config.get('mitmproxy', False)
    if mitmproxy_recordings_list is not False:
        # needed so can tell talos ttest to allow external connections
        browser_config['mitmproxy'] = True

        # start mitmproxy playback; this also generates the CA certificate
        mitmdump_path = config.get('mitmdumpPath', False)
        if mitmdump_path is False:
            # cannot continue, need path for mitmdump playback tool
            raise TalosError(
                'Aborting: mitmdumpPath not provided on cmd line but is required'
            )

        mitmproxy_recording_path = os.path.join(here, 'mitmproxy')
        mitmproxy_proc = mitmproxy.start_mitmproxy_playback(
            mitmdump_path, mitmproxy_recording_path,
            mitmproxy_recordings_list.split(),
            browser_config['browser_path'])

        # install the generated CA certificate into Firefox
        # mitmproxy cert setup needs path to mozharness install; mozharness
        # has set this
        mitmproxy.install_mitmproxy_cert(mitmproxy_proc,
                                         browser_config['browser_path'])

    testname = None

    # run the tests
    timer = utils.Timer()
    LOG.suite_start(tests=[test['name'] for test in tests])
    try:
        for test in tests:
            testname = test['name']
            LOG.test_start(testname)

            if not test.get('url'):
                # set browser prefs for pageloader test settings
                # (doesn't use cmd line args / url)
                test['url'] = None
                set_tp_preferences(test, browser_config)

            mytest = TTest()

            # some tests like ts_paint return multiple results in a single
            # iteration
            if test.get('firstpaint', False) or test.get('userready', None):
                # we need a 'testeventmap' to tell us which tests each event
                # should map to
                multi_value_result = None
                separate_results_list = []

                test_event_map = test.get('testeventmap', None)
                if test_event_map is None:
                    raise TalosError("Need 'testeventmap' in test.py for %s"
                                     % test.get('name'))

                # run the test
                multi_value_result = mytest.runTest(browser_config, test)
                if multi_value_result is None:
                    raise TalosError("Abort: no results returned for %s"
                                     % test.get('name'))

                # parse out the multi-value results, and 'fake it' to appear
                # like separate tests
                separate_results_list = convert_to_separate_test_results(
                    multi_value_result, test_event_map)

                # now we have three separate test results, store them
                for test_result in separate_results_list:
                    talos_results.add(test_result)

            # some tests like bloom_basic run two separate tests and then
            # compare those values; we want the results in perfherder to only
            # be the actual difference between those, and store the base and
            # reference test replicates in results.json for upload
            elif test.get('base_vs_ref', False):
                # run the test, results will be reported for each page like
                # two tests in the suite
                base_and_reference_results = mytest.runTest(
                    browser_config, test)
                # now compare each test, and create a new test object for the
                # comparison
                talos_results.add(
                    make_comparison_result(base_and_reference_results))
            else:
                # just expecting regular test - one result value per iteration
                talos_results.add(mytest.runTest(browser_config, test))
            LOG.test_end(testname, status='OK')
    except TalosRegression as exc:
        LOG.error("Detected a regression for %s" % testname)
        # by returning 1, we report an orange to buildbot
        # http://docs.buildbot.net/latest/developer/results.html
        LOG.test_end(testname, status='FAIL', message=str(exc),
                     stack=traceback.format_exc())
        return 1
    except Exception as exc:
        # NOTE: if we get into this condition, talos has an internal
        # problem and cannot continue
        # this will prevent future tests from running
        LOG.test_end(testname, status='ERROR', message=str(exc),
                     stack=traceback.format_exc())
        # indicate a failure to buildbot, turn the job red
        return 2
    finally:
        LOG.suite_end()
        httpd.stop()

    LOG.info("Completed test suite (%s)" % timer.elapsed())

    # if mitmproxy was used for page playback, stop it
    if mitmproxy_recordings_list is not False:
        mitmproxy.stop_mitmproxy_playback(mitmproxy_proc)

    # output results
    if results_urls and not browser_config['no_upload_results']:
        talos_results.output(results_urls)
        if browser_config['develop'] or config['gecko_profile']:
            print("Thanks for running Talos locally. Results are in %s"
                  % (results_urls['output_urls']))

    # when running talos locally with gecko profiling on, use the
    # view-gecko-profile tool to automatically load the latest gecko profile
    # in perf-html.io
    if config['gecko_profile'] and browser_config['develop']:
        if os.environ.get('DISABLE_PROFILE_LAUNCH', '0') == '1':
            LOG.info(
                "Not launching perf-html.io because DISABLE_PROFILE_LAUNCH=1")
        else:
            view_gecko_profile(config['browser_path'])

    # we will stop running tests on a failed test, or we will return 0 for
    # green
    return 0
def _runTest(self, browser_config, test_config, setup):
    minidump_dir = os.path.join(setup.profile_dir, 'minidumps')
    counters = test_config.get('%s_counters' % self._get_counter_prefix(), [])
    resolution = test_config['resolution']

    # add the mainthread_io to the environment variable, as defined
    # in test.py configs
    here = os.path.dirname(os.path.realpath(__file__))
    if test_config['mainthread']:
        mainthread_io = os.path.join(here, 'mainthread_io.log')
        setup.env['MOZ_MAIN_THREAD_IO_LOG'] = mainthread_io

    # Stylo is on by default
    setup.env['STYLO_FORCE_ENABLED'] = '1'

    # During the Stylo transition, measure different number of threads
    if browser_config.get('stylothreads', 0) > 0:
        setup.env['STYLO_THREADS'] = str(browser_config['stylothreads'])

    # set url if there is one (i.e. receiving a test page, not a
    # manifest/pageloader test)
    if test_config.get('url', None) is not None:
        test_config['url'] = utils.interpolate(
            test_config['url'],
            profile=setup.profile_dir,
            firefox=browser_config['browser_path'])
    else:
        setup.env['MOZ_USE_PAGELOADER'] = '1'

    # setup global (cross-cycle) responsiveness counters
    global_counters = {}
    if browser_config.get('xperf_path'):
        for c in test_config.get('xperf_counters', []):
            global_counters[c] = []

    if test_config.get('responsiveness') and \
            platform.system() != 'Darwin':
        # ignore osx for now as per bug 1245793
        setup.env['MOZ_INSTRUMENT_EVENT_LOOP'] = '1'
        setup.env['MOZ_INSTRUMENT_EVENT_LOOP_THRESHOLD'] = '20'
        setup.env['MOZ_INSTRUMENT_EVENT_LOOP_INTERVAL'] = '10'
        global_counters['responsiveness'] = []

    setup.env['JSGC_DISABLE_POISONING'] = '1'
    setup.env['MOZ_DISABLE_NONLOCAL_CONNECTIONS'] = '1'

    # instantiate an object to hold test results
    test_results = results.TestResults(test_config, global_counters,
                                       browser_config.get('framework'))

    for i in range(test_config['cycles']):
        time.sleep(0.25)
        LOG.info('Running cycle %d/%d for %s test...'
                 % (i + 1, test_config['cycles'], test_config['name']))

        # remove the browser error file
        mozfile.remove(browser_config['error_filename'])

        # individual tests can have different frameworks
        # TODO: ensure that we don't run >1 test with custom frameworks
        if test_config.get('perfherder_framework', None) is not None:
            test_results.framework = test_config['perfherder_framework']

        # reinstall any file whose stability we need to ensure across
        # the cycles
        if test_config.get('reinstall', ''):
            for keep in test_config['reinstall']:
                origin = os.path.join(test_config['profile_path'], keep)
                dest = os.path.join(setup.profile_dir, keep)
                LOG.debug('Reinstalling %s on top of %s' % (origin, dest))
                shutil.copy(origin, dest)

        # Run the test
        timeout = test_config.get('timeout', 7200)  # 2 hours default
        if setup.gecko_profile:
            # When profiling, give the browser some extra time
            # to dump the profile.
            timeout += 5 * 60
            # store profiling info for pageloader; too late to add it as
            # browser pref
            setup.env["TPPROFILINGINFO"] = json.dumps(
                setup.gecko_profile.profiling_info)

        command_args = utils.GenerateBrowserCommandLine(
            browser_config['browser_path'],
            browser_config['extra_args'],
            setup.profile_dir,
            test_config['url'],
            profiling_info=(setup.gecko_profile.profiling_info
                            if setup.gecko_profile else None))

        mainthread_error_count = 0
        if test_config['setup']:
            # Generate bcontroller.json for xperf
            talosconfig.generateTalosConfig(command_args, browser_config,
                                            test_config)
            subprocess.call(['python'] + test_config['setup'].split())

        counter_management = None
        if counters:
            counter_management = CounterManagement(
                browser_config['process'], counters, resolution)

        try:
            pcontext = run_browser(
                command_args,
                minidump_dir,
                timeout=timeout,
                env=setup.env,
                # start collecting counters as soon as possible
                on_started=(counter_management.start
                            if counter_management else None),
                debug=browser_config['debug'],
                debugger=browser_config['debugger'],
                debugger_args=browser_config['debugger_args'])
        except Exception:
            self.check_for_crashes(browser_config, minidump_dir,
                                   test_config['name'])
            raise
        finally:
            if counter_management:
                counter_management.stop()

        if test_config['mainthread']:
            rawlog = os.path.join(here, 'mainthread_io.log')
            if os.path.exists(rawlog):
                processedlog = os.path.join(here, 'mainthread_io.json')
                xre_path = os.path.dirname(browser_config['browser_path'])
                mtio_py = os.path.join(here, 'mainthreadio.py')
                command = [
                    'python', mtio_py, rawlog, processedlog, xre_path
                ]
                mtio = subprocess.Popen(command,
                                        env=os.environ.copy(),
                                        stdout=subprocess.PIPE)
                output, stderr = mtio.communicate()
                for line in output.split('\n'):
                    if line.strip() == '':
                        continue
                    print(line)
                    mainthread_error_count += 1
                mozfile.remove(rawlog)

        if test_config['cleanup']:
            # HACK: add the pid to support xperf where we require
            # the pid in post processing
            talosconfig.generateTalosConfig(command_args, browser_config,
                                            test_config, pid=pcontext.pid)
            subprocess.call([sys.executable] + test_config['cleanup'].split())

        # For startup tests, we launch the browser multiple times
        # with the same profile
        for fname in ('sessionstore.js', '.parentlock', 'sessionstore.bak'):
            mozfile.remove(os.path.join(setup.profile_dir, fname))

        # check for xperf errors
        if os.path.exists(browser_config['error_filename']) or \
                mainthread_error_count > 0:
            raise TalosRegression(
                'Talos has found a regression, if you have questions'
                ' ask for help in irc on #perf')

        # add the results from the browser output
        if not run_in_debug_mode(browser_config):
            test_results.add(
                '\n'.join(pcontext.output),
                counter_results=(counter_management.results()
                                 if counter_management else None))

        if setup.gecko_profile:
            setup.gecko_profile.symbolicate(i)

        self.check_for_crashes(browser_config, minidump_dir,
                               test_config['name'])

    # include global (cross-cycle) counters
    test_results.all_counter_results.extend(
        [{key: value} for key, value in global_counters.items()])
    for c in test_results.all_counter_results:
        for key, value in c.items():
            LOG.debug('COUNTER %r: %s' % (key, value))

    # if running against a code-coverage instrumented build, move the
    # produced gcda files to a folder where they will be collected later
    if browser_config.get('code_coverage', False):
        setup.collect_or_clean_ccov()

    # return results
    return test_results
def run_tests(config, browser_config):
    """Runs the talos tests on the given configuration and generates a report."""
    # get the test data
    tests = config["tests"]
    tests = useBaseTestDefaults(config.get("basetest", {}), tests)
    paths = ["profile_path", "tpmanifest", "extensions", "setup", "cleanup"]
    for test in tests:
        # Check for profile_path, tpmanifest and interpolate based on Talos
        # root https://bugzilla.mozilla.org/show_bug.cgi?id=727711
        # Build command line from config
        for path in paths:
            if test.get(path):
                if path == "extensions":
                    for _index, _ext in enumerate(test["extensions"]):
                        test["extensions"][_index] = utils.interpolate(_ext)
                else:
                    test[path] = utils.interpolate(test[path])
        if test.get("tpmanifest"):
            test["tpmanifest"] = os.path.normpath(
                "file:/%s" % (six.moves.urllib.parse.quote(
                    test["tpmanifest"], "/\\t:\\")))
            test["preferences"]["talos.tpmanifest"] = test["tpmanifest"]

        # if using firstNonBlankPaint, set test preference for it
        # so that the browser pref will be turned on (in ffsetup)
        if test.get("fnbpaint", False):
            LOG.info(
                "Test is using firstNonBlankPaint, browser pref will be turned on"
            )
            test["preferences"][
                "dom.performance.time_to_non_blank_paint.enabled"] = True

        test["setup"] = utils.interpolate(test["setup"])
        test["cleanup"] = utils.interpolate(test["cleanup"])

        if not test.get("profile", False):
            test["profile"] = config.get("profile")

    if mozinfo.os == "win":
        browser_config["extra_args"] = ["-wait-for-browser", "-no-deelevate"]
    else:
        browser_config["extra_args"] = []

    # pass --no-remote to firefox launch, if --develop is specified
    # we do that to allow locally the user to have another running firefox
    # instance
    if browser_config["develop"]:
        browser_config["extra_args"].append("--no-remote")

    # Pass subtests filter argument via a preference
    if browser_config["subtests"]:
        browser_config["preferences"]["talos.subtests"] = \
            browser_config["subtests"]

    if browser_config.get("enable_fission", False):
        browser_config["preferences"]["fission.autostart"] = True
        browser_config["preferences"][
            "dom.serviceWorkers.parent_intercept"] = True
        browser_config["preferences"]["network.proxy.type"] = 2
        browser_config["preferences"]["network.proxy.autoconfig_url"] = (
            """data:text/plain,
function FindProxyForURL(url, host) {
  if (url.startsWith('http')) {
    return 'PROXY %s';
  }
  return 'DIRECT';
}""" % browser_config["webserver"])

    # If --code-coverage files are expected, set flag in browser config so
    # ffsetup knows that it needs to delete any ccov files resulting from
    # browser initialization
    # NOTE: This is only supported in production; local setup of ccov folders
    # and data collection not supported yet, so if attempting to run with
    # --code-coverage flag locally, that is not supported yet
    if config.get("code_coverage", False):
        if browser_config["develop"]:
            raise TalosError("Aborting: talos --code-coverage flag is only "
                             "supported in production")
        else:
            browser_config["code_coverage"] = True

    # set defaults
    testdate = config.get("testdate", "")

    # get the process name from the path to the browser
    if not browser_config["process"]:
        browser_config["process"] = os.path.basename(
            browser_config["browser_path"])

    # fix paths to substitute
    # `os.path.dirname(os.path.abspath(__file__))` for ${talos}
    # https://bugzilla.mozilla.org/show_bug.cgi?id=705809
    browser_config["extensions"] = [
        utils.interpolate(i) for i in browser_config["extensions"]
    ]
    browser_config["bcontroller_config"] = utils.interpolate(
        browser_config["bcontroller_config"])

    # normalize browser path to work across platforms
    browser_config["browser_path"] = os.path.normpath(
        browser_config["browser_path"])

    binary = browser_config["browser_path"]
    version_info = mozversion.get_version(binary=binary)
    browser_config["browser_name"] = version_info["application_name"]
    browser_config["browser_version"] = version_info["application_version"]
    browser_config["buildid"] = version_info["application_buildid"]
    try:
        browser_config["repository"] = version_info["application_repository"]
        browser_config["sourcestamp"] = version_info["application_changeset"]
    except KeyError:
        if not browser_config["develop"]:
            print("Abort: unable to find changeset or repository: %s" %
                  version_info)
            sys.exit(1)
        else:
            browser_config["repository"] = "develop"
            browser_config["sourcestamp"] = "develop"

    # get test date in seconds since epoch
    if testdate:
        date = int(
            time.mktime(time.strptime(testdate, "%a, %d %b %Y %H:%M:%S GMT")))
    else:
        date = int(time.time())
    LOG.debug("using testdate: %d" % date)
    LOG.debug("actual date: %d" % int(time.time()))

    # results container
    talos_results = TalosResults()

    # results links
    if not browser_config["develop"] and not config["gecko_profile"]:
        results_urls = dict(
            # another hack; datazilla stands for Perfherder
            # and do not require url, but a non empty dict is required...
            output_urls=["local.json"],
        )
    else:
        # local mode, output to files
        results_urls = dict(output_urls=[os.path.abspath("local.json")])

    httpd = setup_webserver(browser_config["webserver"])
    httpd.start()

    # legacy still required for perfherder data
    talos_results.add_extra_option("e10s")
    talos_results.add_extra_option("stylo")

    # measuring the difference of a certain thread level
    if config.get("stylothreads", 0) > 0:
        talos_results.add_extra_option("%s_thread" % config["stylothreads"])

    if config["gecko_profile"]:
        talos_results.add_extra_option("gecko-profile")

    # differentiate fission vs non-fission results in perfherder
    if browser_config.get("enable_fission", False):
        talos_results.add_extra_option("fission")

    # differentiate webrender from non-webrender results
    if browser_config["preferences"].get("gfx.webrender.software", False):
        talos_results.add_extra_option("webrender-sw")
    elif browser_config.get("enable_webrender", False):
        talos_results.add_extra_option("webrender")

    # differentiate webgl from webgl-ipc results
    if browser_config["preferences"].get("webgl.out-of-process", False):
        talos_results.add_extra_option("webgl-ipc")

    testname = None

    # run the tests
    timer = utils.Timer()
    LOG.suite_start(tests=[test["name"] for test in tests])
    try:
        for test in tests:
            testname = test["name"]
            LOG.test_start(testname)

            if not test.get("url"):
                # set browser prefs for pageloader test settings
                # (doesn't use cmd line args / url)
                test["url"] = None
                set_tp_preferences(test, browser_config)

            mytest = TTest()

            # some tests like ts_paint return multiple results in a single
            # iteration
            if test.get("firstpaint", False) or test.get("userready", None):
                # we need a 'testeventmap' to tell us which tests each event
                # should map to
                multi_value_result = None
                separate_results_list = []

                test_event_map = test.get("testeventmap", None)
                if test_event_map is None:
                    raise TalosError("Need 'testeventmap' in test.py for %s"
                                     % test.get("name"))

                # run the test
                multi_value_result = mytest.runTest(browser_config, test)
                if multi_value_result is None:
                    raise TalosError("Abort: no results returned for %s"
                                     % test.get("name"))

                # parse out the multi-value results, and 'fake it' to appear
                # like separate tests
                separate_results_list = convert_to_separate_test_results(
                    multi_value_result, test_event_map)

                # now we have three separate test results, store them
                for test_result in separate_results_list:
                    talos_results.add(test_result)

            # some tests like bloom_basic run two separate tests and then
            # compare those values; we want the results in perfherder to only
            # be the actual difference between those, and store the base and
            # reference test replicates in results.json for upload
            elif test.get("base_vs_ref", False):
                # run the test, results will be reported for each page like
                # two tests in the suite
                base_and_reference_results = mytest.runTest(
                    browser_config, test)
                # now compare each test, and create a new test object for the
                # comparison
                talos_results.add(
                    make_comparison_result(base_and_reference_results))
            else:
                # just expecting regular test - one result value per iteration
                talos_results.add(mytest.runTest(browser_config, test))
            LOG.test_end(testname, status="OK")
    except TalosRegression as exc:
        LOG.error("Detected a regression for %s" % testname)
        # by returning 1, we report an orange to buildbot
        # http://docs.buildbot.net/latest/developer/results.html
        LOG.test_end(testname, status="FAIL", message=str(exc),
                     stack=traceback.format_exc())
        return 1
    except Exception as exc:
        # NOTE: if we get into this condition, talos has an internal
        # problem and cannot continue
        # this will prevent future tests from running
        LOG.test_end(testname, status="ERROR", message=str(exc),
                     stack=traceback.format_exc())
        # indicate a failure to buildbot, turn the job red
        return 2
    finally:
        LOG.suite_end()
        httpd.stop()

    LOG.info("Completed test suite (%s)" % timer.elapsed())

    # output results
    if results_urls and not browser_config["no_upload_results"]:
        talos_results.output(results_urls)
        if browser_config["develop"] or config["gecko_profile"]:
            print("Thanks for running Talos locally. Results are in %s"
                  % (results_urls["output_urls"]))

    # when running talos locally with gecko profiling on, use the
    # view-gecko-profile tool to automatically load the latest gecko profile
    # in profiler.firefox.com
    if config["gecko_profile"] and browser_config["develop"]:
        if os.environ.get("DISABLE_PROFILE_LAUNCH", "0") == "1":
            LOG.info(
                "Not launching profiler.firefox.com because DISABLE_PROFILE_LAUNCH=1"
            )
        else:
            view_gecko_profile_from_talos()

    # we will stop running tests on a failed test, or we will return 0 for
    # green
    return 0
def _init_profile(self):
    extensions = self.browser_config["extensions"][:]
    if self.test_config.get("extensions"):
        extensions.extend(self.test_config["extensions"])

    # downloading a profile instead of using the empty one
    if self.test_config["profile"] is not None:
        path = heavy.download_profile(self.test_config["profile"])
        self.test_config["profile_path"] = path

    profile_path = os.path.normpath(self.test_config["profile_path"])
    LOG.info("Cloning profile located at %s" % profile_path)

    def _feedback(directory, content):
        # Called by shutil.copytree on each visited directory.
        # Used here to display info.
        #
        # Returns the items that should be ignored by
        # shutil.copytree when copying the tree, so always returns
        # an empty list.
        sub = directory.split(profile_path)[-1].lstrip("/")
        if sub:
            LOG.info("=> %s" % sub)
        return []

    profile = Profile.clone(
        profile_path, self.profile_dir, ignore=_feedback, restore=False
    )

    # build pref interpolation context
    webserver = self.browser_config["webserver"]
    if "://" not in webserver:
        webserver = "http://" + webserver
    interpolation = {
        "webserver": webserver,
    }

    # merge base profiles
    with open(os.path.join(self.profile_data_dir, "profiles.json"), "r") as fh:
        base_profiles = json.load(fh)["talos"]

    for name in base_profiles:
        path = os.path.join(self.profile_data_dir, name)
        LOG.info("Merging profile: {}".format(path))
        profile.merge(path, interpolation=interpolation)

    # set test preferences
    preferences = self.browser_config.get("preferences", {}).copy()
    if self.test_config.get("preferences"):
        test_prefs = dict(
            [
                (i, utils.parse_pref(j))
                for i, j in self.test_config["preferences"].items()
            ]
        )
        preferences.update(test_prefs)

    for name, value in preferences.items():
        if type(value) is str:
            value = utils.interpolate(value, **interpolation)
            preferences[name] = value
    profile.set_preferences(preferences)

    # installing addons
    LOG.info("Installing Add-ons:")
    LOG.info(extensions)
    profile.addons.install(extensions)

    # installing webextensions
    webextensions_to_install = []
    webextensions_folder = self.test_config.get("webextensions_folder", None)
    if isinstance(webextensions_folder, six.string_types):
        folder = utils.interpolate(webextensions_folder)
        for file in os.listdir(folder):
            if file.endswith(".xpi"):
                webextensions_to_install.append(os.path.join(folder, file))

    webextensions = self.test_config.get("webextensions", None)
    if isinstance(webextensions, six.string_types):
        webextensions_to_install.append(webextensions)

    if webextensions_to_install is not None:
        LOG.info("Installing Webextensions:")
        for webext in webextensions_to_install:
            filename = utils.interpolate(webext)
            if mozinfo.os == "win":
                filename = filename.replace("/", "\\")
            if not filename.endswith(".xpi"):
                continue
            if not os.path.exists(filename):
                continue
            LOG.info(filename)
            profile.addons.install(filename)
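# --- Illustrative sketch, not part of the original talos sources ---
# The _init_profile() variant above reads a profiles.json file from
# profile_data_dir; its "talos" key maps to a list of base-profile directory
# names that are merged into the cloned profile in order. The names below
# are hypothetical placeholders; the real file ships with the harness.
EXAMPLE_PROFILES_JSON = """
{
    "talos": ["common", "perf"]
}
"""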
def _runTest(self, browser_config, test_config, setup):
    minidump_dir = os.path.join(setup.profile_dir, "minidumps")
    counters = test_config.get("%s_counters" % self._get_counter_prefix(), [])
    resolution = test_config["resolution"]

    # add the mainthread_io to the environment variable, as defined
    # in test.py configs
    here = os.path.dirname(os.path.realpath(__file__))
    if test_config["mainthread"]:
        mainthread_io = os.path.join(here, "mainthread_io.log")
        setup.env["MOZ_MAIN_THREAD_IO_LOG"] = mainthread_io

    # Stylo is on by default
    setup.env["STYLO_FORCE_ENABLED"] = "1"

    # During the Stylo transition, measure different number of threads
    if browser_config.get("stylothreads", 0) > 0:
        setup.env["STYLO_THREADS"] = str(browser_config["stylothreads"])

    if browser_config["enable_webrender"]:
        setup.env["MOZ_WEBRENDER"] = "1"
        setup.env["MOZ_ACCELERATED"] = "1"
    else:
        setup.env["MOZ_WEBRENDER"] = "0"

    # set url if there is one (i.e. receiving a test page, not a
    # manifest/pageloader test)
    if test_config.get("url", None) is not None:
        test_config["url"] = utils.interpolate(
            test_config["url"],
            profile=setup.profile_dir,
            firefox=browser_config["browser_path"],
        )
    else:
        setup.env["MOZ_USE_PAGELOADER"] = "1"

    # setup global (cross-cycle) responsiveness counters
    global_counters = {}
    if browser_config.get("xperf_path"):
        for c in test_config.get("xperf_counters", []):
            global_counters[c] = []

    if test_config.get("responsiveness") and platform.system() != "Darwin":
        # ignore osx for now as per bug 1245793
        setup.env["MOZ_INSTRUMENT_EVENT_LOOP"] = "1"
        setup.env["MOZ_INSTRUMENT_EVENT_LOOP_THRESHOLD"] = "20"
        setup.env["MOZ_INSTRUMENT_EVENT_LOOP_INTERVAL"] = "10"
        global_counters["responsiveness"] = []

    setup.env["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1"

    # instantiate an object to hold test results
    test_results = results.TestResults(test_config, global_counters,
                                       browser_config.get("framework"))

    for i in six.moves.range(test_config["cycles"]):
        time.sleep(0.25)
        LOG.info("Running cycle %d/%d for %s test..."
                 % (i + 1, test_config["cycles"], test_config["name"]))

        # remove the browser error file
        mozfile.remove(browser_config["error_filename"])

        # individual tests can have different frameworks
        # TODO: ensure that we don't run >1 test with custom frameworks
        if test_config.get("perfherder_framework", None) is not None:
            test_results.framework = test_config["perfherder_framework"]

        # reinstall any file whose stability we need to ensure across
        # the cycles
        if test_config.get("reinstall", ""):
            for keep in test_config["reinstall"]:
                origin = os.path.join(test_config["profile_path"], keep)
                dest = os.path.join(setup.profile_dir, keep)
                LOG.debug("Reinstalling %s on top of %s" % (origin, dest))
                shutil.copy(origin, dest)

        # Run the test
        timeout = test_config.get("timeout", 600)  # 10 minutes default
        if setup.gecko_profile:
            # When profiling, give the browser some extra time
            # to dump the profile.
            timeout += 5 * 60
            # store profiling info for pageloader; too late to add it as
            # browser pref
            setup.env["TPPROFILINGINFO"] = json.dumps(
                setup.gecko_profile.profiling_info)

        command_args = utils.GenerateBrowserCommandLine(
            browser_config["browser_path"],
            browser_config["extra_args"],
            setup.profile_dir,
            test_config["url"],
            profiling_info=(setup.gecko_profile.profiling_info
                            if setup.gecko_profile else None),
        )

        mainthread_error_count = 0
        if test_config["setup"]:
            # Generate bcontroller.json for xperf
            talosconfig.generateTalosConfig(command_args, browser_config,
                                            test_config)
            subprocess.call(["python"] + test_config["setup"].split())

        counter_management = None
        if counters:
            counter_management = CounterManagement(
                browser_config["process"], counters, resolution)

        try:
            pcontext = run_browser(
                command_args,
                minidump_dir,
                timeout=timeout,
                env=setup.env,
                # start collecting counters as soon as possible
                on_started=(counter_management.start
                            if counter_management else None),
                debug=browser_config["debug"],
                debugger=browser_config["debugger"],
                debugger_args=browser_config["debugger_args"],
            )
        except Exception:
            self.check_for_crashes(browser_config, minidump_dir,
                                   test_config["name"])
            raise
        finally:
            if counter_management:
                counter_management.stop()

        try:
            if test_config["mainthread"]:
                rawlog = os.path.join(here, "mainthread_io.log")
                if os.path.exists(rawlog):
                    processedlog = os.path.join(here, "mainthread_io.json")
                    xre_path = os.path.dirname(
                        browser_config["browser_path"])
                    mtio_py = os.path.join(here, "mainthreadio.py")
                    command = [
                        "python", mtio_py, rawlog, processedlog, xre_path
                    ]
                    mtio = subprocess.Popen(command,
                                            env=os.environ.copy(),
                                            stdout=subprocess.PIPE)
                    output, stderr = mtio.communicate()
                    for line in output.split(b"\n"):
                        if line.strip() == b"":
                            continue
                        print(line)
                        mainthread_error_count += 1
                    mozfile.remove(rawlog)

            if test_config["cleanup"]:
                # HACK: add the pid to support xperf where we require
                # the pid in post processing
                talosconfig.generateTalosConfig(command_args, browser_config,
                                                test_config,
                                                pid=pcontext.pid)
                subprocess.call([sys.executable] +
                                test_config["cleanup"].split())

            # For startup tests, we launch the browser multiple times
            # with the same profile
            for fname in ("sessionstore.js", ".parentlock",
                          "sessionstore.bak"):
                mozfile.remove(os.path.join(setup.profile_dir, fname))

            # check for xperf errors
            if (os.path.exists(browser_config["error_filename"])
                    or mainthread_error_count > 0):
                raise TalosRegression(
                    "Talos has found a regression, if you have questions"
                    " ask for help in irc on #perf")

            # add the results from the browser output
            if not run_in_debug_mode(browser_config):
                test_results.add(
                    "\n".join(pcontext.output),
                    counter_results=(counter_management.results()
                                     if counter_management else None),
                )

            if setup.gecko_profile:
                setup.gecko_profile.symbolicate(i)
        finally:
            self.check_for_crashes(browser_config, minidump_dir,
                                   test_config["name"])

    # include global (cross-cycle) counters
    test_results.all_counter_results.extend(
        [{key: value} for key, value in global_counters.items()])
    for c in test_results.all_counter_results:
        for key, value in c.items():
            LOG.debug("COUNTER %r: %s" % (key, value))

    # if running against a code-coverage instrumented build, move the
    # produced gcda files to a folder where they will be collected later
    if browser_config.get("code_coverage", False):
        setup.collect_or_clean_ccov()

    # return results
    return test_results
def _init_profile(self):
    extensions = self.browser_config['extensions'][:]
    if self.test_config.get('extensions'):
        extensions.extend(self.test_config['extensions'])

    # downloading a profile instead of using the empty one
    if self.test_config['profile'] is not None:
        path = heavy.download_profile(self.test_config['profile'])
        self.test_config['profile_path'] = path

    profile_path = os.path.normpath(self.test_config['profile_path'])
    LOG.info("Cloning profile located at %s" % profile_path)

    def _feedback(directory, content):
        # Called by shutil.copytree on each visited directory.
        # Used here to display info.
        #
        # Returns the items that should be ignored by
        # shutil.copytree when copying the tree, so always returns
        # an empty list.
        sub = directory.split(profile_path)[-1].lstrip("/")
        if sub:
            LOG.info("=> %s" % sub)
        return []

    profile = Profile.clone(profile_path,
                            self.profile_dir,
                            ignore=_feedback,
                            restore=False)

    # build pref interpolation context
    webserver = self.browser_config['webserver']
    if '://' not in webserver:
        webserver = 'http://' + webserver
    interpolation = {
        'webserver': webserver,
    }

    # merge base profiles
    with open(os.path.join(self.profile_data_dir, 'profiles.json'), 'r') as fh:
        base_profiles = json.load(fh)['talos']

    for name in base_profiles:
        path = os.path.join(self.profile_data_dir, name)
        LOG.info("Merging profile: {}".format(path))
        profile.merge(path, interpolation=interpolation)

    # set test preferences
    preferences = self.browser_config.get('preferences', {}).copy()
    if self.test_config.get('preferences'):
        test_prefs = dict([
            (i, utils.parse_pref(j))
            for i, j in self.test_config['preferences'].items()
        ])
        preferences.update(test_prefs)

    for name, value in preferences.items():
        if type(value) is str:
            value = utils.interpolate(value, **interpolation)
            preferences[name] = value
    profile.set_preferences(preferences)

    # installing addons
    LOG.info("Installing Add-ons:")
    LOG.info(extensions)
    profile.addons.install(extensions)

    # installing webextensions
    webextensions = self.test_config.get('webextensions', None)
    if isinstance(webextensions, basestring):
        webextensions = [webextensions]
    if webextensions is not None:
        LOG.info("Installing Webextensions:")
        for webext in webextensions:
            filename = utils.interpolate(webext)
            if mozinfo.os == 'win':
                filename = filename.replace('/', '\\')
            if not filename.endswith('.xpi'):
                continue
            if not os.path.exists(filename):
                continue
            LOG.info(filename)
            profile.addons.install(filename)