def test_with_ini_files_on_osx(self):
    """Version detection on OS X: ini files next to the binary, and
    also inside a Contents/Resources bundle layout."""
    self._write_ini_files()

    saved_platform = sys.platform
    sys.platform = 'darwin'
    try:
        # get_version works with ini files next to the binary
        self._check_version(get_version(binary=self.binary))

        # They may also live in a Resources dir; in that case the
        # binary must sit in a Contents dir next to the Resources dir.
        contents_dir = os.path.join(self.tempdir, 'Contents')
        os.mkdir(contents_dir)
        moved_binary = os.path.join(contents_dir,
                                    os.path.basename(self.binary))
        shutil.move(self.binary, moved_binary)

        resources_dir = os.path.join(self.tempdir, 'Resources')
        os.mkdir(resources_dir)
        for name in ('application.ini', 'platform.ini'):
            shutil.move(os.path.join(self.tempdir, name), resources_dir)

        self._check_version(get_version(binary=moved_binary))
    finally:
        sys.platform = saved_platform
def test_without_platform_file(self):
    """With a missing platform file no exception should be thrown"""
    ini_path = os.path.join(self.tempdir, 'application.ini')
    with open(ini_path, 'w') as f:
        f.writelines(self.application_ini)
    result = get_version(self.binary)
    self.assertTrue(isinstance(result, dict))
def __init__(self, device_serial=None):
    """Connect to a Firefox OS device via adb and collect its build
    metadata.

    :param device_serial: optional adb serial to pick a specific
        device when several are attached.
    :raises B2GMonkeyError: if no device is detected, or its
        device_id has no entry in DEVICE_PROPERTIES.
    """
    self.device_serial = device_serial
    self._logger = structured.get_default_logger(component='b2gmonkey')
    if not self._logger:
        # no structured default logger configured; fall back to mozlog
        self._logger = mozlog.getLogger('b2gmonkey')

    self.version = mozversion.get_version(
        dm_type='adb', device_serial=device_serial)

    device_id = self.version.get('device_id')
    if not device_id:
        raise B2GMonkeyError('Firefox OS device not found.')

    self.device_properties = DEVICE_PROPERTIES.get(device_id)
    if not self.device_properties:
        raise B2GMonkeyError('Unsupported device: \'%s\'' % device_id)

    android_version = self.version.get('device_firmware_version_release')
    if device_id == 'flame' and android_version == '4.4.2':
        # flame devices on Android KitKat use dedicated properties
        self.device_properties.update(DEVICE_PROPERTIES.get('flame-kk'))

    self.temp_dir = tempfile.mkdtemp()
    if 'MINIDUMP_SAVE_PATH' not in os.environ:
        # route crash minidumps into our temp dir unless the
        # environment already designates a location
        self.crash_dumps_path = os.path.join(self.temp_dir, 'crashes')
        os.environ['MINIDUMP_SAVE_PATH'] = self.crash_dumps_path
    else:
        self.crash_dumps_path = os.environ['MINIDUMP_SAVE_PATH']
def test_basic(self):
    """Changesets from the apk's ini files are exposed by
    get_version."""
    with mozfile.NamedTemporaryFile() as f:
        with zipfile.ZipFile(f.name, 'w') as z:
            self.create_apk_zipfiles(z)
        version = get_version(f.name)
        self.assertEqual(version.get('application_changeset'),
                         self.application_changeset)
        self.assertEqual(version.get('platform_changeset'),
                         self.platform_changeset)
def test_with_package_name(self):
    """A package-name.txt inside the apk surfaces as 'package_name'."""
    with mozfile.NamedTemporaryFile() as f:
        with zipfile.ZipFile(f.name, 'w') as z:
            self.create_apk_zipfiles(z)
            z.writestr('package-name.txt', "org.mozilla.fennec")
        version = get_version(f.name)
        self.assertEqual(version.get('package_name'),
                         "org.mozilla.fennec")
def __init__(self, marionette, datazilla_config=None, sources=None,
             log_level='INFO'):
    """Collect device/build metadata over Marionette and validate the
    DataZilla configuration required for report submission.

    Sets self.submit_report to False (and logs a warning) when any
    required field is missing.
    """
    # Set up logging
    handler = mozlog.StreamHandler()
    handler.setFormatter(mozlog.MozFormatter(include_timestamp=True))
    self.logger = mozlog.getLogger(self.__class__.__name__, handler)
    self.logger.setLevel(getattr(mozlog, log_level.upper()))

    self.marionette = marionette
    settings = gaiatest.GaiaData(self.marionette).all_settings
    # MAC address is the fallback machine name (see 'machine name' below)
    mac_address = self.marionette.execute_script(
        'return navigator.mozWifiManager && '
        'navigator.mozWifiManager.macAddress;')

    self.submit_report = True
    self.ancillary_data = {}
    self.ancillary_data['generated_by'] = 'b2gperf %s' % __version__

    self.device = gaiatest.GaiaDevice(self.marionette)
    dm = mozdevice.DeviceManagerADB()
    self.device.add_device_manager(dm)

    # Build/device revisions come from mozversion over adb
    version = mozversion.get_version(sources=sources, dm_type='adb')
    self.ancillary_data['build_revision'] = version.get('build_changeset')
    self.ancillary_data['gaia_revision'] = version.get('gaia_changeset')
    self.ancillary_data['gecko_revision'] = version.get('gecko_changeset')
    self.ancillary_data['ro.build.version.incremental'] = version.get(
        'device_firmware_version_incremental')
    self.ancillary_data['ro.build.version.release'] = version.get(
        'device_firmware_version_release')
    self.ancillary_data['ro.build.date.utc'] = version.get(
        'device_firmware_date')

    # Every entry below must be truthy, otherwise submission is disabled
    # by the loop that follows.
    self.required = {
        'generated by': self.ancillary_data.get('generated_by'),
        'gaia revision': self.ancillary_data.get('gaia_revision'),
        'gecko revision': self.ancillary_data.get('gecko_revision'),
        'build revision': self.ancillary_data.get('build_revision'),
        'protocol': datazilla_config['protocol'],
        'host': datazilla_config['host'],
        'project': datazilla_config['project'],
        'branch': datazilla_config['branch'],
        'oauth key': datazilla_config['oauth_key'],
        'oauth secret': datazilla_config['oauth_secret'],
        'machine name': datazilla_config['machine_name'] or mac_address,
        'device name': datazilla_config['device_name'],
        'os version': settings.get('deviceinfo.os'),
        'id': settings.get('deviceinfo.platform_build_id')}

    for key, value in self.required.items():
        if value:
            self.logger.debug('DataZilla field: %s (%s)' % (key, value))
        if not value:
            self.submit_report = False
            self.logger.warn('Missing required DataZilla field: %s' % key)

    if not self.submit_report:
        self.logger.info('Reports will not be submitted to DataZilla')
def test_binary(self):
    """get_version resolves ini files located next to the binary."""
    for name, content in (('application.ini', self.application_ini),
                          ('platform.ini', self.platform_ini)):
        with open(os.path.join(self.tempdir, name), 'w') as f:
            f.writelines(content)
    self._check_version(get_version(self.binary))
def test_binary_in_current_path(self):
    """With no binary argument, get_version falls back to the current
    working directory."""
    for name, content in (('application.ini', self.application_ini),
                          ('platform.ini', self.platform_ini)):
        with open(os.path.join(self.tempdir, name), 'w') as f:
            f.writelines(content)
    os.chdir(self.tempdir)
    self._check_version(get_version())
def safe_get_version(**kwargs):
    """Return mozversion's version dict, or {} on any version error.

    Some really old firefox builds are not supported by mozversion,
    so be paranoid: handle (but report) any VersionError instead of
    aborting.
    """
    try:
        return mozversion.get_version(**kwargs)
    except mozversion.VersionError as exc:
        # 'except E as exc' replaces the legacy 'except E, exc' form:
        # it is valid from Python 2.6 and required on Python 3.
        LOG.warning("Unable to get app version: %s" % exc)
        return {}
def _install(self, dest):
    """Install the build at *dest* on the device, replacing any
    previously installed copy of the same package."""
    # query version info before installing, as dest may be removed
    self.app_info = mozversion.get_version(binary=dest)
    self.package_name = self.app_info.get("package_name",
                                          "org.mozilla.fennec")
    self.adb = ADBAndroid()
    self.adb.uninstall_app(self.package_name)
    self.adb.install_app(dest)
def version_info(self):
    """Lazily fetch and cache version metadata for self.bin."""
    if self._version_info:
        return self._version_info
    try:
        # TODO: Get version_info in Fennec case
        self._version_info = mozversion.get_version(binary=self.bin)
    except Exception:
        self.logger.warning(
            "Failed to retrieve version information for {}".format(
                self.bin))
    return self._version_info
def run_info_browser_version(binary):
    """Return build id/changeset info for *binary*, or {} when
    mozversion cannot determine a version."""
    try:
        info = mozversion.get_version(binary)
    except mozversion.errors.VersionError:
        return {}
    if not info:
        return {}
    return {"browser_build_id": info.get("application_buildid", None),
            "browser_changeset": info.get("application_changeset", None)}
def test_with_exe(self):
    """Test that we can resolve .exe files"""
    self._write_ini_files()

    unsuffixed = self.binary + '1'
    with open(unsuffixed + '.exe', 'w') as f:
        f.write('foobar')

    self._check_version(get_version(unsuffixed))
def test_sources_in_current_directory(self):
    """A sources.xml in the cwd is picked up automatically."""
    for name, content in (('application.ini', self.application_ini),
                          ('sources.xml', self.sources_xml)):
        with open(os.path.join(self.tempdir, name), 'w') as f:
            f.writelines(content)
    os.chdir(self.tempdir)
    self._check_version(get_version())
def get_version_info(self, input_version_info=None):
    """Return version info for the current build, caching the result.

    When *input_version_info* is None the info is read from the
    device via mozversion, and mozversion.get_version is then
    replaced with self._new_get_version_info so later callers reuse
    the cached data instead of querying the device again.

    :param input_version_info: pre-computed version dict to store
        instead of querying the device.
    """
    if input_version_info is None:
        self.saved_version_info = mozversion.get_version(
            binary=self.bin,
            sources=self.sources,
            dm_type=os.environ.get('DM_TRANS', 'adb'),
            device_serial=self.device_serial)
        # NOTE(review): this monkeypatches mozversion.get_version for
        # the whole process — confirm that is intentional.
        mozversion.get_version = self._new_get_version_info
    else:
        self.saved_version_info = input_version_info
    return self.saved_version_info
def test_sources(self):
    """An explicitly passed sources.xml path is honoured."""
    app_ini = os.path.join(self.tempdir, 'application.ini')
    with open(app_ini, 'w') as f:
        f.writelines(self.application_ini)

    sources_path = os.path.join(self.tempdir, 'sources.xml')
    with open(sources_path, 'w') as f:
        f.writelines(self.sources_xml)

    os.chdir(self.tempdir)
    self._check_version(get_version(sources=sources_path))
def test_basic(self):
    """Changesets stored in the apk's ini files are reported."""
    app_ini = "[App]\nSourceStamp=%s\n" % self.application_changeset
    build_ini = "[Build]\nSourceStamp=%s\n" % self.platform_changeset
    with mozfile.NamedTemporaryFile() as f:
        with zipfile.ZipFile(f.name, 'w') as z:
            z.writestr('application.ini', app_ini)
            z.writestr('platform.ini', build_ini)
            z.writestr('AndroidManifest.xml', '')
        version = get_version(f.name)
        self.assertEqual(version.get('application_changeset'),
                         self.application_changeset)
        self.assertEqual(version.get('platform_changeset'),
                         self.platform_changeset)
def _install(self, dest):
    """Install the fennec build at *dest*, after warning the user
    that the existing nightly profile will be clobbered."""
    if self._get_device_status():
        self.adb = ADBAndroid()
        answer = raw_input("WARNING: bisecting nightly fennec builds will"
                           " clobber your existing nightly profile."
                           " Continue? (y or n)")
        if answer != "y":
            raise Exception("Aborting!")
        self.adb.uninstall_app("org.mozilla.fennec")
        self.adb.install_app(dest)
        # get info now, as dest may be removed
        self.app_info = mozversion.get_version(binary=dest)
def run_info_browser_version(binary):
    """Return build id/changeset for *binary*; {} when unavailable."""
    try:
        info = mozversion.get_version(binary)
    except mozversion.errors.VersionError:
        return {}
    if not info:
        return {}
    return {
        "browser_build_id": info.get("application_buildid", None),
        "browser_changeset": info.get("application_changeset", None)
    }
def create(cls, app=None, *args, **kwargs):
    """Instantiate the class registered for *app*, resolving the app
    from the binary's application id when not given."""
    try:
        if not app:
            version = mozversion.get_version(binary=kwargs.get('bin'))
            app = app_ids[version['application_id']]
        instance_class = apps[app]
    except KeyError:
        msg = 'Application "{0}" unknown (should be one of {1})'
        raise NotImplementedError(msg.format(app, apps.keys()))
    return instance_class(*args, **kwargs)
def run_info_browser_version(**kwargs):
    """Return browser build metadata derived from kwargs['binary'].

    'browser_version' is added only when the caller has not already
    supplied one in kwargs.
    """
    try:
        info = mozversion.get_version(kwargs["binary"])
    except mozversion.errors.VersionError:
        return {}
    if not info:
        return {}
    rv = {"browser_build_id": info.get("application_buildid", None),
          "browser_changeset": info.get("application_changeset", None)}
    if "browser_version" not in kwargs:
        rv["browser_version"] = info.get("application_version")
    return rv
def __init__(self, marionette, datazilla_config=None, sources=None,
             log_level="INFO"):
    """Collect device/build metadata over Marionette and validate the
    DataZilla configuration required for report submission.

    Sets self.submit_report to False (and logs a warning) when any
    required field is missing.
    """
    # Set up logging
    handler = mozlog.StreamHandler()
    handler.setFormatter(mozlog.MozFormatter(include_timestamp=True))
    self.logger = mozlog.getLogger(self.__class__.__name__, handler)
    self.logger.setLevel(getattr(mozlog, log_level.upper()))

    self.marionette = marionette
    settings = gaiatest.GaiaData(self.marionette).all_settings
    # MAC address is the fallback machine name (see 'machine name' below)
    mac_address = self.marionette.execute_script(
        "return navigator.mozWifiManager && "
        "navigator.mozWifiManager.macAddress;"
    )

    self.submit_report = True
    self.ancillary_data = {}

    self.device = gaiatest.GaiaDevice(self.marionette)
    dm = mozdevice.DeviceManagerADB()
    self.device.add_device_manager(dm)

    # Build/device revisions come from mozversion over adb
    version = mozversion.get_version(sources=sources, dm_type="adb")
    self.ancillary_data["build_revision"] = version.get("build_changeset")
    self.ancillary_data["gaia_revision"] = version.get("gaia_changeset")
    self.ancillary_data["gecko_revision"] = version.get("gecko_changeset")
    self.ancillary_data["ro.build.version.incremental"] = version.get(
        "device_firmware_version_incremental")
    self.ancillary_data["ro.build.version.release"] = version.get(
        "device_firmware_version_release")
    self.ancillary_data["ro.build.date.utc"] = version.get(
        "device_firmware_date")

    # Every entry below must be truthy, otherwise submission is
    # disabled by the loop that follows.
    self.required = {
        "gaia revision": self.ancillary_data.get("gaia_revision"),
        "gecko revision": self.ancillary_data.get("gecko_revision"),
        "build revision": self.ancillary_data.get("build_revision"),
        "protocol": datazilla_config["protocol"],
        "host": datazilla_config["host"],
        "project": datazilla_config["project"],
        "branch": datazilla_config["branch"],
        "oauth key": datazilla_config["oauth_key"],
        "oauth secret": datazilla_config["oauth_secret"],
        "machine name": datazilla_config["machine_name"] or mac_address,
        "device name": datazilla_config["device_name"],
        "os version": settings.get("deviceinfo.os"),
        "id": settings.get("deviceinfo.platform_build_id"),
    }

    for key, value in self.required.items():
        if value:
            self.logger.debug("DataZilla field: %s (%s)" % (key, value))
        if not value:
            self.submit_report = False
            self.logger.warn("Missing required DataZilla field: %s" % key)

    if not self.submit_report:
        self.logger.info("Reports will not be submitted to DataZilla")
def collect_job_info(job, binary='', installer=''):
    """ Set job attributes (build, machine, revision, etc.)
    formatted to match Treeherder UI expectations.
    Using mozinfo and mozversion
    ref: https://github.com/mozilla/treeherder/blob/master/ui/js/values.js
    job - TestJob
    binary - path to firefox-bin
    installer - installer filename
    """
    if not binary:
        raise ValueError('Missing argument: binary.')
    build = mozversion.get_version(binary=binary)
    machine = mozinfo.info
    # e.g. "linux 16.04 64"; build_string may be overridden below
    machine_string = build_string = ' '.join([machine['os'],
                                              machine['version'],
                                              str(machine['bits'])])

    # Narrow down build architecture; doesn't necessarily match platform
    if installer:
        job.build['package'] = installer
        if '64' in installer:
            build_string = ' '.join([machine['os'], machine['version'],
                                     '64'])
        if '32' in installer:
            build_string = ' '.join([machine['os'], machine['version'],
                                     '32'])

    # These don't match the expected Treeherder display; better than nothing.
    backup_attributes = {
        'platform': ' '.join([machine['os'].capitalize(),
                              machine['version'],
                              machine['processor']]),
        'os_name': machine['os'],
        'architecture': machine['processor']
    }
    job.build.update(backup_attributes)
    job.machine.update(backup_attributes)

    # Prefer proper Treeherder platform attributes when resolvable
    platform_attributes = get_platform_attributes(machine_string)
    if platform_attributes:
        job.machine.update(platform_attributes)
    platform_attributes = get_platform_attributes(build_string)
    if platform_attributes:
        job.build.update(platform_attributes)

    job.machine['host'] = node()
    job.build['product'] = build['application_name']

    # Derive the repo name from the application repository URL;
    # fall back to the last URL path segment for non-hg.mozilla.org repos
    repo_exp = re.compile(r'https://hg.mozilla.org/.*(mozilla-\w+)$')
    repo_match = repo_exp.match(build['application_repository'])
    if repo_match:
        job.build['repo'] = repo_match.group(1)
    else:
        repo_url = build['application_repository'].rsplit('/')
        job.build['repo'] = repo_url[-1]

    # NOTE(review): raises KeyError for repos absent from `releases` —
    # confirm that is the intended failure mode.
    job.build['release'] = releases[job.build['repo']]
    job.build['revision'] = build['application_changeset']
    job.build['build_id'] = build['application_buildid']
def _install(self, dest):
    """Prepare installation of the build at *dest*: record its
    version info and uninstall any previous copy of the package.

    An uninstall failure is logged but not fatal (it is expected the
    first time the application is installed).
    """
    # get info now, as dest may be removed
    self.app_info = mozversion.get_version(binary=dest)
    self.package_name = self.app_info.get("package_name",
                                          "org.mozilla.fennec")
    self.adb = ADBAndroid()
    try:
        self.adb.uninstall_app(self.package_name)
    except ADBError as msg:
        # 'except E as msg' replaces the legacy 'except E, msg' form:
        # valid from Python 2.6 and required on Python 3.
        LOG.warning(
            "Failed to uninstall %s (%s)\nThis is normal if it is the"
            " first time the application is installed."
            % (self.package_name, msg))
def test_with_exe(self):
    """Test that we can resolve .exe files"""
    for name, content in (('application.ini', self.application_ini),
                          ('platform.ini', self.platform_ini)):
        with open(os.path.join(self.tempdir, name), 'w') as f:
            f.writelines(content)

    unsuffixed = self.binary + '1'
    with open(unsuffixed + '.exe', 'w') as f:
        f.write('foobar')

    self._check_version(get_version(unsuffixed))
def test_valid_properties(self):
    """appinfo properties must agree with mozversion's view of the
    same binary."""
    info = mozversion.get_version(binary=self.marionette.bin)
    self.assertEqual(self.appinfo.ID, info['application_id'])
    self.assertEqual(self.appinfo.name, info['application_name'])
    self.assertEqual(self.appinfo.vendor, info['application_vendor'])
    self.assertEqual(self.appinfo.version, info['application_version'])
    self.assertEqual(self.appinfo.platformBuildID,
                     info['platform_buildid'])
    self.assertEqual(self.appinfo.platformVersion,
                     info['platform_version'])
    for attr in ('locale', 'user_agent', 'XPCOMABI'):
        self.assertIsNotNone(getattr(self.appinfo, attr))
def firefox(pytestconfig, tmpdir_factory):
    """Return a Firefox binary path, downloading and installing a
    daily build when none was supplied via --firefox."""
    binary = pytestconfig.getoption('firefox')
    if binary is None:
        cache_dir = str(pytestconfig.cache.makedir('firefox'))
        build_path = FactoryScraper(
            'daily', destination=cache_dir).download()
        target = str(tmpdir_factory.mktemp('firefox'))
        install_dir = mozinstall.install(src=build_path, dest=target)
        binary = mozinstall.get_binary(install_dir, 'firefox')
    version = mozversion.get_version(binary)
    if hasattr(pytestconfig, '_metadata'):
        # expose the build's version details in the pytest-html report
        pytestconfig._metadata.update(version)
    return binary
def create(cls, app=None, *args, **kwargs):
    """Instantiate the class registered for *app*; derive the app
    from the binary's application id when *app* is not given."""
    try:
        if not app and kwargs["bin"] is not None:
            version = mozversion.get_version(binary=kwargs["bin"])
            app = app_ids[version["application_id"]]
        instance_class = apps[app]
    except (IOError, KeyError):
        # re-raise as NotImplementedError, keeping the original traceback
        exc, val, tb = sys.exc_info()
        msg = 'Application "{0}" unknown (should be one of {1})'
        reraise(NotImplementedError, msg.format(app, apps.keys()), tb)
    return instance_class(*args, **kwargs)
def create(cls, app=None, *args, **kwargs):
    """Build the instance class matching *app* (or the binary's
    application id when *app* is not given)."""
    try:
        if not app:
            binary = kwargs.get('bin')
            version = mozversion.get_version(binary=binary)
            app = app_ids[version['application_id']]
        instance_class = apps[app]
    except KeyError:
        msg = 'Application "{0}" unknown (should be one of {1})'
        raise NotImplementedError(msg.format(app, apps.keys()))
    return instance_class(*args, **kwargs)
def test_symlinked_binary(self):
    """A symlink to the binary resolves to the same version info."""
    self._write_ini_files()

    link_dir = tempfile.mkdtemp()
    try:
        # symlink the binary into another directory and check the
        # version through that link
        link = os.path.join(link_dir, os.path.basename(self.binary))
        os.symlink(self.binary, link)
        self._check_version(get_version(link))
    finally:
        mozfile.remove(link_dir)
def test_valid_properties(self):
    """appinfo attributes must match mozversion output for the same
    binary."""
    info = mozversion.get_version(binary=self.marionette.bin)
    self.assertEqual(self.appinfo.ID, info["application_id"])
    self.assertEqual(self.appinfo.name, info["application_name"])
    self.assertEqual(self.appinfo.vendor, info["application_vendor"])
    self.assertEqual(self.appinfo.version, info["application_version"])
    # Bug 1298328 - Platform buildid mismatch due to incremental builds
    # self.assertEqual(self.appinfo.platformBuildID, info['platform_buildid'])
    self.assertEqual(self.appinfo.platformVersion,
                     info["platform_version"])
    for attr in ('locale', 'user_agent', 'XPCOMABI'):
        self.assertIsNotNone(getattr(self.appinfo, attr))
def firefox(pytestconfig, tmpdir_factory):
    """Return the Firefox binary path, preferring $MOZREGRESSION_BINARY,
    then --firefox, else a freshly downloaded daily build."""
    binary = os.getenv('MOZREGRESSION_BINARY',
                       pytestconfig.getoption('firefox'))
    if binary is None:
        cache_dir = str(pytestconfig.cache.makedir('firefox'))
        build_path = FactoryScraper(
            'daily', destination=cache_dir).download()
        target = str(tmpdir_factory.mktemp('firefox'))
        install_dir = mozinstall.install(src=build_path, dest=target)
        binary = mozinstall.get_binary(install_dir, 'firefox')
    version = mozversion.get_version(binary)
    if hasattr(pytestconfig, '_metadata'):
        # expose the build's version details in the pytest-html report
        pytestconfig._metadata.update(version)
    return binary
def setupMozinfo(args):
    """Build the mozinfo dict for a Thunderbird test run, including
    extension info and application version properties."""
    info = {
        "test_enabled": True,
        "crashreporter": True,
        "appname": "thunderbird"
    }
    for ext_arg, name in ((args.obm, "obm"),
                          (args.lightning, "lightning")):
        info.update(setupExtensionInfo(ext_arg, name))
    tb_version = mozversion.get_version(
        args.thunderbird)['application_version']
    info.update(createVersionProps(tb_version, "tb"))
    return info
def _install(self, dest):
    """Prepare installation of the build at *dest*: record its
    version info and uninstall any previous copy of the package.

    An uninstall failure is logged but not fatal (it is expected the
    first time the application is installed).
    """
    # get info now, as dest may be removed
    self.app_info = mozversion.get_version(binary=dest)
    self.package_name = self.app_info.get("package_name",
                                          "org.mozilla.fennec")
    self.adb = ADBAndroid()
    try:
        self.adb.uninstall_app(self.package_name)
    except ADBError as msg:
        # 'except E as msg' replaces the legacy 'except E, msg' form:
        # valid from Python 2.6 and required on Python 3.
        LOG.warning(
            "Failed to uninstall %s (%s)\nThis is normal if it is the"
            " first time the application is installed."
            % (self.package_name, msg)
        )
def test_basic(self):
    """Ini changesets inside an apk zip are reported by get_version."""
    with mozfile.NamedTemporaryFile() as f:
        with zipfile.ZipFile(f.name, 'w') as z:
            z.writestr('application.ini',
                       "[App]\nSourceStamp=%s\n"
                       % self.application_changeset)
            z.writestr('platform.ini',
                       "[Build]\nSourceStamp=%s\n"
                       % self.platform_changeset)
            z.writestr('AndroidManifest.xml', '')
        version = get_version(f.name)
        self.assertEqual(version.get('application_changeset'),
                         self.application_changeset)
        self.assertEqual(version.get('platform_changeset'),
                         self.platform_changeset)
def get_appinfo(self):
    """Collect application specific information."""
    details = {}
    try:
        mozmill = jsbridge.JSObject(self.bridge, js_module_mozmill)
        details = json.loads(mozmill.getApplicationDetails())
        details.update(mozversion.get_version(self.runner.binary))
    except jsbridge.ConnectionError:
        # start_runner() handles this exception, so no
        # report_disconnect call is needed here
        pass
    return details
def get_appinfo(self):
    """Collect application specific information."""
    app_info = {}
    try:
        bridge_obj = jsbridge.JSObject(self.bridge, js_module_mozmill)
        raw_details = bridge_obj.getApplicationDetails()
        app_info = json.loads(raw_details)
        version = mozversion.get_version(self.runner.binary)
        app_info.update(version)
    except jsbridge.ConnectionError:
        # We don't have to call report_disconnect here because
        # start_runner() will handle this exception
        pass
    return app_info
def test_valid_properties(self):
    """puppeteer.appinfo must agree with mozversion for the current
    binary."""
    info = mozversion.get_version(binary=self.marionette.bin)
    appinfo = self.puppeteer.appinfo
    self.assertEqual(appinfo.ID, info['application_id'])
    self.assertEqual(appinfo.name, info['application_name'])
    self.assertEqual(appinfo.vendor, info['application_vendor'])
    self.assertEqual(appinfo.version, info['application_version'])
    # Bug 1298328 - Platform buildid mismatch due to incremental builds
    # self.assertEqual(appinfo.platformBuildID, info['platform_buildid'])
    self.assertEqual(appinfo.platformVersion, info['platform_version'])
    self.assertIsNotNone(appinfo.locale)
    self.assertIsNotNone(appinfo.user_agent)
    self.assertIsNotNone(appinfo.XPCOMABI)
def run(self):
    """ Run tests for all specified builds. """
    try:
        self.prepare_application(self.binary)

        version_info = mozversion.get_version(self._application)
        self.mozlogger.info('Application: %s %s (%s)' % (
            version_info.get('application_display_name'),
            version_info.get('application_version'),
            self._application))
        self.mozlogger.info('Platform: %s %s %sbit' % (
            str(mozinfo.os).capitalize(), mozinfo.version, mozinfo.bits))

        path = os.path.join(self.workspace, 'mozmill-tests')
        self.mozlogger.info('Cloning test repository to: %s' % path)
        self.repository.clone(path)

        # Update the mozmill-test repository to match the Gecko branch
        app_repository_url = version_info.get('application_repository')
        branch_name = application.get_mozmill_tests_branch(
            app_repository_url)
        self.mozlogger.info('Updating branch of test repository to: %s'
                            % branch_name)
        self.repository.update(branch_name)

        if self.options.addons:
            self.prepare_addons()

        path = os.path.join(self.workspace, 'screenshots')
        if not os.path.isdir(path):
            os.makedirs(path)
        self.persisted["screenshotPath"] = path

        self.run_tests()
    except Exception:
        # The legacy 'except Exception, e' form bound an unused name
        # and is a SyntaxError on Python 3; sys.exc_info() already
        # captures all the details we keep.
        self.exception_type, self.exception, self.tb = sys.exc_info()
def get_browser_meta(self):
    """Returns the browser name and version in a tuple (name, version).

    On desktop, we use mozversion but a fallback method also exists
    for non-firefox browsers, where mozversion is known to fail. The
    methods are OS-specific, with windows being the outlier.
    """
    browser_name = None
    browser_version = None

    try:
        meta = mozversion.get_version(binary=self.config["binary"])
        browser_name = meta.get("application_name")
        browser_version = meta.get("application_version")
    except Exception as e:
        LOG.warning(
            "Failed to get browser meta data through mozversion: %s-%s"
            % (e.__class__.__name__, e))
        LOG.info("Attempting to get version through fallback method...")

        # Fall-back method to get browser version on desktop
        try:
            if ("linux" in self.config["platform"]
                    or "mac" in self.config["platform"]):
                # parse the first line of `<binary> --version` output
                command = [self.config["binary"], "--version"]
                proc = mozprocess.ProcessHandler(command)
                proc.run(timeout=10, outputTimeout=10)
                proc.wait()

                bmeta = proc.output
                meta_re = re.compile(r"([A-z\s]+)\s+([\w.]*)")
                if len(bmeta) != 0:
                    match = meta_re.match(bmeta[0].decode("utf-8"))
                    if match:
                        browser_name = self.config["app"]
                        browser_version = match.group(2)
                else:
                    LOG.info("Couldn't get browser version and name")
            else:
                # On windows we need to use wmic to get the version
                command = r'wmic datafile where name="{0}"'.format(
                    self.config["binary"].replace("\\", r"\\"))
                bmeta = subprocess.check_output(command)

                # last dotted token in the wmic output is the version
                meta_re = re.compile(r"\s+([\d.a-z]+)\s+")
                match = meta_re.findall(bmeta.decode("utf-8"))
                if len(match) > 0:
                    browser_name = self.config["app"]
                    browser_version = match[-1]
                else:
                    LOG.info("Couldn't get browser version and name")
        except Exception as e:
            LOG.warning(
                "Failed to get browser meta data through fallback "
                "method: %s-%s" % (e.__class__.__name__, e))

    if not browser_name:
        LOG.warning("Could not find a browser name")
    else:
        LOG.info("Browser name: %s" % browser_name)

    if not browser_version:
        LOG.warning("Could not find a browser version")
    else:
        LOG.info("Browser version: %s" % browser_version)

    return (browser_name, browser_version)
def generate_html(self, results_list):
    """Render an aggregated HTML report for *results_list*.

    :param results_list: list of result objects (testsRun, failures,
        skipped, errors, passed, unexpectedSuccesses, tests).
    :returns: the full report as a unicode HTML string.
    """
    # Aggregate counts across all result objects
    tests = sum([results.testsRun for results in results_list])
    failures = sum([len(results.failures) for results in results_list])
    expected_failures = sum(
        [len(results.expectedFailures) for results in results_list])
    skips = sum([len(results.skipped) for results in results_list]) + len(
        self.manifest_skipped_tests)
    errors = sum([len(results.errors) for results in results_list])
    passes = sum([results.passed for results in results_list])
    unexpected_passes = sum(
        [len(results.unexpectedSuccesses) for results in results_list])
    test_time = self.elapsedtime
    test_logs = []

    def _extract_html_from_result(result):
        # one table row per executed test
        _extract_html(result=result.result,
                      test_name=result.name,
                      test_class=result.test_class,
                      duration=round(result.duration, 1),
                      debug=result.debug,
                      output='\n'.join(result.output))

    def _extract_html_from_skipped_manifest_test(test):
        # tests disabled in the manifest are shown as skipped
        _extract_html(result='skipped',
                      test_name=test['name'],
                      output=test.get('disabled'))

    def _extract_html(result, test_name, test_class='', duration=0,
                      debug=None, output=''):
        # Build one results-table row and append it to test_logs.
        additional_html = []
        debug = debug or {}
        links_html = []

        # maps raw result codes to display labels
        result_map = {
            'KNOWN-FAIL': 'expected failure',
            'PASS': '******',
            'UNEXPECTED-FAIL': 'failure',
            'UNEXPECTED-PASS': '******'
        }

        # only non-passing results get screenshots, debug links and logs
        if result.upper() in [
            'SKIPPED', 'UNEXPECTED-FAIL', 'KNOWN-FAIL', 'ERROR'
        ]:
            if debug.get('screenshot'):
                screenshot = 'data:image/png;base64,%s' % debug[
                    'screenshot']
                additional_html.append(
                    html.div(html.a(html.img(src=screenshot), href="#"),
                             class_='screenshot'))

            for name, content in debug.items():
                try:
                    if 'screenshot' in name:
                        href = '#'
                    else:
                        # use base64 to avoid that some browser (such as Firefox, Opera)
                        # treats '#' as the start of another link if the data URL contains.
                        # use 'charset=utf-8' to show special characters like Chinese.
                        href = 'data:text/plain;charset=utf-8;base64,%s' % base64.b64encode(
                            content)
                    links_html.append(
                        html.a(name.title(), class_=name, href=href,
                               target='_blank'))
                    links_html.append(' ')
                except:
                    # NOTE(review): bare except silently drops debug
                    # links that fail to encode — confirm intentional.
                    pass

            log = html.div(class_='log')
            for line in output.splitlines():
                separator = line.startswith(' ' * 10)
                if separator:
                    # separator lines are truncated to 80 chars
                    log.append(line[:80])
                else:
                    # highlight lines mentioning errors/exceptions
                    if line.lower().find("error") != -1 or line.lower(
                    ).find("exception") != -1:
                        log.append(
                            html.span(raw(cgi.escape(line)),
                                      class_='error'))
                    else:
                        log.append(raw(cgi.escape(line)))
                log.append(html.br())
            additional_html.append(log)

        test_logs.append(
            html.tr([
                html.td(result_map.get(result, result).title(),
                        class_='col-result'),
                html.td(test_class, class_='col-class'),
                html.td(test_name, class_='col-name'),
                html.td(str(duration), class_='col-duration'),
                html.td(links_html, class_='col-links'),
                html.td(additional_html, class_='debug')
            ], class_=result_map.get(result, result).lower() +
                ' results-table-row'))

    for results in results_list:
        [_extract_html_from_result(test) for test in results.tests]

    for test in self.manifest_skipped_tests:
        _extract_html_from_skipped_manifest_test(test)

    generated = datetime.datetime.now()
    date_format = '%d %b %Y %H:%M:%S'

    # Version details: capabilities first, then mozversion (for local
    # binaries or non-desktop devices)
    version = {}
    if self.capabilities:
        version.update({
            'application_buildid': self.capabilities.get('appBuildId'),
            'application_version': self.capabilities.get('version'),
            'device_id': self.capabilities.get('device')
        })
    if self.bin or self.capabilities.get('device') != 'desktop':
        version.update(
            mozversion.get_version(binary=self.bin,
                                   sources=self.sources,
                                   dm_type=os.environ.get(
                                       'DM_TRANS', 'adb')))

    configuration = {
        'Gecko version': version.get('application_version'),
        'Gecko build': version.get('application_buildid'),
        'Gecko revision': version.get('application_revision'),
        'Gaia date': version.get('gaia_date') and time.strftime(
            date_format,
            time.localtime(int(version.get('gaia_date')))),
        'Device identifier': version.get('device_id'),
        'Device firmware (date)':
            version.get('device_firmware_date') and time.strftime(
                date_format,
                time.localtime(int(version.get('device_firmware_date')))),
        'Device firmware (incremental)':
            version.get('device_firmware_version_incremental'),
        'Device firmware (release)':
            version.get('device_firmware_version_release')
    }

    # Link the Gecko revision to its repository when both are known
    if version.get('application_changeset') and version.get(
            'application_repository'):
        configuration['Gecko revision'] = html.a(
            version.get('application_changeset'),
            href='/'.join([
                version.get('application_repository'),
                version.get('application_changeset')
            ]),
            target='_blank')

    if version.get('gaia_changeset'):
        configuration['Gaia revision'] = html.a(
            version.get('gaia_changeset')[:12],
            href='https://github.com/mozilla-b2g/gaia/commit/%s'
            % version.get('gaia_changeset'),
            target='_blank')

    doc = html.html(
        html.head(
            html.meta(charset='utf-8'),
            html.title('Test Report'),
            # TODO: must redesign this to use marionette's resources,
            # instead of the caller folder's
            html.style(raw(
                pkg_resources.resource_string(
                    __name__,
                    os.path.sep.join(
                        ['resources', 'htmlreport', 'style.css']))),
                type='text/css')),
        html.body(
            html.script(raw(
                pkg_resources.resource_string(
                    __name__,
                    os.path.sep.join(
                        ['resources', 'htmlreport', 'jquery.js']))),
                type='text/javascript'),
            html.script(raw(
                pkg_resources.resource_string(
                    __name__,
                    os.path.sep.join(
                        ['resources', 'htmlreport', 'main.js']))),
                type='text/javascript'),
            html.p('Report generated on %s at %s by %s version %s'
                   % (generated.strftime('%d-%b-%Y'),
                      generated.strftime('%H:%M:%S'),
                      self.html_name, self.html_version)),
            html.h2('Configuration'),
            html.table([
                html.tr(html.td(k), html.td(v))
                for k, v in sorted(configuration.items()) if v
            ], id='configuration'),
            html.h2('Summary'),
            html.p(
                '%i tests ran in %i seconds.' % (tests, test_time),
                html.br(),
                html.span('%i passed' % passes, class_='passed'),
                ', ',
                html.span('%i skipped' % skips, class_='skipped'),
                ', ',
                html.span('%i failed' % failures, class_='failed'),
                ', ',
                html.span('%i errors' % errors, class_='error'),
                '.',
                html.br(),
                html.span('%i expected failures' % expected_failures,
                          class_='expected failure'),
                ', ',
                html.span('%i unexpected passes' % unexpected_passes,
                          class_='unexpected pass'),
                '.'),
            html.h2('Results'),
            html.table([
                html.thead(html.tr([
                    html.th('Result', class_='sortable', col='result'),
                    html.th('Class', class_='sortable', col='class'),
                    html.th('Test Name', class_='sortable', col='name'),
                    html.th('Duration', class_='sortable numeric',
                            col='duration'),
                    html.th('Links')
                ]), id='results-table-head'),
                html.tbody(test_logs, id='results-table-body')
            ], id='results-table')))
    return doc.unicode(indent=2)
def run_tests(config, browser_config):
    """Run the talos test suite described by ``config`` and report results.

    Interpolates paths inside each test definition, derives browser metadata
    via mozversion, starts a local webserver (and optionally mitmproxy
    playback), runs every test through ``TTest`` and finally uploads or
    prints the collected results.

    :param config: suite-level settings dict (``tests``, ``testdate``,
        ``gecko_profile``, ``mitmproxy``, ...).
    :param browser_config: browser-level settings dict (paths, preferences,
        ``develop`` flag, ...); mutated in place with derived values.
    :returns: 0 on success, 1 when a regression was detected, 2 on an
        internal talos error.
    """
    # get the test data
    tests = config['tests']
    tests = useBaseTestDefaults(config.get('basetest', {}), tests)

    paths = ['profile_path', 'tpmanifest', 'extensions', 'setup', 'cleanup']
    for test in tests:
        # Check for profile_path, tpmanifest and interpolate based on Talos
        # root https://bugzilla.mozilla.org/show_bug.cgi?id=727711
        # Build command line from config
        for path in paths:
            if test.get(path):
                if path == 'extensions':
                    # 'extensions' is a list; interpolate each entry in place
                    for _index, _ext in enumerate(test['extensions']):
                        test['extensions'][_index] = utils.interpolate(_ext)
                else:
                    test[path] = utils.interpolate(test[path])
        if test.get('tpmanifest'):
            test['tpmanifest'] = \
                os.path.normpath('file:/%s' % (urllib.quote(
                    test['tpmanifest'], '/\\t:\\')))
            test['preferences']['talos.tpmanifest'] = test['tpmanifest']

        # if using firstNonBlankPaint, set test preference for it
        # so that the browser pref will be turned on (in ffsetup)
        if test.get('fnbpaint', False):
            LOG.info(
                "Test is using firstNonBlankPaint, browser pref will be turned on"
            )
            test['preferences'][
                'dom.performance.time_to_non_blank_paint.enabled'] = True

        test['setup'] = utils.interpolate(test['setup'])
        test['cleanup'] = utils.interpolate(test['cleanup'])

        # fall back to the suite-wide profile when the test declares none
        if not test.get('profile', False):
            test['profile'] = config.get('profile')

    if mozinfo.os == 'win':
        browser_config['extra_args'] = ['-wait-for-browser', '-no-deelevate']
    else:
        browser_config['extra_args'] = []

    # pass --no-remote to firefox launch, if --develop is specified
    # we do that to allow locally the user to have another running firefox
    # instance
    if browser_config['develop']:
        browser_config['extra_args'].append('--no-remote')

    # Pass subtests filter argument via a preference
    if browser_config['subtests']:
        browser_config['preferences']['talos.subtests'] = browser_config[
            'subtests']

    # If --code-coverage files are expected, set flag in browser config so ffsetup knows
    # that it needs to delete any ccov files resulting from browser initialization
    # NOTE: This is only supported in production; local setup of ccov folders and
    # data collection not supported yet, so if attempting to run with --code-coverage
    # flag locally, that is not supported yet
    if config.get('code_coverage', False):
        if browser_config['develop']:
            raise TalosError('Aborting: talos --code-coverage flag is only '
                             'supported in production')
        else:
            browser_config['code_coverage'] = True

    # set defaults
    testdate = config.get('testdate', '')

    # get the process name from the path to the browser
    if not browser_config['process']:
        browser_config['process'] = \
            os.path.basename(browser_config['browser_path'])

    # fix paths to substitute
    # `os.path.dirname(os.path.abspath(__file__))` for ${talos}
    # https://bugzilla.mozilla.org/show_bug.cgi?id=705809
    browser_config['extensions'] = [
        utils.interpolate(i) for i in browser_config['extensions']
    ]
    browser_config['bcontroller_config'] = \
        utils.interpolate(browser_config['bcontroller_config'])

    # normalize browser path to work across platforms
    browser_config['browser_path'] = \
        os.path.normpath(browser_config['browser_path'])

    binary = browser_config["browser_path"]
    version_info = mozversion.get_version(binary=binary)
    browser_config['browser_name'] = version_info['application_name']
    browser_config['browser_version'] = version_info['application_version']
    browser_config['buildid'] = version_info['application_buildid']
    try:
        browser_config['repository'] = version_info['application_repository']
        browser_config['sourcestamp'] = version_info['application_changeset']
    except KeyError:
        if not browser_config['develop']:
            # production runs must know where the build came from; abort hard
            print("Abort: unable to find changeset or repository: %s" %
                  version_info)
            sys.exit(1)
        else:
            browser_config['repository'] = 'develop'
            browser_config['sourcestamp'] = 'develop'

    # get test date in seconds since epoch
    if testdate:
        date = int(
            time.mktime(time.strptime(testdate, '%a, %d %b %Y %H:%M:%S GMT')))
    else:
        date = int(time.time())
    # NOTE(review): `date` is only logged below; nothing in this block
    # consumes it afterwards — confirm whether callers rely on the log line.
    LOG.debug("using testdate: %d" % date)
    LOG.debug("actual date: %d" % int(time.time()))

    # results container
    talos_results = TalosResults()

    # results links
    if not browser_config['develop'] and not config['gecko_profile']:
        results_urls = dict(
            # another hack; datazilla stands for Perfherder
            # and do not require url, but a non empty dict is required...
            output_urls=['local.json'],
        )
    else:
        # local mode, output to files
        results_urls = dict(output_urls=[os.path.abspath('local.json')])

    httpd = setup_webserver(browser_config['webserver'])
    httpd.start()

    # legacy still required for perfherder data
    talos_results.add_extra_option('e10s')
    talos_results.add_extra_option('stylo')

    # measuring the difference of a certain thread level
    if config.get('stylothreads', 0) > 0:
        talos_results.add_extra_option('%s_thread' % config['stylothreads'])

    if config['gecko_profile']:
        talos_results.add_extra_option('geckoProfile')

    # some tests use mitmproxy to playback pages
    mitmproxy_recordings_list = config.get('mitmproxy', False)
    if mitmproxy_recordings_list is not False:
        # needed so can tell talos ttest to allow external connections
        browser_config['mitmproxy'] = True

        # start mitmproxy playback; this also generates the CA certificate
        mitmdump_path = config.get('mitmdumpPath', False)
        if mitmdump_path is False:
            # cannot continue, need path for mitmdump playback tool
            raise TalosError(
                'Aborting: mitmdumpPath not provided on cmd line but is required'
            )
        mitmproxy_recording_path = os.path.join(here, 'mitmproxy')
        mitmproxy_proc = mitmproxy.start_mitmproxy_playback(
            mitmdump_path, mitmproxy_recording_path,
            mitmproxy_recordings_list.split(), browser_config['browser_path'])

        # install the generated CA certificate into Firefox
        # mitmproxy cert setup needs path to mozharness install; mozharness has set this
        mitmproxy.install_mitmproxy_cert(mitmproxy_proc,
                                         browser_config['browser_path'])

    testname = None

    # run the tests
    timer = utils.Timer()
    LOG.suite_start(tests=[test['name'] for test in tests])
    try:
        for test in tests:
            testname = test['name']
            LOG.test_start(testname)

            if not test.get('url'):
                # set browser prefs for pageloader test setings (doesn't use cmd line args / url)
                test['url'] = None
                set_tp_preferences(test, browser_config)

            mytest = TTest()

            # some tests like ts_paint return multiple results in a single iteration
            if test.get('firstpaint', False) or test.get('userready', None):
                # we need a 'testeventmap' to tell us which tests each event should map to
                multi_value_result = None
                separate_results_list = []

                test_event_map = test.get('testeventmap', None)
                if test_event_map is None:
                    raise TalosError("Need 'testeventmap' in test.py for %s" %
                                     test.get('name'))

                # run the test
                multi_value_result = mytest.runTest(browser_config, test)
                if multi_value_result is None:
                    raise TalosError("Abort: no results returned for %s" %
                                     test.get('name'))

                # parse out the multi-value results, and 'fake it' to appear like separate tests
                separate_results_list = convert_to_separate_test_results(
                    multi_value_result, test_event_map)

                # now we have three separate test results, store them
                for test_result in separate_results_list:
                    talos_results.add(test_result)

            # some tests like bloom_basic run two separate tests and then compare those values
            # we want the results in perfherder to only be the actual difference between those
            # and store the base and reference test replicates in results.json for upload
            elif test.get('base_vs_ref', False):
                # run the test, results will be reported for each page like two tests in the suite
                base_and_reference_results = mytest.runTest(
                    browser_config, test)
                # now compare each test, and create a new test object for the comparison
                talos_results.add(
                    make_comparison_result(base_and_reference_results))
            else:
                # just expecting regular test - one result value per iteration
                talos_results.add(mytest.runTest(browser_config, test))
            LOG.test_end(testname, status='OK')
    except TalosRegression as exc:
        LOG.error("Detected a regression for %s" % testname)
        # by returning 1, we report an orange to buildbot
        # http://docs.buildbot.net/latest/developer/results.html
        LOG.test_end(testname,
                     status='FAIL',
                     message=str(exc),
                     stack=traceback.format_exc())
        return 1
    except Exception as exc:
        # NOTE: if we get into this condition, talos has an internal
        # problem and cannot continue
        # this will prevent future tests from running
        LOG.test_end(testname,
                     status='ERROR',
                     message=str(exc),
                     stack=traceback.format_exc())
        # indicate a failure to buildbot, turn the job red
        return 2
    finally:
        LOG.suite_end()
        httpd.stop()

    LOG.info("Completed test suite (%s)" % timer.elapsed())

    # if mitmproxy was used for page playback, stop it
    if mitmproxy_recordings_list is not False:
        mitmproxy.stop_mitmproxy_playback(mitmproxy_proc)

    # output results
    if results_urls and not browser_config['no_upload_results']:
        talos_results.output(results_urls)
        if browser_config['develop'] or config['gecko_profile']:
            print("Thanks for running Talos locally. Results are in %s" %
                  (results_urls['output_urls']))

    # when running talos locally with gecko profiling on, use the view-gecko-profile
    # tool to automatically load the latest gecko profile in perf-html.io
    if config['gecko_profile'] and browser_config['develop']:
        if os.environ.get('DISABLE_PROFILE_LAUNCH', '0') == '1':
            LOG.info(
                "Not launching perf-html.io because DISABLE_PROFILE_LAUNCH=1")
        else:
            # NOTE(review): browser_path is read from `config` here but from
            # `browser_config` everywhere else in this function — confirm
            # `config` actually carries a 'browser_path' key.
            view_gecko_profile(config['browser_path'])

    # we will stop running tests on a failed test, or we will return 0 for
    # green
    return 0
def run_single_test(self, testdir, testname):
    """Run one TPS test file and return its result record.

    Parses the test file (YAML superset of JSON), builds one profile per
    distinct profile name referenced by the test, runs each phase in sorted
    order until the first failure, runs a cleanup phase per profile, scrapes
    Firefox/Sync version info out of the log, and assembles a result dict
    suitable for posting to the results database.

    :param testdir: directory containing the test file.
    :param testname: file name of the test to run.
    :returns: dict with 'productversion', 'addonversion', 'name',
        'message', 'state' and 'logdata' keys.
    """
    testpath = os.path.join(testdir, testname)
    self.log("Running test %s\n" % testname, True)

    # Read and parse the test file, merge it with the contents of the config
    # file, and write the combined output to a temporary file.
    # NOTE(review): plain open/close — if read() raises, the handle leaks;
    # consider a `with` block in a behavior-changing follow-up.
    f = open(testpath, 'r')
    testcontent = f.read()
    f.close()

    # We use yaml to parse the tests because it is a superset of json
    # but tolerates things like property names not being quoted, trailing
    # commas, etc.
    try:
        test = yaml.safe_load(testcontent)
    except Exception:
        # fall back to parsing just the first {...} span of the file
        test = yaml.safe_load(
            testcontent[testcontent.find('{'):testcontent.find('}') + 1])

    self.preferences['tps.seconds_since_epoch'] = int(time.time())

    # generate the profiles defined in the test, and a list of test phases
    profiles = {}
    phaselist = []
    for phase in test:
        profilename = test[phase]

        # create the profile if necessary
        if profilename not in profiles:
            profiles[profilename] = Profile(
                preferences=self.preferences.copy(), addons=self.extensions)

        # create the test phase
        phaselist.append(
            TPSTestPhase(phase,
                         profiles[profilename],
                         testname,
                         testpath,
                         self.logfile,
                         self.env,
                         self.firefoxRunner,
                         self.log,
                         ignore_unused_engines=self.ignore_unused_engines))

    # sort the phase list by name
    phaselist = sorted(phaselist, key=lambda phase: phase.phase)

    # run each phase in sequence, aborting at the first failure
    failed = False
    for phase in phaselist:
        phase.run()
        if phase.status != 'PASS':
            failed = True
            break

    # run a cleanup phase against every profile, even after a failure
    for profilename in profiles:
        print("### Cleanup Profile ", profilename)
        cleanup_phase = TPSTestPhase('cleanup-' + profilename,
                                     profiles[profilename], testname,
                                     testpath, self.logfile, self.env,
                                     self.firefoxRunner, self.log)
        cleanup_phase.run()
        if cleanup_phase.status != 'PASS':
            failed = True
            # Keep going to run the remaining cleanup phases.

    if failed:
        self.handle_phase_failure(profiles)

    # grep the log for FF and sync versions
    # NOTE(review): same open-without-with pattern as above.
    f = open(self.logfile)
    logdata = f.read()
    match = self.syncVerRe.search(logdata)
    sync_version = match.group('syncversion') if match else 'unknown'
    match = self.ffVerRe.search(logdata)
    firefox_version = match.group('ffver') if match else 'unknown'
    match = self.ffBuildIDRe.search(logdata)
    firefox_buildid = match.group('ffbuildid') if match else 'unknown'
    f.close()

    # NOTE(review): `phase` here is the loop variable left over from the
    # phase loop above (the last phase run, or the one that failed); if the
    # test file defines no phases this raises NameError — confirm test files
    # always declare at least one phase.
    if phase.status == 'PASS':
        logdata = ''
    else:
        # we only care about the log data for this specific test
        logdata = logdata[logdata.find('Running test %s' % (str(testname))):]

    # map the final phase status to a (state, message) pair
    result = {
        'PASS': lambda x: ('TEST-PASS', ''),
        'FAIL': lambda x: ('TEST-UNEXPECTED-FAIL', x.rstrip()),
        'unknown': lambda x: ('TEST-UNEXPECTED-FAIL', 'test did not complete')
    }[phase.status](phase.errline)
    logstr = "\n%s | %s%s\n" % (result[0], testname,
                                (' | %s' % result[1] if result[1] else ''))

    # best-effort: version metadata is optional in the result record
    try:
        repoinfo = mozversion.get_version(self.binary)
    except Exception:
        repoinfo = {}
    apprepo = repoinfo.get('application_repository', '')
    appchangeset = repoinfo.get('application_changeset', '')

    # save logdata to a temporary file for posting to the db
    tmplogfile = None
    if logdata:
        tmplogfile = TempFile(prefix='tps_log_')
        tmplogfile.write(logdata)
        tmplogfile.close()
        self.errorlogs[testname] = tmplogfile

    resultdata = ({
        'productversion': {
            'version': firefox_version,
            'buildid': firefox_buildid,
            # buildids start with the YYYYMMDD build date
            'builddate': firefox_buildid[0:8],
            'product': 'Firefox',
            'repository': apprepo,
            'changeset': appchangeset,
        },
        'addonversion': {
            'version': sync_version,
            'product': 'Firefox Sync'
        },
        'name': testname,
        'message': result[1],
        'state': result[0],
        'logdata': logdata
    })
    self.log(logstr, True)
    for phase in phaselist:
        print("\t{}: {}".format(phase.phase, phase.status))
    return resultdata
def test_missing_ini_files(self):
    """Without any ini files, get_version yields an empty dict."""
    version_info = get_version(self.binary)
    self.assertEqual(version_info, {})
def test_without_sources_file(self):
    """A missing sources file must not raise an exception."""
    self._write_conf_files(sources=False)
    # Only the absence of an exception is being verified here.
    get_version(self.binary)
def test_sources_in_current_directory(self):
    """get_version() with no arguments picks up sources from the cwd."""
    self._write_conf_files()
    os.chdir(self.tempdir)
    version_info = get_version()
    self._check_version(version_info)
def test_sources(self):
    """An explicitly supplied sources path is honoured."""
    self._write_conf_files()
    os.chdir(self.tempdir)
    sources_path = os.path.join(self.tempdir, 'sources.xml')
    version_info = get_version(sources=sources_path)
    self._check_version(version_info)
def test_gaia_commit(self):
    """A well-formed gaia commit file yields changeset and date."""
    expected_changeset = 'a' * 40
    expected_date = 'date'
    self._create_zip(expected_changeset, expected_date)
    version_info = get_version(self.binary)
    self.assertEqual(version_info.get('gaia_changeset'), expected_changeset)
    self.assertEqual(version_info.get('gaia_date'), expected_date)
def test_missing_zip_file(self):
    """With no gaia zip present, neither gaia field is populated."""
    version_info = get_version(self.binary)
    self.assertIsNone(version_info.get('gaia_changeset'))
    self.assertIsNone(version_info.get('gaia_date'))
def post_to_treeherder(self, tests):
    """Build and submit a Treeherder job for this test run.

    Collects device/build metadata via mozversion, resolves the revision
    hash through the Treeherder API, attaches logcat, log files and report
    artifacts (uploaded to S3), then POSTs the job collection.

    :param tests: accepted for interface compatibility.
        NOTE(review): `tests` is never read in this body — confirm callers
        still need to pass it.
    """
    version = mozversion.get_version(binary=self.bin,
                                     sources=self.sources,
                                     dm_type='adb',
                                     device_serial=self.device_serial)
    job_collection = TreeherderJobCollection()
    job = job_collection.get_job()

    device = version.get('device_id')
    device_firmware_version_release = \
        version.get('device_firmware_version_release')

    if not device:
        self.logger.error('Submitting to Treeherder is currently limited '
                          'to devices.')
        return

    # map (device, firmware release) to a Treeherder group; unknown
    # combinations abort the submission with a diagnostic listing
    try:
        group = DEVICE_GROUP_MAP[device][device_firmware_version_release]
        job.add_group_name(group['name'])
        job.add_group_symbol(group['symbol'])
        job.add_job_name('Gaia Python Integration Test (%s)' %
                         group['symbol'])
        job.add_job_symbol('Gip')
    except KeyError:
        self.logger.error('Unknown device id: %s or device firmware '
                          'version: %s. Unable to determine Treeherder '
                          'group. Supported devices: %s' %
                          (device, device_firmware_version_release, [
                              '%s: %s' % (k, [fw for fw in v.keys()])
                              for k, v in DEVICE_GROUP_MAP.iteritems()
                          ]))
        return

    # Determine revision hash from application revision
    revision = version['application_changeset']
    project = version['application_repository'].split('/')[-1]
    lookup_url = urljoin(
        self.treeherder_url,
        'api/project/%s/revision-lookup/?revision=%s' % (project, revision))
    self.logger.debug('Getting revision hash from: %s' % lookup_url)
    response = requests.get(lookup_url)
    response.raise_for_status()
    assert response.json(), 'Unable to determine revision hash for %s. ' \
                            'Perhaps it has not been ingested by ' \
                            'Treeherder?' % revision
    revision_hash = response.json()[revision]['revision_hash']
    job.add_revision_hash(revision_hash)
    job.add_project(project)
    job.add_job_guid(str(uuid.uuid4()))
    job.add_product_name('b2g')
    job.add_state('completed')

    # Determine test result
    if self.failed or self.unexpected_successes:
        job.add_result('testfailed')
    else:
        job.add_result('success')

    job.add_submit_timestamp(int(self.start_time))
    job.add_start_timestamp(int(self.start_time))
    job.add_end_timestamp(int(self.end_time))

    job.add_machine(socket.gethostname())
    job.add_build_info('b2g', 'b2g-device-image', 'x86')
    job.add_machine_info('b2g', 'b2g-device-image', 'x86')

    # All B2G device builds are currently opt builds
    job.add_option_collection({'opt': True})

    date_format = '%d %b %Y %H:%M:%S'
    job_details = [{
        'content_type': 'link',
        'title': 'Gaia revision:',
        'url': 'https://github.com/mozilla-b2g/gaia/commit/%s' %
               version.get('gaia_changeset'),
        'value': version.get('gaia_changeset'),
    }, {
        'content_type': 'text',
        'title': 'Gaia date:',
        # `and` short-circuits to None/'' when the key is absent
        'value': version.get('gaia_date') and time.strftime(
            date_format, time.localtime(int(version.get('gaia_date')))),
    }, {
        'content_type': 'text',
        'title': 'Device identifier:',
        'value': version.get('device_id')
    }, {
        'content_type': 'text',
        'title': 'Device firmware (date):',
        'value': version.get('device_firmware_date') and time.strftime(
            date_format,
            time.localtime(int(version.get('device_firmware_date')))),
    }, {
        'content_type': 'text',
        'title': 'Device firmware (incremental):',
        'value': version.get('device_firmware_version_incremental')
    }, {
        'content_type': 'text',
        'title': 'Device firmware (release):',
        'value': version.get('device_firmware_version_release')
    }]

    ci_url = os.environ.get('BUILD_URL')
    if ci_url:
        job_details.append({
            'url': ci_url,
            'value': ci_url,
            'content_type': 'link',
            'title': 'CI build:'
        })

    # Attach logcat
    adb_device = ADBDevice(self.device_serial)
    with tempfile.NamedTemporaryFile(suffix='logcat.txt') as f:
        f.writelines(adb_device.get_logcat())
        # NOTE(review): f.name is uploaded before the file is flushed or
        # closed — buffered data may be missing from the upload; confirm
        # whether an f.flush() is needed here.
        self.logger.debug('Logcat stored in: %s' % f.name)
        try:
            url = self.upload_to_s3(f.name)
            job_details.append({
                'url': url,
                'value': 'logcat.txt',
                'content_type': 'link',
                'title': 'Log:'
            })
        except S3UploadError:
            job_details.append({
                'value': 'Failed to upload logcat.txt',
                'content_type': 'text',
                'title': 'Error:'
            })

    # Attach log files
    handlers = [
        handler for handler in self.logger.handlers
        if isinstance(handler, StreamHandler)
        and os.path.exists(handler.stream.name)
    ]
    for handler in handlers:
        path = handler.stream.name
        filename = os.path.split(path)[-1]
        try:
            url = self.upload_to_s3(path)
            job_details.append({
                'url': url,
                'value': filename,
                'content_type': 'link',
                'title': 'Log:'
            })
            # Add log reference
            # (note: `or`/`and` precedence — the second condition is
            # `LogLevelFilter wrapping a TbplFormatter`)
            if type(handler.formatter) is TbplFormatter or \
                    type(handler.formatter) is LogLevelFilter and \
                    type(handler.formatter.inner) is TbplFormatter:
                job.add_log_reference(filename, url)
        except S3UploadError:
            job_details.append({
                'value': 'Failed to upload %s' % filename,
                'content_type': 'text',
                'title': 'Error:'
            })

    # Attach reports
    for report in [self.html_output, self.xml_output]:
        if report is not None:
            filename = os.path.split(report)[-1]
            try:
                url = self.upload_to_s3(report)
                job_details.append({
                    'url': url,
                    'value': filename,
                    'content_type': 'link',
                    'title': 'Report:'
                })
            except S3UploadError:
                job_details.append({
                    'value': 'Failed to upload %s' % filename,
                    'content_type': 'text',
                    'title': 'Error:'
                })

    if job_details:
        job.add_artifact('Job Info', 'json', {'job_details': job_details})

    job_collection.add(job)

    # Send the collection to Treeherder
    url = urlparse(self.treeherder_url)
    request = TreeherderRequest(
        protocol=url.scheme,
        host=url.hostname,
        project=project,
        oauth_key=os.environ.get('TREEHERDER_KEY'),
        oauth_secret=os.environ.get('TREEHERDER_SECRET'))
    self.logger.debug('Sending results to Treeherder: %s' %
                      job_collection.to_json())
    response = request.post(job_collection)
    self.logger.debug('Response: %s' % response.read())
    assert response.status == 200, 'Failed to send results!'
    self.logger.info(
        'Results are available to view at: %s' %
        (urljoin(self.treeherder_url, '/ui/#/jobs?repo=%s&revision=%s' %
                 (project, revision))))
def run_tests(config, browser_config):
    """Runs the talos tests on the given configuration and generates a report.

    Legacy (Python 2) variant: interpolates test paths, derives browser
    metadata via mozversion, starts a local webserver, runs each test
    through ``TTest`` and outputs the collected results.

    :param config: suite-level settings dict (``tests``, ``title``,
        ``testdate``, ...).
    :param browser_config: browser-level settings dict; mutated in place.
    :returns: 0 on success, 1 on a detected regression, 2 on an internal
        talos error.
    """
    # get the test data
    tests = config['tests']
    tests = useBaseTestDefaults(config.get('basetest', {}), tests)

    paths = ['profile_path', 'tpmanifest', 'extensions', 'setup', 'cleanup']
    for test in tests:
        # Check for profile_path, tpmanifest and interpolate based on Talos
        # root https://bugzilla.mozilla.org/show_bug.cgi?id=727711
        # Build command line from config
        for path in paths:
            if test.get(path):
                test[path] = utils.interpolate(test[path])
        if test.get('tpmanifest'):
            test['tpmanifest'] = \
                os.path.normpath('file:/%s' % (urllib.quote(
                    test['tpmanifest'], '/\\t:\\')))
        if not test.get('url'):
            # build 'url' for tptest
            test['url'] = buildCommandLine(test)
        test['url'] = utils.interpolate(test['url'])
        test['setup'] = utils.interpolate(test['setup'])
        test['cleanup'] = utils.interpolate(test['cleanup'])

    # pass --no-remote to firefox launch, if --develop is specified
    # we do that to allow locally the user to have another running firefox
    # instance
    # NOTE(review): extra_args is assigned a *string* here, while other
    # versions of this function use a list — confirm downstream consumers
    # accept a bare string.
    if browser_config['develop']:
        browser_config['extra_args'] = '--no-remote'

    # with addon signing for production talos, we want to develop without it
    if browser_config['develop'] or browser_config['branch_name'] == 'Try':
        browser_config['preferences']['xpinstall.signatures.required'] = False

    browser_config['extensions'] = [
        os.path.dirname(i) for i in browser_config['extensions']
    ]

    # set defaults
    title = config.get('title', '')
    testdate = config.get('testdate', '')

    if browser_config['e10s'] and not title.endswith(".e"):
        # we are running in e10s mode
        title = "%s.e" % (title, )
    # NOTE(review): `title` is not read again within this function — confirm
    # whether it was meant to feed into the results output.

    # get the process name from the path to the browser
    if not browser_config['process']:
        browser_config['process'] = \
            os.path.basename(browser_config['browser_path'])

    # fix paths to substitute
    # `os.path.dirname(os.path.abspath(__file__))` for ${talos}
    # https://bugzilla.mozilla.org/show_bug.cgi?id=705809
    browser_config['extensions'] = [
        utils.interpolate(i) for i in browser_config['extensions']
    ]
    browser_config['bcontroller_config'] = \
        utils.interpolate(browser_config['bcontroller_config'])

    # normalize browser path to work across platforms
    browser_config['browser_path'] = \
        os.path.normpath(browser_config['browser_path'])

    binary = browser_config["browser_path"]
    version_info = mozversion.get_version(binary=binary)
    browser_config['browser_name'] = version_info['application_name']
    browser_config['browser_version'] = version_info['application_version']
    browser_config['buildid'] = version_info['application_buildid']
    try:
        browser_config['repository'] = version_info['application_repository']
        browser_config['sourcestamp'] = version_info['application_changeset']
    except KeyError:
        if not browser_config['develop']:
            print "unable to find changeset or repository: %s" % version_info
            sys.exit()
        else:
            browser_config['repository'] = 'develop'
            browser_config['sourcestamp'] = 'develop'

    # get test date in seconds since epoch
    if testdate:
        date = int(
            time.mktime(time.strptime(testdate, '%a, %d %b %Y %H:%M:%S GMT')))
    else:
        date = int(time.time())
    LOG.debug("using testdate: %d" % date)
    LOG.debug("actual date: %d" % int(time.time()))

    # results container
    talos_results = TalosResults()

    # results links
    if not browser_config['develop']:
        results_urls = dict(
            # another hack; datazilla stands for Perfherder
            # and do not require url, but a non empty dict is required...
            datazilla_urls=['local.json'],
        )
    else:
        # local mode, output to files
        results_urls = dict(datazilla_urls=[os.path.abspath('local.json')])
    talos_results.check_output_formats(results_urls)

    httpd = setup_webserver(browser_config['webserver'])
    httpd.start()

    testname = None
    # run the tests
    timer = utils.Timer()
    LOG.suite_start(tests=[test['name'] for test in tests])
    try:
        for test in tests:
            testname = test['name']
            LOG.test_start(testname)
            mytest = TTest()
            talos_results.add(mytest.runTest(browser_config, test))
            LOG.test_end(testname, status='OK')
    except TalosRegression as exc:
        LOG.error("Detected a regression for %s" % testname)
        # by returning 1, we report an orange to buildbot
        # http://docs.buildbot.net/latest/developer/results.html
        LOG.test_end(testname,
                     status='FAIL',
                     message=unicode(exc),
                     stack=traceback.format_exc())
        return 1
    except Exception as exc:
        # NOTE: if we get into this condition, talos has an internal
        # problem and cannot continue
        # this will prevent future tests from running
        LOG.test_end(testname,
                     status='ERROR',
                     message=unicode(exc),
                     stack=traceback.format_exc())
        # indicate a failure to buildbot, turn the job red
        return 2
    finally:
        LOG.suite_end()
        httpd.stop()

    LOG.info("Completed test suite (%s)" % timer.elapsed())

    # output results
    if results_urls:
        talos_results.output(results_urls)
        if browser_config['develop']:
            print("Thanks for running Talos locally. Results are in %s" %
                  (results_urls['datazilla_urls']))

    # we will stop running tests on a failed test, or we will return 0 for
    # green
    return 0
def test_invalid_gaia_commit(self):
    """An over-long changeset is discarded while the date is kept."""
    bad_changeset = 'a' * 41
    commit_date = 'date'
    self._create_zip(bad_changeset, commit_date)
    version_info = get_version(self.binary)
    self.assertIsNone(version_info.get('gaia_changeset'))
    self.assertEqual(version_info.get('gaia_date'), commit_date)
def test_without_sources_file(self):
    """A missing sources file must not raise an exception."""
    ini_path = os.path.join(self.tempdir, 'application.ini')
    with open(ini_path, 'w') as ini:
        ini.writelines(self.application_ini)
    # Only the absence of an exception is being verified here.
    get_version(self.binary)
def test_missing_gaia_commit(self):
    """A gaia zip without commit data yields neither gaia field."""
    self._create_zip()
    version_info = get_version(self.binary)
    self.assertIsNone(version_info.get('gaia_changeset'))
    self.assertIsNone(version_info.get('gaia_date'))
def run_tests(self, tests):
    """Run the given marionette test files, honouring the repeat count.

    Starts marionette/httpd as needed, registers and validates the test
    files, logs manifest-skipped tests, then executes the test sets
    ``self.repeat + 1`` times. A KeyboardInterrupt during execution is
    deferred so the summary can still be printed, then re-raised.

    :param tests: non-empty list of test file paths.
    """
    assert len(tests) > 0
    assert len(self.test_handlers) > 0
    self.reset_test_stats()
    self.start_time = time.time()

    need_external_ip = True
    if not self.marionette:
        self.start_marionette()
        # if we're working against a desktop version, we usually don't need
        # an external ip
        if self.capabilities['device'] == "desktop":
            need_external_ip = False
    self.logger.info('Initial Profile Destination is '
                     '"{}"'.format(self.marionette.profile_path))

    # Gaia sets server_root and that means we shouldn't spin up our own httpd
    if not self.httpd:
        if self.server_root is None or os.path.isdir(self.server_root):
            self.logger.info("starting httpd")
            self.start_httpd(need_external_ip)
            self.marionette.baseurl = self.httpd.get_url()
            self.logger.info("running httpd on %s" % self.marionette.baseurl)
        else:
            self.marionette.baseurl = self.server_root
            self.logger.info("using remote content from %s" %
                             self.marionette.baseurl)

    # NOTE(review): device_info is always None here but is still passed to
    # suite_start below — confirm whether device detection was removed
    # intentionally.
    device_info = None

    for test in tests:
        self.add_test(test)

    # ensure we have only tests files with names starting with 'test_'
    invalid_tests = \
        [t['filepath'] for t in self.tests
         if not os.path.basename(t['filepath']).startswith('test_')]
    if invalid_tests:
        raise Exception("Tests file names must starts with 'test_'."
                        " Invalid test names:\n %s" %
                        '\n '.join(invalid_tests))

    self.logger.info("running with e10s: {}".format(self.e10s))

    version_info = mozversion.get_version(binary=self.bin,
                                          sources=self.sources,
                                          dm_type=os.environ.get(
                                              'DM_TRANS', 'adb'))

    self.logger.suite_start(self.tests,
                            version_info=version_info,
                            device_info=device_info)

    # report manifest-skipped tests up front as SKIP results
    for test in self.manifest_skipped_tests:
        name = os.path.basename(test['path'])
        self.logger.test_start(name)
        self.logger.test_end(name, 'SKIP', message=test['disabled'])
        self.todo += 1

    interrupted = None
    try:
        # run the whole set repeat+1 times (round 0 plus `repeat` repeats)
        counter = self.repeat
        while counter >= 0:
            round = self.repeat - counter
            if round > 0:
                self.logger.info('\nREPEAT %d\n-------' % round)
            self.run_test_sets()
            counter -= 1
    except KeyboardInterrupt:
        # in case of KeyboardInterrupt during the test execution
        # we want to display current test results.
        # so we keep the exception to raise it later.
        interrupted = sys.exc_info()
    try:
        self._print_summary(tests)
    except:
        # raise only the exception if we were not interrupted
        if not interrupted:
            raise
    finally:
        # reraise previous interruption now (Python 2 three-expression
        # raise, preserving the original traceback)
        if interrupted:
            raise interrupted[0], interrupted[1], interrupted[2]