def post_request(self, machine, project, job_collection, attempts, last_attempt):
    logger.debug('AutophoneTreeherder.post_request: %s, attempt=%d, last=%s' %
                 (job_collection.__dict__, attempts, last_attempt))
    client = TreeherderClient(protocol=self.protocol,
                              host=self.server,
                              client_id=self.client_id,
                              secret=self.secret)
    try:
        client.post_collection(project, job_collection)
        return True
    except Exception, e:
        logger.exception('Error submitting request to Treeherder, attempt=%d, last=%s' %
                         (attempts, last_attempt))
        if self.mailer:
            if hasattr(e, 'response') and e.response:
                response_json = json.dumps(e.response.json(),
                                           indent=2, sort_keys=True)
            else:
                response_json = None
            self.mailer.send(
                '%s attempt %d Error submitting request to Treeherder' %
                (utils.host(), attempts),
                'Phone: %s\n'
                'Exception: %s\n'
                'Last attempt: %s\n'
                'Response: %s\n' % (machine, e, last_attempt, response_json))
def post_request(self, machine, project, job_collection, attempts, last_attempt):
    logger = utils.getLogger()
    logger.debug('AutophoneTreeherder.post_request: %s, attempt=%d, last=%s',
                 job_collection.__dict__, attempts, last_attempt)
    try:
        self.client.post_collection(project, job_collection)
        return True
    except Exception, e:
        logger.exception('Error submitting request to Treeherder, attempt=%d, last=%s',
                         attempts, last_attempt)
        if attempts > 1 and self.mailer:
            if hasattr(e, 'response') and e.response:
                response_json = json.dumps(e.response.json(),
                                           indent=2, sort_keys=True)
            else:
                response_json = None
            request_len = len(job_collection.to_json())
            self.mailer.send(
                '%s attempt %d Error submitting request to Treeherder' %
                (utils.host(), attempts),
                'Phone: %s\n'
                'Exception: %s\n'
                'Last attempt: %s\n'
                'Request length: %d\n'
                'Response: %s\n' % (machine, e, last_attempt,
                                    request_len, response_json))
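# The attempts/last_attempt parameters and the boolean return value above
# imply that the retry loop lives in the caller. A minimal sketch of such a
# caller follows; MAX_ATTEMPTS, RETRY_DELAY and submit_with_retries are
# illustrative names, not part of Autophone.
import time

MAX_ATTEMPTS = 3
RETRY_DELAY = 60


def submit_with_retries(treeherder, machine, project, job_collection):
    for attempt in range(1, MAX_ATTEMPTS + 1):
        last_attempt = attempt == MAX_ATTEMPTS
        # post_request returns True on success; on failure it logs, mails
        # and falls through, returning None.
        if treeherder.post_request(machine, project, job_collection,
                                   attempt, last_attempt):
            return True
        if not last_attempt:
            time.sleep(RETRY_DELAY)
    return False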
def report_sql_error(self, attempt, email_sent, sql, values):
    logger = utils.getLogger()
    message = '%s %s' % (sql, values)
    logger.exception(message)
    if attempt > self.SQL_MAX_RETRIES and not email_sent:
        email_sent = True
        email_subject = '%s jobs SQL Error' % utils.host()
        email_body = ('Attempt %d to execute %s failed.\n'
                      '%s' % (attempt, message, traceback.format_exc()))
        self.mailer.send(email_subject, email_body)
        logger.info('Sent mail notification about jobs database sql error.')
    time.sleep(self.SQL_RETRY_DELAY)
    return email_sent
def report_sql_error(self, attempt, email_sent, sql, values):
    message = "%s %s" % (sql, values)
    logger.exception(message)
    if attempt > self.SQL_MAX_RETRIES and not email_sent:
        email_sent = True
        email_subject = "%s jobs SQL Error" % utils.host()
        email_body = (
            "Attempt %d to execute %s failed.\n"
            "%s"
            "Waiting for %d seconds."
            % (attempt, message, traceback.format_exc(), self.SQL_RETRY_DELAY)
        )
        self.mailer.send(email_subject, email_body)
        logger.info("Sent mail notification about jobs database sql error.")
    time.sleep(self.SQL_RETRY_DELAY)
    return email_sent
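# report_sql_error sleeps for SQL_RETRY_DELAY itself and mails at most once,
# threading the email_sent flag back to the caller. A sketch of the kind of
# retry wrapper this implies; execute_with_retries and self.connection are
# assumed names, not the actual jobs-database code.
def execute_with_retries(self, sql, values=()):
    attempt = 0
    email_sent = False
    while True:
        attempt += 1
        try:
            cursor = self.connection.cursor()
            cursor.execute(sql, values)
            self.connection.commit()
            return cursor
        except Exception:
            # Logs the failure, mails once after SQL_MAX_RETRIES attempts
            # have been exceeded, and sleeps before the next attempt.
            email_sent = self.report_sql_error(attempt, email_sent,
                                               sql, values)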
def publish_results(self, starttime=0, tstrt=0, tstop=0, testname="",
                    cache_enabled=True, rejected=False):
    # Create JSON to send to webserver
    resultdata = {
        "phoneid": self.phone.id,
        "testname": testname,
        "starttime": starttime,
        "throbberstart": tstrt,
        "throbberstop": tstop,
        "blddate": self.build.date,
        "cached": cache_enabled,
        "rejected": rejected,
        "revision": self.build.revision,
        "productname": self.build.app_name,
        "productversion": self.build.version,
        "osver": self.phone.osver,
        "bldtype": self.build.type,
        "machineid": self.phone.machinetype,
    }
    result = {"data": resultdata}
    # Upload
    if self._signer:
        encoded_result = jwt.encode(result, signer=self._signer)
        content_type = "application/jwt"
    else:
        encoded_result = json.dumps(result)
        content_type = "application/json; charset=utf-8"
    req = urllib2.Request(self._resulturl + "add/", encoded_result,
                          {"Content-Type": content_type})
    max_attempts = 10
    wait_time = 10
    for attempt in range(1, max_attempts + 1):
        try:
            f = urllib2.urlopen(req)
            f.read()
            f.close()
            return
        except Exception, e:
            # Retry submission if the exception is due to a
            # timeout and if we haven't exceeded the maximum
            # number of attempts.
            if attempt < max_attempts:
                self.loggerdeco.warning("PerfTest.publish_results: "
                                        "Attempt %d/%d error %s sending "
                                        "results to server" %
                                        (attempt, max_attempts, e))
                time.sleep(wait_time)
                continue
            self.loggerdeco.exception("Error sending results to server")
            self.worker_subprocess.mailer.send(
                "%s attempt %s/%s Error sending %s results for phone %s, "
                "build %s" % (utils.host(), attempt, max_attempts,
                              self.name, self.phone.id, self.build.id),
                "There was an error attempting to send test results "
                "to the result server %s.\n"
                "\n"
                "Host %s\n"
                "Job %s\n"
                "Test %s\n"
                "Phone %s\n"
                "Repository %s\n"
                "Build %s\n"
                "Revision %s\n"
                "Exception %s\n"
                "Result %s\n" % (self.result_server,
                                 utils.host(),
                                 self.job_url,
                                 self.name,
                                 self.phone.id,
                                 self.build.tree,
                                 self.build.id,
                                 self.build.revision,
                                 e,
                                 json.dumps(resultdata, sort_keys=True,
                                            indent=2)))
            message = "Error sending results to server"
            self.test_result.status = PhoneTestResult.EXCEPTION
            self.message = message
            self.update_status(message=message)
def submit_complete(self, machine, build_url, project, revision, build_type,
                    build_abi, build_platform, build_sdk, builder_type,
                    tests=None):
    """Submit test results for the worker's current job to Treeherder.

    :param machine: machine id
    :param build_url: url to build being tested.
    :param project: repository of build.
    :param revision: Either a URL to the changeset or the revision id.
    :param tests: Lists of tests to be reported.
    """
    logger = utils.getLogger()
    logger.debug('AutophoneTreeherder.submit_complete: %s', tests)

    if not self.url or not revision:
        logger.debug('AutophoneTreeherder.submit_complete: no url/revision')
        return

    tjc = TreeherderJobCollection()

    for t in tests:
        logger.debug('AutophoneTreeherder.submit_complete for %s %s',
                     t.name, project)

        t.end_timestamp = timestamp_now()
        # A usercancelled job may not have a start_timestamp
        # since it may have been cancelled before it started.
        if not t.start_timestamp:
            t.start_timestamp = t.end_timestamp

        tj = self._create_job(tjc, machine, build_url, project, revision,
                              build_type, build_abi, build_platform,
                              build_sdk, builder_type, t)
        tj.add_state(TestState.COMPLETED)
        tj.add_result(t.status)
        tj.add_submit_timestamp(t.submit_timestamp)
        tj.add_start_timestamp(t.start_timestamp)
        tj.add_end_timestamp(t.end_timestamp)

        t.job_details.append({
            'value': os.path.basename(t.config_file),
            'title': 'Config'})
        t.job_details.append({
            'url': build_url,
            'value': os.path.basename(build_url),
            'title': 'Build'})
        t.job_details.append({'value': utils.host(), 'title': 'Host'})

        if t.passed + t.failed + t.todo > 0:
            if t.failed == 0:
                failed = '0'
            else:
                failed = '<em class="testfail">%s</em>' % t.failed
            t.job_details.append({
                'value': "%s/%s/%s" % (t.passed, failed, t.todo),
                'title': "%s-%s" % (t.job_name, t.job_symbol)})

        if hasattr(t, 'phonedash_url'):
            t.job_details.append({
                'url': t.phonedash_url,
                'value': 'graph',
                'title': 'phonedash'})

        # Attach log, ANRs, tombstones, etc.
        if self.s3_bucket:
            # We must make certain that S3 keys for uploaded files
            # are unique even in the event of retries. The
            # Treeherder logviewer limits the length of the log
            # url to 255 bytes. If the url length exceeds 255
            # characters it is truncated in the Treeherder
            # logviewer url field even though the file is
            # successfully uploaded to s3 with the full url. The
            # logviewer will fail to parse the log since it
            # attempts to retrieve it from a truncated url.
            #
            # We have been creating unique keys through the use of
            # human readable "log_identifiers" combined with the
            # test's job_guid and base filename to create unique
            # keys for s3. Unfortunately, the choice of the aws
            # host name, a path based on the path to the build,
            # test names and config file names has resulted in
            # overly long urls which exceed 255 bytes. Given that
            # the s3 hostname and build url path currently consume
            # 100 bytes and the test's job-guid and filename
            # consume another 51, we only have a maximum of 104
            # bytes for the log_identifier. The safest course of
            # action is to eliminate the test name, test config
            # filename, the chunk and device name and rely solely
            # on the test's job_guid to provide uniqueness.
            log_identifier = t.job_guid

            key_prefix = os.path.dirname(urlparse.urlparse(build_url).path)
            key_prefix = re.sub('/tmp$', '', key_prefix)

            # Upload directory containing ANRs, tombstones and other items
            # to be uploaded.
            if t.upload_dir:
                for f in utils.find_files(t.upload_dir):
                    try:
                        lname = os.path.relpath(f, t.upload_dir)
                        try:
                            fname = '%s-%s' % (log_identifier, lname)
                        except UnicodeDecodeError, e:
                            logger.exception('Ignoring artifact %s',
                                             lname.decode('utf-8',
                                                          errors='replace'))
                            continue
                        url = self.s3_bucket.upload(f, "%s/%s" % (key_prefix,
                                                                  fname))
                        t.job_details.append({
                            'url': url,
                            'value': lname,
                            'title': 'artifact uploaded'})
                    except (S3Error, IOError), e:
                        logger.exception('Error uploading artifact %s', fname)
                        t.job_details.append({
                            'value': 'Failed to upload artifact %s: %s' % (
                                fname, e),
                            'title': 'Error'})

            # Autophone Log
            # Since we are submitting results to Treeherder, we flush
            # the worker's log before uploading the log to
            # Treeherder. When we upload the log, it will contain
            # results for a single test run with possibly an error
            # message from the previous test if the previous log
            # upload failed.
            try:
                # Emit the final step marker, flush and close the
                # log prior to uploading.
                t.worker_subprocess.log_step('Submitting Log')
                t.worker_subprocess.close_log()
                fname = '%s-autophone.log' % log_identifier
                lname = 'Autophone Log'
                key = "%s/%s" % (key_prefix, fname)
                url = self.s3_bucket.upload(t.worker_subprocess.logfile, key)
                # Truncate the log once it has been submitted to S3
                # but do not close the filehandler as that messes with
                # the next test's log.
                t.worker_subprocess.filehandler.stream.truncate(0)
                t.job_details.append({
                    'url': url,
                    'value': lname,
                    'title': 'artifact uploaded'})
                tj.add_log_reference('buildbot_text', url,
                                     parse_status='pending')
            except Exception, e:
                logger.exception('Error %s uploading %s', e, fname)
                t.job_details.append({
                    'value': 'Failed to upload Autophone log: %s' % e,
                    'title': 'Error'})
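# The 255-byte Treeherder logviewer limit described above can be checked
# before uploading. A sketch of that budget check; S3_HOST is a hypothetical
# bucket host name used only for illustration.
S3_HOST = 'https://example-bucket.s3.amazonaws.com'
MAX_LOG_URL_LEN = 255


def log_url_fits(key_prefix, log_identifier, filename):
    # key_prefix starts with the build url path; roughly 100 bytes of host
    # plus path and 51 bytes of job_guid plus filename leave about 104
    # bytes of slack for anything else added to the key.
    key = '%s/%s-%s' % (key_prefix, log_identifier, filename)
    return len('%s%s' % (S3_HOST, key)) <= MAX_LOG_URL_LEN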
def publish_results(self, starttime=0, tstrt=0, tstop=0, testname='',
                    cache_enabled=True, rejected=False):
    # Create JSON to send to webserver
    author = None
    if self.build.tree == 'try':
        rev_json_url = self.build.changeset.replace('/rev/', '/json-rev/')
        rev_json = utils.get_remote_json(rev_json_url)
        if rev_json:
            author = rev_json['pushuser']

    blddate = float(convert_datetime_to_string(self.build.date, TIMESTAMP))
    self.loggerdeco.debug('publish_results: build.id: %s, build.date: %s, blddate: %s' % (
        self.build.id, self.build.date, blddate))

    resultdata = {
        'phoneid': self.phone.id,
        'testname': testname,
        'starttime': starttime,
        'throbberstart': tstrt,
        'throbberstop': tstop,
        'blddate': blddate,
        'cached': cache_enabled,
        'rejected': rejected,
        'revision': self.build.changeset,
        'author': author,
        'productname': self.build.app_name,
        'productversion': self.build.version,
        'osver': self.phone.osver,
        'bldtype': self.build.type,
        'machineid': self.phone.machinetype
    }
    result = {'data': resultdata}

    # Upload
    if self._signer:
        encoded_result = jwt.encode(result, signer=self._signer)
        content_type = 'application/jwt'
    else:
        encoded_result = json.dumps(result)
        content_type = 'application/json; charset=utf-8'
    req = urllib2.Request(self._resulturl + 'add/', encoded_result,
                          {'Content-Type': content_type})
    max_attempts = 10
    wait_time = 10
    for attempt in range(1, max_attempts + 1):
        try:
            f = urllib2.urlopen(req)
            f.read()
            f.close()
            return
        except Exception, e:
            # Retry submission if the exception is due to a
            # timeout and if we haven't exceeded the maximum
            # number of attempts.
            if attempt < max_attempts:
                self.loggerdeco.warning('PerfTest.publish_results: '
                                        'Attempt %d/%d error %s sending '
                                        'results to server' % (
                                            attempt, max_attempts, e))
                time.sleep(wait_time)
                continue
            self.loggerdeco.exception('Error sending results to server')
            self.worker_subprocess.mailer.send(
                '%s attempt %s/%s Error sending %s results for phone %s, '
                'build %s' % (utils.host(), attempt, max_attempts,
                              self.name, self.phone.id, self.build.id),
                'There was an error attempting to send test results '
                'to the result server %s.\n'
                '\n'
                'Host %s\n'
                'Job %s\n'
                'Test %s\n'
                'Phone %s\n'
                'Repository %s\n'
                'Build %s\n'
                'Revision %s\n'
                'Exception %s\n'
                'Result %s\n' % (self.result_server,
                                 utils.host(),
                                 self.job_url,
                                 self.name,
                                 self.phone.id,
                                 self.build.tree,
                                 self.build.id,
                                 self.build.changeset,
                                 e,
                                 json.dumps(resultdata, sort_keys=True,
                                            indent=2)))
            message = 'Error sending results to server'
            self.status = PhoneTest.EXCEPTION
            self.message = message
            self.update_status(message=message)
def submit_complete(self, machine, build_url, project, revision, build_type, build_abi, build_platform, build_sdk, builder_type, tests=None): """Submit test results for the worker's current job to Treeherder. :param machine: machine id :param build_url: url to build being tested. :param project: repository of build. :param revision: Either a URL to the changeset or the revision id. :param tests: Lists of tests to be reported. """ LOGGER.debug('AutophoneTreeherder.submit_complete: %s', tests) if not self.url or not revision: LOGGER.debug( 'AutophoneTreeherder.submit_complete: no url/revision') return tjc = TreeherderJobCollection() for t in tests: LOGGER.debug('AutophoneTreeherder.submit_complete for %s %s', t.name, project) t.end_timestamp = timestamp_now() # A usercancelled job may not have a start_timestamp # since it may have been cancelled before it started. if not t.start_timestamp: t.start_timestamp = t.end_timestamp tj = self._create_job(tjc, machine, build_url, project, revision, build_type, build_abi, build_platform, build_sdk, builder_type, t) tj.add_state(TestState.COMPLETED) tj.add_result(t.status) tj.add_submit_timestamp(t.submit_timestamp) tj.add_start_timestamp(t.start_timestamp) tj.add_end_timestamp(t.end_timestamp) t.job_details.append({ 'value': os.path.basename(t.config_file), 'title': 'Config' }) t.job_details.append({ 'url': build_url, 'value': os.path.basename(build_url), 'title': 'Build' }) t.job_details.append({'value': utils.host(), 'title': 'Host'}) if t.passed + t.failed + t.todo > 0: if t.failed == 0: failed = '0' else: failed = '<em class="testfail">%s</em>' % t.failed t.job_details.append({ 'value': "%s/%s/%s" % (t.passed, failed, t.todo), 'title': "%s-%s" % (t.job_name, t.job_symbol) }) if hasattr(t, 'phonedash_url'): t.job_details.append({ 'url': t.phonedash_url, 'value': 'graph', 'title': 'phonedash' }) # Attach logs, ANRs, tombstones, etc. logurl = None if self.s3_bucket: # We must make certain that S3 keys for uploaded files # are unique. We can create a unique log_identifier as # follows: For Unittests, t.unittest_logpath's # basename contains a unique name based on the actual # Unittest name, chunk and machine id. For # Non-Unittests, the test classname, chunk and machine # id can be used. if t.unittest_logpath: log_identifier = os.path.splitext( os.path.basename(t.unittest_logpath))[0] else: log_identifier = "%s-%s-%s-%s" % ( t.name, os.path.basename( t.config_file), t.chunk, machine) # We must make certain the key is unique even in the # event of retries. log_identifier = '%s-%s' % (log_identifier, t.job_guid) key_prefix = os.path.dirname(urlparse.urlparse(build_url).path) key_prefix = re.sub('/tmp$', '', key_prefix) # Logcat fname = '%s-logcat.log' % log_identifier lname = 'logcat' key = "%s/%s" % (key_prefix, fname) with tempfile.NamedTemporaryFile(suffix='logcat.txt') as f: try: if self.worker.is_ok(): for line in t.worker_subprocess.logcat.get( full=True): f.write('%s\n' % line.encode('UTF-8', errors='replace')) t.worker_subprocess.logcat.reset() else: # Device is in an error state so we can't # get the full logcat but we can output # any logcat output we accumulated # previously. 
for line in t.worker_subprocess.logcat._accumulated_logcat: f.write('%s\n' % line.encode('UTF-8', errors='replace')) f.flush() except Exception, e: LOGGER.exception('Error reading logcat %s', fname) t.job_details.append({ 'value': 'Failed to read %s: %s' % (fname, e), 'title': 'Error' }) try: url = self.s3_bucket.upload(f.name, key) t.job_details.append({ 'url': url, 'value': lname, 'title': 'artifact uploaded' }) except S3Error, e: LOGGER.exception('Error uploading logcat %s', fname) t.job_details.append({ 'value': 'Failed to upload %s: %s' % (fname, e), 'title': 'Error' }) # Upload directory containing ANRs, tombstones and other items # to be uploaded. if t.upload_dir: for f in glob.glob(os.path.join(t.upload_dir, '*')): try: lname = os.path.basename(f) try: fname = '%s-%s' % (log_identifier, lname) except UnicodeDecodeError, e: LOGGER.exception( 'Ignoring artifact %s', lname.decode('utf-8', errors='replace')) continue url = self.s3_bucket.upload( f, "%s/%s" % (key_prefix, fname)) t.job_details.append({ 'url': url, 'value': lname, 'title': 'artifact uploaded' }) except S3Error, e: LOGGER.exception('Error uploading artifact %s', fname) t.job_details.append({ 'value': 'Failed to upload artifact %s: %s' % (fname, e), 'title': 'Error' })
def run_job(self): is_test_completed = False custom_addons = ['pageloader.xpi'] if not self.install_local_pages(): self.test_failure( self.name, 'TEST_UNEXPECTED_FAIL', 'Aborting test - Could not install local pages on phone.', PhoneTestResult.EXCEPTION) return is_test_completed if not self.create_profile(custom_addons=custom_addons): self.test_failure( self.name, 'TEST_UNEXPECTED_FAIL', 'Aborting test - Could not run Fennec.', PhoneTestResult.BUSTED) return is_test_completed is_test_completed = True testcount = len(self._test_args.keys()) test_items = enumerate(self._test_args.iteritems(), 1) for testnum, (testname, test_args) in test_items: if self.fennec_crashed: break self.loggerdeco = self.loggerdeco.clone( extradict={'phoneid': self.phone.id, 'buildid': self.build.id, 'testname': testname}, extraformat='%(phoneid)s|%(buildid)s|%(testname)s|%(message)s') self.dm._logger = self.loggerdeco self.loggerdeco.info('Running test (%d/%d)' % (testnum, testcount)) # success == False indicates that none of the attempts # were successful in getting any measurement. This is # typically due to a regression in the brower which should # be reported. success = False command = self.worker_subprocess.process_autophone_cmd( test=self, require_ip_address=testname.startswith('remote')) if command['interrupt']: is_test_completed = False self.handle_test_interrupt(command['reason'], command['test_result']) break self.update_status(message='Test %d/%d, for test_args %s' % (testnum, testcount, test_args)) if not self.create_profile(custom_addons=custom_addons): self.test_failure(test_args, 'TEST_UNEXPECTED_FAIL', 'Failed to create profile', PhoneTestResult.TESTFAILED) else: measurement = self.runtest(test_args) if measurement: if not self.perfherder_artifact: self.perfherder_artifact = PerfherderArtifact() suite = self.create_suite(measurement['pageload_metric'], testname) self.perfherder_artifact.add_suite(suite) self.test_pass(test_args) success = True else: self.test_failure( test_args, 'TEST_UNEXPECTED_FAIL', 'Failed to get measurement.', PhoneTestResult.TESTFAILED) if not success: # If we have not gotten a single measurement at this point, # just bail and report the failure rather than wasting time # continuing more attempts. self.loggerdeco.info( 'Failed to get measurements for test %s' % (testname)) self.worker_subprocess.mailer.send( '%s %s failed for Build %s %s on %s %s' % (self.__class__.__name__, testname, self.build.tree, self.build.id, host(), self.phone.id), 'No measurements were detected for test %s.\n\n' 'Job %s\n' 'Host %s\n' 'Phone %s\n' 'Repository %s\n' 'Build %s\n' 'Revision %s\n' % (testname, self.job_url, host(), self.phone.id, self.build.tree, self.build.id, self.build.revision)) self.test_failure(self.name, 'TEST_UNEXPECTED_FAIL', 'No measurements detected.', PhoneTestResult.BUSTED) self.loggerdeco.debug('publishing results') if command and command['interrupt']: break elif not success: break return is_test_completed
def run_job(self): is_test_completed = False custom_addons = ['pageloader.xpi'] if not self.install_local_pages(): self.add_failure( self.name, TestStatus.TEST_UNEXPECTED_FAIL, 'Aborting test - Could not install local pages on phone.', TreeherderStatus.EXCEPTION) return is_test_completed if not self.create_profile(custom_addons=custom_addons): self.add_failure(self.name, TestStatus.TEST_UNEXPECTED_FAIL, 'Aborting test - Could not run Fennec.', TreeherderStatus.BUSTED) return is_test_completed perfherder_options = PerfherderOptions(self.perfherder_options, self.build.tree) is_test_completed = True testcount = len(self._test_args.keys()) test_items = enumerate(self._test_args.iteritems(), 1) for testnum, (testname, test_args) in test_items: self.loggerdeco = self.loggerdeco.clone( extradict={ 'repo': self.build.tree, 'buildid': self.build.id, 'buildtype': self.build.type, 'sdk': self.phone.sdk, 'platform': self.build.platform, 'testname': testname }, extraformat= 'TalosTestJob %(repo)s %(buildid)s %(buildtype)s %(sdk)s %(platform)s %(testname)s %(message)s' ) self.dm._logger = self.loggerdeco self.loggerdeco.info('Running test (%d/%d)', testnum, testcount) # success == False indicates that none of the attempts # were successful in getting any measurement. This is # typically due to a regression in the brower which should # be reported. success = False command = self.worker_subprocess.process_autophone_cmd( test=self, require_ip_address=testname.startswith('remote')) if command['interrupt']: self.handle_test_interrupt(command['reason'], command['test_result']) break self.update_status(message='Test %d/%d, for test_args %s' % (testnum, testcount, test_args)) if not self.create_profile(custom_addons=custom_addons): self.add_failure(self.name, TestStatus.TEST_UNEXPECTED_FAIL, 'Failed to create profile', TreeherderStatus.TESTFAILED) else: measurement = self.runtest(test_args) if measurement: if not self.perfherder_artifact: self.perfherder_artifact = PerfherderArtifact() suite = self.create_suite(measurement['pageload_metric'], testname, options=perfherder_options) self.perfherder_artifact.add_suite(suite) self.add_pass(test_args) success = True else: self.add_failure(self.name, TestStatus.TEST_UNEXPECTED_FAIL, 'Failed to get measurement.', TreeherderStatus.TESTFAILED) if not success: # If we have not gotten a single measurement at this point, # just bail and report the failure rather than wasting time # continuing more attempts. self.loggerdeco.info('Failed to get measurements for test %s', testname) self.worker_subprocess.mailer.send( '%s %s failed for Build %s %s on %s %s' % (self.__class__.__name__, testname, self.build.tree, self.build.id, host(), self.phone.id), 'No measurements were detected for test %s.\n\n' 'Job %s\n' 'Host %s\n' 'Phone %s\n' 'Repository %s\n' 'Build %s\n' 'Revision %s\n' % (testname, self.job_url, host(), self.phone.id, self.build.tree, self.build.id, self.build.changeset)) self.add_failure(self.name, TestStatus.TEST_UNEXPECTED_FAIL, 'No measurements detected.', TreeherderStatus.BUSTED) self.loggerdeco.debug('publishing results') if command and command['interrupt']: break elif not success: break return is_test_completed
def run_job(self): is_test_completed = False if not self.install_local_pages(): self.add_failure( self.name, TestStatus.TEST_UNEXPECTED_FAIL, 'Aborting test - Could not install local pages on phone.', TreeherderStatus.EXCEPTION) return is_test_completed if not self.create_profile(): self.add_failure(self.name, TestStatus.TEST_UNEXPECTED_FAIL, 'Aborting test - Could not run Fennec.', TreeherderStatus.BUSTED) return is_test_completed perfherder_options = PerfherderOptions(self.perfherder_options, repo=self.build.tree) is_test_completed = True testcount = len(self._urls.keys()) for testnum, (testname, url) in enumerate(self._urls.iteritems(), 1): self.loggerdeco = self.loggerdeco.clone( extradict={ 'phoneid': self.phone.id, 'buildid': self.build.id, 'testname': testname }, extraformat= 'S1S2TestJob|%(phoneid)s|%(buildid)s|%(testname)s|%(message)s') self.dm._logger = self.loggerdeco self.loggerdeco.info('Running test (%d/%d) for %d iterations', testnum, testcount, self._iterations) command = None for attempt in range(1, self.stderrp_attempts + 1): # dataset is a list of the measurements made for the # iterations for this test. # # An empty item in the dataset list represents a # failure to obtain any measurement for that # iteration. # # It is possible for an item in the dataset to have an # uncached value and not have a corresponding cached # value if the cached test failed to record the # values. iteration = 0 dataset = [] for iteration in range(1, self._iterations + 1): # Calling svc power stayon true will turn on the # display for at least some devices if it has # turned off. self.dm.power_on() command = self.worker_subprocess.process_autophone_cmd( test=self, require_ip_address=url.startswith('http')) if command['interrupt']: self.handle_test_interrupt(command['reason'], command['test_result']) break self.update_status(message='Attempt %d/%d for Test %d/%d, ' 'run %d, for url %s' % (attempt, self.stderrp_attempts, testnum, testcount, iteration, url)) if not self.create_profile(): self.add_failure(self.name, TestStatus.TEST_UNEXPECTED_FAIL, 'Failed to create profile', TreeherderStatus.TESTFAILED) continue measurement = self.runtest(url) if not measurement: self.loggerdeco.warning( '%s %s Attempt %s Failed to get uncached measurement.', testname, url, attempt) continue self.add_pass(url) dataset.append({'uncached': measurement}) measurement = self.runtest(url) if not measurement: self.loggerdeco.warning( '%s %s Attempt %s Failed to get cached measurement.', testname, url, attempt) continue self.add_pass(url) dataset[-1]['cached'] = measurement if self.is_stderr_below_threshold( ('throbberstart', 'throbberstop'), dataset, self.stderrp_accept): self.loggerdeco.info( 'Accepted test (%d/%d) after %d of %d iterations', testnum, testcount, iteration, self._iterations) break if command and command['interrupt']: break measurements = len(dataset) if measurements > 0 and self._iterations != measurements: self.add_failure(self.name, TestStatus.TEST_UNEXPECTED_FAIL, 'Failed to get all measurements', TreeherderStatus.TESTFAILED) elif measurements == 0: # If we have not gotten a single measurement at this point, # just bail and report the failure rather than wasting time # continuing more attempts. 
self.add_failure(self.name, TestStatus.TEST_UNEXPECTED_FAIL, 'No measurements detected.', TreeherderStatus.BUSTED) self.loggerdeco.info( 'Failed to get measurements for test %s after %d/%d attempt ' 'of %d iterations', testname, attempt, self.stderrp_attempts, self._iterations) self.worker_subprocess.mailer.send( '%s %s failed for Build %s %s on %s %s' % (self.__class__.__name__, testname, self.build.tree, self.build.id, utils.host(), self.phone.id), 'No measurements were detected for test %s.\n\n' 'Job %s\n' 'Host %s\n' 'Phone %s\n' 'Repository %s\n' 'Build %s\n' 'Revision %s\n' % (testname, self.job_url, utils.host(), self.phone.id, self.build.tree, self.build.id, self.build.changeset)) break if self.is_stderr_below_threshold( ('throbberstart', 'throbberstop'), dataset, self.stderrp_reject): rejected = False else: rejected = True self.loggerdeco.info( 'Rejected test (%d/%d) after %d/%d iterations', testnum, testcount, iteration, self._iterations) self.loggerdeco.debug('publishing results') perfherder_values = {'geometric_mean': 0} metric_keys = ['throbberstart', 'throbberstop', 'throbbertime'] cache_names = {'uncached': 'first', 'cached': 'second'} cache_keys = cache_names.keys() for metric_key in metric_keys: perfherder_values[metric_key] = {'geometric_mean': 0} for cache_key in cache_keys: perfherder_values[metric_key][cache_key] = { 'median': 0, 'values': [] } for datapoint in dataset: for cache_key in datapoint: starttime = datapoint[cache_key]['starttime'] throbberstart = datapoint[cache_key]['throbberstart'] throbberstop = datapoint[cache_key]['throbberstop'] self.report_results( starttime=starttime, tstrt=throbberstart, tstop=throbberstop, testname=testname, cache_enabled=(cache_key == 'cached'), rejected=rejected) perfherder_values['throbberstart'][cache_key][ 'values'].append(throbberstart - starttime) perfherder_values['throbberstop'][cache_key][ 'values'].append(throbberstop - starttime) perfherder_values['throbbertime'][cache_key][ 'values'].append(throbberstop - throbberstart) test_values = [] for metric_key in metric_keys: for cache_key in cache_keys: perfherder_values[metric_key][cache_key][ 'median'] = utils.median( perfherder_values[metric_key][cache_key] ['values']) perfherder_values[metric_key][ 'geometric_mean'] = utils.geometric_mean([ perfherder_values[metric_key]['uncached'] ['median'], perfherder_values[metric_key]['cached']['median'] ]) test_values.append( perfherder_values[metric_key]['geometric_mean']) perfherder_suite = PerfherderSuite( name=testname, value=utils.geometric_mean(test_values), options=perfherder_options) for metric_key in metric_keys: for cache_key in cache_keys: cache_name = cache_names[cache_key] subtest_name = "%s %s" % (metric_key, cache_name) perfherder_suite.add_subtest( subtest_name, perfherder_values[metric_key][cache_key]['median'], options=perfherder_options) self.perfherder_artifact = PerfherderArtifact() self.perfherder_artifact.add_suite(perfherder_suite) self.loggerdeco.debug("PerfherderArtifact: %s", self.perfherder_artifact) if not rejected: break if command and command['interrupt']: break return is_test_completed
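# The suite value above is the geometric mean of the per-metric geometric
# means of the uncached/cached medians. If equivalents for utils.median and
# utils.geometric_mean are needed, they look like this; a sketch, the real
# utils module may differ.
import math


def median(values):
    # Middle value, averaging the two central items for even-length input.
    values = sorted(values)
    n = len(values)
    mid = n // 2
    if n % 2:
        return values[mid]
    return (values[mid - 1] + values[mid]) / 2.0


def geometric_mean(values):
    # exp of the mean of the logs; defined for positive values only.
    return math.exp(sum(math.log(v) for v in values) / len(values))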
def publish_results(self, starttime=0, tstrt=0, tstop=0, testname='', cache_enabled=True, rejected=False): # Create JSON to send to webserver author = None if self.build.tree == 'try': rev_json_url = self.build.changeset.replace('/rev/', '/json-rev/') rev_json = utils.get_remote_json(rev_json_url) if rev_json: author = rev_json['pushuser'] blddate = float(convert_datetime_to_string(self.build.date, TIMESTAMP)) self.loggerdeco.debug('publish_results: build.id: %s, build.date: %s, blddate: %s' % ( self.build.id, self.build.date, blddate)) resultdata = { 'phoneid': self.phone.id, 'testname': testname, 'starttime': starttime, 'throbberstart': tstrt, 'throbberstop': tstop, 'blddate': blddate, 'cached': cache_enabled, 'rejected': rejected, 'revision': self.build.changeset, 'author': author, 'productname': self.build.app_name, 'productversion': self.build.version, 'osver': self.phone.osver, 'bldtype': self.build.type, 'machineid': self.phone.machinetype } result = {'data': resultdata} # Upload if self._signer: encoded_result = jwt.encode(result, signer=self._signer) content_type = 'application/jwt' else: encoded_result = json.dumps(result) content_type = 'application/json; charset=utf-8' req = urllib2.Request(self._resulturl + 'add/', encoded_result, {'Content-Type': content_type}) max_attempts = 10 wait_time = 10 for attempt in range(1, max_attempts+1): try: f = urllib2.urlopen(req) f.read() f.close() return except Exception, e: # Retry submission if the exception is due to a # timeout and if we haven't exceeded the maximum # number of attempts. if attempt < max_attempts: self.loggerdeco.warning('PerfTest.publish_results: ' 'Attempt %d/%d error %s sending ' 'results to server' % ( attempt, max_attempts, e)) time.sleep(wait_time) continue self.loggerdeco.exception('Error sending results to server') self.worker_subprocess.mailer.send( '%s attempt %s/%s Error sending %s results for phone %s, ' 'build %s' % (utils.host(), attempt, max_attempts, self.name, self.phone.id, self.build.id), 'There was an error attempting to send test results ' 'to the result server %s.\n' '\n' 'Host %s\n' 'Job %s\n' 'Test %s\n' 'Phone %s\n' 'Repository %s\n' 'Build %s\n' 'Revision %s\n' 'Exception %s\n' 'Result %s\n' % (self.result_server, utils.host(), self.job_url, self.name, self.phone.id, self.build.tree, self.build.id, self.build.changeset, e, json.dumps(resultdata, sort_keys=True, indent=2))) message = 'Error sending results to phonedash server' self.add_failure(self.name, TestStatus.TEST_UNEXPECTED_FAIL, message, TreeherderStatus.EXCEPTION)
def submit_complete(self, machine, build_url, project, revision_hash, tests=None): """Submit test results for the worker's current job to Treeherder. :param machine: machine id :param build_url: url to build being tested. :param project: repository of build. :param revision_hash: Treeherder revision hash of build. :param tests: Lists of tests to be reported. """ logger.debug('AutophoneTreeherder.submit_complete: %s' % tests) if not self.url or not revision_hash: logger.debug('AutophoneTreeherder.submit_complete: no url/revision hash') return tjc = TreeherderJobCollection() for t in tests: logger.debug('AutophoneTreeherder.submit_complete ' 'for %s %s' % (t.name, project)) t.job_details.append({ 'value': os.path.basename(t.config_file), 'content_type': 'text', 'title': 'Config'}) t.job_details.append({ 'url': build_url, 'value': os.path.basename(build_url), 'content_type': 'link', 'title': 'Build'}) t.job_details.append({ 'value': utils.host(), 'content_type': 'text', 'title': 'Host'}) t.end_timestamp = timestamp_now() # A usercancelled job may not have a start_timestamp # since it may have been cancelled before it started. if not t.start_timestamp: t.start_timestamp = t.end_timestamp if t.test_result.failed == 0: failed = '0' else: failed = '<em class="testfail">%s</em>' % t.test_result.failed t.job_details.append({ 'value': "%s/%s/%s" % (t.test_result.passed, failed, t.test_result.todo), 'content_type': 'raw_html', 'title': "%s-%s" % (t.job_name, t.job_symbol) }) if hasattr(t, 'phonedash_url'): t.job_details.append({ 'url': t.phonedash_url, 'value': 'graph', 'content_type': 'link', 'title': 'phonedash' }) tj = tjc.get_job() # Attach logs, ANRs, tombstones, etc. logurl = None logname = None if self.s3_bucket: # We must make certain that S3 keys for uploaded files # are unique. We can create a unique log_identifier as # follows: For Unittests, t.unittest_logpath's # basename contains a unique name based on the actual # Unittest name, chunk and machine id. For # Non-Unittests, the test classname, chunk and machine # id can be used. if t.unittest_logpath: log_identifier = os.path.splitext(os.path.basename( t.unittest_logpath))[0] else: log_identifier = "%s-%s-%s-%s" % ( t.name, os.path.basename(t.config_file), t.chunk, machine) # We must make certain the key is unique even in the # event of retries. log_identifier = '%s-%s' % (log_identifier, t.job_guid) key_prefix = os.path.dirname( urlparse.urlparse(build_url).path) key_prefix = re.sub('/tmp$', '', key_prefix) # Logcat fname = '%s-logcat.log' % log_identifier lname = 'logcat' key = "%s/%s" % (key_prefix, fname) with tempfile.NamedTemporaryFile(suffix='logcat.txt') as f: try: if self.worker.is_ok(): for line in t.logcat.get(full=True): f.write('%s\n' % line.encode('UTF-8', errors='replace')) t.logcat.reset() else: # Device is in an error state so we can't # get the full logcat but we can output # any logcat output we accumulated # previously. 
for line in t.logcat._accumulated_logcat: f.write('%s\n' % line.encode('UTF-8', errors='replace')) except Exception, e: logger.exception('Error reading logcat %s' % fname) t.job_details.append({ 'value': 'Failed to read %s: %s' % (fname, e), 'content_type': 'text', 'title': 'Error'}) try: url = self.s3_bucket.upload(f.name, key) t.job_details.append({ 'url': url, 'value': lname, 'content_type': 'link', 'title': 'artifact uploaded'}) except S3Error, e: logger.exception('Error uploading logcat %s' % fname) t.job_details.append({ 'value': 'Failed to upload %s: %s' % (fname, e), 'content_type': 'text', 'title': 'Error'}) # Upload directory containing ANRs, tombstones and other items # to be uploaded. if t.upload_dir: for f in glob.glob(os.path.join(t.upload_dir, '*')): try: lname = os.path.basename(f) fname = '%s-%s' % (log_identifier, lname) url = self.s3_bucket.upload(f, "%s/%s" % ( key_prefix, fname)) t.job_details.append({ 'url': url, 'value': lname, 'content_type': 'link', 'title': 'artifact uploaded'}) except S3Error, e: logger.exception('Error uploading artifact %s' % fname) t.job_details.append({ 'value': 'Failed to upload artifact %s: %s' % (fname, e), 'content_type': 'text', 'title': 'Error'})
def run_job(self): is_test_completed = False if not self.install_local_pages(): self.test_failure( self.name, 'TEST_UNEXPECTED_FAIL', 'Aborting test - Could not install local pages on phone.', PhoneTestResult.EXCEPTION) return is_test_completed if not self.create_profile(): self.test_failure( self.name, 'TEST_UNEXPECTED_FAIL', 'Aborting test - Could not run Fennec.', PhoneTestResult.BUSTED) return is_test_completed is_test_completed = True testcount = len(self._urls.keys()) for testnum,(testname,url) in enumerate(self._urls.iteritems(), 1): if self.fennec_crashed: break self.loggerdeco = self.loggerdeco.clone( extradict={'phoneid': self.phone.id, 'buildid': self.build.id, 'testname': testname}, extraformat='%(phoneid)s|%(buildid)s|%(testname)s|%(message)s') self.dm._logger = self.loggerdeco self.loggerdeco.info('Running test (%d/%d) for %d iterations' % (testnum, testcount, self._iterations)) # success == False indicates that none of the attempts # were successful in getting any measurement. This is # typically due to a regression in the brower which should # be reported. success = False command = None for attempt in range(1, self.stderrp_attempts+1): if self.fennec_crashed: break # dataset is a list of the measurements made for the # iterations for this test. # # An empty item in the dataset list represents a # failure to obtain any measurement for that # iteration. # # It is possible for an item in the dataset to have an # uncached value and not have a corresponding cached # value if the cached test failed to record the # values. dataset = [] for iteration in range(1, self._iterations+1): # Calling svc power stayon true will turn on the # display for at least some devices if it has # turned off. self.dm.power_on() command = self.worker_subprocess.process_autophone_cmd( test=self, require_ip_address=url.startswith('http')) if command['interrupt']: is_test_completed = False self.handle_test_interrupt(command['reason'], command['test_result']) break if self.fennec_crashed: break self.update_status(message='Attempt %d/%d for Test %d/%d, ' 'run %d, for url %s' % (attempt, self.stderrp_attempts, testnum, testcount, iteration, url)) dataset.append({}) if not self.create_profile(): self.test_failure(url, 'TEST_UNEXPECTED_FAIL', 'Failed to create profile', PhoneTestResult.TESTFAILED) continue measurement = self.runtest(url) if measurement: self.test_pass(url) else: self.test_failure( url, 'TEST_UNEXPECTED_FAIL', 'Failed to get uncached measurement.', PhoneTestResult.TESTFAILED) continue dataset[-1]['uncached'] = measurement success = True measurement = self.runtest(url) if measurement: self.test_pass(url) else: self.test_failure( url, 'TEST_UNEXPECTED_FAIL', 'Failed to get cached measurement.', PhoneTestResult.TESTFAILED) continue dataset[-1]['cached'] = measurement if self.is_stderr_below_threshold( ('throbberstart', 'throbberstop'), dataset, self.stderrp_accept): self.loggerdeco.info( 'Accepted test (%d/%d) after %d of %d iterations' % (testnum, testcount, iteration, self._iterations)) break if command and command['interrupt']: break if not success: # If we have not gotten a single measurement at this point, # just bail and report the failure rather than wasting time # continuing more attempts. 
self.loggerdeco.info( 'Failed to get measurements for test %s after %d/%d attempt ' 'of %d iterations' % (testname, attempt, self.stderrp_attempts, self._iterations)) self.worker_subprocess.mailer.send( '%s %s failed for Build %s %s on %s %s' % (self.__class__.__name__, testname, self.build.tree, self.build.id, utils.host(), self.phone.id), 'No measurements were detected for test %s.\n\n' 'Job %s\n' 'Host %s\n' 'Phone %s\n' 'Repository %s\n' 'Build %s\n' 'Revision %s\n' % (testname, self.job_url, utils.host(), self.phone.id, self.build.tree, self.build.id, self.build.revision)) self.test_failure(self.name, 'TEST_UNEXPECTED_FAIL', 'No measurements detected.', PhoneTestResult.BUSTED) break if self.is_stderr_below_threshold( ('throbberstart', 'throbberstop'), dataset, self.stderrp_reject): rejected = False else: rejected = True self.loggerdeco.info( 'Rejected test (%d/%d) after %d/%d iterations' % (testnum, testcount, iteration, self._iterations)) self.loggerdeco.debug('publishing results') for datapoint in dataset: for cachekey in datapoint: self.report_results( starttime=datapoint[cachekey]['starttime'], tstrt=datapoint[cachekey]['throbberstart'], tstop=datapoint[cachekey]['throbberstop'], testname=testname, cache_enabled=(cachekey=='cached'), rejected=rejected) if not rejected: break if command and command['interrupt']: break elif not success: break return is_test_completed
def run_job(self): is_test_completed = False if not self.install_local_pages(): self.add_failure( self.name, TestStatus.TEST_UNEXPECTED_FAIL, 'Aborting test - Could not install local pages on phone.', TreeherderStatus.EXCEPTION) return is_test_completed if not self.create_profile(): self.add_failure( self.name, TestStatus.TEST_UNEXPECTED_FAIL, 'Aborting test - Could not run Fennec.', TreeherderStatus.BUSTED) return is_test_completed perfherder_options = PerfherderOptions(self.perfherder_options, repo=self.build.tree) is_test_completed = True testcount = len(self._urls.keys()) for testnum, (testname, url) in enumerate(self._urls.iteritems(), 1): self.loggerdeco = self.loggerdeco.clone( extradict={ 'repo': self.build.tree, 'buildid': self.build.id, 'buildtype': self.build.type, 'sdk': self.phone.sdk, 'platform': self.build.platform, 'testname': testname }, extraformat='S1S2TestJob %(repo)s %(buildid)s %(buildtype)s %(sdk)s %(platform)s %(testname)s %(message)s') self.dm._logger = self.loggerdeco self.loggerdeco.info('Running test (%d/%d) for %d iterations', testnum, testcount, self._iterations) command = None for attempt in range(1, self.stderrp_attempts+1): # dataset is a list of the measurements made for the # iterations for this test. # # An empty item in the dataset list represents a # failure to obtain any measurement for that # iteration. # # It is possible for an item in the dataset to have an # uncached value and not have a corresponding cached # value if the cached test failed to record the # values. iteration = 0 dataset = [] for iteration in range(1, self._iterations+1): # Calling svc power stayon true will turn on the # display for at least some devices if it has # turned off. self.dm.power_on() command = self.worker_subprocess.process_autophone_cmd( test=self, require_ip_address=url.startswith('http')) if command['interrupt']: self.handle_test_interrupt(command['reason'], command['test_result']) break self.update_status(message='Attempt %d/%d for Test %d/%d, ' 'run %d, for url %s' % (attempt, self.stderrp_attempts, testnum, testcount, iteration, url)) if not self.create_profile(): self.add_failure( self.name, TestStatus.TEST_UNEXPECTED_FAIL, 'Failed to create profile', TreeherderStatus.TESTFAILED) continue measurement = self.runtest(url) if not measurement: self.loggerdeco.warning( '%s %s Attempt %s Failed to get uncached measurement.', testname, url, attempt) continue self.add_pass(url, text='uncached') dataset.append({'uncached': measurement}) measurement = self.runtest(url) if not measurement: self.loggerdeco.warning( '%s %s Attempt %s Failed to get cached measurement.', testname, url, attempt) continue self.add_pass(url, text='cached') dataset[-1]['cached'] = measurement if self.is_stderr_below_threshold( ('throbberstart', 'throbberstop'), dataset, self.stderrp_accept): self.loggerdeco.info( 'Accepted test (%d/%d) after %d of %d iterations', testnum, testcount, iteration, self._iterations) break if command and command['interrupt']: break measurements = len(dataset) if measurements > 0 and self._iterations != measurements: self.add_failure( self.name, TestStatus.TEST_UNEXPECTED_FAIL, 'Failed to get all measurements', TreeherderStatus.TESTFAILED) elif measurements == 0: # If we have not gotten a single measurement at this point, # just bail and report the failure rather than wasting time # continuing more attempts. 
self.add_failure( self.name, TestStatus.TEST_UNEXPECTED_FAIL, 'No measurements detected.', TreeherderStatus.BUSTED) self.loggerdeco.info( 'Failed to get measurements for test %s after %d/%d attempt ' 'of %d iterations', testname, attempt, self.stderrp_attempts, self._iterations) self.worker_subprocess.mailer.send( '%s %s failed for Build %s %s on %s %s' % (self.__class__.__name__, testname, self.build.tree, self.build.id, utils.host(), self.phone.id), 'No measurements were detected for test %s.\n\n' 'Job %s\n' 'Host %s\n' 'Phone %s\n' 'Repository %s\n' 'Build %s\n' 'Revision %s\n' % (testname, self.job_url, utils.host(), self.phone.id, self.build.tree, self.build.id, self.build.changeset)) break if self.is_stderr_below_threshold( ('throbberstart', 'throbberstop'), dataset, self.stderrp_reject): rejected = False else: rejected = True self.loggerdeco.info( 'Rejected test (%d/%d) after %d/%d iterations', testnum, testcount, iteration, self._iterations) self.loggerdeco.debug('publishing results') perfherder_values = {'geometric_mean': 0} metric_keys = ['throbberstart', 'throbberstop', 'throbbertime'] cache_names = {'uncached': 'first', 'cached': 'second'} cache_keys = cache_names.keys() for metric_key in metric_keys: perfherder_values[metric_key] = {'geometric_mean': 0} for cache_key in cache_keys: perfherder_values[metric_key][cache_key] = {'median': 0, 'values': []} for datapoint in dataset: for cache_key in datapoint: starttime = datapoint[cache_key]['starttime'] throbberstart = datapoint[cache_key]['throbberstart'] throbberstop = datapoint[cache_key]['throbberstop'] self.report_results( starttime=starttime, tstrt=throbberstart, tstop=throbberstop, testname=testname, cache_enabled=(cache_key == 'cached'), rejected=rejected) perfherder_values['throbberstart'][cache_key]['values'].append( throbberstart - starttime) perfherder_values['throbberstop'][cache_key]['values'].append( throbberstop - starttime) perfherder_values['throbbertime'][cache_key]['values'].append( throbberstop - throbberstart) test_values = [] for metric_key in metric_keys: for cache_key in cache_keys: perfherder_values[metric_key][cache_key]['median'] = utils.median( perfherder_values[metric_key][cache_key]['values']) perfherder_values[metric_key]['geometric_mean'] = utils.geometric_mean( [perfherder_values[metric_key]['uncached']['median'], perfherder_values[metric_key]['cached']['median']]) test_values.append(perfherder_values[metric_key]['geometric_mean']) perfherder_suite = PerfherderSuite(name=testname, value=utils.geometric_mean(test_values), options=perfherder_options) for metric_key in metric_keys: for cache_key in cache_keys: cache_name = cache_names[cache_key] subtest_name = "%s %s" % (metric_key, cache_name) perfherder_suite.add_subtest( subtest_name, perfherder_values[metric_key][cache_key]['median'], options=perfherder_options) self.perfherder_artifact = PerfherderArtifact() self.perfherder_artifact.add_suite(perfherder_suite) self.loggerdeco.debug("PerfherderArtifact: %s", self.perfherder_artifact) if not rejected: break if command and command['interrupt']: break return is_test_completed
def run_job(self): self.loggerdeco.info('Running test for %d iterations', self._iterations) # success == False indicates that none of the attempts # were successful in getting any measurement. This is # typically due to a regression in the brower which should # be reported. success = False command = None is_test_completed = True for attempt in range(1, self.stderrp_attempts + 1): # dataset is a list of the measurements made for the # iterations for this test. # # An empty item in the dataset list represents a # failure to obtain any measurement for that # iteration. # # It is possible for an item in the dataset to have an # uncached value and not have a corresponding cached # value if the cached test failed to record the # values. iteration = 0 dataset = [] for iteration in range(1, self._iterations + 1): command = self.worker_subprocess.process_autophone_cmd( test=self) if command['interrupt']: is_test_completed = False self.handle_test_interrupt(command['reason'], command['test_result']) break self.update_status( message='Attempt %d/%d for Test %s, ' 'run %d' % (attempt, self.stderrp_attempts, self.testname, iteration)) dataset.append({}) if not self.install_webappstartup(): self.update_status( message='Attempt %d/%d for Test %s, ' 'run %d failed to install webappstartup' % (attempt, self.stderrp_attempts, self.testname, iteration)) self.add_failure(self.name, 'TEST-UNEXPECTED-FAIL', 'Failed to get uncached measurement.', PhoneTest.TESTFAILED) continue if not self.create_profile(): self.add_failure(self.name, 'TEST-UNEXPECTED-FAIL', 'Failed to create profile', PhoneTest.TESTFAILED) continue measurement = self.runtest() if measurement: self.add_pass(self.name) else: self.add_failure(self.name, 'TEST-UNEXPECTED-FAIL', 'Failed to get uncached measurement.', PhoneTest.TESTFAILED) continue dataset[-1]['uncached'] = measurement success = True measurement = self.runtest() if measurement: self.add_pass(self.name) else: self.add_failure(self.name, 'TEST-UNEXPECTED-FAIL', 'Failed to get cached measurement.', PhoneTest.TESTFAILED) continue dataset[-1]['cached'] = measurement if self.is_stderr_below_threshold( ('chrome_time', 'startup_time'), dataset, self.stderrp_accept): self.loggerdeco.info( 'Accepted test %s after %d of %d iterations', self.testname, iteration, self._iterations) break if command and command['interrupt']: break if not success: # If we have not gotten a single measurement at this point, # just bail and report the failure rather than wasting time # continuing more attempts. 
self.loggerdeco.info( 'Failed to get measurements for test %s after %d/%d attempt ' 'of %d iterations', self.testname, attempt, self.stderrp_attempts, self._iterations) self.worker_subprocess.mailer.send( 'Webappstartup test failed for Build %s %s on %s %s' % (self.build.tree, self.build.id, utils.host(), self.phone.id), 'No measurements were detected for test webappstartup.\n\n' 'Job %s\n' 'Host %s\n' 'Phone %s\n' 'Repository %s\n' 'Build %s\n' 'Revision %s\n' % (self.job_url, utils.host(), self.phone.id, self.build.tree, self.build.id, self.build.changeset)) self.add_failure(self.name, 'TEST-UNEXPECTED-FAIL', 'No measurements detected.', PhoneTest.BUSTED) break if self.is_stderr_below_threshold(('chrome_time', 'startup_time'), dataset, self.stderrp_reject): rejected = False else: rejected = True self.loggerdeco.info('Rejected test %s after %d/%d iterations', self.testname, iteration, self._iterations) self.loggerdeco.debug('publishing results') for datapoint in dataset: for cachekey in datapoint: self.report_results( starttime=datapoint[cachekey]['starttime'], tstrt=datapoint[cachekey]['chrome_time'], tstop=datapoint[cachekey]['startup_time'], testname=self.testname, cache_enabled=(cachekey == 'cached'), rejected=rejected) if not rejected: break return is_test_completed
def run_job(self): self.testname = 'webappstartup' self.loggerdeco.info('Running test for %d iterations' % self._iterations) # success == False indicates that none of the attempts # were successful in getting any measurement. This is # typically due to a regression in the brower which should # be reported. success = False command = None is_test_completed = True for attempt in range(1, self.stderrp_attempts+1): # dataset is a list of the measurements made for the # iterations for this test. # # An empty item in the dataset list represents a # failure to obtain any measurement for that # iteration. # # It is possible for an item in the dataset to have an # uncached value and not have a corresponding cached # value if the cached test failed to record the # values. dataset = [] for iteration in range(1, self._iterations+1): command = self.worker_subprocess.process_autophone_cmd(test=self) if command['interrupt']: is_test_completed = False self.handle_test_interrupt(command['reason'], command['test_result']) break self.update_status(message='Attempt %d/%d for Test %s, ' 'run %d' % (attempt, self.stderrp_attempts, self.testname, iteration)) dataset.append({}) if not self.install_webappstartup(): self.update_status(message='Attempt %d/%d for Test %s, ' 'run %d failed to install webappstartup' % (attempt, self.stderrp_attempts, self.testname, iteration)) self.test_failure( self.name, 'TEST_UNEXPECTED_FAIL', 'Failed to get uncached measurement.', PhoneTestResult.TESTFAILED) continue if not self.create_profile(): self.test_failure(self.name, 'TEST_UNEXPECTED_FAIL', 'Failed to create profile', PhoneTestResult.TESTFAILED) continue measurement = self.runtest() if measurement: self.test_pass(self.name) else: self.test_failure( self.name, 'TEST_UNEXPECTED_FAIL', 'Failed to get uncached measurement.', PhoneTestResult.TESTFAILED) continue dataset[-1]['uncached'] = measurement success = True measurement = self.runtest() if measurement: self.test_pass(self.name) else: self.test_failure( self.name, 'TEST_UNEXPECTED_FAIL', 'Failed to get cached measurement.', PhoneTestResult.TESTFAILED) continue dataset[-1]['cached'] = measurement if self.is_stderr_below_threshold( ('chrome_time', 'startup_time'), dataset, self.stderrp_accept): self.loggerdeco.info( 'Accepted test %s after %d of %d iterations' % (self.testname, iteration, self._iterations)) break if command and command['interrupt']: break if not success: # If we have not gotten a single measurement at this point, # just bail and report the failure rather than wasting time # continuing more attempts. 
self.loggerdeco.info( 'Failed to get measurements for test %s after %d/%d attempt ' 'of %d iterations' % (self.testname, attempt, self.stderrp_attempts, self._iterations)) self.worker_subprocess.mailer.send( 'Webappstartup test failed for Build %s %s on %s %s' % (self.build.tree, self.build.id, utils.host(), self.phone.id), 'No measurements were detected for test webappstartup.\n\n' 'Job %s\n' 'Host %s\n' 'Phone %s\n' 'Repository %s\n' 'Build %s\n' 'Revision %s\n' % (self.job_url, utils.host(), self.phone.id, self.build.tree, self.build.id, self.build.revision)) self.test_failure(self.name, 'TEST_UNEXPECTED_FAIL', 'No measurements detected.', PhoneTestResult.BUSTED) break if self.is_stderr_below_threshold( ('chrome_time', 'startup_time'), dataset, self.stderrp_reject): rejected = False else: rejected = True self.loggerdeco.info( 'Rejected test %s after %d/%d iterations' % (self.testname, iteration, self._iterations)) self.loggerdeco.debug('publishing results') for datapoint in dataset: for cachekey in datapoint: self.report_results( starttime=datapoint[cachekey]['starttime'], tstrt=datapoint[cachekey]['chrome_time'], tstop=datapoint[cachekey]['startup_time'], testname=self.testname, cache_enabled=(cachekey=='cached'), rejected=rejected) if not rejected: break return is_test_completed
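# is_stderr_below_threshold accepts or rejects a run by comparing the
# standard error of the mean, expressed as a percentage of the mean, against
# a threshold (stderrp_accept / stderrp_reject). A sketch of that
# computation; the dataset walk below is an assumption about the real
# helper, which may differ.
import math


def stderrp(values):
    # Standard error of the mean as a percentage of the mean.
    n = len(values)
    mean = sum(values) / float(n)
    variance = sum((v - mean) ** 2 for v in values) / float(n - 1)
    return 100.0 * math.sqrt(variance / n) / mean


def is_below_threshold(dataset, keys, percent):
    for cache_key in ('uncached', 'cached'):
        for key in keys:
            values = [d[cache_key][key] for d in dataset if cache_key in d]
            if len(values) > 1 and stderrp(values) > percent:
                return False
    return True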