def analyze_logcat(self):
    """Parse the tcheck3 report from logcat and compute a pageload metric.

    Expected logcat output contains lines of the form::

        __start_report12.853116__end_report

    We will parse the syntax here and build up a {name:[value,],} hash.
    Next we will compute the median value for each name.  Finally we
    will report the geometric mean of all of the median values.

    :return: dict with a 'summary' key (geometric mean, 0 if no metric
        was found) and a 'tcheck3' key (median of the collected values).
    """
    self.loggerdeco.debug('analyzing logcat')
    # Raw string so the \. escape in the character class is not an
    # (invalid in py3) string escape.
    re_data = re.compile(r'.*__start_report([0-9\.]+)__end_report.*')
    attempt = 1
    max_time = 90   # maximum time to wait for completeness score
    wait_time = 3   # time to wait between attempts
    # Integer division: identical to / in py2 for positive ints, and
    # keeps max_attempts an int under py3.
    max_attempts = max_time // wait_time
    results = {"tcheck3": []}
    pageload_metric = {'summary': 0}
    while attempt <= max_attempts and pageload_metric['summary'] == 0:
        buf = self.logcat.get()
        for line in buf:
            match = re_data.match(line)
            if match:
                numbers = match.group(1)
                if numbers:
                    results["tcheck3"].append(float(numbers))
        if self.fennec_crashed:
            # If fennec crashed, don't bother looking for pageload metric
            break
        if pageload_metric['summary'] == 0:
            sleep(wait_time)
            attempt += 1
        if not results["tcheck3"]:
            continue
        # calculate score
        data = results["tcheck3"]
        pageload_metric["tcheck3"] = median(data)
        pageload_metric['summary'] = geometric_mean(data)
    if pageload_metric['summary'] == 0:
        # warning (not info) for consistency with the other
        # analyze_logcat variants in this file.
        self.loggerdeco.warning('Unable to find pageload metric')
    # Lazy %-args instead of eager string interpolation.
    self.loggerdeco.info('returning from logcat analyze with: %s',
                         pageload_metric)
    return pageload_metric
def analyze_logcat(self):
    """Parse the Talos tp report from logcat and compute a pageload metric.

    Expected logcat output::

        I/GeckoDump( 2284): __start_tp_report
        I/GeckoDump( 2284): _x_x_mozilla_page_load
        I/GeckoDump( 2284): _x_x_mozilla_page_load_details
        I/GeckoDump( 2284): |i|pagename|runs|
        I/GeckoDump( 2284): |0;amazon.com/www.amazon.com/index.html;2386;1146
        I/GeckoDump( 2284): |1;m.yahoo.co.jp/www.yahoo.co.jp/index.html;1724;901
        I/GeckoDump( 2284): |2;m.accuweather.com/www.accuweather.com/index.html;228;231
        I/GeckoDump( 2284): |3;m.yandex.ru/www.yandex.ru/index.html;6043;2984
        I/GeckoDump( 2284): |4;m.wikipedia.com/en.m.wikipedia.org/index.html;734;385
        I/GeckoDump( 2284): |5;m.espn.com/m.espn.go.com/index.html;576;419
        I/GeckoDump( 2284): |6;m.bbc.co.uk/www.bbc.co.uk/mobile/index.html;349;229
        I/GeckoDump( 2284): __end_tp_report
        I/GeckoDump( 2284): __start_cc_report
        I/GeckoDump( 2284): _x_x_mozilla_cycle_collect,3390
        I/GeckoDump( 2284): __end_cc_report
        I/GeckoDump( 2284): __startTimestamp1433438438092__endTimestamp

    We will parse the syntax here and build up a {name:[value,],} hash.
    Next we will compute the median value for each name.  Finally we
    will report the geometric mean of all of the median values.

    :return: dict mapping each page name to its median pageload time,
        plus a 'summary' key holding the geometric mean of the page
        medians (0 if no report was found).
    """
    self.loggerdeco.debug('analyzing logcat')
    re_page_data = re.compile(
        r'.*\|[0-9];([a-zA-Z0-9\.\/\-]+);([0-9;]+).*')
    re_end_report = re.compile(r'.*__end_tp_report.*')
    attempt = 1
    max_time = 180  # maximum time to wait for tp report
    wait_time = 3   # time to wait between attempts
    # Integer division keeps max_attempts an int under py3 as well.
    max_attempts = max_time // wait_time
    results = {}
    pageload_metric = {'summary': 0}
    while attempt <= max_attempts and pageload_metric['summary'] == 0:
        buf = self.worker_subprocess.logcat.get()
        for line in buf:
            self.loggerdeco.debug('analyze_logcat: %s', line)
            if re_end_report.match(line):
                # calculate score
                data = []
                for page in results:
                    # median of each page, ignoring the first (warm-up)
                    # run.  The summary uses the same per-page medians so
                    # the two reported values stay consistent (previously
                    # the summary included the warm-up run).
                    page_median = median(results[page][1:])
                    pageload_metric[page] = page_median
                    data.append(page_median)
                pageload_metric['summary'] = geometric_mean(data)
                break
            match = re_page_data.match(line)
            if match:
                page_name = match.group(1)
                numbers = match.group(2)
                if page_name and numbers:
                    # Keep only the leading site name of the page path.
                    page_name = page_name.split('/')[0]
                    numbers = [float(x) for x in numbers.split(';')]
                    results[page_name] = numbers
        if self.handle_crashes():
            # If fennec crashed, don't bother looking for pageload metric
            break
        if pageload_metric['summary'] == 0:
            sleep(wait_time)
            attempt += 1
    if pageload_metric['summary'] == 0:
        self.loggerdeco.warning('Unable to find pageload metric')
    return pageload_metric
def run_job(self):
    """Run the S1S2 page-load test for every configured URL.

    For each url in self._urls the test is attempted up to
    self.stderrp_attempts times; each attempt collects up to
    self._iterations paired uncached/cached measurements, publishes the
    per-metric medians and geometric means as a Perfherder suite, and
    stops retrying once the throbber measurements' standard error is
    below the rejection threshold.

    :return: True if the job ran to completion (individual measurements
        may still have failed); False if setup failed and the job was
        aborted.
    """
    is_test_completed = False
    # Abort the whole job if the test pages cannot be installed.
    if not self.install_local_pages():
        self.add_failure(
            self.name, TestStatus.TEST_UNEXPECTED_FAIL,
            'Aborting test - Could not install local pages on phone.',
            TreeherderStatus.EXCEPTION)
        return is_test_completed
    # Sanity check that Fennec can run at all before starting.
    if not self.create_profile():
        self.add_failure(self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                         'Aborting test - Could not run Fennec.',
                         TreeherderStatus.BUSTED)
        return is_test_completed
    perfherder_options = PerfherderOptions(self.perfherder_options,
                                           repo=self.build.tree)
    is_test_completed = True
    testcount = len(self._urls.keys())
    for testnum, (testname, url) in enumerate(self._urls.iteritems(), 1):
        # Re-clone the logger so log lines carry this test's context.
        self.loggerdeco = self.loggerdeco.clone(
            extradict={
                'phoneid': self.phone.id,
                'buildid': self.build.id,
                'testname': testname
            },
            extraformat=
            'S1S2TestJob|%(phoneid)s|%(buildid)s|%(testname)s|%(message)s')
        self.dm._logger = self.loggerdeco
        self.loggerdeco.info('Running test (%d/%d) for %d iterations',
                             testnum, testcount, self._iterations)
        command = None
        for attempt in range(1, self.stderrp_attempts + 1):
            # dataset is a list of the measurements made for the
            # iterations for this test.
            #
            # An empty item in the dataset list represents a
            # failure to obtain any measurement for that
            # iteration.
            #
            # It is possible for an item in the dataset to have an
            # uncached value and not have a corresponding cached
            # value if the cached test failed to record the
            # values.
            iteration = 0
            dataset = []
            for iteration in range(1, self._iterations + 1):
                # Calling svc power stayon true will turn on the
                # display for at least some devices if it has
                # turned off.
                self.dm.power_on()
                command = self.worker_subprocess.process_autophone_cmd(
                    test=self, require_ip_address=url.startswith('http'))
                if command['interrupt']:
                    self.handle_test_interrupt(command['reason'],
                                               command['test_result'])
                    break
                self.update_status(message='Attempt %d/%d for Test %d/%d, '
                                   'run %d, for url %s' %
                                   (attempt, self.stderrp_attempts, testnum,
                                    testcount, iteration, url))
                # Fresh profile per iteration so the first load of the
                # url is genuinely uncached.
                if not self.create_profile():
                    self.add_failure(self.name,
                                     TestStatus.TEST_UNEXPECTED_FAIL,
                                     'Failed to create profile',
                                     TreeherderStatus.TESTFAILED)
                    continue
                # First load: uncached measurement.
                measurement = self.runtest(url)
                if not measurement:
                    self.loggerdeco.warning(
                        '%s %s Attempt %s Failed to get uncached measurement.',
                        testname, url, attempt)
                    continue
                self.add_pass(url)
                dataset.append({'uncached': measurement})
                # Second load of the same url: cached measurement.
                measurement = self.runtest(url)
                if not measurement:
                    self.loggerdeco.warning(
                        '%s %s Attempt %s Failed to get cached measurement.',
                        testname, url, attempt)
                    continue
                self.add_pass(url)
                dataset[-1]['cached'] = measurement
                # Stop iterating early once the spread of the throbber
                # measurements is already acceptable.
                if self.is_stderr_below_threshold(
                        ('throbberstart', 'throbberstop'), dataset,
                        self.stderrp_accept):
                    self.loggerdeco.info(
                        'Accepted test (%d/%d) after %d of %d iterations',
                        testnum, testcount, iteration, self._iterations)
                    break
            if command and command['interrupt']:
                break
            measurements = len(dataset)
            if measurements > 0 and self._iterations != measurements:
                self.add_failure(self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                                 'Failed to get all measurements',
                                 TreeherderStatus.TESTFAILED)
            elif measurements == 0:
                # If we have not gotten a single measurement at this point,
                # just bail and report the failure rather than wasting time
                # continuing more attempts.
                self.add_failure(self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                                 'No measurements detected.',
                                 TreeherderStatus.BUSTED)
                self.loggerdeco.info(
                    'Failed to get measurements for test %s after %d/%d attempt '
                    'of %d iterations', testname, attempt,
                    self.stderrp_attempts, self._iterations)
                self.worker_subprocess.mailer.send(
                    '%s %s failed for Build %s %s on %s %s' %
                    (self.__class__.__name__, testname, self.build.tree,
                     self.build.id, utils.host(), self.phone.id),
                    'No measurements were detected for test %s.\n\n'
                    'Job %s\n'
                    'Host %s\n'
                    'Phone %s\n'
                    'Repository %s\n'
                    'Build %s\n'
                    'Revision %s\n' %
                    (testname, self.job_url, utils.host(), self.phone.id,
                     self.build.tree, self.build.id, self.build.changeset))
                break
            # Decide whether this attempt's data is acceptable; a rejected
            # attempt is still reported below, then retried.
            if self.is_stderr_below_threshold(
                    ('throbberstart', 'throbberstop'), dataset,
                    self.stderrp_reject):
                rejected = False
            else:
                rejected = True
                self.loggerdeco.info(
                    'Rejected test (%d/%d) after %d/%d iterations',
                    testnum, testcount, iteration, self._iterations)
            self.loggerdeco.debug('publishing results')
            # perfherder_values layout:
            # {metric: {cache_key: {'median': .., 'values': [..]},
            #           'geometric_mean': ..}, ...}
            perfherder_values = {'geometric_mean': 0}
            metric_keys = ['throbberstart', 'throbberstop', 'throbbertime']
            cache_names = {'uncached': 'first', 'cached': 'second'}
            cache_keys = cache_names.keys()
            for metric_key in metric_keys:
                perfherder_values[metric_key] = {'geometric_mean': 0}
                for cache_key in cache_keys:
                    perfherder_values[metric_key][cache_key] = {
                        'median': 0,
                        'values': []
                    }
            for datapoint in dataset:
                for cache_key in datapoint:
                    starttime = datapoint[cache_key]['starttime']
                    throbberstart = datapoint[cache_key]['throbberstart']
                    throbberstop = datapoint[cache_key]['throbberstop']
                    self.report_results(
                        starttime=starttime,
                        tstrt=throbberstart,
                        tstop=throbberstop,
                        testname=testname,
                        cache_enabled=(cache_key == 'cached'),
                        rejected=rejected)
                    # Store deltas relative to starttime, not absolute
                    # timestamps.
                    perfherder_values['throbberstart'][cache_key][
                        'values'].append(throbberstart - starttime)
                    perfherder_values['throbberstop'][cache_key][
                        'values'].append(throbberstop - starttime)
                    perfherder_values['throbbertime'][cache_key][
                        'values'].append(throbberstop - throbberstart)
            test_values = []
            for metric_key in metric_keys:
                for cache_key in cache_keys:
                    perfherder_values[metric_key][cache_key][
                        'median'] = utils.median(
                            perfherder_values[metric_key][cache_key]
                            ['values'])
                perfherder_values[metric_key][
                    'geometric_mean'] = utils.geometric_mean([
                        perfherder_values[metric_key]['uncached']
                        ['median'],
                        perfherder_values[metric_key]['cached']['median']
                    ])
                test_values.append(
                    perfherder_values[metric_key]['geometric_mean'])
            perfherder_suite = PerfherderSuite(
                name=testname,
                value=utils.geometric_mean(test_values),
                options=perfherder_options)
            for metric_key in metric_keys:
                for cache_key in cache_keys:
                    cache_name = cache_names[cache_key]
                    subtest_name = "%s %s" % (metric_key, cache_name)
                    perfherder_suite.add_subtest(
                        subtest_name,
                        perfherder_values[metric_key][cache_key]['median'],
                        options=perfherder_options)
            self.perfherder_artifact = PerfherderArtifact()
            self.perfherder_artifact.add_suite(perfherder_suite)
            self.loggerdeco.debug("PerfherderArtifact: %s",
                                  self.perfherder_artifact)
            if not rejected:
                break
        if command and command['interrupt']:
            break
    return is_test_completed
def analyze_logcat(self):
    """Parse the Talos tp report from logcat and compute a pageload metric.

    Expected logcat output::

        I/GeckoDump( 2284): __start_tp_report
        I/GeckoDump( 2284): _x_x_mozilla_page_load
        I/GeckoDump( 2284): _x_x_mozilla_page_load_details
        I/GeckoDump( 2284): |i|pagename|runs|
        I/GeckoDump( 2284): |0;amazon.com/www.amazon.com/index.html;2386;1146
        I/GeckoDump( 2284): |1;m.yahoo.co.jp/www.yahoo.co.jp/index.html;1724;901
        I/GeckoDump( 2284): |2;m.accuweather.com/www.accuweather.com/index.html;228;231
        I/GeckoDump( 2284): |3;m.yandex.ru/www.yandex.ru/index.html;6043;2984
        I/GeckoDump( 2284): |4;m.wikipedia.com/en.m.wikipedia.org/index.html;734;385
        I/GeckoDump( 2284): |5;m.espn.com/m.espn.go.com/index.html;576;419
        I/GeckoDump( 2284): |6;m.bbc.co.uk/www.bbc.co.uk/mobile/index.html;349;229
        I/GeckoDump( 2284): __end_tp_report
        I/GeckoDump( 2284): __start_cc_report
        I/GeckoDump( 2284): _x_x_mozilla_cycle_collect,3390
        I/GeckoDump( 2284): __end_cc_report
        I/GeckoDump( 2284): __startTimestamp1433438438092__endTimestamp

    We will parse the syntax here and build up a {name:[value,],} hash.
    Next we will compute the median value for each name.  Finally we
    will report the geometric mean of all of the median values.

    :return: dict mapping each page name to its median pageload time,
        plus a 'summary' key holding the geometric mean of the page
        medians (0 if no report was found).
    """
    self.loggerdeco.debug('analyzing logcat')
    # Raw strings: the patterns contain \|, \., \/ escapes which are
    # invalid string escapes outside a raw literal (py3 warns/errors).
    re_page_data = re.compile(r'.*\|[0-9];([a-zA-Z0-9\.\/\-]+);([0-9;]+).*')
    re_end_report = re.compile(r'.*__end_tp_report.*')
    attempt = 1
    max_time = 90  # maximum time to wait for completeness score
    wait_time = 3  # time to wait between attempts
    # Integer division keeps max_attempts an int under py3 as well.
    max_attempts = max_time // wait_time
    results = {}
    pageload_metric = {'summary': 0}
    while attempt <= max_attempts and pageload_metric['summary'] == 0:
        buf = self.logcat.get()
        for line in buf:
            # Lazy %-args (consistent with the other analyze_logcat
            # variant) instead of eager interpolation.
            self.loggerdeco.debug('analyze_logcat: %s', line)
            if re_end_report.match(line):
                # calculate score
                data = []
                for page in results:
                    data.append(median(results[page]))
                    pageload_metric[page] = median(results[page])
                pageload_metric['summary'] = geometric_mean(data)
                break
            match = re_page_data.match(line)
            if match:
                page_name = match.group(1)
                numbers = match.group(2)
                if page_name and numbers:
                    # Keep only the leading site name of the page path.
                    page_name = page_name.split('/')[0]
                    numbers = [float(x) for x in numbers.split(';')]
                    results[page_name] = numbers
        if self.fennec_crashed:
            # If fennec crashed, don't bother looking for pageload metric
            break
        if pageload_metric['summary'] == 0:
            sleep(wait_time)
            attempt += 1
    if pageload_metric['summary'] == 0:
        self.loggerdeco.warning('Unable to find pageload metric')
    return pageload_metric
def run_job(self):
    """Run the S1S2 page-load test for every configured URL.

    For each url in self._urls the test is attempted up to
    self.stderrp_attempts times; each attempt collects up to
    self._iterations paired uncached/cached measurements, publishes the
    per-metric medians and geometric means as a Perfherder suite, and
    stops retrying once the throbber measurements' standard error is
    below the rejection threshold.

    :return: True if the job ran to completion (individual measurements
        may still have failed); False if setup failed and the job was
        aborted.
    """
    is_test_completed = False
    # Abort the whole job if the test pages cannot be installed.
    if not self.install_local_pages():
        self.add_failure(
            self.name, TestStatus.TEST_UNEXPECTED_FAIL,
            'Aborting test - Could not install local pages on phone.',
            TreeherderStatus.EXCEPTION)
        return is_test_completed
    # Sanity check that Fennec can run at all before starting.
    if not self.create_profile():
        self.add_failure(
            self.name, TestStatus.TEST_UNEXPECTED_FAIL,
            'Aborting test - Could not run Fennec.',
            TreeherderStatus.BUSTED)
        return is_test_completed
    perfherder_options = PerfherderOptions(self.perfherder_options,
                                           repo=self.build.tree)
    is_test_completed = True
    testcount = len(self._urls.keys())
    for testnum, (testname, url) in enumerate(self._urls.iteritems(), 1):
        # Re-clone the logger so log lines carry this test's context.
        self.loggerdeco = self.loggerdeco.clone(
            extradict={
                'repo': self.build.tree,
                'buildid': self.build.id,
                'buildtype': self.build.type,
                'sdk': self.phone.sdk,
                'platform': self.build.platform,
                'testname': testname
            },
            extraformat='S1S2TestJob %(repo)s %(buildid)s %(buildtype)s %(sdk)s %(platform)s %(testname)s %(message)s')
        self.dm._logger = self.loggerdeco
        self.loggerdeco.info('Running test (%d/%d) for %d iterations',
                             testnum, testcount, self._iterations)
        command = None
        for attempt in range(1, self.stderrp_attempts+1):
            # dataset is a list of the measurements made for the
            # iterations for this test.
            #
            # An empty item in the dataset list represents a
            # failure to obtain any measurement for that
            # iteration.
            #
            # It is possible for an item in the dataset to have an
            # uncached value and not have a corresponding cached
            # value if the cached test failed to record the
            # values.
            iteration = 0
            dataset = []
            for iteration in range(1, self._iterations+1):
                # Calling svc power stayon true will turn on the
                # display for at least some devices if it has
                # turned off.
                self.dm.power_on()
                command = self.worker_subprocess.process_autophone_cmd(
                    test=self, require_ip_address=url.startswith('http'))
                if command['interrupt']:
                    self.handle_test_interrupt(command['reason'],
                                               command['test_result'])
                    break
                self.update_status(message='Attempt %d/%d for Test %d/%d, '
                                   'run %d, for url %s' %
                                   (attempt, self.stderrp_attempts, testnum,
                                    testcount, iteration, url))
                # Fresh profile per iteration so the first load of the
                # url is genuinely uncached.
                if not self.create_profile():
                    self.add_failure(
                        self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                        'Failed to create profile',
                        TreeherderStatus.TESTFAILED)
                    continue
                # First load: uncached measurement.
                measurement = self.runtest(url)
                if not measurement:
                    self.loggerdeco.warning(
                        '%s %s Attempt %s Failed to get uncached measurement.',
                        testname, url, attempt)
                    continue
                self.add_pass(url, text='uncached')
                dataset.append({'uncached': measurement})
                # Second load of the same url: cached measurement.
                measurement = self.runtest(url)
                if not measurement:
                    self.loggerdeco.warning(
                        '%s %s Attempt %s Failed to get cached measurement.',
                        testname, url, attempt)
                    continue
                self.add_pass(url, text='cached')
                dataset[-1]['cached'] = measurement
                # Stop iterating early once the spread of the throbber
                # measurements is already acceptable.
                if self.is_stderr_below_threshold(
                        ('throbberstart', 'throbberstop'), dataset,
                        self.stderrp_accept):
                    self.loggerdeco.info(
                        'Accepted test (%d/%d) after %d of %d iterations',
                        testnum, testcount, iteration, self._iterations)
                    break
            if command and command['interrupt']:
                break
            measurements = len(dataset)
            if measurements > 0 and self._iterations != measurements:
                self.add_failure(
                    self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                    'Failed to get all measurements',
                    TreeherderStatus.TESTFAILED)
            elif measurements == 0:
                # If we have not gotten a single measurement at this point,
                # just bail and report the failure rather than wasting time
                # continuing more attempts.
                self.add_failure(
                    self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                    'No measurements detected.',
                    TreeherderStatus.BUSTED)
                self.loggerdeco.info(
                    'Failed to get measurements for test %s after %d/%d attempt '
                    'of %d iterations', testname, attempt,
                    self.stderrp_attempts, self._iterations)
                self.worker_subprocess.mailer.send(
                    '%s %s failed for Build %s %s on %s %s' %
                    (self.__class__.__name__, testname, self.build.tree,
                     self.build.id, utils.host(), self.phone.id),
                    'No measurements were detected for test %s.\n\n'
                    'Job %s\n'
                    'Host %s\n'
                    'Phone %s\n'
                    'Repository %s\n'
                    'Build %s\n'
                    'Revision %s\n' %
                    (testname, self.job_url, utils.host(), self.phone.id,
                     self.build.tree, self.build.id, self.build.changeset))
                break
            # Decide whether this attempt's data is acceptable; a rejected
            # attempt is still reported below, then retried.
            if self.is_stderr_below_threshold(
                    ('throbberstart', 'throbberstop'), dataset,
                    self.stderrp_reject):
                rejected = False
            else:
                rejected = True
                self.loggerdeco.info(
                    'Rejected test (%d/%d) after %d/%d iterations',
                    testnum, testcount, iteration, self._iterations)
            self.loggerdeco.debug('publishing results')
            # perfherder_values layout:
            # {metric: {cache_key: {'median': .., 'values': [..]},
            #           'geometric_mean': ..}, ...}
            perfherder_values = {'geometric_mean': 0}
            metric_keys = ['throbberstart', 'throbberstop', 'throbbertime']
            cache_names = {'uncached': 'first', 'cached': 'second'}
            cache_keys = cache_names.keys()
            for metric_key in metric_keys:
                perfherder_values[metric_key] = {'geometric_mean': 0}
                for cache_key in cache_keys:
                    perfherder_values[metric_key][cache_key] = {'median': 0,
                                                                'values': []}
            for datapoint in dataset:
                for cache_key in datapoint:
                    starttime = datapoint[cache_key]['starttime']
                    throbberstart = datapoint[cache_key]['throbberstart']
                    throbberstop = datapoint[cache_key]['throbberstop']
                    self.report_results(
                        starttime=starttime,
                        tstrt=throbberstart,
                        tstop=throbberstop,
                        testname=testname,
                        cache_enabled=(cache_key == 'cached'),
                        rejected=rejected)
                    # Store deltas relative to starttime, not absolute
                    # timestamps.
                    perfherder_values['throbberstart'][cache_key]['values'].append(
                        throbberstart - starttime)
                    perfherder_values['throbberstop'][cache_key]['values'].append(
                        throbberstop - starttime)
                    perfherder_values['throbbertime'][cache_key]['values'].append(
                        throbberstop - throbberstart)
            test_values = []
            for metric_key in metric_keys:
                for cache_key in cache_keys:
                    perfherder_values[metric_key][cache_key]['median'] = utils.median(
                        perfherder_values[metric_key][cache_key]['values'])
                perfherder_values[metric_key]['geometric_mean'] = utils.geometric_mean(
                    [perfherder_values[metric_key]['uncached']['median'],
                     perfherder_values[metric_key]['cached']['median']])
                test_values.append(perfherder_values[metric_key]['geometric_mean'])
            perfherder_suite = PerfherderSuite(name=testname,
                                               value=utils.geometric_mean(test_values),
                                               options=perfherder_options)
            for metric_key in metric_keys:
                for cache_key in cache_keys:
                    cache_name = cache_names[cache_key]
                    subtest_name = "%s %s" % (metric_key, cache_name)
                    perfherder_suite.add_subtest(
                        subtest_name,
                        perfherder_values[metric_key][cache_key]['median'],
                        options=perfherder_options)
            self.perfherder_artifact = PerfherderArtifact()
            self.perfherder_artifact.add_suite(perfherder_suite)
            self.loggerdeco.debug("PerfherderArtifact: %s",
                                  self.perfherder_artifact)
            if not rejected:
                break
        if command and command['interrupt']:
            break
    return is_test_completed
def attack_probability(self):
    """Return the geometric mean of the units' attack-success values.

    Returns 0 when this object is not active.
    """
    if not self.is_active:
        return 0
    success_rates = [unit.attack_success for unit in self.units]
    return geometric_mean(success_rates)