class TalosTest(PerfTest):
    """Run Talos pageloader-based performance tests on a device.

    At construction time, per-location copies of the configured Talos
    manifests are generated (with ``%webserver%`` interpolated) and
    queued for pushing to the device.  ``run_job`` then runs Fennec
    with the pageloader add-on once per generated test and reports the
    pageload metrics parsed from logcat to Perfherder.
    """

    def __init__(self,
                 dm=None,
                 phone=None,
                 options=None,
                 config_file=None,
                 chunk=1,
                 repos=None):
        # Use None instead of a mutable default argument so each
        # instance gets its own fresh list.
        if repos is None:
            repos = []

        PerfTest.__init__(self,
                          dm=dm,
                          phone=phone,
                          options=options,
                          config_file=config_file,
                          chunk=chunk,
                          repos=repos)

        # Maps "<location>-<testname>" -> extra Fennec command line
        # arguments for that test (the -tp manifest plus tpargs).
        self._test_args = {}
        config_vars = {'webserver_url': options.webserver_url}

        try:
            location_items = self.cfg.items('locations', False, config_vars)
        except ConfigParser.NoSectionError:
            # No [locations] section: fall back to a single local run.
            location_items = [('local', None)]

        # Finalize test configuration
        tpargs = self.cfg.get('settings', 'tpargs')
        for test_location, test_path in location_items:
            if test_location in config_vars:
                # Ignore the pseudo-options which result from passing
                # the config_vars for interpolation.
                continue

            for test_name in self._tests:
                try:
                    manifest = self._tests[test_name]

                    if not os.path.exists(manifest):
                        self.loggerdeco.debug('ignoring manifest for %s',
                                              manifest)
                        continue

                    with open(manifest, 'r') as fHandle:
                        lines = fHandle.readlines()

                    # Write a location-specific copy of the manifest,
                    # replacing %webserver% with the appropriate root.
                    manifest_path = os.path.dirname(manifest)
                    manifest_file = os.path.basename(manifest)
                    test_manifest_path = os.path.join(manifest_path,
                                                      'manifest')
                    test_manifest_file = "%s.%s" % (manifest_file,
                                                    test_location)
                    test_manifest = os.path.join(test_manifest_path,
                                                 test_manifest_file)

                    if not os.path.isdir(test_manifest_path):
                        os.mkdir(test_manifest_path)

                    if test_location == 'remote':
                        url_root = test_path
                    else:
                        url_root = 'file://' + self._paths['dest']

                    with open(test_manifest, 'w') as fHandle:
                        for line in lines:
                            fHandle.write(line.replace('%webserver%',
                                                       url_root))
                    dest_manifest = os.path.join(self._paths['dest'],
                                                 test_manifest_path,
                                                 test_manifest_file)
                    # Queue the generated manifest for pushing to the
                    # device.
                    self._pushes[test_manifest] = dest_manifest

                    extra_args = "-tp file://%s %s" % (dest_manifest, tpargs)

                    self._test_args["%s-%s" %
                                    (test_location, test_name)] = extra_args

                    # Argument order matches the format string:
                    # location, path, name, manifest, extra_args.
                    self.loggerdeco.debug(
                        'generating manifest: test_location: %s, test_path: %s, '
                        'test_name: %s, manifest: %s, extra_args: %s',
                        test_location, test_path, test_name, manifest,
                        extra_args)

                except Exception:
                    self.loggerdeco.exception(
                        'generating manifest: test_location: %s, test_path: %s, '
                        'test_name: %s, manifest: %s', test_location,
                        test_path, test_name, manifest)
                    raise

        self.loggerdeco.debug('TalosTest: %s', self.__dict__)

    @property
    def name(self):
        """Job name: the autophone-talos prefix plus the name suffix."""
        return 'autophone-talos%s' % self.name_suffix

    def run_job(self):
        """Run each configured Talos test once.

        Returns True when the job ran to completion (individual test
        failures are recorded via add_failure); False when setup
        failed before any test could run.
        """
        is_test_completed = False
        custom_addons = ['pageloader.xpi']

        if not self.install_local_pages():
            self.add_failure(
                self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                'Aborting test - Could not install local pages on phone.',
                TreeherderStatus.EXCEPTION)
            return is_test_completed

        # Sanity-check that Fennec can run at all before starting.
        if not self.create_profile(custom_addons=custom_addons):
            self.add_failure(self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                             'Aborting test - Could not run Fennec.',
                             TreeherderStatus.BUSTED)
            return is_test_completed

        perfherder_options = PerfherderOptions(self.perfherder_options,
                                               self.build.tree)
        is_test_completed = True
        testcount = len(self._test_args)
        test_items = enumerate(self._test_args.iteritems(), 1)
        for testnum, (testname, test_args) in test_items:
            self.loggerdeco = self.loggerdeco.clone(
                extradict={
                    'repo': self.build.tree,
                    'buildid': self.build.id,
                    'buildtype': self.build.type,
                    'sdk': self.phone.sdk,
                    'platform': self.build.platform,
                    'testname': testname
                },
                extraformat=
                'TalosTestJob %(repo)s %(buildid)s %(buildtype)s %(sdk)s %(platform)s %(testname)s %(message)s'
            )
            self.dm._logger = self.loggerdeco
            self.loggerdeco.info('Running test (%d/%d)', testnum, testcount)

            # success == False indicates that none of the attempts
            # were successful in getting any measurement. This is
            # typically due to a regression in the browser which should
            # be reported.
            success = False

            command = self.worker_subprocess.process_autophone_cmd(
                test=self, require_ip_address=testname.startswith('remote'))

            if command['interrupt']:
                self.handle_test_interrupt(command['reason'],
                                           command['test_result'])
                break

            self.update_status(message='Test %d/%d, for test_args %s' %
                               (testnum, testcount, test_args))

            if not self.create_profile(custom_addons=custom_addons):
                self.add_failure(self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                                 'Failed to create profile',
                                 TreeherderStatus.TESTFAILED)
            else:
                measurement = self.runtest(test_args)
                if measurement:
                    if not self.perfherder_artifact:
                        self.perfherder_artifact = PerfherderArtifact()
                    suite = self.create_suite(measurement['pageload_metric'],
                                              testname,
                                              options=perfherder_options)
                    self.perfherder_artifact.add_suite(suite)
                    self.add_pass(test_args)
                    success = True
                else:
                    self.add_failure(self.name,
                                     TestStatus.TEST_UNEXPECTED_FAIL,
                                     'Failed to get measurement.',
                                     TreeherderStatus.TESTFAILED)

            if not success:
                # If we have not gotten a single measurement at this point,
                # just bail and report the failure rather than wasting time
                # continuing more attempts.
                self.loggerdeco.info('Failed to get measurements for test %s',
                                     testname)
                self.worker_subprocess.mailer.send(
                    '%s %s failed for Build %s %s on %s %s' %
                    (self.__class__.__name__, testname, self.build.tree,
                     self.build.id, host(), self.phone.id),
                    'No measurements were detected for test %s.\n\n'
                    'Job        %s\n'
                    'Host       %s\n'
                    'Phone      %s\n'
                    'Repository %s\n'
                    'Build      %s\n'
                    'Revision   %s\n' %
                    (testname, self.job_url, host(), self.phone.id,
                     self.build.tree, self.build.id, self.build.changeset))
                self.add_failure(self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                                 'No measurements detected.',
                                 TreeherderStatus.BUSTED)

                self.loggerdeco.debug('publishing results')

            if command and command['interrupt']:
                break
            elif not success:
                break

        return is_test_completed

    def runtest(self, extra_args):
        """Run Fennec once with *extra_args* and parse its tp report.

        Returns ``{'pageload_metric': <metric dict>}`` on success, or
        an empty dict when no usable metric was found.
        """
        # Clear logcat
        self.worker_subprocess.logcat.clear()

        # Run test
        self.run_fennec_with_profile(self.build.app_name,
                                     "",
                                     extra_args=extra_args.split(' '))

        # Get results - do this now so we don't have as much to
        # parse in logcat.
        pageload_metric = self.analyze_logcat()

        # Ensure we succeeded - no 0's reported
        datapoint = {}
        if pageload_metric['summary'] != 0:
            datapoint = {'pageload_metric': pageload_metric}
        return datapoint

    def analyze_logcat(self):
        """
I/GeckoDump( 2284): __start_tp_report
I/GeckoDump( 2284): _x_x_mozilla_page_load
I/GeckoDump( 2284): _x_x_mozilla_page_load_details
I/GeckoDump( 2284): |i|pagename|runs|
I/GeckoDump( 2284): |0;amazon.com/www.amazon.com/index.html;2386;1146
I/GeckoDump( 2284): |1;m.yahoo.co.jp/www.yahoo.co.jp/index.html;1724;901
I/GeckoDump( 2284): |2;m.accuweather.com/www.accuweather.com/index.html;228;231
I/GeckoDump( 2284): |3;m.yandex.ru/www.yandex.ru/index.html;6043;2984
I/GeckoDump( 2284): |4;m.wikipedia.com/en.m.wikipedia.org/index.html;734;385
I/GeckoDump( 2284): |5;m.espn.com/m.espn.go.com/index.html;576;419
I/GeckoDump( 2284): |6;m.bbc.co.uk/www.bbc.co.uk/mobile/index.html;349;229
I/GeckoDump( 2284): __end_tp_report
I/GeckoDump( 2284): __start_cc_report
I/GeckoDump( 2284): _x_x_mozilla_cycle_collect,3390
I/GeckoDump( 2284): __end_cc_report
I/GeckoDump( 2284): __startTimestamp1433438438092__endTimestamp

        We will parse the syntax here and build up a {name:[value,],} hash.
        Next we will compute the median value for each name.
        Finally we will report the geometric mean of all of the median values.
        """
        self.loggerdeco.debug('analyzing logcat')

        re_page_data = re.compile(
            r'.*\|[0-9];([a-zA-Z0-9\.\/\-]+);([0-9;]+).*')
        re_end_report = re.compile(r'.*__end_tp_report.*')

        attempt = 1
        max_time = 180  # maximum time to wait for tp report
        wait_time = 3  # time to wait between attempts
        # Explicit floor division: max_attempts is a whole count of
        # polls (identical result under Python 2's int division).
        max_attempts = max_time // wait_time

        results = {}
        pageload_metric = {'summary': 0}
        while attempt <= max_attempts and pageload_metric['summary'] == 0:
            buf = self.worker_subprocess.logcat.get()
            for line in buf:
                self.loggerdeco.debug('analyze_logcat: %s', line)
                if re_end_report.match(line):
                    # calculate score
                    data = []
                    for page in results:
                        data.append(median(results[page]))
                        # median of each page, ignoring the first run
                        pageload_metric[page] = median(results[page][1:])
                    pageload_metric['summary'] = geometric_mean(data)
                    break

                match = re_page_data.match(line)
                if match:
                    page_name = match.group(1)
                    numbers = match.group(2)
                    if page_name and numbers:
                        # Key on the first path component of the page.
                        page_name = page_name.split('/')[0]
                        numbers = [float(x) for x in numbers.split(';')]
                        results[page_name] = numbers

            if self.handle_crashes():
                # If fennec crashed, don't bother looking for pageload metric
                break
            if pageload_metric['summary'] == 0:
                sleep(wait_time)
                attempt += 1
        if pageload_metric['summary'] == 0:
            self.loggerdeco.warning('Unable to find pageload metric')

        return pageload_metric

    def create_suite(self, metric, testname, options=None):
        """Build a PerfherderSuite from a pageload *metric* dict.

        The 'summary' entry becomes the suite value; every other entry
        becomes a subtest.
        """
        phsuite = PerfherderSuite(name=testname,
                                  value=metric['summary'],
                                  options=options)
        for p in metric:
            if p != 'summary':
                phsuite.add_subtest(p, metric[p], options=options)
        return phsuite
# --- Exemplo n.º 2 (scraping artifact: separator between unrelated code samples) ---
    def run_job(self):
        """Run the s1s2 startup tests for every configured URL.

        For each URL, make up to ``self.stderrp_attempts`` attempts of
        ``self._iterations`` iterations each.  Every iteration records
        an uncached and (if possible) a cached throbber measurement;
        the aggregated medians and geometric means are published as a
        Perfherder suite.

        Returns:
            bool: True when the job ran to completion (individual test
            failures are recorded via ``add_failure``); False when
            setup failed before any test could run.
        """
        is_test_completed = False

        if not self.install_local_pages():
            self.add_failure(
                self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                'Aborting test - Could not install local pages on phone.',
                TreeherderStatus.EXCEPTION)
            return is_test_completed

        # Sanity check that Fennec can run at all before starting.
        if not self.create_profile():
            self.add_failure(self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                             'Aborting test - Could not run Fennec.',
                             TreeherderStatus.BUSTED)
            return is_test_completed

        perfherder_options = PerfherderOptions(self.perfherder_options,
                                               repo=self.build.tree)
        is_test_completed = True
        testcount = len(self._urls.keys())
        for testnum, (testname, url) in enumerate(self._urls.iteritems(), 1):
            # Re-clone the logger so every message is tagged with this
            # test's phone/build/test identifiers.
            self.loggerdeco = self.loggerdeco.clone(
                extradict={
                    'phoneid': self.phone.id,
                    'buildid': self.build.id,
                    'testname': testname
                },
                extraformat=
                'S1S2TestJob|%(phoneid)s|%(buildid)s|%(testname)s|%(message)s')
            self.dm._logger = self.loggerdeco
            self.loggerdeco.info('Running test (%d/%d) for %d iterations',
                                 testnum, testcount, self._iterations)

            command = None
            for attempt in range(1, self.stderrp_attempts + 1):
                # dataset is a list of the measurements made for the
                # iterations for this test.
                #
                # An empty item in the dataset list represents a
                # failure to obtain any measurement for that
                # iteration.
                #
                # It is possible for an item in the dataset to have an
                # uncached value and not have a corresponding cached
                # value if the cached test failed to record the
                # values.

                iteration = 0
                dataset = []
                for iteration in range(1, self._iterations + 1):
                    # Calling svc power stayon true will turn on the
                    # display for at least some devices if it has
                    # turned off.
                    self.dm.power_on()
                    command = self.worker_subprocess.process_autophone_cmd(
                        test=self, require_ip_address=url.startswith('http'))
                    if command['interrupt']:
                        self.handle_test_interrupt(command['reason'],
                                                   command['test_result'])
                        break
                    self.update_status(message='Attempt %d/%d for Test %d/%d, '
                                       'run %d, for url %s' %
                                       (attempt, self.stderrp_attempts,
                                        testnum, testcount, iteration, url))

                    if not self.create_profile():
                        self.add_failure(self.name,
                                         TestStatus.TEST_UNEXPECTED_FAIL,
                                         'Failed to create profile',
                                         TreeherderStatus.TESTFAILED)
                        continue

                    # First run: uncached (fresh profile) measurement.
                    measurement = self.runtest(url)
                    if not measurement:
                        self.loggerdeco.warning(
                            '%s %s Attempt %s Failed to get uncached measurement.',
                            testname, url, attempt)
                        continue

                    self.add_pass(url)
                    dataset.append({'uncached': measurement})

                    # Second run of the same URL: cached measurement.
                    measurement = self.runtest(url)
                    if not measurement:
                        self.loggerdeco.warning(
                            '%s %s Attempt %s Failed to get cached measurement.',
                            testname, url, attempt)
                        continue

                    self.add_pass(url)
                    dataset[-1]['cached'] = measurement

                    # Stop iterating early once the standard error
                    # percentage is already within the accept threshold.
                    if self.is_stderr_below_threshold(
                        ('throbberstart', 'throbberstop'), dataset,
                            self.stderrp_accept):
                        self.loggerdeco.info(
                            'Accepted test (%d/%d) after %d of %d iterations',
                            testnum, testcount, iteration, self._iterations)
                        break

                if command and command['interrupt']:
                    break
                measurements = len(dataset)
                if measurements > 0 and self._iterations != measurements:
                    self.add_failure(self.name,
                                     TestStatus.TEST_UNEXPECTED_FAIL,
                                     'Failed to get all measurements',
                                     TreeherderStatus.TESTFAILED)
                elif measurements == 0:
                    # If we have not gotten a single measurement at this point,
                    # just bail and report the failure rather than wasting time
                    # continuing more attempts.
                    self.add_failure(self.name,
                                     TestStatus.TEST_UNEXPECTED_FAIL,
                                     'No measurements detected.',
                                     TreeherderStatus.BUSTED)
                    self.loggerdeco.info(
                        'Failed to get measurements for test %s after %d/%d attempt '
                        'of %d iterations', testname, attempt,
                        self.stderrp_attempts, self._iterations)
                    self.worker_subprocess.mailer.send(
                        '%s %s failed for Build %s %s on %s %s' %
                        (self.__class__.__name__, testname, self.build.tree,
                         self.build.id, utils.host(), self.phone.id),
                        'No measurements were detected for test %s.\n\n'
                        'Job        %s\n'
                        'Host       %s\n'
                        'Phone      %s\n'
                        'Repository %s\n'
                        'Build      %s\n'
                        'Revision   %s\n' %
                        (testname, self.job_url, utils.host(), self.phone.id,
                         self.build.tree, self.build.id, self.build.changeset))
                    break

                # Decide whether this attempt's dataset is stable enough
                # to keep; otherwise a further attempt may be made.
                if self.is_stderr_below_threshold(
                    ('throbberstart', 'throbberstop'), dataset,
                        self.stderrp_reject):
                    rejected = False
                else:
                    rejected = True
                    self.loggerdeco.info(
                        'Rejected test (%d/%d) after %d/%d iterations',
                        testnum, testcount, iteration, self._iterations)

                self.loggerdeco.debug('publishing results')

                # Aggregate the per-iteration deltas into medians (per
                # metric, per cache state) and geometric means.
                perfherder_values = {'geometric_mean': 0}
                metric_keys = ['throbberstart', 'throbberstop', 'throbbertime']
                cache_names = {'uncached': 'first', 'cached': 'second'}
                cache_keys = cache_names.keys()

                for metric_key in metric_keys:
                    perfherder_values[metric_key] = {'geometric_mean': 0}
                    for cache_key in cache_keys:
                        perfherder_values[metric_key][cache_key] = {
                            'median': 0,
                            'values': []
                        }

                for datapoint in dataset:
                    for cache_key in datapoint:
                        starttime = datapoint[cache_key]['starttime']
                        throbberstart = datapoint[cache_key]['throbberstart']
                        throbberstop = datapoint[cache_key]['throbberstop']
                        self.report_results(
                            starttime=starttime,
                            tstrt=throbberstart,
                            tstop=throbberstop,
                            testname=testname,
                            cache_enabled=(cache_key == 'cached'),
                            rejected=rejected)
                        perfherder_values['throbberstart'][cache_key][
                            'values'].append(throbberstart - starttime)
                        perfherder_values['throbberstop'][cache_key][
                            'values'].append(throbberstop - starttime)
                        perfherder_values['throbbertime'][cache_key][
                            'values'].append(throbberstop - throbberstart)

                test_values = []
                for metric_key in metric_keys:
                    for cache_key in cache_keys:
                        perfherder_values[metric_key][cache_key][
                            'median'] = utils.median(
                                perfherder_values[metric_key][cache_key]
                                ['values'])
                    perfherder_values[metric_key][
                        'geometric_mean'] = utils.geometric_mean([
                            perfherder_values[metric_key]['uncached']
                            ['median'],
                            perfherder_values[metric_key]['cached']['median']
                        ])
                    test_values.append(
                        perfherder_values[metric_key]['geometric_mean'])

                # Suite value is the geometric mean across all metrics;
                # each metric/cache-state median becomes a subtest.
                perfherder_suite = PerfherderSuite(
                    name=testname,
                    value=utils.geometric_mean(test_values),
                    options=perfherder_options)
                for metric_key in metric_keys:
                    for cache_key in cache_keys:
                        cache_name = cache_names[cache_key]
                        subtest_name = "%s %s" % (metric_key, cache_name)
                        perfherder_suite.add_subtest(
                            subtest_name,
                            perfherder_values[metric_key][cache_key]['median'],
                            options=perfherder_options)

                self.perfherder_artifact = PerfherderArtifact()
                self.perfherder_artifact.add_suite(perfherder_suite)
                self.loggerdeco.debug("PerfherderArtifact: %s",
                                      self.perfherder_artifact)

                if not rejected:
                    break

            if command and command['interrupt']:
                break

        return is_test_completed
    def run_job(self):
        """Run each configured Talos test once.

        Installs local pages and the pageloader add-on, then runs
        Fennec once per entry in ``self._test_args`` and publishes the
        resulting pageload metric as a Perfherder suite.  Bails out on
        the first test that yields no measurement, or on an autophone
        interrupt command.

        Returns:
            bool: True when the job ran to completion (individual test
            failures are recorded via ``add_failure``); False when
            setup failed before any test could run.
        """
        is_test_completed = False
        custom_addons = ['pageloader.xpi']

        if not self.install_local_pages():
            self.add_failure(
                self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                'Aborting test - Could not install local pages on phone.',
                TreeherderStatus.EXCEPTION)
            return is_test_completed

        # Sanity check that Fennec can run at all before starting.
        if not self.create_profile(custom_addons=custom_addons):
            self.add_failure(self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                             'Aborting test - Could not run Fennec.',
                             TreeherderStatus.BUSTED)
            return is_test_completed

        perfherder_options = PerfherderOptions(self.perfherder_options,
                                               self.build.tree)
        is_test_completed = True
        testcount = len(self._test_args.keys())
        test_items = enumerate(self._test_args.iteritems(), 1)
        for testnum, (testname, test_args) in test_items:
            # Re-clone the logger so every message is tagged with this
            # test's repo/build/phone identifiers.
            self.loggerdeco = self.loggerdeco.clone(
                extradict={
                    'repo': self.build.tree,
                    'buildid': self.build.id,
                    'buildtype': self.build.type,
                    'sdk': self.phone.sdk,
                    'platform': self.build.platform,
                    'testname': testname
                },
                extraformat=
                'TalosTestJob %(repo)s %(buildid)s %(buildtype)s %(sdk)s %(platform)s %(testname)s %(message)s'
            )
            self.dm._logger = self.loggerdeco
            self.loggerdeco.info('Running test (%d/%d)', testnum, testcount)

            # success == False indicates that none of the attempts
            # were successful in getting any measurement. This is
            # typically due to a regression in the brower which should
            # be reported.
            success = False

            command = self.worker_subprocess.process_autophone_cmd(
                test=self, require_ip_address=testname.startswith('remote'))

            if command['interrupt']:
                self.handle_test_interrupt(command['reason'],
                                           command['test_result'])
                break

            self.update_status(message='Test %d/%d, for test_args %s' %
                               (testnum, testcount, test_args))

            if not self.create_profile(custom_addons=custom_addons):
                self.add_failure(self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                                 'Failed to create profile',
                                 TreeherderStatus.TESTFAILED)
            else:
                measurement = self.runtest(test_args)
                if measurement:
                    # Lazily create the artifact so it exists only when
                    # there is at least one suite to publish.
                    if not self.perfherder_artifact:
                        self.perfherder_artifact = PerfherderArtifact()
                    suite = self.create_suite(measurement['pageload_metric'],
                                              testname,
                                              options=perfherder_options)
                    self.perfherder_artifact.add_suite(suite)
                    self.add_pass(test_args)
                    success = True
                else:
                    self.add_failure(self.name,
                                     TestStatus.TEST_UNEXPECTED_FAIL,
                                     'Failed to get measurement.',
                                     TreeherderStatus.TESTFAILED)

            if not success:
                # If we have not gotten a single measurement at this point,
                # just bail and report the failure rather than wasting time
                # continuing more attempts.
                self.loggerdeco.info('Failed to get measurements for test %s',
                                     testname)
                self.worker_subprocess.mailer.send(
                    '%s %s failed for Build %s %s on %s %s' %
                    (self.__class__.__name__, testname, self.build.tree,
                     self.build.id, host(), self.phone.id),
                    'No measurements were detected for test %s.\n\n'
                    'Job        %s\n'
                    'Host       %s\n'
                    'Phone      %s\n'
                    'Repository %s\n'
                    'Build      %s\n'
                    'Revision   %s\n' %
                    (testname, self.job_url, host(), self.phone.id,
                     self.build.tree, self.build.id, self.build.changeset))
                self.add_failure(self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                                 'No measurements detected.',
                                 TreeherderStatus.BUSTED)

                self.loggerdeco.debug('publishing results')

            if command and command['interrupt']:
                break
            elif not success:
                break

        return is_test_completed
# --- Exemplo n.º 4 (scraping artifact: separator between unrelated code samples) ---
    def run_job(self):
        """Run each configured Talos test for ``self._iterations`` runs.

        Older variant of the Talos job runner using the
        ``test_failure``/``test_pass``/``PhoneTestResult`` API.  Runs
        Fennec up to ``self._iterations`` times per entry in
        ``self._test_args``; ``success`` is True as soon as any
        iteration yields a measurement.  Bails out early on a Fennec
        crash or an autophone interrupt command.

        Returns:
            bool: True when the job ran to completion; False when setup
            failed or an interrupt command aborted the job.
        """
        is_test_completed = False
        custom_addons = ['pageloader.xpi']

        if not self.install_local_pages():
            self.test_failure(
                self.name, 'TEST_UNEXPECTED_FAIL',
                'Aborting test - Could not install local pages on phone.',
                PhoneTestResult.EXCEPTION)
            return is_test_completed

        # Sanity check that Fennec can run at all before starting.
        if not self.create_profile(custom_addons=custom_addons):
            self.test_failure(
                self.name, 'TEST_UNEXPECTED_FAIL',
                'Aborting test - Could not run Fennec.',
                PhoneTestResult.BUSTED)
            return is_test_completed

        is_test_completed = True
        testcount = len(self._test_args.keys())
        test_items = enumerate(self._test_args.iteritems(), 1)
        for testnum, (testname, test_args) in test_items:
            # Stop scheduling further tests once Fennec has crashed.
            if self.fennec_crashed:
                break
            # Re-clone the logger so every message is tagged with this
            # test's phone/build/test identifiers.
            self.loggerdeco = self.loggerdeco.clone(
                extradict={'phoneid': self.phone.id,
                           'buildid': self.build.id,
                           'testname': testname},
                extraformat='%(phoneid)s|%(buildid)s|%(testname)s|%(message)s')
            self.dm._logger = self.loggerdeco
            self.loggerdeco.info('Running test (%d/%d) for %d iterations' %
                                 (testnum, testcount, self._iterations))

            # success == False indicates that none of the attempts
            # were successful in getting any measurement. This is
            # typically due to a regression in the brower which should
            # be reported.
            success = False
            command = None

            if self.fennec_crashed:
                break

            for iteration in range(1, self._iterations+1):
                command = self.worker_subprocess.process_autophone_cmd(
                    test=self, require_ip_address=testname.startswith('remote'))
                if command['interrupt']:
                    is_test_completed = False
                    self.handle_test_interrupt(command['reason'],
                                               command['test_result'])
                    break
                if self.fennec_crashed:
                    break

                self.update_status(message='Test %d/%d, '
                                   'run %d, for test_args %s' %
                                   (testnum, testcount, iteration, test_args))

                if not self.create_profile(custom_addons=custom_addons):
                    self.test_failure(test_args,
                                      'TEST_UNEXPECTED_FAIL',
                                      'Failed to create profile',
                                      PhoneTestResult.TESTFAILED)
                    continue

                measurement = self.runtest(test_args)
                if measurement:
                    # Lazily create the artifact so it exists only when
                    # there is at least one suite to publish.
                    if not self.perfherder_artifact:
                        self.perfherder_artifact = PerfherderArtifact()
                    suite = self.create_suite(measurement['pageload_metric'],
                                             testname)
                    self.perfherder_artifact.add_suite(suite)
                    self.test_pass(test_args)
                else:
                    self.test_failure(
                        test_args,
                        'TEST_UNEXPECTED_FAIL',
                        'Failed to get measurement.',
                        PhoneTestResult.TESTFAILED)
                    continue
                success = True

            if not success:
                # If we have not gotten a single measurement at this point,
                # just bail and report the failure rather than wasting time
                # continuing more attempts.
                self.loggerdeco.info(
                    'Failed to get measurements for test %s after '
                    '%d iterations' % (testname, self._iterations))
                self.worker_subprocess.mailer.send(
                    '%s %s failed for Build %s %s on Phone %s' %
                    (self.__class__.__name__, testname, self.build.tree, self.build.id,
                     self.phone.id),
                    'No measurements were detected for test %s.\n\n'
                    'Job        %s\n'
                    'Phone      %s\n'
                    'Repository %s\n'
                    'Build      %s\n'
                    'Revision   %s\n' %
                    (testname,
                     self.job_url,
                     self.phone.id,
                     self.build.tree,
                     self.build.id,
                     self.build.revision))
                self.test_failure(self.name, 'TEST_UNEXPECTED_FAIL',
                                  'No measurements detected.',
                                  PhoneTestResult.BUSTED)

                self.loggerdeco.debug('publishing results')

            if command and command['interrupt']:
                break
            elif not success:
                break

        return is_test_completed
# --- Exemplo n.º 5 (scraping artifact: separator between unrelated code samples) ---
class S1S2Test(PerfTest):
    def __init__(self,
                 dm=None,
                 phone=None,
                 options=None,
                 config_file=None,
                 chunk=1,
                 repos=None):
        """Initialize the s1s2 startup test and build its url map.

        Reads the optional [locations] section of the test config and
        combines each location with each configured test to populate
        self._urls as {"<location>-<testname>": url}.  The url name
        serves as the test name for reporting.
        """
        # Avoid the shared-mutable-default-argument pitfall: a literal
        # [] default would be shared by every instance of this class.
        if repos is None:
            repos = []

        PerfTest.__init__(self,
                          dm=dm,
                          phone=phone,
                          options=options,
                          config_file=config_file,
                          chunk=chunk,
                          repos=repos)

        # Map URLS - {urlname: url} - urlname serves as testname
        self._urls = {}
        config_vars = {'webserver_url': options.webserver_url}

        try:
            location_items = self.cfg.items('locations', False, config_vars)
        except ConfigParser.NoSectionError:
            # No [locations] section: fall back to a single local location.
            location_items = [('local', None)]

        # Finalize test configuration
        for test_location, test_path in location_items:
            if test_location in config_vars:
                # Ignore the pseudo-options which result from passing
                # the config_vars for interpolation.
                continue
            for test_name in self._tests:
                if test_path:
                    # Remote location: resolve the test page against the
                    # location's base path.
                    test_url = urlparse.urljoin(test_path,
                                                self._tests[test_name])
                else:
                    # Local location: serve the page from the on-device
                    # destination directory via a file: url.
                    test_url = 'file://' + os.path.join(
                        self._paths['dest'], self._tests[test_name])
                self.loggerdeco.debug(
                    'test_location: %s, test_name: %s, test_path: %s, '
                    'test: %s, test_url: %s' %
                    (test_location, test_name, test_path,
                     self._tests[test_name], test_url))
                self._urls["%s-%s" % (test_location, test_name)] = test_url

        self.loggerdeco.debug('S1S2Test: %s', self.__dict__)

    @property
    def name(self):
        """Suite name reported for this test, including any suffix."""
        suffix = self.name_suffix
        return 'autophone-s1s2%s' % suffix

    @property
    def phonedash_url(self):
        """Phonedash results url for this test.

        s1s2 historically produces four distinct test names; the
        'local-blank' variant is used as the representative default.
        """
        default_test = 'local-blank'
        return self._phonedash_url(default_test)

    def run_job(self):
        """Run the s1s2 startup tests for every configured url.

        Each url in self._urls is attempted up to self.stderrp_attempts
        times; each attempt performs up to self._iterations iterations,
        collecting an uncached and a cached measurement per iteration.
        Results are reported via report_results and published as a
        Perfherder artifact.

        Returns True when the job ran (individual test failures are
        recorded via add_failure), False when initial setup failed or
        the job was interrupted before starting.
        """
        is_test_completed = False

        # Bail out early if the local test pages cannot be installed.
        if not self.install_local_pages():
            self.add_failure(
                self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                'Aborting test - Could not install local pages on phone.',
                TreeherderStatus.EXCEPTION)
            return is_test_completed

        # Sanity check that Fennec can run at all before starting.
        if not self.create_profile():
            self.add_failure(self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                             'Aborting test - Could not run Fennec.',
                             TreeherderStatus.BUSTED)
            return is_test_completed

        perfherder_options = PerfherderOptions(self.perfherder_options,
                                               repo=self.build.tree)
        is_test_completed = True
        testcount = len(self._urls.keys())
        for testnum, (testname, url) in enumerate(self._urls.iteritems(), 1):
            # Re-clone the logger so every message is tagged with the
            # phone, build and test currently being run.
            self.loggerdeco = self.loggerdeco.clone(
                extradict={
                    'phoneid': self.phone.id,
                    'buildid': self.build.id,
                    'testname': testname
                },
                extraformat=
                'S1S2TestJob|%(phoneid)s|%(buildid)s|%(testname)s|%(message)s')
            self.dm._logger = self.loggerdeco
            self.loggerdeco.info('Running test (%d/%d) for %d iterations',
                                 testnum, testcount, self._iterations)

            command = None
            for attempt in range(1, self.stderrp_attempts + 1):
                # dataset is a list of the measurements made for the
                # iterations for this test.
                #
                # An empty item in the dataset list represents a
                # failure to obtain any measurement for that
                # iteration.
                #
                # It is possible for an item in the dataset to have an
                # uncached value and not have a corresponding cached
                # value if the cached test failed to record the
                # values.

                iteration = 0
                dataset = []
                for iteration in range(1, self._iterations + 1):
                    # Calling svc power stayon true will turn on the
                    # display for at least some devices if it has
                    # turned off.
                    self.dm.power_on()
                    command = self.worker_subprocess.process_autophone_cmd(
                        test=self, require_ip_address=url.startswith('http'))
                    if command['interrupt']:
                        self.handle_test_interrupt(command['reason'],
                                                   command['test_result'])
                        break
                    self.update_status(message='Attempt %d/%d for Test %d/%d, '
                                       'run %d, for url %s' %
                                       (attempt, self.stderrp_attempts,
                                        testnum, testcount, iteration, url))

                    # Each iteration gets a fresh profile; a failure to
                    # create one only skips this iteration.
                    if not self.create_profile():
                        self.add_failure(self.name,
                                         TestStatus.TEST_UNEXPECTED_FAIL,
                                         'Failed to create profile',
                                         TreeherderStatus.TESTFAILED)
                        continue

                    # First load of the url: the uncached measurement.
                    measurement = self.runtest(url)
                    if not measurement:
                        self.loggerdeco.warning(
                            '%s %s Attempt %s Failed to get uncached measurement.',
                            testname, url, attempt)
                        continue

                    self.add_pass(url)
                    dataset.append({'uncached': measurement})

                    # Second load of the same url: the cached measurement.
                    measurement = self.runtest(url)
                    if not measurement:
                        self.loggerdeco.warning(
                            '%s %s Attempt %s Failed to get cached measurement.',
                            testname, url, attempt)
                        continue

                    self.add_pass(url)
                    dataset[-1]['cached'] = measurement

                    # Stop iterating early once the standard error
                    # percentage drops below the acceptance threshold.
                    if self.is_stderr_below_threshold(
                        ('throbberstart', 'throbberstop'), dataset,
                            self.stderrp_accept):
                        self.loggerdeco.info(
                            'Accepted test (%d/%d) after %d of %d iterations',
                            testnum, testcount, iteration, self._iterations)
                        break

                if command and command['interrupt']:
                    break
                measurements = len(dataset)
                if measurements > 0 and self._iterations != measurements:
                    self.add_failure(self.name,
                                     TestStatus.TEST_UNEXPECTED_FAIL,
                                     'Failed to get all measurements',
                                     TreeherderStatus.TESTFAILED)
                elif measurements == 0:
                    # If we have not gotten a single measurement at this point,
                    # just bail and report the failure rather than wasting time
                    # continuing more attempts.
                    self.add_failure(self.name,
                                     TestStatus.TEST_UNEXPECTED_FAIL,
                                     'No measurements detected.',
                                     TreeherderStatus.BUSTED)
                    self.loggerdeco.info(
                        'Failed to get measurements for test %s after %d/%d attempt '
                        'of %d iterations', testname, attempt,
                        self.stderrp_attempts, self._iterations)
                    self.worker_subprocess.mailer.send(
                        '%s %s failed for Build %s %s on %s %s' %
                        (self.__class__.__name__, testname, self.build.tree,
                         self.build.id, utils.host(), self.phone.id),
                        'No measurements were detected for test %s.\n\n'
                        'Job        %s\n'
                        'Host       %s\n'
                        'Phone      %s\n'
                        'Repository %s\n'
                        'Build      %s\n'
                        'Revision   %s\n' %
                        (testname, self.job_url, utils.host(), self.phone.id,
                         self.build.tree, self.build.id, self.build.changeset))
                    break

                # Reject the whole attempt when the spread of the
                # measurements is above the rejection threshold.
                if self.is_stderr_below_threshold(
                    ('throbberstart', 'throbberstop'), dataset,
                        self.stderrp_reject):
                    rejected = False
                else:
                    rejected = True
                    self.loggerdeco.info(
                        'Rejected test (%d/%d) after %d/%d iterations',
                        testnum, testcount, iteration, self._iterations)

                self.loggerdeco.debug('publishing results')

                # Build the nested perfherder value structure:
                # {metric: {'geometric_mean': g,
                #           cache_key: {'median': m, 'values': [...]}}}
                perfherder_values = {'geometric_mean': 0}
                metric_keys = ['throbberstart', 'throbberstop', 'throbbertime']
                cache_names = {'uncached': 'first', 'cached': 'second'}
                cache_keys = cache_names.keys()

                for metric_key in metric_keys:
                    perfherder_values[metric_key] = {'geometric_mean': 0}
                    for cache_key in cache_keys:
                        perfherder_values[metric_key][cache_key] = {
                            'median': 0,
                            'values': []
                        }

                for datapoint in dataset:
                    for cache_key in datapoint:
                        starttime = datapoint[cache_key]['starttime']
                        throbberstart = datapoint[cache_key]['throbberstart']
                        throbberstop = datapoint[cache_key]['throbberstop']
                        self.report_results(
                            starttime=starttime,
                            tstrt=throbberstart,
                            tstop=throbberstop,
                            testname=testname,
                            cache_enabled=(cache_key == 'cached'),
                            rejected=rejected)
                        # Accumulate per-metric deltas relative to the
                        # start time for the perfherder summary below.
                        perfherder_values['throbberstart'][cache_key][
                            'values'].append(throbberstart - starttime)
                        perfherder_values['throbberstop'][cache_key][
                            'values'].append(throbberstop - starttime)
                        perfherder_values['throbbertime'][cache_key][
                            'values'].append(throbberstop - throbberstart)

                # Collapse the raw values into medians per cache state and
                # a geometric mean per metric.
                test_values = []
                for metric_key in metric_keys:
                    for cache_key in cache_keys:
                        perfherder_values[metric_key][cache_key][
                            'median'] = utils.median(
                                perfherder_values[metric_key][cache_key]
                                ['values'])
                    perfherder_values[metric_key][
                        'geometric_mean'] = utils.geometric_mean([
                            perfherder_values[metric_key]['uncached']
                            ['median'],
                            perfherder_values[metric_key]['cached']['median']
                        ])
                    test_values.append(
                        perfherder_values[metric_key]['geometric_mean'])

                perfherder_suite = PerfherderSuite(
                    name=testname,
                    value=utils.geometric_mean(test_values),
                    options=perfherder_options)
                for metric_key in metric_keys:
                    for cache_key in cache_keys:
                        cache_name = cache_names[cache_key]
                        subtest_name = "%s %s" % (metric_key, cache_name)
                        perfherder_suite.add_subtest(
                            subtest_name,
                            perfherder_values[metric_key][cache_key]['median'],
                            options=perfherder_options)

                self.perfherder_artifact = PerfherderArtifact()
                self.perfherder_artifact.add_suite(perfherder_suite)
                self.loggerdeco.debug("PerfherderArtifact: %s",
                                      self.perfherder_artifact)

                # A non-rejected attempt ends the retry loop for this url.
                if not rejected:
                    break

            if command and command['interrupt']:
                break

        return is_test_completed

    def runtest(self, url):
        """Run a single s1s2 page load against *url*.

        Returns a dict with keys 'starttime', 'throbberstart',
        'throbberstop' and 'throbbertime', or an empty dict when valid
        throbber measurements could not be obtained.
        """
        # Start from a clean logcat so analyze_logcat only sees output
        # produced by this run.
        self.worker_subprocess.logcat.clear()

        # Launch Fennec against the test url.
        self.run_fennec_with_profile(self.build.app_name, url)

        # Pull the measurements out of logcat right away, before more
        # output accumulates.
        start, throbber_start, throbber_stop = self.analyze_logcat()

        self.wait_for_fennec()
        self.handle_crashes()

        # Only report a datapoint when both throbber events were seen;
        # zero values indicate a failed measurement.
        if not (throbber_start and throbber_stop):
            return {}
        return {
            'starttime': start,
            'throbberstart': throbber_start,
            'throbberstop': throbber_stop,
            'throbbertime': throbber_stop - throbber_start,
        }

    def analyze_logcat(self):
        """Extract the start and throbber times for an s1s2 run from logcat.

        Polls logcat until a start time, throbber start and throbber stop
        have all been seen (or the maximum wait time elapses, or fennec
        crashes), then converts the wall-clock timestamps into
        milliseconds relative to the first logcat line.

        Returns a (start_time, throbber_start_time, throbber_stop_time)
        tuple in milliseconds, or (0, 0, 0) when a consistent set of
        measurements could not be obtained.
        """
        self.loggerdeco.debug('analyzing logcat')

        logcat_prefix = r'(\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3})'
        throbber_prefix = r'..GeckoToolbarDisplayLayout.*zerdatime (\d+) - Throbber'
        re_base_time = re.compile(r'%s' % logcat_prefix)
        re_gecko_time = re.compile(r'%s .*([Gg]ecko)' % logcat_prefix)
        re_start_time = re.compile(
            '%s .*('
            'Start proc .*%s.* for activity %s|'
            'Fennec application start)' %
            (logcat_prefix, self.build.app_name, self.build.app_name))
        re_throbber_start_time = re.compile('%s %s start' %
                                            (logcat_prefix, throbber_prefix))
        re_throbber_stop_time = re.compile('%s %s stop' %
                                           (logcat_prefix, throbber_prefix))

        base_time = 0
        start_time = 0
        throbber_start_time = 0
        throbber_stop_time = 0
        start_time_reason = ''
        fennec_start = 'Fennec application start'

        attempt = 1
        max_time = 90  # maximum time to wait for throbbers
        wait_time = 3  # time to wait between attempts
        max_attempts = max_time / wait_time

        while (attempt <= max_attempts
               and (throbber_start_time == 0 or throbber_stop_time == 0)):
            buf = self.worker_subprocess.logcat.get()
            for line in buf:
                self.loggerdeco.debug('analyze_logcat: %s', line)
                # The first timestamped line establishes the base time
                # from which all deltas are measured.
                match = re_base_time.match(line)
                if match and not base_time:
                    base_time = match.group(1)
                    self.loggerdeco.info('analyze_logcat: base_time: %s',
                                         base_time)
                # We want the Fennec application start message, or the
                # Start proc message or the first gecko related
                # message in order to determine the start_time which
                # will be used to convert the absolute time values
                # into values relative to the start of fennec.  See
                # https://bugzilla.mozilla.org/show_bug.cgi?id=1214810
                if not start_time:
                    match = re_gecko_time.match(line)
                    if match:
                        start_time = match.group(1)
                        start_time_reason = match.group(2)
                        self.loggerdeco.info(
                            'analyze_logcat: new start_time: %s %s',
                            start_time, start_time_reason)
                match = re_start_time.match(line)
                if match:
                    group1 = match.group(1)
                    group2 = match.group(2)
                    if not start_time:
                        start_time = group1
                        start_time_reason = group2
                        self.loggerdeco.info(
                            'analyze_logcat: new start_time: %s %s',
                            start_time, start_time_reason)
                    elif (fennec_start in group2
                          and fennec_start not in start_time_reason):
                        # Only use the first if there are multiple
                        # fennec_start messages.
                        start_time = group1
                        start_time_reason = group2
                        self.loggerdeco.info(
                            'analyze_logcat: updated start_time: %s %s',
                            start_time, start_time_reason)
                    elif (fennec_start not in start_time_reason
                          and group2.startswith('Start proc')):
                        # A Start proc message overrides a plain gecko
                        # message, but never a fennec_start one.
                        start_time = group1
                        start_time_reason = group2
                        self.loggerdeco.info(
                            'analyze_logcat: updated start_time: %s %s',
                            start_time, start_time_reason)
                    else:
                        self.loggerdeco.info(
                            'analyze_logcat: ignoring start_time: %s %s',
                            group1, group2)
                    continue
                # We want the first throbberstart and throbberstop
                # after the start_time.
                match = re_throbber_start_time.match(line)
                if match:
                    if throbber_start_time:
                        self.loggerdeco.warning(
                            'analyze_logcat: throbber_start_time: %s '
                            'missing throbber_stop. Resetting '
                            'throbber_start_time.', throbber_start_time)
                    throbber_start_time = match.group(1)
                    self.loggerdeco.info(
                        'analyze_logcat: throbber_start_time: %s',
                        throbber_start_time)
                    continue
                match = re_throbber_stop_time.match(line)
                if match and not throbber_stop_time:
                    throbber_stop_time = match.group(1)
                    self.loggerdeco.info(
                        'analyze_logcat: throbber_stop_time: %s',
                        throbber_stop_time)
                    continue
                if start_time and throbber_start_time and throbber_stop_time:
                    break
            if self.handle_crashes():
                # If fennec crashed, don't bother looking for the Throbbers
                self.loggerdeco.warning('analyze_logcat: fennec crashed.')
                break
            if start_time == 0 or \
               throbber_start_time == 0 or \
               throbber_stop_time == 0:
                sleep(wait_time)
                attempt += 1
        if throbber_start_time and throbber_stop_time == 0:
            self.loggerdeco.warning(
                'analyze_logcat: Unable to find Throbber stop')

        # The captured time from the logcat lines is in the format
        # MM-DD HH:MM:SS.mmm. It is possible for the year to change
        # between the different times, so we need to make adjustments
        # if necessary. First, we assume the year does not change and
        # parse the dates as if they are in the current year. If
        # the dates violate the natural order start_time,
        # throbber_start_time, throbber_stop_time, we can adjust the
        # year.

        if base_time and start_time and throbber_start_time and throbber_stop_time:
            def parse(y, t):
                # Logcat timestamps lack a year; prepend the candidate
                # year so strptime can produce a full datetime.
                return datetime.datetime.strptime(
                    '%4d-%s' % (y, t), '%Y-%m-%d %H:%M:%S.%f')

            year = datetime.datetime.utcnow().year
            base_time = parse(year, base_time)
            start_time = parse(year, start_time)
            throbber_start_time = parse(year, throbber_start_time)
            throbber_stop_time = parse(year, throbber_stop_time)

            self.loggerdeco.debug(
                'analyze_logcat: before year adjustment '
                'base: %s, start: %s, '
                'throbber start: %s', base_time, start_time,
                throbber_start_time)

            # datetime objects are immutable: replace() returns a new
            # object, so the result must be assigned back.  (The previous
            # code discarded the return values, so the year adjustments
            # were silently lost.)
            if base_time > start_time:
                base_time = base_time.replace(year=year - 1)
            elif start_time > throbber_start_time:
                base_time = base_time.replace(year=year - 1)
                start_time = start_time.replace(year=year - 1)
            elif throbber_start_time > throbber_stop_time:
                base_time = base_time.replace(year=year - 1)
                start_time = start_time.replace(year=year - 1)
                throbber_start_time = throbber_start_time.replace(
                    year=year - 1)

            self.loggerdeco.debug(
                'analyze_logcat: after year adjustment '
                'base: %s, start: %s, '
                'throbber start: %s', base_time, start_time,
                throbber_start_time)

            # Convert the times to milliseconds from the base time.
            def convert(t1, t2):
                return round((t2 - t1).total_seconds() * 1000.0)

            start_time = convert(base_time, start_time)
            throbber_start_time = convert(base_time, throbber_start_time)
            throbber_stop_time = convert(base_time, throbber_stop_time)

            self.loggerdeco.debug(
                'analyze_logcat: base: %s, start: %s, '
                'throbber start: %s, throbber stop: %s, '
                'throbber time: %s', base_time, start_time,
                throbber_start_time, throbber_stop_time,
                throbber_stop_time - throbber_start_time)

            # Reject the run outright when the events are out of order.
            if start_time > throbber_start_time or \
               start_time > throbber_stop_time or \
               throbber_start_time > throbber_stop_time:
                self.loggerdeco.warning(
                    'analyze_logcat: inconsistent measurements: '
                    'start: %s, '
                    'throbber start: %s, throbber stop: %s ', start_time,
                    throbber_start_time, throbber_stop_time)
                start_time = throbber_start_time = throbber_stop_time = 0
        else:
            self.loggerdeco.warning(
                'analyze_logcat: failed to get measurements '
                'start_time: %s, throbber start: %s, throbber stop: %s',
                start_time, throbber_start_time, throbber_stop_time)
            start_time = throbber_start_time = throbber_stop_time = 0

        return (start_time, throbber_start_time, throbber_stop_time)
# Exemplo n.º 6
# 0
class TalosTest(PerfTest):
    def __init__(self, dm=None, phone=None, options=None,
                 config_file=None, chunk=1, repos=None):
        """Initialize the Talos test and build its argument map.

        For every (location, test) combination this generates a
        ".develop" copy of the pageloader manifest with the
        '%webserver%' placeholder expanded, registers it for pushing to
        the device, and records the pageloader command line arguments in
        self._test_args as {"<location>-<testname>": extra_args}.
        """
        # Avoid the shared-mutable-default-argument pitfall: a literal
        # [] default would be shared by every instance of this class.
        if repos is None:
            repos = []

        PerfTest.__init__(self, dm=dm, phone=phone, options=options,
                          config_file=config_file, chunk=chunk, repos=repos)

        self._test_args = {}
        config_vars = {'webserver_url': options.webserver_url}

        try:
            location_items = self.cfg.items('locations', False, config_vars)
        except ConfigParser.NoSectionError:
            # No [locations] section: fall back to a single local location.
            location_items = [('local', None)]

        # tpargs does not depend on the test or location, so read it
        # once instead of on every loop iteration.
        tpargs = self.cfg.get('settings', 'tpargs')

        # Finalize test configuration
        for test_location, test_path in location_items:
            if test_location in config_vars:
                # Ignore the pseudo-options which result from passing
                # the config_vars for interpolation.
                continue

            for test_name in self._tests:
                tpname = self._tests[test_name]
                manifest_root = "file:%s" % self._paths['dest']
                tppath = os.path.join(manifest_root, tpname)
                extra_args = "-tp %s.develop %s" % (tppath, tpargs)

                try:
                    for source in self._paths['sources']:
                        fname = os.path.join(self.autophone_directory,
                                             source, tpname)
                        if not os.path.exists(fname):
                            continue

                        with open(fname, 'r') as fHandle:
                            lines = fHandle.readlines()

                        if test_location == 'remote':
                            manifest_root = options.webserver_url

                        # Write a ".develop" copy of the manifest with
                        # the webserver placeholder expanded, and record
                        # it so it gets pushed to the device.
                        manifest = "%s.develop" % fname
                        with open(manifest, 'w') as fHandle:
                            for line in lines:
                                fHandle.write(line.replace('%webserver%',
                                                           manifest_root))
                        self._pushes[manifest] = "%s.develop" % self._pushes[fname]

                except Exception:
                    # Manifest generation is best-effort, but don't hide
                    # failures completely - record the traceback so a
                    # broken manifest can be diagnosed.
                    self.loggerdeco.exception(
                        'TalosTest: failed to generate developer manifest '
                        'for %s', tpname)

                self.loggerdeco.debug(
                    'test_location: %s, test_name: %s, test_path: %s, '
                    'test: %s, extra_args: %s' %
                    (test_location, test_name, test_path,
                     self._tests[test_name], extra_args))
                self._test_args["%s-%s" % (test_location, test_name)] = extra_args

    @property
    def name(self):
        """Suite name reported for this test, including any suffix."""
        suffix = self.name_suffix
        return 'autophone-talos%s' % suffix

    def run_job(self):
        """Run the Talos pageloader tests for every configured test.

        For each entry in self._test_args the test runs up to
        self._iterations iterations using the pageloader addon; each
        successful iteration contributes a suite to the Perfherder
        artifact.  If no iteration produces a measurement the test is
        reported as busted and a notification mail is sent.

        Returns True when the job ran to completion, False when setup
        failed or the job was interrupted.
        """
        is_test_completed = False
        # The pageloader addon drives the tp pageload tests and writes
        # its results to logcat.
        custom_addons = ['pageloader.xpi']

        # Bail out early if the local test pages cannot be installed.
        if not self.install_local_pages():
            self.test_failure(
                self.name, 'TEST_UNEXPECTED_FAIL',
                'Aborting test - Could not install local pages on phone.',
                PhoneTestResult.EXCEPTION)
            return is_test_completed

        # Sanity check that Fennec can run with the addon installed.
        if not self.create_profile(custom_addons=custom_addons):
            self.test_failure(
                self.name, 'TEST_UNEXPECTED_FAIL',
                'Aborting test - Could not run Fennec.',
                PhoneTestResult.BUSTED)
            return is_test_completed

        is_test_completed = True
        testcount = len(self._test_args.keys())
        test_items = enumerate(self._test_args.iteritems(), 1)
        for testnum, (testname, test_args) in test_items:
            if self.fennec_crashed:
                break
            # Re-clone the logger so every message is tagged with the
            # phone, build and test currently being run.
            self.loggerdeco = self.loggerdeco.clone(
                extradict={'phoneid': self.phone.id,
                           'buildid': self.build.id,
                           'testname': testname},
                extraformat='%(phoneid)s|%(buildid)s|%(testname)s|%(message)s')
            self.dm._logger = self.loggerdeco
            self.loggerdeco.info('Running test (%d/%d) for %d iterations' %
                                 (testnum, testcount, self._iterations))

            # success == False indicates that none of the attempts
            # were successful in getting any measurement. This is
            # typically due to a regression in the browser which should
            # be reported.
            success = False
            command = None

            if self.fennec_crashed:
                break

            for iteration in range(1, self._iterations+1):
                command = self.worker_subprocess.process_autophone_cmd(
                    test=self, require_ip_address=testname.startswith('remote'))
                if command['interrupt']:
                    is_test_completed = False
                    self.handle_test_interrupt(command['reason'],
                                               command['test_result'])
                    break
                if self.fennec_crashed:
                    break

                self.update_status(message='Test %d/%d, '
                                   'run %d, for test_args %s' %
                                   (testnum, testcount, iteration, test_args))

                # Each iteration gets a fresh profile; a failure to
                # create one only skips this iteration.
                if not self.create_profile(custom_addons=custom_addons):
                    self.test_failure(test_args,
                                      'TEST_UNEXPECTED_FAIL',
                                      'Failed to create profile',
                                      PhoneTestResult.TESTFAILED)
                    continue

                measurement = self.runtest(test_args)
                if measurement:
                    # Lazily create the artifact so it only exists when
                    # at least one measurement succeeded.
                    if not self.perfherder_artifact:
                        self.perfherder_artifact = PerfherderArtifact()
                    suite = self.create_suite(measurement['pageload_metric'],
                                             testname)
                    self.perfherder_artifact.add_suite(suite)
                    self.test_pass(test_args)
                else:
                    self.test_failure(
                        test_args,
                        'TEST_UNEXPECTED_FAIL',
                        'Failed to get measurement.',
                        PhoneTestResult.TESTFAILED)
                    continue
                success = True

            if not success:
                # If we have not gotten a single measurement at this point,
                # just bail and report the failure rather than wasting time
                # continuing more attempts.
                self.loggerdeco.info(
                    'Failed to get measurements for test %s after '
                    '%d iterations' % (testname, self._iterations))
                self.worker_subprocess.mailer.send(
                    '%s %s failed for Build %s %s on Phone %s' %
                    (self.__class__.__name__, testname, self.build.tree, self.build.id,
                     self.phone.id),
                    'No measurements were detected for test %s.\n\n'
                    'Job        %s\n'
                    'Phone      %s\n'
                    'Repository %s\n'
                    'Build      %s\n'
                    'Revision   %s\n' %
                    (testname,
                     self.job_url,
                     self.phone.id,
                     self.build.tree,
                     self.build.id,
                     self.build.revision))
                self.test_failure(self.name, 'TEST_UNEXPECTED_FAIL',
                                  'No measurements detected.',
                                  PhoneTestResult.BUSTED)

                self.loggerdeco.debug('publishing results')

            if command and command['interrupt']:
                break
            elif not success:
                break

        return is_test_completed

    def runtest(self, extra_args):
        """Run a single Talos pageloader pass.

        Returns {'pageload_metric': metric} on success, or an empty
        dict when no measurement was obtained.
        """
        # Start from a clean logcat so analyze_logcat only sees output
        # produced by this run.
        self.logcat.clear()

        # Launch Fennec with the pageloader arguments; no url is passed
        # directly - the manifest drives the page loads.
        self.run_fennec_with_profile(self.build.app_name, "",
                                     extra_args=extra_args.split(' '))

        # Parse the results out of logcat immediately, before more
        # output accumulates.
        pageload_metric = self.analyze_logcat()

        # A zero summary means the run produced no usable measurement.
        if pageload_metric['summary'] != 0:
            return {'pageload_metric': pageload_metric}
        return {}

    def analyze_logcat(self):
        """Parse the Talos tp report from logcat and compute pageload metrics.

        The report emitted by the test looks like::

            I/GeckoDump( 2284): __start_tp_report
            I/GeckoDump( 2284): _x_x_mozilla_page_load
            I/GeckoDump( 2284): _x_x_mozilla_page_load_details
            I/GeckoDump( 2284): |i|pagename|runs|
            I/GeckoDump( 2284): |0;amazon.com/www.amazon.com/index.html;2386;1146
            I/GeckoDump( 2284): |1;m.yahoo.co.jp/www.yahoo.co.jp/index.html;1724;901
            I/GeckoDump( 2284): |2;m.accuweather.com/www.accuweather.com/index.html;228;231
            I/GeckoDump( 2284): |3;m.yandex.ru/www.yandex.ru/index.html;6043;2984
            I/GeckoDump( 2284): |4;m.wikipedia.com/en.m.wikipedia.org/index.html;734;385
            I/GeckoDump( 2284): |5;m.espn.com/m.espn.go.com/index.html;576;419
            I/GeckoDump( 2284): |6;m.bbc.co.uk/www.bbc.co.uk/mobile/index.html;349;229
            I/GeckoDump( 2284): __end_tp_report
            I/GeckoDump( 2284): __start_cc_report
            I/GeckoDump( 2284): _x_x_mozilla_cycle_collect,3390
            I/GeckoDump( 2284): __end_cc_report
            I/GeckoDump( 2284): __startTimestamp1433438438092__endTimestamp

        We will parse the syntax here and build up a {name:[value,],} hash.
        Next we will compute the median value for each name.
        Finally we will report the geometric mean of all of the median values.

        Returns a dict mapping each page name to its median load time plus
        a 'summary' key holding the geometric mean of the medians (0 when
        no measurements were found within the polling window).
        """
        self.loggerdeco.debug('analyzing logcat')

        # Bug fix: the page index must be matched with [0-9]+ — the old
        # single-digit [0-9] pattern failed to match lines for page
        # indices of 10 or more, silently dropping those pages.
        re_page_data = re.compile(r'.*\|[0-9]+;([a-zA-Z0-9\.\/\-]+);([0-9;]+).*')
        re_end_report = re.compile(r'.*__end_tp_report.*')

        attempt = 1
        max_time = 90  # maximum time to wait for completeness score
        wait_time = 3  # time to wait between attempts
        max_attempts = max_time / wait_time

        results = {}
        pageload_metric = {'summary': 0}
        # Poll logcat until the report terminator yields a non-zero
        # summary, the retry budget is exhausted, or fennec crashes.
        while attempt <= max_attempts and pageload_metric['summary'] == 0:
            buf = self.logcat.get()
            for line in buf:
                self.loggerdeco.debug('analyze_logcat: %s', line)
                if re_end_report.match(line):
                    # End of the tp report: compute the per-page medians
                    # and the overall geometric-mean score.
                    data = []
                    for page in results:
                        data.append(median(results[page]))
                        pageload_metric[page] = median(results[page])
                    pageload_metric['summary'] = geometric_mean(data)
                    break

                match = re_page_data.match(line)
                if match:
                    page_name = match.group(1)
                    numbers = match.group(2)
                    if page_name and numbers:
                        # Keep only the leading test page name; drop the
                        # site path after the first '/'.
                        page_name = page_name.split('/')[0]
                        numbers = [float(x) for x in numbers.split(';')]
                        results[page_name] = numbers

            if self.fennec_crashed:
                # If fennec crashed, don't bother looking for pageload metric
                break
            if pageload_metric['summary'] == 0:
                sleep(wait_time)
                attempt += 1
        if pageload_metric['summary'] == 0:
            self.loggerdeco.warning('Unable to find pageload metric')

        return pageload_metric

    def create_suite(self, metric, testname):
        """Build a PerfherderSuite from a pageload metric dict.

        The 'summary' entry becomes the suite value; every other key in
        *metric* is added as a subtest.
        """
        suite = PerfherderSuite(name=testname, value=metric['summary'])
        for subtest_name, subtest_value in metric.items():
            if subtest_name == 'summary':
                continue
            suite.add_subtest(subtest_name, subtest_value)
        return suite
# Exemplo n.º 7 (score: 0) — extraction artifact left over from the
# code-sharing page this file was scraped from; kept as a comment so
# the module remains syntactically valid Python.
    def run_job(self):
        """Run the s1s2 test for every configured url.

        For each url in self._urls, make up to self.stderrp_attempts
        attempts of self._iterations iterations each, collecting an
        'uncached' (first load) and a 'cached' (second load) measurement
        per iteration, then publish per-metric medians and geometric
        means as a Perfherder suite.

        Returns True when the job ran to completion (even if individual
        tests failed or were rejected), False when setup (installing
        local pages or creating the profile) aborted the job.
        """
        is_test_completed = False

        # Setup: both local pages and a usable profile are prerequisites
        # for every test; abort the whole job if either fails.
        if not self.install_local_pages():
            self.add_failure(
                self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                'Aborting test - Could not install local pages on phone.',
                TreeherderStatus.EXCEPTION)
            return is_test_completed

        if not self.create_profile():
            self.add_failure(
                self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                'Aborting test - Could not run Fennec.',
                TreeherderStatus.BUSTED)
            return is_test_completed

        perfherder_options = PerfherderOptions(self.perfherder_options,
                                               repo=self.build.tree)
        is_test_completed = True
        testcount = len(self._urls.keys())
        for testnum, (testname, url) in enumerate(self._urls.iteritems(), 1):
            # Re-clone the logger so every message carries the build and
            # test identifiers for this url.
            self.loggerdeco = self.loggerdeco.clone(
                extradict={
                    'repo': self.build.tree,
                    'buildid': self.build.id,
                    'buildtype': self.build.type,
                    'sdk': self.phone.sdk,
                    'platform': self.build.platform,
                    'testname': testname
                },
                extraformat='S1S2TestJob %(repo)s %(buildid)s %(buildtype)s %(sdk)s %(platform)s %(testname)s %(message)s')
            self.dm._logger = self.loggerdeco
            self.loggerdeco.info('Running test (%d/%d) for %d iterations',
                                 testnum, testcount, self._iterations)

            command = None
            for attempt in range(1, self.stderrp_attempts+1):
                # dataset is a list of the measurements made for the
                # iterations for this test.
                #
                # An empty item in the dataset list represents a
                # failure to obtain any measurement for that
                # iteration.
                #
                # It is possible for an item in the dataset to have an
                # uncached value and not have a corresponding cached
                # value if the cached test failed to record the
                # values.

                # iteration is read after the loop (for logging), so
                # seed it in case the loop body never runs.
                iteration = 0
                dataset = []
                for iteration in range(1, self._iterations+1):
                    # Calling svc power stayon true will turn on the
                    # display for at least some devices if it has
                    # turned off.
                    self.dm.power_on()
                    command = self.worker_subprocess.process_autophone_cmd(
                        test=self, require_ip_address=url.startswith('http'))
                    if command['interrupt']:
                        self.handle_test_interrupt(command['reason'],
                                                   command['test_result'])
                        break
                    self.update_status(message='Attempt %d/%d for Test %d/%d, '
                                       'run %d, for url %s' %
                                       (attempt, self.stderrp_attempts,
                                        testnum, testcount, iteration, url))

                    # Each iteration needs a fresh profile; a failure
                    # costs the iteration but not the attempt.
                    if not self.create_profile():
                        self.add_failure(
                            self.name,
                            TestStatus.TEST_UNEXPECTED_FAIL,
                            'Failed to create profile',
                            TreeherderStatus.TESTFAILED)
                        continue

                    # First load of the url: the 'uncached' measurement.
                    measurement = self.runtest(url)
                    if not measurement:
                        self.loggerdeco.warning(
                            '%s %s Attempt %s Failed to get uncached measurement.',
                            testname, url, attempt)
                        continue

                    self.add_pass(url, text='uncached')
                    dataset.append({'uncached': measurement})

                    # Second load of the same url: the 'cached' measurement.
                    measurement = self.runtest(url)
                    if not measurement:
                        self.loggerdeco.warning(
                            '%s %s Attempt %s Failed to get cached measurement.',
                            testname, url, attempt)
                        continue

                    self.add_pass(url, text='cached')
                    dataset[-1]['cached'] = measurement

                    # Stop iterating early once the standard error of the
                    # throbber measurements is already acceptable.
                    if self.is_stderr_below_threshold(
                            ('throbberstart',
                             'throbberstop'),
                            dataset,
                            self.stderrp_accept):
                        self.loggerdeco.info(
                            'Accepted test (%d/%d) after %d of %d iterations',
                            testnum, testcount, iteration, self._iterations)
                        break

                if command and command['interrupt']:
                    break
                # Evaluate what this attempt gathered.
                measurements = len(dataset)
                if measurements > 0 and self._iterations != measurements:
                    self.add_failure(
                        self.name,
                        TestStatus.TEST_UNEXPECTED_FAIL,
                        'Failed to get all measurements',
                        TreeherderStatus.TESTFAILED)
                elif measurements == 0:
                    # If we have not gotten a single measurement at this point,
                    # just bail and report the failure rather than wasting time
                    # continuing more attempts.
                    self.add_failure(
                        self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                        'No measurements detected.',
                        TreeherderStatus.BUSTED)
                    self.loggerdeco.info(
                        'Failed to get measurements for test %s after %d/%d attempt '
                        'of %d iterations', testname, attempt,
                        self.stderrp_attempts, self._iterations)
                    self.worker_subprocess.mailer.send(
                        '%s %s failed for Build %s %s on %s %s' %
                        (self.__class__.__name__, testname, self.build.tree,
                         self.build.id, utils.host(), self.phone.id),
                        'No measurements were detected for test %s.\n\n'
                        'Job        %s\n'
                        'Host       %s\n'
                        'Phone      %s\n'
                        'Repository %s\n'
                        'Build      %s\n'
                        'Revision   %s\n' %
                        (testname,
                         self.job_url,
                         utils.host(),
                         self.phone.id,
                         self.build.tree,
                         self.build.id,
                         self.build.changeset))
                    break

                # Decide whether this attempt's spread is good enough to
                # publish; a rejected attempt is still reported but
                # triggers another attempt (if any remain).
                if self.is_stderr_below_threshold(
                        ('throbberstart',
                         'throbberstop'),
                        dataset,
                        self.stderrp_reject):
                    rejected = False
                else:
                    rejected = True
                    self.loggerdeco.info(
                        'Rejected test (%d/%d) after %d/%d iterations',
                        testnum, testcount, iteration, self._iterations)

                self.loggerdeco.debug('publishing results')

                # perfherder_values layout:
                # {metric: {'uncached': {'median': m, 'values': [...]},
                #           'cached':   {'median': m, 'values': [...]},
                #           'geometric_mean': g},
                #  'geometric_mean': 0}
                perfherder_values = {'geometric_mean': 0}
                metric_keys = ['throbberstart', 'throbberstop', 'throbbertime']
                cache_names = {'uncached': 'first', 'cached': 'second'}
                cache_keys = cache_names.keys()

                for metric_key in metric_keys:
                    perfherder_values[metric_key] = {'geometric_mean': 0}
                    for cache_key in cache_keys:
                        perfherder_values[metric_key][cache_key] = {'median': 0, 'values': []}

                for datapoint in dataset:
                    for cache_key in datapoint:
                        starttime = datapoint[cache_key]['starttime']
                        throbberstart = datapoint[cache_key]['throbberstart']
                        throbberstop = datapoint[cache_key]['throbberstop']
                        self.report_results(
                            starttime=starttime,
                            tstrt=throbberstart,
                            tstop=throbberstop,
                            testname=testname,
                            cache_enabled=(cache_key == 'cached'),
                            rejected=rejected)
                        # Raw values are deltas relative to the
                        # application start time.
                        perfherder_values['throbberstart'][cache_key]['values'].append(
                            throbberstart - starttime)
                        perfherder_values['throbberstop'][cache_key]['values'].append(
                            throbberstop - starttime)
                        perfherder_values['throbbertime'][cache_key]['values'].append(
                            throbberstop - throbberstart)

                # Per metric: median over iterations per cache state,
                # then the geometric mean of the two medians.
                test_values = []
                for metric_key in metric_keys:
                    for cache_key in cache_keys:
                        perfherder_values[metric_key][cache_key]['median'] = utils.median(
                            perfherder_values[metric_key][cache_key]['values'])
                    perfherder_values[metric_key]['geometric_mean'] = utils.geometric_mean(
                        [perfherder_values[metric_key]['uncached']['median'],
                         perfherder_values[metric_key]['cached']['median']])
                    test_values.append(perfherder_values[metric_key]['geometric_mean'])

                perfherder_suite = PerfherderSuite(name=testname,
                                                   value=utils.geometric_mean(test_values),
                                                   options=perfherder_options)
                for metric_key in metric_keys:
                    for cache_key in cache_keys:
                        cache_name = cache_names[cache_key]
                        subtest_name = "%s %s" % (metric_key, cache_name)
                        perfherder_suite.add_subtest(
                            subtest_name,
                            perfherder_values[metric_key][cache_key]['median'],
                            options=perfherder_options)

                self.perfherder_artifact = PerfherderArtifact()
                self.perfherder_artifact.add_suite(perfherder_suite)
                self.loggerdeco.debug("PerfherderArtifact: %s", self.perfherder_artifact)

                if not rejected:
                    break

            if command and command['interrupt']:
                break

        return is_test_completed
# Exemplo n.º 8 (score: 0) — extraction artifact left over from the
# code-sharing page this file was scraped from; kept as a comment so
# the module remains syntactically valid Python.
class S1S2Test(PerfTest):
    """Startup/pageload (s1s2) performance test.

    Measures application start and throbber start/stop times reported
    in logcat while loading each configured url, and publishes the
    results as Perfherder suites.
    """

    def __init__(self, dm=None, phone=None, options=None,
                 config_file=None, chunk=1, repos=None):
        """Build the {testname: url} map from the config's 'locations'
        section (falling back to a single 'local' location).

        Bug fix: the default for *repos* was a mutable list literal,
        which is shared across all instances; use None as the sentinel
        and create a fresh list per call instead.
        """
        if repos is None:
            repos = []

        PerfTest.__init__(self, dm=dm, phone=phone, options=options,
                          config_file=config_file, chunk=chunk, repos=repos)

        # Map URLS - {urlname: url} - urlname serves as testname
        self._urls = {}
        config_vars = {'webserver_url': options.webserver_url}

        try:
            location_items = self.cfg.items('locations', False, config_vars)
        except ConfigParser.NoSectionError:
            location_items = [('local', None)]

        # Finalize test configuration
        for test_location, test_path in location_items:
            if test_location in config_vars:
                # Ignore the pseudo-options which result from passing
                # the config_vars for interpolation.
                continue
            for test_name in self._tests:
                if test_path:
                    test_url = urlparse.urljoin(test_path, self._tests[test_name])
                else:
                    # No remote path configured: serve the page from the
                    # locally-installed copy on the device.
                    test_url = 'file://' + os.path.join(self._paths['dest'],
                                                        self._tests[test_name])
                self.loggerdeco.debug(
                    'test_location: %s, test_name: %s, test_path: %s, '
                    'test: %s, test_url: %s' %
                    (test_location, test_name, test_path,
                     self._tests[test_name], test_url))
                self._urls["%s-%s" % (test_location, test_name)] = test_url

        self.loggerdeco.debug('S1S2Test: %s', self.__dict__)

    @property
    def name(self):
        # Treeherder/job name for this test.
        return 'autophone-s1s2%s' % self.name_suffix

    @property
    def phonedash_url(self):
        # For s1s2test, there are 4 different test names due to historical design.
        # We pick local-blank as the default.
        return self._phonedash_url('local-blank')

    def run_job(self):
        """Run the s1s2 test for every configured url.

        For each url, make up to self.stderrp_attempts attempts of
        self._iterations iterations each, collecting an 'uncached'
        (first load) and a 'cached' (second load) measurement per
        iteration, then publish per-metric medians and geometric means
        as a Perfherder suite.

        Returns True when the job ran to completion (even if individual
        tests failed or were rejected), False when setup aborted the job.
        """
        is_test_completed = False

        if not self.install_local_pages():
            self.add_failure(
                self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                'Aborting test - Could not install local pages on phone.',
                TreeherderStatus.EXCEPTION)
            return is_test_completed

        if not self.create_profile():
            self.add_failure(
                self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                'Aborting test - Could not run Fennec.',
                TreeherderStatus.BUSTED)
            return is_test_completed

        perfherder_options = PerfherderOptions(self.perfherder_options,
                                               repo=self.build.tree)
        is_test_completed = True
        testcount = len(self._urls.keys())
        for testnum, (testname, url) in enumerate(self._urls.iteritems(), 1):
            # Re-clone the logger so every message carries the build and
            # test identifiers for this url.
            self.loggerdeco = self.loggerdeco.clone(
                extradict={
                    'repo': self.build.tree,
                    'buildid': self.build.id,
                    'buildtype': self.build.type,
                    'sdk': self.phone.sdk,
                    'platform': self.build.platform,
                    'testname': testname
                },
                extraformat='S1S2TestJob %(repo)s %(buildid)s %(buildtype)s %(sdk)s %(platform)s %(testname)s %(message)s')
            self.dm._logger = self.loggerdeco
            self.loggerdeco.info('Running test (%d/%d) for %d iterations',
                                 testnum, testcount, self._iterations)

            command = None
            for attempt in range(1, self.stderrp_attempts+1):
                # dataset is a list of the measurements made for the
                # iterations for this test.
                #
                # An empty item in the dataset list represents a
                # failure to obtain any measurement for that
                # iteration.
                #
                # It is possible for an item in the dataset to have an
                # uncached value and not have a corresponding cached
                # value if the cached test failed to record the
                # values.

                # iteration is read after the loop (for logging), so
                # seed it in case the loop body never runs.
                iteration = 0
                dataset = []
                for iteration in range(1, self._iterations+1):
                    # Calling svc power stayon true will turn on the
                    # display for at least some devices if it has
                    # turned off.
                    self.dm.power_on()
                    command = self.worker_subprocess.process_autophone_cmd(
                        test=self, require_ip_address=url.startswith('http'))
                    if command['interrupt']:
                        self.handle_test_interrupt(command['reason'],
                                                   command['test_result'])
                        break
                    self.update_status(message='Attempt %d/%d for Test %d/%d, '
                                       'run %d, for url %s' %
                                       (attempt, self.stderrp_attempts,
                                        testnum, testcount, iteration, url))

                    if not self.create_profile():
                        self.add_failure(
                            self.name,
                            TestStatus.TEST_UNEXPECTED_FAIL,
                            'Failed to create profile',
                            TreeherderStatus.TESTFAILED)
                        continue

                    # First load of the url: the 'uncached' measurement.
                    measurement = self.runtest(url)
                    if not measurement:
                        self.loggerdeco.warning(
                            '%s %s Attempt %s Failed to get uncached measurement.',
                            testname, url, attempt)
                        continue

                    self.add_pass(url, text='uncached')
                    dataset.append({'uncached': measurement})

                    # Second load of the same url: the 'cached' measurement.
                    measurement = self.runtest(url)
                    if not measurement:
                        self.loggerdeco.warning(
                            '%s %s Attempt %s Failed to get cached measurement.',
                            testname, url, attempt)
                        continue

                    self.add_pass(url, text='cached')
                    dataset[-1]['cached'] = measurement

                    # Stop iterating early once the standard error of the
                    # throbber measurements is already acceptable.
                    if self.is_stderr_below_threshold(
                            ('throbberstart',
                             'throbberstop'),
                            dataset,
                            self.stderrp_accept):
                        self.loggerdeco.info(
                            'Accepted test (%d/%d) after %d of %d iterations',
                            testnum, testcount, iteration, self._iterations)
                        break

                if command and command['interrupt']:
                    break
                measurements = len(dataset)
                if measurements > 0 and self._iterations != measurements:
                    self.add_failure(
                        self.name,
                        TestStatus.TEST_UNEXPECTED_FAIL,
                        'Failed to get all measurements',
                        TreeherderStatus.TESTFAILED)
                elif measurements == 0:
                    # If we have not gotten a single measurement at this point,
                    # just bail and report the failure rather than wasting time
                    # continuing more attempts.
                    self.add_failure(
                        self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                        'No measurements detected.',
                        TreeherderStatus.BUSTED)
                    self.loggerdeco.info(
                        'Failed to get measurements for test %s after %d/%d attempt '
                        'of %d iterations', testname, attempt,
                        self.stderrp_attempts, self._iterations)
                    self.worker_subprocess.mailer.send(
                        '%s %s failed for Build %s %s on %s %s' %
                        (self.__class__.__name__, testname, self.build.tree,
                         self.build.id, utils.host(), self.phone.id),
                        'No measurements were detected for test %s.\n\n'
                        'Job        %s\n'
                        'Host       %s\n'
                        'Phone      %s\n'
                        'Repository %s\n'
                        'Build      %s\n'
                        'Revision   %s\n' %
                        (testname,
                         self.job_url,
                         utils.host(),
                         self.phone.id,
                         self.build.tree,
                         self.build.id,
                         self.build.changeset))
                    break

                # Decide whether this attempt's spread is good enough to
                # publish; a rejected attempt is still reported but
                # triggers another attempt (if any remain).
                if self.is_stderr_below_threshold(
                        ('throbberstart',
                         'throbberstop'),
                        dataset,
                        self.stderrp_reject):
                    rejected = False
                else:
                    rejected = True
                    self.loggerdeco.info(
                        'Rejected test (%d/%d) after %d/%d iterations',
                        testnum, testcount, iteration, self._iterations)

                self.loggerdeco.debug('publishing results')

                # perfherder_values layout:
                # {metric: {'uncached': {'median': m, 'values': [...]},
                #           'cached':   {'median': m, 'values': [...]},
                #           'geometric_mean': g},
                #  'geometric_mean': 0}
                perfherder_values = {'geometric_mean': 0}
                metric_keys = ['throbberstart', 'throbberstop', 'throbbertime']
                cache_names = {'uncached': 'first', 'cached': 'second'}
                cache_keys = cache_names.keys()

                for metric_key in metric_keys:
                    perfherder_values[metric_key] = {'geometric_mean': 0}
                    for cache_key in cache_keys:
                        perfherder_values[metric_key][cache_key] = {'median': 0, 'values': []}

                for datapoint in dataset:
                    for cache_key in datapoint:
                        starttime = datapoint[cache_key]['starttime']
                        throbberstart = datapoint[cache_key]['throbberstart']
                        throbberstop = datapoint[cache_key]['throbberstop']
                        self.report_results(
                            starttime=starttime,
                            tstrt=throbberstart,
                            tstop=throbberstop,
                            testname=testname,
                            cache_enabled=(cache_key == 'cached'),
                            rejected=rejected)
                        # Raw values are deltas relative to the
                        # application start time.
                        perfherder_values['throbberstart'][cache_key]['values'].append(
                            throbberstart - starttime)
                        perfherder_values['throbberstop'][cache_key]['values'].append(
                            throbberstop - starttime)
                        perfherder_values['throbbertime'][cache_key]['values'].append(
                            throbberstop - throbberstart)

                # Per metric: median over iterations per cache state,
                # then the geometric mean of the two medians.
                test_values = []
                for metric_key in metric_keys:
                    for cache_key in cache_keys:
                        perfherder_values[metric_key][cache_key]['median'] = utils.median(
                            perfherder_values[metric_key][cache_key]['values'])
                    perfherder_values[metric_key]['geometric_mean'] = utils.geometric_mean(
                        [perfherder_values[metric_key]['uncached']['median'],
                         perfherder_values[metric_key]['cached']['median']])
                    test_values.append(perfherder_values[metric_key]['geometric_mean'])

                perfherder_suite = PerfherderSuite(name=testname,
                                                   value=utils.geometric_mean(test_values),
                                                   options=perfherder_options)
                for metric_key in metric_keys:
                    for cache_key in cache_keys:
                        cache_name = cache_names[cache_key]
                        subtest_name = "%s %s" % (metric_key, cache_name)
                        perfherder_suite.add_subtest(
                            subtest_name,
                            perfherder_values[metric_key][cache_key]['median'],
                            options=perfherder_options)

                self.perfherder_artifact = PerfherderArtifact()
                self.perfherder_artifact.add_suite(perfherder_suite)
                self.loggerdeco.debug("PerfherderArtifact: %s", self.perfherder_artifact)

                if not rejected:
                    break

            if command and command['interrupt']:
                break

        return is_test_completed

    def runtest(self, url):
        """Load *url* once and return a single measurement datapoint.

        Returns a dict with 'starttime', 'throbberstart', 'throbberstop'
        and 'throbbertime' keys on success, or an empty dict when the
        throbber times could not be measured.
        """
        # Clear logcat
        self.worker_subprocess.logcat.clear()

        # Run test
        self.run_fennec_with_profile(self.build.app_name, url)

        # Get results - do this now so we don't have as much to
        # parse in logcat.
        starttime, throbberstart, throbberstop = self.analyze_logcat()

        self.wait_for_fennec(max_wait_time=1,
                             wait_time=1,
                             kill_wait_time=1)
        self.handle_crashes()

        # Ensure we succeeded - no 0's reported
        datapoint = {}
        if throbberstart and throbberstop:
            datapoint['starttime'] = starttime
            datapoint['throbberstart'] = throbberstart
            datapoint['throbberstop'] = throbberstop
            datapoint['throbbertime'] = throbberstop - throbberstart
        return datapoint

    def analyze_logcat(self):
        """Poll logcat for the application start and throbber start/stop
        timestamps of the current page load.

        Returns a (start_time, throbber_start_time, throbber_stop_time)
        tuple of integers; all three are 0 when the measurements could
        not be obtained within the polling window.
        """
        self.loggerdeco.debug('analyzing logcat')

        logcat_prefix = r'\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3}'
        application_start = r'..Gecko.*zerdatime (\d+) - .*application start'
        throbber_prefix = r'..Gecko.*zerdatime (\d+) -'
        re_start_time = re.compile(r'%s %s' % (logcat_prefix, application_start))
        re_throbber_start_time = re.compile('%s %s (Throbber|page load) start' %
                                            (logcat_prefix, throbber_prefix))
        re_throbber_stop_time = re.compile('%s %s (Throbber|page load) stop' %
                                           (logcat_prefix, throbber_prefix))
        # geckoview_example emits page start and stop messages for
        # about:blank prior to loading real pages. When is_about_blank
        # is True, we will ignore any page start and stop messages.
        # geckoview_example also will load https://mozilla.org/ if it is
        # launched without an url.
        ignore_page = False
        re_about_blank_start = re.compile('%s I/GeckoViewActivity.*Starting to load page at about:blank' %
                                          logcat_prefix)
        re_mozilla_org_start = re.compile('%s I/GeckoViewActivity.*Starting to load page at https://mozilla.org/' %
                                          logcat_prefix)

        start_time = 0
        throbber_start_time = 0
        throbber_stop_time = 0

        attempt = 1
        max_time = 90 # maximum time to wait for throbbers
        wait_time = 3 # time to wait between attempts
        max_attempts = max_time / wait_time
        success = False

        while not success and attempt <= max_attempts:
            buf = self.worker_subprocess.logcat.get()
            for line in buf:
                self.loggerdeco.debug('analyze_logcat: %s', line)

                # While ignoring a spurious page (about:blank or the
                # default mozilla.org load), skip lines until its
                # throbber stop is seen.
                if ignore_page:
                    self.loggerdeco.debug('analyze_logcat: ignoring %s', line)
                    match = re_throbber_stop_time.match(line)
                    if match:
                        ignore_page = False
                    continue
                match = re_about_blank_start.match(line)
                if match:
                    self.loggerdeco.debug('analyze_logcat: ignoring %s', line)
                    ignore_page = True
                    continue
                match = re_mozilla_org_start.match(line)
                if match:
                    self.loggerdeco.debug('analyze_logcat: ignoring %s', line)
                    ignore_page = True
                    self.loggerdeco.warning(
                        'analyze_logcat: unexpected load of https://mozilla.org. '
                        'geckoview_example launched without url?')
                    continue

                if not start_time:
                    match = re_start_time.match(line)
                    if match:
                        start_time = int(match.group(1))
                        self.loggerdeco.info(
                            'analyze_logcat: new start_time: %s',
                            start_time)
                    continue # line

                # We want the first throbberstart and throbberstop
                # after the start_time.
                if not throbber_start_time:
                    match = re_throbber_start_time.match(line)
                    if match:
                        throbber_start_time = int(match.group(1))
                        self.loggerdeco.info(
                            'analyze_logcat: throbber_start_time: %s',
                            throbber_start_time)
                    continue # line

                match = re_throbber_stop_time.match(line)
                if match:
                    throbber_stop_time = int(match.group(1))
                    self.loggerdeco.info(
                        'analyze_logcat: throbber_stop_time: %s',
                        throbber_stop_time)
                    break # line

            if self.handle_crashes():
                # If fennec crashed, don't bother looking for the Throbbers
                self.loggerdeco.warning('analyze_logcat: fennec crashed.')
                break # attempt

            if start_time and throbber_start_time and throbber_stop_time:
                self.loggerdeco.info(
                    'analyze_logcat: got measurements '
                    'start_time: %s, throbber start: %s, throbber stop: %s',
                    start_time, throbber_start_time, throbber_stop_time)
                success = True
            else:
                self.loggerdeco.debug(
                    'analyze_logcat: attempt %s, start_time: %s, '
                    'throbber_start_time: %s, throbber_stop_time: %s' %
                    (attempt, start_time, throbber_start_time, throbber_stop_time))
                sleep(wait_time)
                attempt += 1

        if not success:
            # Zero everything so callers treat partial data as a miss.
            self.loggerdeco.warning(
                'analyze_logcat: failed to get measurements '
                'start_time: %s, throbber start: %s, throbber stop: %s',
                start_time, throbber_start_time, throbber_stop_time)
            start_time = throbber_start_time = throbber_stop_time = 0

        return (start_time, throbber_start_time, throbber_stop_time)
class S1S2Test(PerfTest):
    """Page-load performance test for the s1s2 pages.

    For each configured test location/page, each iteration loads the url
    twice - an uncached (first) load and a cached (second) load - and
    parses logcat for the application start and Throbber start/stop
    times. Per-metric medians and geometric means are reported as a
    Perfherder artifact.
    """

    def __init__(self,
                 dm=None,
                 phone=None,
                 options=None,
                 config_file=None,
                 chunk=1,
                 repos=None):
        # repos defaults to None rather than a mutable [] so the default
        # list is not shared between instances (classic mutable-default
        # pitfall).
        if repos is None:
            repos = []

        PerfTest.__init__(self,
                          dm=dm,
                          phone=phone,
                          options=options,
                          config_file=config_file,
                          chunk=chunk,
                          repos=repos)

        # Map URLS - {urlname: url} - urlname serves as testname
        self._urls = {}
        config_vars = {'webserver_url': options.webserver_url}

        try:
            location_items = self.cfg.items('locations', False, config_vars)
        except ConfigParser.NoSectionError:
            # No [locations] section in the config; fall back to local
            # file urls only.
            location_items = [('local', None)]

        # Finalize test configuration
        for test_location, test_path in location_items:
            if test_location in config_vars:
                # Ignore the pseudo-options which result from passing
                # the config_vars for interpolation.
                continue
            for test_name in self._tests:
                if test_path:
                    # Remote location: serve the page via the webserver.
                    test_url = urlparse.urljoin(test_path,
                                                self._tests[test_name])
                else:
                    # Local location: load the page from the device.
                    test_url = 'file://' + os.path.join(
                        self._paths['dest'], self._tests[test_name])
                self.loggerdeco.debug(
                    'test_location: %s, test_name: %s, test_path: %s, '
                    'test: %s, test_url: %s' %
                    (test_location, test_name, test_path,
                     self._tests[test_name], test_url))
                self._urls["%s-%s" % (test_location, test_name)] = test_url

        self.loggerdeco.debug('S1S2Test: %s', self.__dict__)

    @property
    def name(self):
        """Unique test name used in reporting."""
        return 'autophone-s1s2%s' % self.name_suffix

    @property
    def phonedash_url(self):
        # For s1s2test, there are 4 different test names due to historical design.
        # We pick local-blank as the default.
        return self._phonedash_url('local-blank')

    def run_job(self):
        """Run the s1s2 test for every configured url.

        Returns True when the job ran to completion (individual
        measurements may still have failed), False when the job was
        aborted before the tests could start.
        """
        is_test_completed = False

        if not self.install_local_pages():
            self.add_failure(
                self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                'Aborting test - Could not install local pages on phone.',
                TreeherderStatus.EXCEPTION)
            return is_test_completed

        if not self.create_profile():
            self.add_failure(self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                             'Aborting test - Could not run Fennec.',
                             TreeherderStatus.BUSTED)
            return is_test_completed

        perfherder_options = PerfherderOptions(self.perfherder_options,
                                               repo=self.build.tree)
        is_test_completed = True
        # len() on the dict directly; no need to materialize keys().
        testcount = len(self._urls)
        for testnum, (testname, url) in enumerate(self._urls.iteritems(), 1):
            self.loggerdeco = self.loggerdeco.clone(
                extradict={
                    'repo': self.build.tree,
                    'buildid': self.build.id,
                    'buildtype': self.build.type,
                    'sdk': self.phone.sdk,
                    'platform': self.build.platform,
                    'testname': testname
                },
                extraformat=
                'S1S2TestJob %(repo)s %(buildid)s %(buildtype)s %(sdk)s %(platform)s %(testname)s %(message)s'
            )
            self.dm._logger = self.loggerdeco
            self.loggerdeco.info('Running test (%d/%d) for %d iterations',
                                 testnum, testcount, self._iterations)

            command = None
            for attempt in range(1, self.stderrp_attempts + 1):
                # dataset is a list of the measurements made for the
                # iterations for this test.
                #
                # An empty item in the dataset list represents a
                # failure to obtain any measurement for that
                # iteration.
                #
                # It is possible for an item in the dataset to have an
                # uncached value and not have a corresponding cached
                # value if the cached test failed to record the
                # values.

                # iteration is referenced after the loop when logging a
                # rejection, so initialize it in case the loop body
                # never runs.
                iteration = 0
                dataset = []
                for iteration in range(1, self._iterations + 1):
                    # Calling svc power stayon true will turn on the
                    # display for at least some devices if it has
                    # turned off.
                    self.dm.power_on()
                    command = self.worker_subprocess.process_autophone_cmd(
                        test=self, require_ip_address=url.startswith('http'))
                    if command['interrupt']:
                        self.handle_test_interrupt(command['reason'],
                                                   command['test_result'])
                        break
                    self.update_status(message='Attempt %d/%d for Test %d/%d, '
                                       'run %d, for url %s' %
                                       (attempt, self.stderrp_attempts,
                                        testnum, testcount, iteration, url))

                    if not self.create_profile():
                        self.add_failure(self.name,
                                         TestStatus.TEST_UNEXPECTED_FAIL,
                                         'Failed to create profile',
                                         TreeherderStatus.TESTFAILED)
                        continue

                    # First load: uncached measurement.
                    measurement = self.runtest(url)
                    if not measurement:
                        self.loggerdeco.warning(
                            '%s %s Attempt %s Failed to get uncached measurement.',
                            testname, url, attempt)
                        continue

                    self.add_pass(url, text='uncached')
                    dataset.append({'uncached': measurement})

                    # Second load of the same url: cached measurement.
                    measurement = self.runtest(url)
                    if not measurement:
                        self.loggerdeco.warning(
                            '%s %s Attempt %s Failed to get cached measurement.',
                            testname, url, attempt)
                        continue

                    self.add_pass(url, text='cached')
                    dataset[-1]['cached'] = measurement

                    # Stop iterating early once the standard error
                    # percentage drops below the accept threshold.
                    if self.is_stderr_below_threshold(
                            ('throbberstart', 'throbberstop'), dataset,
                            self.stderrp_accept):
                        self.loggerdeco.info(
                            'Accepted test (%d/%d) after %d of %d iterations',
                            testnum, testcount, iteration, self._iterations)
                        break

                if command and command['interrupt']:
                    break
                measurements = len(dataset)
                if measurements > 0 and self._iterations != measurements:
                    self.add_failure(self.name,
                                     TestStatus.TEST_UNEXPECTED_FAIL,
                                     'Failed to get all measurements',
                                     TreeherderStatus.TESTFAILED)
                elif measurements == 0:
                    # If we have not gotten a single measurement at this point,
                    # just bail and report the failure rather than wasting time
                    # continuing more attempts.
                    self.add_failure(self.name,
                                     TestStatus.TEST_UNEXPECTED_FAIL,
                                     'No measurements detected.',
                                     TreeherderStatus.BUSTED)
                    self.loggerdeco.info(
                        'Failed to get measurements for test %s after %d/%d attempt '
                        'of %d iterations', testname, attempt,
                        self.stderrp_attempts, self._iterations)
                    self.worker_subprocess.mailer.send(
                        '%s %s failed for Build %s %s on %s %s' %
                        (self.__class__.__name__, testname, self.build.tree,
                         self.build.id, utils.host(), self.phone.id),
                        'No measurements were detected for test %s.\n\n'
                        'Job        %s\n'
                        'Host       %s\n'
                        'Phone      %s\n'
                        'Repository %s\n'
                        'Build      %s\n'
                        'Revision   %s\n' %
                        (testname, self.job_url, utils.host(), self.phone.id,
                         self.build.tree, self.build.id, self.build.changeset))
                    break

                # The whole attempt is rejected when the stderr
                # percentage never dropped below the reject threshold.
                if self.is_stderr_below_threshold(
                        ('throbberstart', 'throbberstop'), dataset,
                        self.stderrp_reject):
                    rejected = False
                else:
                    rejected = True
                    self.loggerdeco.info(
                        'Rejected test (%d/%d) after %d/%d iterations',
                        testnum, testcount, iteration, self._iterations)

                self.loggerdeco.debug('publishing results')

                # perfherder_values accumulates, per metric, the raw
                # per-iteration values, the median per cache state, and
                # the geometric mean across the two cache states.
                perfherder_values = {'geometric_mean': 0}
                metric_keys = ['throbberstart', 'throbberstop', 'throbbertime']
                cache_names = {'uncached': 'first', 'cached': 'second'}
                cache_keys = cache_names.keys()

                for metric_key in metric_keys:
                    perfherder_values[metric_key] = {'geometric_mean': 0}
                    for cache_key in cache_keys:
                        perfherder_values[metric_key][cache_key] = {
                            'median': 0,
                            'values': []
                        }

                for datapoint in dataset:
                    for cache_key in datapoint:
                        starttime = datapoint[cache_key]['starttime']
                        throbberstart = datapoint[cache_key]['throbberstart']
                        throbberstop = datapoint[cache_key]['throbberstop']
                        self.report_results(
                            starttime=starttime,
                            tstrt=throbberstart,
                            tstop=throbberstop,
                            testname=testname,
                            cache_enabled=(cache_key == 'cached'),
                            rejected=rejected)
                        # Throbber times are recorded relative to the
                        # application start time.
                        perfherder_values['throbberstart'][cache_key][
                            'values'].append(throbberstart - starttime)
                        perfherder_values['throbberstop'][cache_key][
                            'values'].append(throbberstop - starttime)
                        perfherder_values['throbbertime'][cache_key][
                            'values'].append(throbberstop - throbberstart)

                test_values = []
                for metric_key in metric_keys:
                    for cache_key in cache_keys:
                        perfherder_values[metric_key][cache_key][
                            'median'] = utils.median(
                                perfherder_values[metric_key][cache_key]
                                ['values'])
                    perfherder_values[metric_key][
                        'geometric_mean'] = utils.geometric_mean([
                            perfherder_values[metric_key]['uncached']
                            ['median'],
                            perfherder_values[metric_key]['cached']['median']
                        ])
                    test_values.append(
                        perfherder_values[metric_key]['geometric_mean'])

                perfherder_suite = PerfherderSuite(
                    name=testname,
                    value=utils.geometric_mean(test_values),
                    options=perfherder_options)
                for metric_key in metric_keys:
                    for cache_key in cache_keys:
                        cache_name = cache_names[cache_key]
                        subtest_name = "%s %s" % (metric_key, cache_name)
                        perfherder_suite.add_subtest(
                            subtest_name,
                            perfherder_values[metric_key][cache_key]['median'],
                            options=perfherder_options)

                self.perfherder_artifact = PerfherderArtifact()
                self.perfherder_artifact.add_suite(perfherder_suite)
                self.loggerdeco.debug("PerfherderArtifact: %s",
                                      self.perfherder_artifact)

                if not rejected:
                    break

            if command and command['interrupt']:
                break

        return is_test_completed

    def runtest(self, url):
        """Run Fennec against url once and collect throbber timings.

        Returns a datapoint dict with starttime/throbberstart/
        throbberstop/throbbertime keys, or an empty dict if the
        measurement failed.
        """
        # Clear logcat
        self.worker_subprocess.logcat.clear()

        # Run test
        self.run_fennec_with_profile(self.build.app_name, url)

        # Get results - do this now so we don't have as much to
        # parse in logcat.
        starttime, throbberstart, throbberstop = self.analyze_logcat()

        self.wait_for_fennec(max_wait_time=1, wait_time=1, kill_wait_time=1)
        self.handle_crashes()

        # Ensure we succeeded - no 0's reported
        datapoint = {}
        if throbberstart and throbberstop:
            datapoint['starttime'] = starttime
            datapoint['throbberstart'] = throbberstart
            datapoint['throbberstop'] = throbberstop
            datapoint['throbbertime'] = throbberstop - throbberstart
        return datapoint

    def analyze_logcat(self):
        """Poll logcat for the application start time and the Throbber
        start/stop times of the current page load.

        Returns (start_time, throbber_start_time, throbber_stop_time);
        all three are 0 when the measurements could not be obtained
        before the timeout or Fennec crashed.
        """
        self.loggerdeco.debug('analyzing logcat')

        logcat_prefix = r'\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3}'
        application_start = r'..Gecko.*zerdatime (\d+) - .*application start'
        throbber_prefix = r'..Gecko.*zerdatime (\d+) -'
        re_start_time = re.compile(r'%s %s' %
                                   (logcat_prefix, application_start))
        re_throbber_start_time = re.compile(
            '%s %s (Throbber|page load) start' %
            (logcat_prefix, throbber_prefix))
        re_throbber_stop_time = re.compile('%s %s (Throbber|page load) stop' %
                                           (logcat_prefix, throbber_prefix))
        # geckoview_example emits page start and stop messages for
        # about:blank prior to loading real pages. When ignore_page
        # is True, we will ignore any page start and stop messages.
        # geckoview_example also will load https://mozilla.org/ if it is
        # launched without an url.
        ignore_page = False
        re_about_blank_start = re.compile(
            '%s I/GeckoViewActivity.*Starting to load page at about:blank' %
            logcat_prefix)
        re_mozilla_org_start = re.compile(
            '%s I/GeckoViewActivity.*Starting to load page at https://mozilla.org/'
            % logcat_prefix)

        start_time = 0
        throbber_start_time = 0
        throbber_stop_time = 0

        attempt = 1
        max_time = 90  # maximum time to wait for throbbers
        wait_time = 3  # time to wait between attempts
        max_attempts = max_time / wait_time
        success = False

        while not success and attempt <= max_attempts:
            buf = self.worker_subprocess.logcat.get()
            for line in buf:
                self.loggerdeco.debug('analyze_logcat: %s', line)

                if ignore_page:
                    # Skip everything until the ignored page's stop
                    # message is seen.
                    self.loggerdeco.debug('analyze_logcat: ignoring %s', line)
                    match = re_throbber_stop_time.match(line)
                    if match:
                        ignore_page = False
                    continue
                match = re_about_blank_start.match(line)
                if match:
                    self.loggerdeco.debug('analyze_logcat: ignoring %s', line)
                    ignore_page = True
                    continue
                match = re_mozilla_org_start.match(line)
                if match:
                    self.loggerdeco.debug('analyze_logcat: ignoring %s', line)
                    ignore_page = True
                    self.loggerdeco.warning(
                        'analyze_logcat: unexpected load of https://mozilla.org. '
                        'geckoview_example launched without url?')
                    continue

                if not start_time:
                    match = re_start_time.match(line)
                    if match:
                        start_time = int(match.group(1))
                        self.loggerdeco.info(
                            'analyze_logcat: new start_time: %s', start_time)
                    continue  # line

                # We want the first throbberstart and throbberstop
                # after the start_time.
                if not throbber_start_time:
                    match = re_throbber_start_time.match(line)
                    if match:
                        throbber_start_time = int(match.group(1))
                        self.loggerdeco.info(
                            'analyze_logcat: throbber_start_time: %s',
                            throbber_start_time)
                    continue  # line

                match = re_throbber_stop_time.match(line)
                if match:
                    throbber_stop_time = int(match.group(1))
                    self.loggerdeco.info(
                        'analyze_logcat: throbber_stop_time: %s',
                        throbber_stop_time)
                    break  # line

            if self.handle_crashes():
                # If fennec crashed, don't bother looking for the Throbbers
                self.loggerdeco.warning('analyze_logcat: fennec crashed.')
                break  # attempt

            if start_time and throbber_start_time and throbber_stop_time:
                self.loggerdeco.info(
                    'analyze_logcat: got measurements '
                    'start_time: %s, throbber start: %s, throbber stop: %s',
                    start_time, throbber_start_time, throbber_stop_time)
                success = True
            else:
                self.loggerdeco.debug(
                    'analyze_logcat: attempt %s, start_time: %s, '
                    'throbber_start_time: %s, throbber_stop_time: %s' %
                    (attempt, start_time, throbber_start_time,
                     throbber_stop_time))
                sleep(wait_time)
                attempt += 1

        if not success:
            self.loggerdeco.warning(
                'analyze_logcat: failed to get measurements '
                'start_time: %s, throbber start: %s, throbber stop: %s',
                start_time, throbber_start_time, throbber_stop_time)
            start_time = throbber_start_time = throbber_stop_time = 0

        return (start_time, throbber_start_time, throbber_stop_time)
# (scraping artifact removed: "Exemplo n.º 10" / "0" separated two pasted examples)
class RoboTest(PerfTest):
    def __init__(self, dm=None, phone=None, options=None,
                 config_file=None, chunk=1, repos=None):
        """Initialize the robocop-based tcheck3 test.

        Arguments mirror PerfTest.__init__. repos defaults to None
        rather than a mutable list so the default is not shared between
        instances.
        """
        if repos is None:
            repos = []

        PerfTest.__init__(self, dm=dm, phone=phone, options=options,
                          config_file=config_file, chunk=chunk, repos=repos)

        # This test runs via the unit-test machinery.
        self.enable_unittests = True
        # Map of "<location>-<testname>" -> am instrument command line.
        self._test_args = {}
        config_vars = {'webserver_url': options.webserver_url}

        try:
            location_items = self.cfg.items('locations', False, config_vars)
        except ConfigParser.NoSectionError:
            # No [locations] section; fall back to a single local entry.
            location_items = [('local', None)]

        # Finalize test configuration
        for test_location, test_path in location_items:
            if test_location in config_vars:
                # Ignore the pseudo-options which result from passing
                # the config_vars for interpolation.
                continue

            for test_name in self._tests:
                test_url = ('am instrument -w -e deviceroot %s %s' %
                            (self._paths['dest'],
                             self.cfg.get('settings', 'tcheck_args')))

                self.loggerdeco.debug(
                    'test_location: %s, test_name: %s, test_path: %s, '
                    'test: %s, adb args: %s' %
                    (test_location, test_name, test_path,
                     self._tests[test_name], test_url))
                self._test_args["%s-%s" % (test_location, test_name)] = test_url

    @property
    def name(self):
        """Name identifying this test in reporting."""
        return 'autophone-talos{0}'.format(self.name_suffix)

    def create_profile(self):
        """Create the Gecko profile, then write and push a
        robotium.config file describing it to the device.
        """
        result = PerfTest.create_profile(self)

        config_file = os.path.join(self.build.dir, 'robotium.config')
        device_log = self._paths['dest'] + "/tcheck3.log"
        # This is not foolproof and the ideal solution would be to have
        # one env/line instead of a single string
        env_vars = {'MOZ_CRASHREPORTER_SHUTDOWN': 1,
                    'NO_EM_RESTART': 1,
                    'MOZ_CRASHREPORTER_NO_REPORT': 1,
                    'MOZ_CRASHREPORTER': 1}
        # TODO: disabled until we have a single device and we can tweak
        #       the test for network access
        #             'MOZ_DISABLE_NONLOCAL_CONNECTIONS': 1}
        with open(config_file, 'w') as config:
            config.write("profile=%s\n" % self.profile_path)
            config.write("logfile=%s\n" % device_log)
            config.write("host=%s\n" % self.options.webserver_url)
            config.write("rawhost=%s\n" % self.options.webserver_url)
            envstr = ",".join("%s=%s" % (key, value)
                              for key, value in env_vars.items())
            config.write("envvars=%s\n" % envstr)

        self.dm.push(config_file, self._paths['dest'])
        return result

    def run_job(self):
        """Run the robocop tcheck3 test for each configured test entry.

        Returns True when the job ran to completion, False when it was
        aborted before the tests started or was interrupted by an
        autophone command.
        """
        is_test_completed = False

        if not self.install_local_pages():
            self.test_failure(
                self.name, 'TEST_UNEXPECTED_FAIL',
                'Aborting test - Could not install local pages on phone.',
                PhoneTestResult.EXCEPTION)
            return is_test_completed

        if not self.create_profile():
            self.test_failure(
                self.name, 'TEST_UNEXPECTED_FAIL',
                'Aborting test - Could not run Fennec.',
                PhoneTestResult.BUSTED)
            return is_test_completed

        is_test_completed = True
        testcount = len(self._test_args.keys())
        test_items = enumerate(self._test_args.iteritems(), 1)
        for testnum, (testname, test_args) in test_items:
            if self.fennec_crashed:
                break
            self.loggerdeco = self.loggerdeco.clone(
                extradict={'phoneid': self.phone.id,
                           'buildid': self.build.id,
                           'testname': testname},
                extraformat='%(phoneid)s|%(buildid)s|%(testname)s|%(message)s')
            self.dm._logger = self.loggerdeco
            self.loggerdeco.info('Running test (%d/%d) for %d iterations' %
                                 (testnum, testcount, self._iterations))

            # success == False indicates that none of the attempts
            # were successful in getting any measurement. This is
            # typically due to a regression in the browser which should
            # be reported.
            success = False
            command = None

            # dataset is a list of the measurements made for the
            # iterations for this test.
            #
            # An empty item in the dataset list represents a
            # failure to obtain any measurement for that
            # iteration.
            dataset = []
            for iteration in range(1, self._iterations+1):
                command = self.worker_subprocess.process_autophone_cmd(
                    test=self, require_ip_address=testname.startswith('remote'))
                if command['interrupt']:
                    is_test_completed = False
                    self.handle_test_interrupt(command['reason'],
                                               command['test_result'])
                    break
                if self.fennec_crashed:
                    break

                self.update_status(message='Test %d/%d, '
                                   'run %d, for test_args %s' %
                                   (testnum, testcount, iteration, test_args))

                # Placeholder entry for this iteration; replaced with
                # the real measurement below when the run succeeds.
                dataset.append({})

                if not self.create_profile():
                    self.test_failure(test_args,
                                      'TEST_UNEXPECTED_FAIL',
                                      'Failed to create profile',
                                      PhoneTestResult.TESTFAILED)
                    continue

                measurement = self.runtest(test_args)
                if measurement:
                    # Lazily create the artifact on the first successful
                    # measurement.
                    if not self.perfherder_artifact:
                        self.perfherder_artifact = PerfherderArtifact()
                    suite = self.create_suite(measurement['pageload_metric'],
                                             testname)
                    self.perfherder_artifact.add_suite(suite)
                    self.test_pass(test_args)
                else:
                    self.test_failure(
                        test_args,
                        'TEST_UNEXPECTED_FAIL',
                        'Failed to get measurement.',
                        PhoneTestResult.TESTFAILED)
                    continue
                dataset[-1] = measurement
                success = True

            if not success:
                # If we have not gotten a single measurement at this point,
                # just bail and report the failure rather than wasting time
                # continuing more attempts.
                self.loggerdeco.info(
                    'Failed to get measurements for test %s after '
                    '%d iterations' % (testname, self._iterations))
                self.worker_subprocess.mailer.send(
                    '%s %s failed for Build %s %s on Phone %s' %
                    (self.__class__.__name__,
                     testname,
                     self.build.tree,
                     self.build.id,
                     self.phone.id),
                    'No measurements were detected for test %s.\n\n'
                    'Job        %s\n'
                    'Phone      %s\n'
                    'Repository %s\n'
                    'Build      %s\n'
                    'Revision   %s\n' %
                    (testname,
                     self.job_url,
                     self.phone.id,
                     self.build.tree,
                     self.build.id,
                     self.build.revision))
                self.test_failure(self.name, 'TEST_UNEXPECTED_FAIL',
                                  'No measurements detected.',
                                  PhoneTestResult.BUSTED)

                # NOTE(review): this publishing loop only executes on the
                # failure path (inside 'if not success'); it looks as if
                # it was meant to run after successful measurements too -
                # confirm the intended indentation. It is currently a
                # no-op placeholder regardless.
                self.loggerdeco.debug('publishing results')

                for datapoint in dataset:
                    for cachekey in datapoint:
                        pass
                        #TODO: figure out results reporting
#                        self.report_results(results)

            if command and command['interrupt']:
                break
            elif not success:
                break

        return is_test_completed

    def runtest(self, test_args):
        """Install robocop and run a single tcheck3 test via adb.

        Returns {'pageload_metric': <metric dict>} on success, or an
        empty dict when the install failed or no metric was reported.
        """
        # Clear logcat
        self.logcat.clear()

        try:
            self.dm.uninstall_app('org.mozilla.roboexample.test')
            robocop_apk_path = os.path.join(self.build.dir, 'robocop.apk')
            self.dm.install_app(robocop_apk_path)
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt and
            # SystemExit are not swallowed; the exception itself is
            # logged with its traceback below.
            self.loggerdeco.exception('robotest.py:runtest: '
                                      'Exception installing robocop.apk.')
            return {}

        # Run test
        self.dm.shell_output(test_args)

        # Get results - do this now so we don't have as much to
        # parse in logcat.
        pageload_metric = self.analyze_logcat()

        # Ensure we succeeded - no 0's reported
        datapoint = {}
        if pageload_metric['summary'] != 0:
            datapoint = {'pageload_metric': pageload_metric}
        return datapoint

    def analyze_logcat(self):
        """Parse logcat for tcheck3 report values.

        Report lines look like:

            __start_report12.853116__end_report

        We parse these into a {name: [value, ...]} hash, compute the
        median value for each name, and report the geometric mean of the
        values as the summary.

        Returns a pageload metric dict such as
        {'summary': <geometric mean>, 'tcheck3': <median>}; 'summary'
        stays 0 when no values could be collected before the timeout or
        Fennec crashed.
        """
        self.loggerdeco.debug('analyzing logcat')

        # Raw string so the escaped dot is passed through to the regex
        # engine unchanged.
        re_data = re.compile(r'.*__start_report([0-9\.]+)__end_report.*')

        attempt = 1
        max_time = 90  # maximum time to wait for completeness score
        wait_time = 3  # time to wait between attempts
        max_attempts = max_time / wait_time

        results = {"tcheck3": []}
        pageload_metric = {'summary': 0}
        while attempt <= max_attempts and pageload_metric['summary'] == 0:
            buf = self.logcat.get()
            for line in buf:
                match = re_data.match(line)
                if match and match.group(1):
                    results["tcheck3"].append(float(match.group(1)))

            if self.fennec_crashed:
                # If fennec crashed, don't bother looking for pageload metric
                break

            if results["tcheck3"]:
                # calculate score
                data = results["tcheck3"]
                pageload_metric["tcheck3"] = median(data)
                pageload_metric['summary'] = geometric_mean(data)

            if pageload_metric['summary'] == 0:
                # No usable reports yet: wait, then poll logcat again.
                # (Previously this slept even on the iteration where the
                # measurements had already been collected, wasting a
                # final wait_time sleep.)
                sleep(wait_time)
                attempt += 1

        if pageload_metric['summary'] == 0:
            self.loggerdeco.info('Unable to find pageload metric')

        self.loggerdeco.info("returning from logcat analyze with: %s" %
                             pageload_metric)
        return pageload_metric

    def create_suite(self, metric, testname):
        """Build a PerfherderSuite from a pageload metric dict.

        The 'summary' entry provides the suite value; every other entry
        becomes a subtest.
        """
        suite = PerfherderSuite(name=testname, value=metric['summary'])
        for subtest, value in metric.items():
            if subtest == 'summary':
                continue
            suite.add_subtest(subtest, value)
        return suite