Example #1
0
def run_tests(configurator):
  """Runs the talos tests on the given configuration and generates a report.

  Args:
    config: dictionary of configuration, as generated by PerfConfigurator
  """
  config=configurator.config
  # data filters
  filters = config['filters']
  try:
      filters = filter.filters_args(filters)
  except AssertionError as e:
      raise talosError(str(e))
Example #2
0
def run_tests(configurator):
    """Runs the talos tests on the given configuration and generates a report.

  Args:
    config: dictionary of configuration, as generated by PerfConfigurator
  """
    config = configurator.config
    # data filters
    filters = config['filters']
    try:
        filters = filter.filters_args(filters)
    except AssertionError as e:
        raise talosError(str(e))
Example #3
0
    # https://bugzilla.mozilla.org/show_bug.cgi?id=727711
    # Build command line from config
    for path in paths:
      if test.get(path):
        test[path] = utils.interpolatePath(test[path])
    if test.get('tpmanifest'):
      test['tpmanifest'] = os.path.normpath('file:/%s' % (urllib.quote(test['tpmanifest'], '/\\t:\\')))
    if not test.get('url'):
      # build 'url' for tptest
      test['url'] = buildCommandLine(test)
    test['url'] = utils.interpolatePath(test['url'])

    # ensure test-specific filters are valid
    if 'filters' in test:
      try:
        filter.filters_args(test['filters'])
      except AssertionError as e:
        raise talosError(str(e))
      except IndexError as e:
        raise talosError(str(e))


  # set browser_config
  browser_config = configurator.browser_config()

  # set defaults
  title = config.get('title', '')
  testdate = config.get('testdate', '')

  # get the process name from the path to the browser
  if not browser_config['process']:
Example #4
0
    def __call__(self):
        """
        results to send to graphserver:
        construct all the strings of data, one string per test and one string  per counter
        """

        result_strings = []

        info_dict = dict(title=self.results.title,
                         date=self.results.date,
                         branch_name=self.results.browser_config['branch_name'],
                         sourcestamp=self.results.browser_config['sourcestamp'],
                         buildid=self.results.browser_config['buildid'],
                         browser_name=self.results.browser_config['browser_name'],
                         browser_version=self.results.browser_config['browser_version']
                         )

        for test in self.results.results:

            utils.debug("Working with test: %s", test.name())


            # get full name of test
            testname = test.name()
            if test.format == 'tpformat':
                # for some reason, we append the test extension to tp results but not ts
                # http://hg.mozilla.org/build/talos/file/170c100911b6/talos/run_tests.py#l176
                testname += self.results.test_name_extension

            utils.stamped_msg("Generating results file: %s" % test.name(), "Started")

            # HACK: when running xperf, we upload xperf counters to the graph server but we do not want to
            # upload the test results as they will confuse the graph server
            if not test.using_xperf:
                vals = []
                for result in test.results:
                    # per test filters
                    _filters = self.results.filters
                    if 'filters' in test.test_config:
                        try:
                            _filters = filter.filters_args(test.test_config['filters'])
                        except AssertionError as e:
                            raise utils.talosError(str(e))

                    vals.extend(result.values(_filters))
                result_strings.append(self.construct_results(vals, testname=testname, **info_dict))
                utils.stamped_msg("Generating results file: %s" % test.name(), "Stopped")

            # counter results
            for cd in test.all_counter_results:
                for counter_type, values in cd.items():
                    # get the counter name
                    counterName = '%s_%s' % (test.name(), self.shortName(counter_type))
                    if not values:
                        # failed to collect any data for this counter
                        utils.stamped_msg("No results collected for: " + counterName, "Error")
# NOTE: we are not going to enforce this warning for now as this happens too frequently: bugs 803413, 802475, 805925
#                        raise utils.talosError("Unable to proceed with missing counter '%s'" % counterName)
# (jhammel: we probably should do this in e.g. results.py vs in graphserver-specific code anyway)

                    # exclude counters whose values are lists (bad for graphserver)
                    if len(values) > 0 and isinstance(values[0], list):
                        continue

                    # counter values
                    vals = [[x, 'NULL'] for x in values]

                    # append test name extension but only for tpformat tests
                    if test.format == 'tpformat':
                        counterName += self.results.test_name_extension

                    info = info_dict.copy()
                    info['testname'] = counterName

                    # append the counter string
                    utils.stamped_msg("Generating results file: %s" % counterName, "Started")
                    result_strings.append(self.construct_results(vals, **info))
                    utils.stamped_msg("Generating results file: %s" % counterName, "Stopped")
Example #5
0
            if test.get(path):
                test[path] = utils.interpolatePath(test[path])
        if test.get('tpmanifest'):
            test['tpmanifest'] = os.path.normpath(
                'file:/%s' % (urllib.quote(test['tpmanifest'], '/\\t:\\')))
        if not test.get('url'):
            # build 'url' for tptest
            test['url'] = buildCommandLine(test)
        test['url'] = utils.interpolatePath(test['url'])
        test['setup'] = utils.interpolatePath(test['setup'])
        test['cleanup'] = utils.interpolatePath(test['cleanup'])

        # ensure test-specific filters are valid
        if 'filters' in test:
            try:
                filter.filters_args(test['filters'])
            except AssertionError as e:
                raise talosError(str(e))
            except IndexError as e:
                raise talosError(str(e))

    # set browser_config
    browser_config = configurator.browser_config()

    # set defaults
    title = config.get('title', '')
    testdate = config.get('testdate', '')

    # Bug 940690 - Get existing metrofx talos tests running on release/project
    # branches:
    #    Identify when we do a win8 metro talos run, and a non metro run.  We do
Example #6
0
    def __call__(self):
        """
        results to send to graphserver:
        construct all the strings of data, one string per test and one string  per counter
        """

        result_strings = []

        info_dict = dict(title=self.results.title,
                         date=self.results.date,
                         branch_name=self.results.browser_config['branch_name'],
                         sourcestamp=self.results.browser_config['sourcestamp'],
                         buildid=self.results.browser_config['buildid'],
                         browser_name=self.results.browser_config['browser_name'],
                         browser_version=self.results.browser_config['browser_version']
                         )

        for test in self.results.results:

            utils.debug("Working with test: %s", test.name())


            # get full name of test
            testname = test.name()
            if test.format == 'tpformat':
                # for some reason, we append the test extension to tp results but not ts
                # http://hg.mozilla.org/build/talos/file/170c100911b6/talos/run_tests.py#l176
                testname += self.results.test_name_extension

            utils.stamped_msg("Generating results file: %s" % test.name(), "Started")

            # HACK: when running xperf, we upload xperf counters to the graph server but we do not want to
            # upload the test results as they will confuse the graph server
            if not test.using_xperf:
                vals = []
                for result in test.results:
                    # per test filters
                    _filters = self.results.filters
                    if 'filters' in test.test_config:
                        try:
                            _filters = filter.filters_args(test.test_config['filters'])
                        except AssertionError as e:
                            raise utils.talosError(str(e))

                    vals.extend(result.values(_filters))
                result_strings.append(self.construct_results(vals, testname=testname, **info_dict))
                utils.stamped_msg("Generating results file: %s" % test.name(), "Stopped")

            # counter results
            for cd in test.all_counter_results:
                for counter_type, values in cd.items():
                    # get the counter name
                    counterName = '%s_%s' % (test.name(), self.shortName(counter_type))
                    if not values:
                        # failed to collect any data for this counter
                        utils.stamped_msg("No results collected for: " + counterName, "Error")
# NOTE: we are not going to enforce this warning for now as this happens too frequently: bugs 803413, 802475, 805925
#                        raise utils.talosError("Unable to proceed with missing counter '%s'" % counterName)
# (jhammel: we probably should do this in e.g. results.py vs in graphserver-specific code anyway)

                    # exclude counters whose values are lists (bad for graphserver)
                    if len(values) > 0 and isinstance(values[0], list):
                        continue

                    # counter values
                    vals = [[x, 'NULL'] for x in values]

                    # append test name extension but only for tpformat tests
                    if test.format == 'tpformat':
                        counterName += self.results.test_name_extension

                    info = info_dict.copy()
                    info['testname'] = counterName

                    # append the counter string
                    utils.stamped_msg("Generating results file: %s" % counterName, "Started")
                    result_strings.append(self.construct_results(vals, **info))
                    utils.stamped_msg("Generating results file: %s" % counterName, "Stopped")