Example #1
0
def send_to_graph(results_server, results_link, machine, date, browser_config,
                  results):
    links = ''
    files = []

    #construct all the files of data, one file per test and one file per counter
    for testname in results:
        vals = []
        fullname = testname
        browser_dump, counter_dump = results[testname]
        utils.debug("Working with test: " + testname)
        utils.debug("Sending results: " + " ".join(browser_dump))
        utils.stamped_msg("Generating results file: " + testname, "Started")
        if testname in ('ts', 'twinopen'):
            #non-tpformat results
            for bd in browser_dump:
                vals.extend([[x, 'NULL'] for x in bd.split('|')])
        else:
            #tpformat results
            fullname += browser_config['test_name_extension']
            for bd in browser_dump:
                bd = bd.rstrip('\n')
                page_results = bd.splitlines()
                for line in page_results:
                    val, page = process_tpformat(line)
                    if val > -1:
                        vals.append([val, page])
        files.append(
            construct_file(machine, fullname, browser_config['branch_name'],
                           browser_config['sourcestamp'],
                           browser_config['buildid'], date, vals))
        utils.stamped_msg("Generating results file: " + testname, "Stopped")
        for cd in counter_dump:
            for count_type in cd:
                vals = [[x, 'NULL'] for x in cd[count_type]]
                counterName = testname + '_' + shortName(count_type)
                if testname not in ('ts', 'twinopen'):
                    counterName += browser_config['test_name_extension']
                utils.stamped_msg("Generating results file: " + counterName,
                                  "Started")
                files.append(
                    construct_file(machine, counterName,
                                   browser_config['branch_name'],
                                   browser_config['sourcestamp'],
                                   browser_config['buildid'], date, vals))
                utils.stamped_msg("Generating results file: " + counterName,
                                  "Stopped")

    #send all the files along to the graph server
    for filename in files:
        links += process_Request(
            post_test_result(results_server, results_link, filename))
        os.remove(filename)
        utils.stamped_msg("Transmitting test: " + testname, "Stopped")

    return links
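# process_tpformat is referenced above but not defined in these examples. A
# minimal sketch of what it likely does, inferred from the legacy parser in
# the older send_to_graph further below: tpformat lines look like
# "i;page_name;median;mean;min;max;run1|run2|...". The name suffix and the
# exact return value for unparseable lines are assumptions.
def process_tpformat_sketch(line):
    parts = line.split(';')
    if len(parts) < 3:
        return -1, ''  # not a tpformat data line
    page = parts[1].rstrip('/')
    if '/' in page:
        page = page.split('/')[1]  # keep the page name, drop the directory
    try:
        return float(parts[2]), page  # parts[2] is the median
    except ValueError:
        return -1, page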
Example #2
0
                               test_name_extension=browser_config['test_name_extension'])

  # results links
  results_urls, results_options = configurator.output_options()
  talos_results.check_output_formats(results_urls, **results_options)

  # setup a webserver, if --develop is specified to PerfConfigurator.py
  httpd = None
  if browser_config['develop']:
    httpd = setup_webserver(browser_config['webserver'])
    if httpd:
      httpd.start()

  # run the tests
  utils.startTimer()
  utils.stamped_msg(title, "Started")
  for test in tests:
    testname = test['name']
    utils.stamped_msg("Running test " + testname, "Started")

    if os.path.exists('logcat.log'):
        os.unlink('logcat.log')

    try:
      mytest = TTest(browser_config['remote'])
      if mytest:
        talos_results.add(mytest.runTest(browser_config, test))
      else:
        utils.stamped_msg("Error found while running %s" % testname, "Error")
    except talosRegression, tr:
      utils.stamped_msg("Detected a regression for " + testname, "Stopped")
Example #3
0
    def __call__(self):
        """
        results to send to graphserver:
        construct all the strings of data, one string per test and one string per counter
        """

        result_strings = []

        info_dict = dict(title=self.results.title,
                         date=self.results.date,
                         branch_name=self.results.browser_config['branch_name'],
                         sourcestamp=self.results.browser_config['sourcestamp'],
                         buildid=self.results.browser_config['buildid'],
                         browser_name=self.results.browser_config['browser_name'],
                         browser_version=self.results.browser_config['browser_version']
                         )

        for test in self.results.results:

            utils.debug("Working with test: %s", test.name())

            # get full name of test
            testname = test.name()
            if test.format == 'tpformat':
                # for some reason, we append the test extension to tp results but not ts
                # http://hg.mozilla.org/build/talos/file/170c100911b6/talos/run_tests.py#l176
                testname += self.results.test_name_extension

            utils.stamped_msg("Generating results file: %s" % test.name(), "Started")

            # HACK: when running xperf, we upload xperf counters to the graph server but we do not want to
            # upload the test results as they will confuse the graph server
            if not test.using_xperf:
                vals = []
                for result in test.results:
                    # per test filters
                    _filters = self.results.filters
                    if 'filters' in test.test_config:
                        try:
                            _filters = filter.filters_args(test.test_config['filters'])
                        except AssertionError, e:
                            raise utils.talosError(str(e))

                    vals.extend(result.values(_filters))
                result_strings.append(self.construct_results(vals, testname=testname, **info_dict))
                utils.stamped_msg("Generating results file: %s" % test.name(), "Stopped")

            # counter results
            for cd in test.all_counter_results:
                for counter_type, values in cd.items():
                    # get the counter name
                    counterName = '%s_%s' % (test.name(), self.shortName(counter_type))
                    if not values:
                        # failed to collect any data for this counter
                        utils.stamped_msg("No results collected for: " + counterName, "Error")
                        # NOTE: we are not going to enforce this warning for now as this
                        # happens too frequently: bugs 803413, 802475, 805925
                        # raise utils.talosError("Unable to proceed with missing counter '%s'" % counterName)
                        # (jhammel: we probably should do this in e.g. results.py vs in
                        # graphserver-specific code anyway)

                    # exclude counters whose values are tuples (bad for graphserver)
                    if len(values) > 0 and isinstance(values[0], list):
                        continue

                    # counter values
                    vals = [[x, 'NULL'] for x in values]

                    # append test name extension but only for tpformat tests
                    if test.format == 'tpformat':
                        counterName += self.results.test_name_extension

                    info = info_dict.copy()
                    info['testname'] = counterName

                    # append the counter string
                    utils.stamped_msg("Generating results file: %s" % counterName, "Started")
                    result_strings.append(self.construct_results(vals, **info))
                    utils.stamped_msg("Generating results file: %s" % counterName, "Stopped")
Example #4
0
    def __call__(self):
        """
        results to send to graphserver:
        construct all the strings of data, one string per test and one string
        per counter
        """

        result_strings = []

        info_dict = dict(
            title=self.results.title,
            date=self.results.date,
            branch_name=self.results.browser_config['branch_name'],
            sourcestamp=self.results.browser_config['sourcestamp'],
            buildid=self.results.browser_config['buildid'],
            browser_name=self.results.browser_config['browser_name'],
            browser_version=self.results.browser_config['browser_version'])

        for test in self.results.results:
            logging.debug("Working with test: %s", test.name())

            # get full name of test
            testname = test.name()
            if test.format == 'tpformat':
                # for some reason, we append the test extension to tp results
                # but not ts
                # http://hg.mozilla.org/build/talos/file/170c100911b6/talos/run_tests.py#l176
                testname += test.extension()

            logging.debug("Generating results file: %s", test.name())

            # HACK: when running xperf, we upload xperf counters to the graph
            # server but we do not want to upload the test results as they
            # will confuse the graph server
            if not (test.format == 'tpformat' and test.using_xperf):
                vals = []
                for result in test.results:
                    filtered_val = result.values(testname,
                                                 test.test_config['filters'])
                    vals.extend([[i['filtered'], j] for i, j in filtered_val])
                result_strings.append(
                    self.construct_results(vals,
                                           testname=testname,
                                           **info_dict))

            # counter results
            for cd in test.all_counter_results:
                for counter_type, values in cd.items():
                    # get the counter name
                    counterName = '%s_%s' % (test.name(),
                                             self.shortName(counter_type))
                    if not values:
                        # failed to collect any data for this counter
                        utils.stamped_msg(
                            "No results collected for: " + counterName,
                            "Error")
                        # NOTE: we are not going to enforce this warning for
                        # now as this happens too frequently: bugs 803413,
                        # 802475, 805925
                        # raise utils.TalosError(
                        #     "Unable to proceed with missing counter '%s'"
                        #     % counterName)
                        # (jhammel: we probably should do this in e.g.
                        # results.py vs in graphserver-specific code anyway)

                    # exclude counters whose values are tuples (bad for
                    # graphserver)
                    if len(values) > 0 and isinstance(values[0], list):
                        print "Not uploading counter data for %s" % counterName
                        print values
                        continue

                    if test.mainthread() and 'mainthreadio' in counterName:
                        print("Not uploading Mainthread IO data for %s" %
                              counterName)
                        print values
                        continue

                    # counter values
                    vals = [[x, 'NULL'] for x in values]

                    # append test name extension but only for tpformat tests
                    if test.format == 'tpformat':
                        counterName += test.extension()

                    info = info_dict.copy()
                    info['testname'] = counterName

                    # append the counter string
                    utils.stamped_msg(
                        "Generating results file: %s" % counterName, "Started")
                    result_strings.append(self.construct_results(vals, **info))
                    utils.stamped_msg(
                        "Generating results file: %s" % counterName, "Stopped")

        return result_strings
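# A side note on info_dict.copy() above: the per-counter 'testname' override
# is applied to a copy so it cannot leak into later construct_results()
# calls. A minimal illustration (values here are made up):
base = {'testname': 'ts', 'buildid': '20130101'}
per_counter = base.copy()
per_counter['testname'] = 'ts_main_rss'
assert base['testname'] == 'ts'  # the shared dict is untouched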
Example #5
0
    # results links
    results_urls, results_options = configurator.output_options()
    talos_results.check_output_formats(results_urls, **results_options)

    results_log = browser_config['results_log']

    # setup a webserver, if --develop is specified to PerfConfigurator.py
    httpd = None
    if browser_config['develop']:
        httpd = setup_webserver(browser_config['webserver'])
        if httpd:
            httpd.start()

    # run the tests
    utils.startTimer()
    utils.stamped_msg(title, "Started")
    for test in tests:
        testname = test['name']
        test['browser_log'] = browser_config['browser_log']
        utils.stamped_msg("Running test " + testname, "Started")

        if os.path.exists('logcat.log'):
            os.unlink('logcat.log')

        try:
            mytest = TTest(browser_config['remote'])
            if mytest:
                talos_results.add(mytest.runTest(browser_config, test))
            else:
                utils.stamped_msg("Error found while running %s" % testname,
                                  "Error")
Example #6
0
def test_file(filename):
  """Runs the Ts and Tp tests on the given config file and generates a report.
  
  Args:
    filename: the name of the file to run the tests on
  """
  
  browser_config = {}
  tests = []
  title = ''
  testdate = ''
  csv_dir = ''
  results_server = ''
  results_link = ''
  results = {}
  
  # Read in the profile info from the YAML config file
  config_file = open(filename, 'r')
  yaml_config = yaml.load(config_file)
  config_file.close()
  for item in yaml_config:
    if item == 'title':
      title = yaml_config[item]
    elif item == 'testdate':
      testdate = yaml_config[item]
    elif item == 'csv_dir':
      csv_dir = os.path.normpath(yaml_config[item])
      if not os.path.exists(csv_dir):
        print "FAIL: path \"" + csv_dir + "\" does not exist"
        sys.exit(0)
    elif item == 'results_server':
      results_server = yaml_config[item]
    elif item == 'results_link':
      results_link = yaml_config[item]
  if (results_link != results_server != ''):
    if not post_file.link_exists(results_server, results_link):
      sys.exit(0)
  browser_config = {'preferences'  : yaml_config['preferences'],
                    'extensions'   : yaml_config['extensions'],
                    'firefox'      : yaml_config['firefox'],
                    'branch'       : yaml_config['branch'],
                    'buildid'      : yaml_config['buildid'],
                    'profile_path' : yaml_config['profile_path'],
                    'env'          : yaml_config['env'],
                    'dirs'         : yaml_config['dirs'],
                    'init_url'     : yaml_config['init_url']}
  #normalize paths to work across platforms
  browser_config['firefox'] = os.path.normpath(browser_config['firefox'])
  if browser_config['profile_path'] != {}:
    browser_config['profile_path'] = os.path.normpath(browser_config['profile_path'])
  for dir in browser_config['dirs']:
    browser_config['dirs'][dir] = os.path.normpath(browser_config['dirs'][dir])
  tests = yaml_config['tests']
  if (testdate != ''):
    date = int(time.mktime(time.strptime(testdate, '%a, %d %b %Y %H:%M:%S GMT')))
  else:
    date = int(time.time()) #TODO get this into own file
  utils.debug("using testdate: %d" % date)
  utils.debug("actual date: %d" % int(time.time()))

  utils.stamped_msg(title, "Started")
  for test in tests:
    utils.stamped_msg("Running test " + test, "Started")
    try:
      browser_dump, counter_dump = ttest.runTest(browser_config, tests[test])
    except talosError, e:
      utils.stamped_msg("Failed " + test, "Stopped")
      print 'FAIL: Busted: ' + test
      print 'FAIL: ' + e.msg
      sys.exit(0)
    utils.debug("Received test results: " + " ".join(browser_dump))
    results[test] = [browser_dump, counter_dump]
    utils.stamped_msg("Completed test " + test, "Stopped")
Example #7
0
def test_file(filename):
    """Runs the talos tests on the given config file and generates a report.

    Args:
      filename: the name of the file to run the tests on
    """

    browser_config = {}
    tests = []
    title = ''
    testdate = ''
    csv_dir = ''
    results_server = ''
    results_link = ''
    old_results_server = ''
    old_results_link = ''
    results = {}

    # Read in the profile info from the YAML config file
    config_file = open(filename, 'r')
    yaml_config = yaml.load(config_file)
    config_file.close()
    for item in yaml_config:
        if item == 'title':
            title = yaml_config[item]
        elif item == 'testdate':
            testdate = yaml_config[item]
        elif item == 'csv_dir':
            csv_dir = os.path.normpath(yaml_config[item])
            if not os.path.exists(csv_dir):
                print "FAIL: path \"" + csv_dir + "\" does not exist"
                sys.exit(0)
        elif item == 'results_server':
            results_server = yaml_config[item]
        elif item == 'results_link':
            results_link = yaml_config[item]
        elif item == 'old_results_server':
            old_results_server = yaml_config[item]
        elif item == 'old_results_link':
            old_results_link = yaml_config[item]
    if (results_link != results_server != ''):
        if not post_file.link_exists(results_server, results_link):
            sys.exit(0)
    if (old_results_link != old_results_server != ''):
        if not post_file.link_exists(old_results_server, old_results_link):
            sys.exit(0)
    browser_config = {
        'preferences': yaml_config['preferences'],
        'extensions': yaml_config['extensions'],
        'browser_path': yaml_config['browser_path'],
        'browser_log': yaml_config['browser_log'],
        'symbols_path': yaml_config.get('symbols_path', None),
        'browser_wait': yaml_config['browser_wait'],
        'process': yaml_config['process'],
        'extra_args': yaml_config['extra_args'],
        'branch': yaml_config['branch'],
        'buildid': yaml_config['buildid'],
        'profile_path': yaml_config['profile_path'],
        'env': yaml_config['env'],
        'dirs': yaml_config['dirs'],
        'init_url': yaml_config['init_url']
    }
    if 'branch_name' in yaml_config:
        browser_config['branch_name'] = yaml_config['branch_name']
    if 'test_name_extension' in yaml_config:
        browser_config['test_name_extension'] = yaml_config[
            'test_name_extension']
    else:
        browser_config['test_name_extension'] = ''
    #normalize paths to work across platforms
    browser_config['browser_path'] = os.path.normpath(
        browser_config['browser_path'])
    if browser_config['profile_path'] != {}:
        browser_config['profile_path'] = os.path.normpath(
            browser_config['profile_path'])
    for dir in browser_config['dirs']:
        browser_config['dirs'][dir] = os.path.normpath(
            browser_config['dirs'][dir])
    tests = yaml_config['tests']
    if (testdate != ''):
        date = int(
            time.mktime(time.strptime(testdate, '%a, %d %b %Y %H:%M:%S GMT')))
    else:
        date = int(time.time())  #TODO get this into own file
    utils.debug("using testdate: %d" % date)
    utils.debug("actual date: %d" % int(time.time()))
    print 'RETURN:s: %s' % title
    #pull buildid & sourcestamp from browser
    browser_config = browserInfo(browser_config)

    utils.startTimer()
    utils.stamped_msg(title, "Started")
    for test in tests:
        testname = test['name']
        utils.stamped_msg("Running test " + testname, "Started")
        try:
            browser_dump, counter_dump = ttest.runTest(browser_config, test)
            utils.debug("Received test results: " + " ".join(browser_dump))
            results[testname] = [browser_dump, counter_dump]
            # If we're doing CSV, write this test immediately (bug 419367)
            if csv_dir != '':
                send_to_csv(csv_dir, {testname: results[testname]})
        except talosError, e:
            utils.stamped_msg("Failed " + testname, "Stopped")
            print 'FAIL: Busted: ' + testname
            print 'FAIL: ' + e.msg
        utils.stamped_msg("Completed test " + testname, "Stopped")
Example #8
0
def send_to_graph(results_server, results_link, machine, date, browser_config, results, amo):
  links = ''
  result_strings = []
  result_testnames = []

  #construct all the strings of data, one string per test and one string per counter
  for testname in results:
    vals = []
    fullname = testname
    browser_dump, counter_dump, print_format = results[testname]
    utils.debug("Working with test: " + testname)
    utils.debug("Sending results: " + " ".join(browser_dump))
    utils.stamped_msg("Generating results file: " + testname, "Started")
    if print_format == 'tsformat':
      #non-tpformat results
      for bd in browser_dump:
        vals.extend([[x, 'NULL'] for x in bd.split('|')])
    elif print_format == 'tpformat':
      #tpformat results
      fullname += browser_config['test_name_extension']
      for bd in browser_dump:
        bd = bd.rstrip('\n')
        page_results = bd.splitlines()
        for line in page_results:
          val, page = process_tpformat(line)
          if val > -1:
            vals.append([val, page])
    else:
      raise talosError("Unknown print format in send_to_graph")
    result_strings.append(construct_results(machine, fullname, browser_config, date, vals, amo))
    result_testnames.append(fullname)
    utils.stamped_msg("Generating results file: " + testname, "Stopped")
    #counters collected for this test
    for cd in counter_dump:
      for count_type in cd:
        counterName = testname + '_' + shortName(count_type)
        if cd[count_type] == []: #failed to collect any data for this counter
          utils.stamped_msg("No results collected for: " + counterName, "Error")
          continue
        vals = [[x, 'NULL'] for x in cd[count_type]]
        if print_format == "tpformat":
          counterName += browser_config['test_name_extension']
        utils.stamped_msg("Generating results file: " + counterName, "Started")
        result_strings.append(construct_results(machine, counterName, browser_config, date, vals, amo))
        result_testnames.append(counterName)
        utils.stamped_msg("Generating results file: " + counterName, "Stopped")
    
  #send all the strings along to the graph server
  for data_string, testname in zip(result_strings, result_testnames):
    RETRIES = 5
    wait_time = 5
    times = 0
    msg = ""
    while (times < RETRIES):
      try:
        utils.stamped_msg("Transmitting test: " + testname, "Started")
        links += process_Request(post_file.post_multipart(results_server, results_link, [("key", "value")], [("filename", "data_string", data_string)]))
        break
      except talosError, e:
        msg = e.msg
      except Exception, e:
        msg = str(e)
      times += 1
      time.sleep(wait_time)
      wait_time += wait_time
    if times == RETRIES:
        raise talosError("Graph server unreachable (%d attempts)\n%s" % (RETRIES, msg))
    utils.stamped_msg("Transmitting test: " + testname, "Stopped")

  return links
Example #9
0
def test_file(filename):
    """Runs the Ts and Tp tests on the given config file and generates a report.
  
  Args:
    filename: the name of the file to run the tests on
  """

    browser_config = {}
    tests = []
    title = ''
    testdate = ''
    csv_dir = ''
    results_server = ''
    results_link = ''
    results = {}

    # Read in the profile info from the YAML config file
    config_file = open(filename, 'r')
    yaml_config = yaml.load(config_file)
    config_file.close()
    for item in yaml_config:
        if item == 'title':
            title = yaml_config[item]
        elif item == 'testdate':
            testdate = yaml_config[item]
        elif item == 'csv_dir':
            csv_dir = os.path.normpath(yaml_config[item])
            if not os.path.exists(csv_dir):
                print "FAIL: path \"" + csv_dir + "\" does not exist"
                sys.exit(0)
        elif item == 'results_server':
            results_server = yaml_config[item]
        elif item == 'results_link':
            results_link = yaml_config[item]
    if (results_link != results_server != ''):
        if not post_file.link_exists(results_server, results_link):
            sys.exit(0)
    browser_config = {
        'preferences': yaml_config['preferences'],
        'extensions': yaml_config['extensions'],
        'firefox': yaml_config['firefox'],
        'branch': yaml_config['branch'],
        'buildid': yaml_config['buildid'],
        'profile_path': yaml_config['profile_path'],
        'env': yaml_config['env'],
        'dirs': yaml_config['dirs'],
        'init_url': yaml_config['init_url']
    }
    #normalize paths to work across platforms
    browser_config['firefox'] = os.path.normpath(browser_config['firefox'])
    if browser_config['profile_path'] != {}:
        browser_config['profile_path'] = os.path.normpath(
            browser_config['profile_path'])
    for dir in browser_config['dirs']:
        browser_config['dirs'][dir] = os.path.normpath(
            browser_config['dirs'][dir])
    tests = yaml_config['tests']
    if (testdate != ''):
        date = int(
            time.mktime(time.strptime(testdate, '%a, %d %b %Y %H:%M:%S GMT')))
    else:
        date = int(time.time())  #TODO get this into own file
    utils.debug("using testdate: %d" % date)
    utils.debug("actual date: %d" % int(time.time()))

    utils.stamped_msg(title, "Started")
    for test in tests:
        utils.stamped_msg("Running test " + test, "Started")
        try:
            browser_dump, counter_dump = ttest.runTest(browser_config,
                                                       tests[test])
        except talosError, e:
            utils.stamped_msg("Failed " + test, "Stopped")
            print 'FAIL: Busted: ' + test
            print 'FAIL: ' + e.msg
            sys.exit(0)
        utils.debug("Received test results: " + " ".join(browser_dump))
        results[test] = [browser_dump, counter_dump]
        utils.stamped_msg("Completed test " + test, "Stopped")
Example #10
0
def send_to_graph(results_server, results_link, title, date, browser_config,
                  results):
    tbox = title
    url_format = "http://%s/%s"
    link_format = "<a href=\"%s\">%s</a>"
    #value, testname, tbox, timeval, date, branch, buildid, type, data
    result_format = "%.2f,%s,%s,%d,%d,%s,%s,%s,%s,\n"
    result_format2 = "%.2f,%s,%s,%d,%d,%s,%s,%s,\n"
    links = ''

    for res in results:
        browser_dump, counter_dump = results[res]
        utils.debug("Working with test: " + res)
        utils.debug("Sending results: " + " ".join(browser_dump))
        utils.stamped_msg("Transmitting test: " + res, "Started")
        filename = tempfile.mktemp()
        tmpf = open(filename, "w")
        if res in ('ts', 'twinopen'):
            i = 0
            for val in browser_dump:
                val_list = val.split('|')
                for v in val_list:
                    tmpf.write(result_format %
                               (float(v), res, tbox, i, date,
                                browser_config['branch'],
                                browser_config['buildid'], "discrete", "ms"))
                    i += 1
        else:
            # each line of the string is of the format i;page_name;median;mean;min;max;time vals\n
            name = ''
            if res in ('tp', 'tp_js'):
                name = '_loadtime'
            for bd in browser_dump:
                bd = bd.rstrip('\n')
                page_results = bd.splitlines()
                i = 0
                for mypage in page_results:
                    r = mypage.split(';')
                    #skip this line if it isn't the correct format
                    if len(r) == 1:
                        continue
                    r[1] = r[1].rstrip('/')
                    if r[1].find('/') > -1:
                        page = r[1].split('/')[1]
                    else:
                        page = r[1]
                    try:
                        val = float(r[2])
                    except ValueError:
                        print 'WARNING: value error for median in tp'
                        val = 0
                    tmpf.write(result_format %
                               (val, res + name, tbox, i, date,
                                browser_config['branch'],
                                browser_config['buildid'], "discrete", page))
                    i += 1
        tmpf.flush()
        tmpf.close()
        links += post_chunk(results_server, results_link, res, filename)
        os.remove(filename)
        for cd in counter_dump:
            for count_type in cd:
                val_list = cd[count_type]
                chunks = chunk_list(val_list)
                chunk_link = ''
                i = 0
                for chunk in chunks:
                    filename = tempfile.mktemp()
                    tmpf = open(filename, "w")
                    for val in chunk:
                        tmpf.write(result_format2 %
                                   (float(val), res + "_" +
                                    count_type.replace("%", "Percent"), tbox,
                                    i, date, browser_config['branch'],
                                    browser_config['buildid'], "discrete"))
                        i += 1
                    tmpf.flush()
                    tmpf.close()
                    chunk_link = post_chunk(
                        results_server, results_link,
                        '%s_%s (%d values)' % (res, count_type, len(chunk)),
                        filename)
                    os.remove(filename)
                links += chunk_link
        utils.stamped_msg("Transmitting test: " + res, "Stopped")

    first_results = ''
    last_results = ''
    full_results = '\nRETURN:<p style="font-size:smaller;">Details:<br>'
    lines = links.split('\n')
    for line in lines:
        if line == "":
            continue
        values = line.split(":")
        linkName = values[1]
        if linkName in ('tp_pbytes', 'tp_%cpu'):
            continue
        if float(values[2]) > 0:
            linkName += ": " + str(values[2])
            url = url_format % (results_server, values[0])
            link = link_format % (url, linkName)
            first_results = first_results + "\nRETURN:" + link + "<br>"
        else:
            url = url_format % (results_server, values[0])
            link = link_format % (url, linkName)
            last_results = last_results + '| ' + link + ' '
    full_results = first_results + full_results + last_results + '|</p>'
    print full_results
Example #11
0
    utils.stamped_msg(title, "Started")
    for test in tests:
        utils.stamped_msg("Running test " + test, "Started")
        try:
            browser_dump, counter_dump = ttest.runTest(browser_config,
                                                       tests[test])
        except talosError, e:
            utils.stamped_msg("Failed " + test, "Stopped")
            print 'FAIL: Busted: ' + test
            print 'FAIL: ' + e.msg
            sys.exit(0)
        utils.debug("Received test results: " + " ".join(browser_dump))
        results[test] = [browser_dump, counter_dump]
        utils.stamped_msg("Completed test " + test, "Stopped")
    utils.stamped_msg(title, "Stopped")

    #process the results
    if (results_server != '') and (results_link != ''):
        #send results to the graph server
        utils.stamped_msg("Sending results", "Started")
        send_to_graph(results_server, results_link, title, date,
                      browser_config, results)
        utils.stamped_msg("Completed sending results", "Stopped")
    if csv_dir != '':
        send_to_csv(csv_dir, results)


if __name__ == '__main__':
    optlist, args = getopt.getopt(sys.argv[1:], 'dn', ['debug', 'noisy'])
    for o, a in optlist:
Example #12
0
def test_file(filename):
  """Runs the talos tests on the given config file and generates a report.
  
  Args:
    filename: the name of the file to run the tests on
  """
  
  browser_config = {}
  tests = []
  title = ''
  testdate = ''
  csv_dir = ''
  results_server = ''
  results_link = ''
  results = {}
  
  # Read in the profile info from the YAML config file
  config_file = open(filename, 'r')
  yaml_config = yaml.load(config_file)
  config_file.close()
  for item in yaml_config:
    if item == 'title':
      title = yaml_config[item]
    elif item == 'testdate':
      testdate = yaml_config[item]
    elif item == 'csv_dir':
      csv_dir = os.path.normpath(yaml_config[item])
      if not os.path.exists(csv_dir):
        print "FAIL: path \"" + csv_dir + "\" does not exist"
        sys.exit(0)
    elif item == 'results_server':
      results_server = yaml_config[item]
    elif item == 'results_link':
      results_link = yaml_config[item]
  if (results_link != results_server != ''):
    if not post_file.link_exists(results_server, results_link):
      sys.exit(0)
  browser_config = {'preferences'  : yaml_config['preferences'],
                    'extensions'   : yaml_config['extensions'],
                    'browser_path' : yaml_config['browser_path'],
                    'browser_log'  : yaml_config['browser_log'],
                    'symbols_path' : yaml_config.get('symbols_path', None),
                    'browser_wait' : yaml_config['browser_wait'],
                    'process'      : yaml_config['process'],
                    'extra_args'   : yaml_config['extra_args'],
                    'branch'       : yaml_config['branch'],
                    'buildid'      : yaml_config['buildid'],
                    'env'          : yaml_config['env'],
                    'dirs'         : yaml_config['dirs'],
                    'init_url'     : yaml_config['init_url']}
  if 'branch_name' in yaml_config:
      browser_config['branch_name'] = yaml_config['branch_name']
  if 'test_name_extension' in yaml_config:
      browser_config['test_name_extension'] = yaml_config['test_name_extension']
  else:
      browser_config['test_name_extension'] = ''
  #normalize paths to work across platforms
  browser_config['browser_path'] = os.path.normpath(browser_config['browser_path'])
  for dir in browser_config['dirs']:
    browser_config['dirs'][dir] = os.path.normpath(browser_config['dirs'][dir])
  tests = yaml_config['tests']
  if (testdate != ''):
    date = int(time.mktime(time.strptime(testdate, '%a, %d %b %Y %H:%M:%S GMT')))
  else:
    date = int(time.time()) #TODO get this into own file
  utils.debug("using testdate: %d" % date)
  utils.debug("actual date: %d" % int(time.time()))
  print 'RETURN:s: %s' % title
  #pull buildid & sourcestamp from browser
  browser_config = browserInfo(browser_config)

  if ffprocess.checkAllProcesses(browser_config['process']):
    print "FAIL: all firefox processes must be closed before tests can be run"
    sys.exit(0)

  utils.startTimer()
  utils.stamped_msg(title, "Started")
  for test in tests:
    testname = test['name']
    utils.stamped_msg("Running test " + testname, "Started")
    try:
      browser_dump, counter_dump, print_format = ttest.runTest(browser_config, test)
      utils.debug("Received test results: " + " ".join(browser_dump))
      results[testname] = [browser_dump, counter_dump, print_format]
      # If we're doing CSV, write this test immediately (bug 419367)
      if csv_dir != '':
        send_to_csv(csv_dir, {testname : results[testname]})
    except talosError, e:
      utils.stamped_msg("Failed " + testname, "Stopped")
      print 'FAIL: Busted: ' + testname
      print 'FAIL: ' + e.msg.replace('\n','\nRETURN:')
    utils.stamped_msg("Completed test " + testname, "Stopped")
Example #13
0
def test_file(filename, to_screen):
    """Runs the talos tests on the given config file and generates a report.

    Args:
      filename: the name of the file to run the tests on
      to_screen: boolean, determine if all results should be output directly to stdout
    """

    browser_config = {}
    tests = []
    title = ''
    testdate = ''
    csv_dir = ''
    results_server = ''
    results_link = ''
    results = {}

    # Read in the profile info from the YAML config file
    config_file = open(filename, 'r')
    yaml_config = yaml.load(config_file)
    config_file.close()
    for item in yaml_config:
        if item == 'title':
            title = yaml_config[item]
        elif item == 'testdate':
            testdate = yaml_config[item]
        elif item == 'csv_dir':
            csv_dir = os.path.normpath(yaml_config[item])
            if not os.path.exists(csv_dir):
                print "FAIL: path \"" + csv_dir + "\" does not exist"
                sys.exit(0)
        elif item == 'results_server':
            results_server = yaml_config[item]
        elif item == 'results_link':
            results_link = yaml_config[item]
    if (results_link != results_server != ''):
        if not post_file.link_exists(results_server, results_link):
            sys.exit(0)
    browser_config = {
        'preferences': yaml_config['preferences'],
        'extensions': yaml_config['extensions'],
        'browser_path': yaml_config['browser_path'],
        'browser_log': yaml_config['browser_log'],
        'symbols_path': yaml_config.get('symbols_path', None),
        'browser_wait': yaml_config['browser_wait'],
        'process': yaml_config['process'],
        'extra_args': yaml_config['extra_args'],
        'branch': yaml_config['branch'],
        'buildid': yaml_config['buildid'],
        'env': yaml_config['env'],
        'dirs': yaml_config.get('dirs', {}),
        'bundles': yaml_config.get('bundles', {}),
        'init_url': yaml_config['init_url']
    }

    if 'child_process' in yaml_config:
        browser_config['child_process'] = yaml_config['child_process']
    else:
        browser_config['child_process'] = 'plugin-container'
    if 'branch_name' in yaml_config:
        browser_config['branch_name'] = yaml_config['branch_name']
    if 'test_name_extension' in yaml_config:
        browser_config['test_name_extension'] = yaml_config[
            'test_name_extension']
    else:
        browser_config['test_name_extension'] = ''

    if 'sourcestamp' in yaml_config:
        browser_config['sourcestamp'] = yaml_config['sourcestamp']
    if 'repository' in yaml_config:
        browser_config['repository'] = yaml_config['repository']

    if 'deviceip' in yaml_config:
        browser_config['host'] = yaml_config['deviceip']
    else:
        browser_config['host'] = ''
    if 'deviceport' in yaml_config:
        browser_config['port'] = yaml_config['deviceport']
    else:
        browser_config['port'] = ''
    if 'webserver' in yaml_config:
        browser_config['webserver'] = yaml_config['webserver']
    else:
        browser_config['webserver'] = ''
    if 'deviceroot' in yaml_config:
        browser_config['deviceroot'] = yaml_config['deviceroot']
    else:
        browser_config['deviceroot'] = ''
    if 'remote' in yaml_config:
        browser_config['remote'] = yaml_config['remote']
    else:
        browser_config['remote'] = False

    #normalize paths to work across platforms
    dm = None
    if browser_config['remote']:
        import devicemanager
        dm = devicemanager.DeviceManager(browser_config['host'],
                                         browser_config['port'])
    browser_config['browser_path'] = os.path.normpath(
        browser_config['browser_path'])
    for dir in browser_config['dirs']:
        browser_config['dirs'][dir] = os.path.normpath(
            browser_config['dirs'][dir])
    for bname in browser_config['bundles']:
        browser_config['bundles'][bname] = os.path.normpath(
            browser_config['bundles'][bname])
    tests = yaml_config['tests']
    if (testdate != ''):
        date = int(
            time.mktime(time.strptime(testdate, '%a, %d %b %Y %H:%M:%S GMT')))
    else:
        date = int(time.time())  #TODO get this into own file
    utils.debug("using testdate: %d" % date)
    utils.debug("actual date: %d" % int(time.time()))
    print 'RETURN:s: %s' % title
    #pull buildid & sourcestamp from browser
    browser_config = browserInfo(browser_config, devicemanager=dm)

    utils.startTimer()
    utils.stamped_msg(title, "Started")
    for test in tests:
        testname = test['name']
        utils.stamped_msg("Running test " + testname, "Started")
        try:
            mytest = TTest(browser_config['remote'])
            browser_dump, counter_dump, print_format = mytest.runTest(
                browser_config, test)
            utils.debug("Received test results: " + " ".join(browser_dump))
            results[testname] = [browser_dump, counter_dump, print_format]
            # If we're doing CSV, write this test immediately (bug 419367)
            if csv_dir != '':
                send_to_csv(csv_dir, {testname: results[testname]})
            if to_screen:
                send_to_csv(None, {testname: results[testname]})
        except talosError, e:
            utils.stamped_msg("Failed " + testname, "Stopped")
            print 'FAIL: Busted: ' + testname
            print 'FAIL: ' + e.msg.replace('\n', '\nRETURN:')
        utils.stamped_msg("Completed test " + testname, "Stopped")
Example #14
0
def send_to_graph(results_server, results_link, machine, date, browser_config,
                  results):
    links = ''
    result_strings = []

    #construct all the strings of data, one string per test and one string per counter
    for testname in results:
        vals = []
        fullname = testname
        browser_dump, counter_dump, print_format = results[testname]
        utils.debug("Working with test: " + testname)
        utils.debug("Sending results: " + " ".join(browser_dump))
        utils.stamped_msg("Generating results file: " + testname, "Started")
        if print_format == 'tsformat':
            #non-tpformat results
            for bd in browser_dump:
                vals.extend([[x, 'NULL'] for x in bd.split('|')])
        elif print_format == 'tpformat':
            #tpformat results
            fullname += browser_config['test_name_extension']
            for bd in browser_dump:
                bd = bd.rstrip('\n')
                page_results = bd.splitlines()
                for line in page_results:
                    val, page = process_tpformat(line)
                    if val > -1:
                        vals.append([val, page])
        else:
            raise talosError("Unknown print format in send_to_graph")
        result_strings.append(
            construct_results(machine, fullname, browser_config['branch_name'],
                              browser_config['sourcestamp'],
                              browser_config['buildid'], date, vals))
        utils.stamped_msg("Generating results file: " + testname, "Stopped")
        #counters collected for this test
        for cd in counter_dump:
            for count_type in cd:
                vals = [[x, 'NULL'] for x in cd[count_type]]
                counterName = testname + '_' + shortName(count_type)
                if print_format == "tpformat":
                    counterName += browser_config['test_name_extension']
                utils.stamped_msg("Generating results file: " + counterName,
                                  "Started")
                result_strings.append(
                    construct_results(machine, counterName,
                                      browser_config['branch_name'],
                                      browser_config['sourcestamp'],
                                      browser_config['buildid'], date, vals))
                utils.stamped_msg("Generating results file: " + counterName,
                                  "Stopped")

    #send all the strings along to the graph server
    #(note: 'testname' in the messages below is left over from the loop
    # above; a later revision pairs each data string with its test name)
    for data_string in result_strings:
        RETRIES = 5
        wait_time = 5
        times = 0
        msg = ""
        while (times < RETRIES):
            try:
                utils.stamped_msg("Transmitting test: " + testname, "Started")
                links += process_Request(
                    post_file.post_multipart(
                        results_server, results_link, [("key", "value")],
                        [("filename", "data_string", data_string)]))
                break
            except talosError, e:
                times += 1
                msg = e.msg
                time.sleep(wait_time)
                wait_time += wait_time
        if times == RETRIES:
            raise talosError("Failed to send data %d times... quitting\n%s" %
                             (RETRIES, msg))
        utils.stamped_msg("Transmitting test: " + testname, "Stopped")
Example #15
0
    msg = ""
    while (times < RETRIES):
      try:
        utils.stamped_msg("Transmitting test: " + testname, "Started")
        links += process_Request(post_file.post_multipart(results_server, results_link, [("key", "value")], [("filename", "data_string", data_string)]))
        break
      except talosError, e:
        msg = e.msg
      except Exception, e:
        msg = str(e)
      times += 1
      time.sleep(wait_time)
      wait_time += wait_time
    if times == RETRIES:
        raise talosError("Graph server unreachable (%d attempts)\n%s" % (RETRIES, msg))
    utils.stamped_msg("Transmitting test: " + testname, "Stopped")

  return links

def results_from_graph(links, results_server, amo):
  if amo:
    #only get a pass/fail back from the graph server
    lines = links.split('\n')
    for line in lines:
      if line == "":
        continue
      if line.lower() in ('success',):
        print 'RETURN:addon results inserted successfully'
    
  else:
    #take the results from the graph server collection script and put it into a pretty format for the waterfall
Example #16
0
def send_to_csv(csv_dir, results):
  import csv
  def avg_excluding_max(val_list):
    """return float rounded to two decimal places, converted to string
       calculates the average value in the list excluding the max value"""
    i = len(val_list)
    total = sum(float(v) for v in val_list)
    maxval = max(float(v) for v in val_list)
    if total > maxval:
      avg = str(round((total - maxval)/(i-1), 2))
    else:
      avg = str(round(total, 2))
    return avg

  for res in results:
    browser_dump, counter_dump, print_format = results[res]
    if csv_dir:
      writer = csv.writer(open(os.path.join(csv_dir, res + '.csv'), "wb"))
    else: #working with stdout
      writer = csv.writer(sys.stdout)
    if print_format == 'tsformat':
      i = 0
      res_list = []
      writer.writerow(['i', 'val'])
      for val in browser_dump:
        val_list = val.split('|')
        for v in val_list:
          writer.writerow([i, v])
          i += 1
          res_list.append(v)
      writer.writerow(['RETURN: ' + res + ': ' + avg_excluding_max(res_list),])
    elif print_format == 'tpformat':
      writer.writerow(['i', 'page', 'median', 'mean', 'min', 'max', 'runs'])
      for bd in browser_dump:
        bd = bd.rstrip('\n')
        page_results = bd.splitlines()
        i = 0
        res_list = []
        for mypage in page_results:
          r = mypage.split(';')
          #skip this line if it isn't the correct format
          if len(r) == 1:
              continue
          r[1] = r[1].rstrip('/')
          if r[1].find('/') > -1:
            page = r[1].split('/')[1]
          else:
            page = r[1]
          res_list.append(r[2])
          writer.writerow([i, page, r[2], r[3], r[4], r[5], '|'.join(r[6:])])
          i += 1
        writer.writerow(['RETURN: ' + res + ': ' + avg_excluding_max(res_list), ])
    else:
      raise talosError("Unknown print format in send_to_csv")
    for cd in counter_dump:
      for count_type in cd:
        counterName = res + '_' + shortName(count_type)
        if cd[count_type] == []: #failed to collect any data for this counter
          utils.stamped_msg("No results collected for: " + counterName, "Error")
          continue
        if csv_dir:
          writer = csv.writer(open(os.path.join(csv_dir, counterName + '.csv'), "wb"))
        else:
          writer = csv.writer(sys.stdout)
        writer.writerow(['i', 'value'])
        i = 0
        for val in cd[count_type]:
          writer.writerow([i, val])
          i += 1
        if isMemoryMetric(shortName(count_type)):
          writer.writerow(['RETURN: ' + counterName + ': ' + filesizeformat(avg_excluding_max(cd[count_type])),])
        elif count_type == 'responsiveness':
          writer.writerow(['RETURN: ' + counterName + ': ' + responsiveness_Metric(cd[count_type]),])
        else:
          writer.writerow(['RETURN: ' + counterName + ': ' + avg_excluding_max(cd[count_type]),])
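# A quick illustration of avg_excluding_max above: the max value is dropped
# before averaging, which damps a single outlier run (values made up):
#   ['10', '20', '30'] -> (10 + 20) / 2 -> '15.0'
#   ['5']              -> total == max, so the plain total is kept -> '5.0'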
Example #17
0
  utils.debug("actual date: %d" % int(time.time()))

  utils.stamped_msg(title, "Started")
  for test in tests:
    utils.stamped_msg("Running test " + test, "Started")
    try:
      browser_dump, counter_dump = ttest.runTest(browser_config, tests[test])
    except talosError, e:
      utils.stamped_msg("Failed " + test, "Stopped")
      print 'FAIL: Busted: ' + test
      print 'FAIL: ' + e.msg
      sys.exit(0)
    utils.debug("Received test results: " + " ".join(browser_dump))
    results[test] = [browser_dump, counter_dump]
    utils.stamped_msg("Completed test " + test, "Stopped")
  utils.stamped_msg(title, "Stopped")

  #process the results
  if (results_server != '') and (results_link != ''):
    #send results to the graph server
    utils.stamped_msg("Sending results", "Started")
    send_to_graph(results_server, results_link, title, date, browser_config, results)
    utils.stamped_msg("Completed sending results", "Stopped")
  if csv_dir != '':
    send_to_csv(csv_dir, results)
  
if __name__ == '__main__':
  optlist, args = getopt.getopt(sys.argv[1:], 'dn', ['debug', 'noisy'])
  for o, a in optlist:
    if o in ('-d', "--debug"):
      print 'setting debug'
Example #18
0
def test_file(filename, to_screen, amo):
  """Runs the talos tests on the given config file and generates a report.

  Args:
    filename: the name of the file to run the tests on
    to_screen: boolean, determine if all results should be output directly to stdout
    amo: boolean, determine if results are being reported for an addon (AMO) run
  """
  
  browser_config = {}
  tests = []
  title = ''
  testdate = ''
  csv_dir = ''
  results_server = ''
  results_link = ''
  results = {}
  
  # Read in the profile info from the YAML config file
  config_file = open(filename, 'r')
  yaml_config = yaml.load(config_file)
  config_file.close()
  for item in yaml_config:
    if item == 'title':
      title = yaml_config[item]
    elif item == 'testdate':
      testdate = yaml_config[item]
    elif item == 'csv_dir':
      csv_dir = os.path.normpath(yaml_config[item])
      if not os.path.exists(csv_dir):
        print "FAIL: path \"" + csv_dir + "\" does not exist"
        sys.exit(0)
    elif item == 'results_server':
      results_server = yaml_config[item]
    elif item == 'results_link':
      results_link = yaml_config[item]
  if (results_link != results_server != ''):
    if not post_file.link_exists(results_server, results_link):
      print 'WARNING: graph server link does not exist'
  browser_config = {'preferences'  : yaml_config['preferences'],
                    'extensions'   : yaml_config['extensions'],
                    'browser_path' : yaml_config['browser_path'],
                    'browser_log'  : yaml_config['browser_log'],
                    'symbols_path' : yaml_config.get('symbols_path', None),
                    'browser_wait' : yaml_config['browser_wait'],
                    'process'      : yaml_config['process'],
                    'extra_args'   : yaml_config['extra_args'],
                    'branch'       : yaml_config['branch'],
                    'title'        : yaml_config.get('title', ''),
                    'buildid'      : yaml_config['buildid'],
                    'env'          : yaml_config['env'],
                    'dirs'         : yaml_config.get('dirs', {}),
                    'bundles'      : yaml_config.get('bundles', {}),
                    'init_url'     : yaml_config['init_url'],
                    'child_process'      : yaml_config.get('child_process', 'plugin-container'),
                    'branch_name'        : yaml_config.get('branch_name', ''),
                    'test_name_extension': yaml_config.get('test_name_extension', ''),
                    'sourcestamp'        : yaml_config.get('sourcestamp', 'NULL'),
                    'repository'         : yaml_config.get('repository', 'NULL'),
                    'host'               : yaml_config.get('deviceip', ''),
                    'port'               : yaml_config.get('deviceport', ''),
                    'webserver'          : yaml_config.get('webserver', ''),
                    'deviceroot'         : yaml_config.get('deviceroot', ''),
                    'remote'             : yaml_config.get('remote', False),
                    'test_timeout'       : yaml_config.get('test_timeout', 1200),
                    'addon_id'           : yaml_config.get('addon_id', 'NULL'),
                    'bcontroller_config' : yaml_config.get('bcontroller_config', 'bcontroller.yml'),
                    'xperf_path'         : yaml_config.get('xperf_path', None)}

  #normalize paths to work across platforms
  dm = None
  if browser_config['remote']:
    import devicemanager
    if (browser_config['port'] == -1):
        import devicemanagerADB
        dm = devicemanagerADB.DeviceManagerADB(browser_config['host'], browser_config['port'])
    else:
        import devicemanagerSUT
        dm = devicemanagerSUT.DeviceManagerSUT(browser_config['host'], browser_config['port'])

  browser_config['browser_path'] = os.path.normpath(browser_config['browser_path'])
  for dir in browser_config['dirs']:
    browser_config['dirs'][dir] = os.path.normpath(browser_config['dirs'][dir])
  for bname in browser_config['bundles']:
    browser_config['bundles'][bname] = os.path.normpath(browser_config['bundles'][bname])
  tests = yaml_config['tests']
  if (testdate != ''):
    date = int(time.mktime(time.strptime(testdate, '%a, %d %b %Y %H:%M:%S GMT')))
  else:
    date = int(time.time()) #TODO get this into own file
  utils.debug("using testdate: %d" % date)
  utils.debug("actual date: %d" % int(time.time()))
  print 'RETURN:s: %s' % title
  #pull buildid & sourcestamp from browser
  browser_config = browserInfo(browser_config, devicemanager=dm)

  if browser_config['remote']:
    procName = browser_config['browser_path'].split('/')[-1]
    if (dm.processExist(procName)):
      dm.killProcess(procName)

  utils.startTimer()
  utils.stamped_msg(title, "Started")
  for test in tests:
    testname = test['name']
    utils.stamped_msg("Running test " + testname, "Started")
    try:
      mytest = TTest(browser_config['remote'])
      browser_dump, counter_dump, print_format = mytest.runTest(browser_config, test)
      utils.debug("Received test results: " + " ".join(browser_dump))
      results[testname] = [browser_dump, counter_dump, print_format]
      # If we're doing CSV, write this test immediately (bug 419367)
      if csv_dir != '':
        send_to_csv(csv_dir, {testname : results[testname]})
      if to_screen or amo:
        send_to_csv(None, {testname : results[testname]})
    except talosError, e:
      utils.stamped_msg("Failed " + testname, "Stopped")
      print 'FAIL: Busted: ' + testname
      print 'FAIL: ' + e.msg.replace('\n','\nRETURN:')
    utils.stamped_msg("Completed test " + testname, "Stopped")
Example #19
0
def send_to_graph(results_server, results_link, title, date, browser_config, results):
  tbox = title
  url_format = "http://%s/%s"
  link_format = "<a href=\"%s\">%s</a>"
  #value, testname, tbox, timeval, date, branch, buildid, type, data
  result_format = "%.2f,%s,%s,%d,%d,%s,%s,%s,%s,\n"
  result_format2 = "%.2f,%s,%s,%d,%d,%s,%s,%s,\n"
  links = ''

  for res in results:
    browser_dump, counter_dump = results[res]
    utils.debug("Working with test: " + res)
    utils.debug("Sending results: " + " ".join(browser_dump))
    utils.stamped_msg("Transmitting test: " + res, "Started")
    filename = tempfile.mktemp()
    tmpf = open(filename, "w")
    if res in ('ts', 'twinopen'):
      i = 0
      for val in browser_dump:
        val_list = val.split('|')
        for v in val_list:
          tmpf.write(result_format % (float(v), res, tbox, i, date, browser_config['branch'], browser_config['buildid'], "discrete", "ms"))
          i += 1
    else:
      # each line of the string is of the format i;page_name;median;mean;min;max;time vals\n
      name = ''
      if res in ('tp', 'tp_js'):
        name = '_loadtime'
      for bd in browser_dump:
        bd = bd.rstrip('\n')
        page_results = bd.splitlines()
        i = 0
        for mypage in page_results:
          r = mypage.split(';')
          #skip this line if it isn't the correct format
          if len(r) == 1:
              continue
          r[1] = r[1].rstrip('/')
          if r[1].find('/') > -1:
            page = r[1].split('/')[1]
          else:
            page = r[1]
          try:
            val = float(r[2])
          except ValueError:
            print 'WARNING: value error for median in tp'
            val = 0
          tmpf.write(result_format % (val, res + name, tbox, i, date, browser_config['branch'], browser_config['buildid'], "discrete", page))
          i += 1
    tmpf.flush()
    tmpf.close()
    links += post_chunk(results_server, results_link, res, filename)
    os.remove(filename)
    for cd in counter_dump:
      for count_type in cd:
        val_list = cd[count_type]
        chunks = chunk_list(val_list)
        chunk_link = ''
        i = 0
        for chunk in chunks:
          filename = tempfile.mktemp()
          tmpf = open(filename, "w")
          for val in chunk:
              tmpf.write(result_format2 % (float(val), res + "_" + count_type.replace("%", "Percent"), tbox, i, date, browser_config['branch'], browser_config['buildid'], "discrete"))
              i += 1
          tmpf.flush()
          tmpf.close()
          chunk_link = post_chunk(results_server, results_link, '%s_%s (%d values)' % (res, count_type, len(chunk)), filename)
          os.remove(filename)
        links += chunk_link
    utils.stamped_msg("Transmitting test: " + res, "Stopped")
 
  first_results = ''
  last_results = '' 
  full_results = '\nRETURN:<p style="font-size:smaller;">Details:<br>'  
  lines = links.split('\n')
  for line in lines:
    if line == "":
      continue
    values = line.split(":")
    linkName = values[1]
    if linkName in ('tp_pbytes', 'tp_%cpu'):
      continue
    if float(values[2]) > 0:
      linkName += ": " + str(values[2])
      url = url_format % (results_server, values[0])
      link = link_format % (url, linkName)
      first_results = first_results + "\nRETURN:" + link + "<br>"
    else:
      url = url_format % (results_server, values[0])
      link = link_format % (url, linkName)
      last_results = last_results + '| ' + link + ' '
  full_results = first_results + full_results + last_results + '|</p>'
  print full_results
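# chunk_list is used by send_to_graph above but not defined in these
# examples. A minimal sketch, assuming it simply splits the counter values
# into fixed-size chunks so each post to the graph server stays small (the
# chunk size here is an illustrative guess):
def chunk_list(val_list, size=500):
    chunks = []
    for start in range(0, len(val_list), size):
        chunks.append(val_list[start:start + size])
    return chunks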