class XPerfAutoLog(object):
    """Accumulate xperf disk-I/O measurements and submit them to autolog.

    The constructor optionally loads a talos YAML config describing the run;
    per-file I/O counters are added via addData() and pushed to the autolog
    REST server by post().
    """

    def __init__(self, filename=None):
        # filename: optional path to the talos YAML config for this run.
        self.testGroup = None
        # Initialize even when no config is given so later attribute access
        # fails clearly (None) rather than with AttributeError.
        self.yaml_config = None
        if filename is not None:
            # NOTE(review): yaml.load on a local talos config; prefer
            # yaml.safe_load if this file could ever be untrusted.
            config_file = open(filename, 'r')
            try:
                self.yaml_config = yaml.load(config_file)
            finally:
                config_file.close()
            self.autolog_init()

    def autolog_init(self):
        """Create the RESTfulAutologTestGroup from the loaded config.

        Does nothing when the config carries no 'testname'.
        """
        testos = 'win7'  # currently we only run xperf on windows 7
        testname = self.yaml_config.get('testname', '')
        testplatform = 'win32'  # currently we only run xperf on win32
        if testname == '':
            return

        self.testGroup = RESTfulAutologTestGroup(
            testgroup=testname,
            os=testos,
            platform=testplatform,
            machine=self.yaml_config['title'],
            starttime=int(time.time()),
            # BUG FIX: the original interpolated the `os` *module* here
            # (yielding "<module 'os' ...>" in the builder id); use the
            # OS name local instead.
            builder='%s_%s-opt_test-%s' % (self.yaml_config['title'],
                                           testos, testname),
            restserver='http://10.2.76.100/autologserver'
        )

        self.testGroup.set_primary_product(
            tree=self.yaml_config['repository'].split('/')[-1],
            buildtype='opt',  # we only run talos on opt builds
            buildid=self.yaml_config['buildid'],
            revision=self.yaml_config['sourcestamp'],
        )

    def addData(self, filename, readcount, readbytes, writecount, writebytes):
        """Record disk-I/O counters for one file.

        Lazily (re)initializes the test group; silently drops the data when
        no group could be created (e.g. no 'testname' in the config).
        """
        if self.testGroup is None:
            self.autolog_init()
        if self.testGroup is None:
            return

        self.testGroup.add_perf_data(
            test=self.yaml_config['testname'],
            type='diskIO',
            # strip the Windows directory portion, keep only the basename
            name=filename[filename.rfind('\\') + 1:],
            reads=readcount,
            read_bytes=readbytes,
            writes=writecount,
            write_bytes=writebytes
        )

    def post(self):
        """Submit all accumulated perf data to the autolog server, if any."""
        if self.testGroup is not None:
            self.testGroup.submit()
def parse_and_submit(self):
    """Parse the logs generated by runtests.py and submit results to autolog.

    Aggregates "Profile::name: value" timing lines from the plain and chrome
    mochitest logs, averages each metric over the configured repeat count,
    and submits the combined numbers as perf data. Aborts without submitting
    if either log reports a nonzero failure count.
    """
    prof = re.compile(r"Profile::((\w+):\s*(\d+))")
    failure = re.compile(r"Failed:\s+([0-9]*)")

    results = self._parse_profile_log(
        self.plain_log_file, prof, failure, PLAIN_REPEATS, "Plain")
    if results is None:
        return

    chrome_results = self._parse_profile_log(
        self.chrome_log_file, prof, failure, CHROME_REPEATS, "Chrome")
    if chrome_results is None:
        return

    results.update(chrome_results)
    self.log.info("results: %s" % results)

    # submit
    if results:
        testgroup = RESTfulAutologTestGroup(
            testgroup='mochitest-perf',
            os=self.platform,
            platform=self.platform,
            builder=self.builddata['buildid'],
            starttime=int(time.time())
        )
        testgroup.set_primary_product(
            tree=self.builddata['tree'],
            buildtype=self.builddata['buildtype'],
            buildid=self.builddata['buildid'],
            revision=self.revision,
        )
        for test, value in results.items():
            # Metrics containing "RunTime" are run-time measurements;
            # everything else is treated as a load-time measurement.
            test_type = 'RunTime' if 'RunTime' in test else 'LoadTime'
            testgroup.add_perf_data(
                test='mochitest-perf',
                name=test,
                type=test_type,
                time=value
            )
        self.log.info("Submitting to autolog")
        testgroup.submit()

def _parse_profile_log(self, log_path, prof, failure, repeats, label):
    """Aggregate Profile:: timings from one mochitest log.

    Returns a dict of metric name -> averaged value, or None when the log
    reports test failures (in which case nothing should be submitted).
    """
    results = {}
    log_file = open(log_path, 'r')
    try:
        for line in log_file:
            # match groups: (full "name: digits", name, digits)
            for match in prof.findall(line):
                key = match[1]
                results[key] = results.get(key, 0) + int(match[2])
            fail = failure.search(line)
            # BUG FIX: the original used `is not "0"`, which compares
            # object identity; a regex group() result is a fresh string,
            # so that test was effectively always True. Compare values.
            if fail and fail.group(1) != "0":
                self.log.info(
                    "%s tests failed, not submitting data to autolog" % label)
                return None
    finally:
        # close the handle even on the early failure return (the original
        # leaked it in that path)
        log_file.close()
    for key in results:
        # `/` matches the original's division semantics on these int totals
        results[key] = results[key] / repeats
    return results