def process_data(self, testdir, raw_iter):
    """Aggregate per-run httperf results for this test directory.

    Scans *testdir* for ``httperf_run<N>_...txt`` files, processes each run
    via ``self._process_run`` and returns a RowResults with one row per run
    (run number, connect/request/reply rates, bandwidth, summed error count).
    The whole result is marked failed if the webserver showed a severe
    failure (assertion failure or panic), as reported by the superclass.
    """
    self.testdir = testdir
    totals = {}
    for filename in glob.iglob(os.path.join(testdir, 'httperf_run*.txt')):
        # FIX: raw string for the regex -- '\d' in a plain string is an
        # invalid escape (warning in py3.6+, error-bound in newer pythons).
        # NOTE(review): pattern assumes '/' separators, i.e. POSIX paths.
        nrun = int(re.match(r'.*/httperf_run(\d+)_', filename).group(1))
        totals[nrun] = self._process_run(nrun)

    fields = 'run connect_rate request_rate reply_rate bandwidth errors'.split()
    final = RowResults(fields)

    for run in sorted(totals):
        total = totals[run]
        # Sum every error counter the run result exposes.
        errsum = sum(getattr(total, f) for f in total._err_fields)
        final.add_row([run, total.connect_rate, total.request_rate,
                       total.reply_rate, total.bandwidth, errsum])
        # XXX: often the last run will have errors in it, due to the
        # control algorithm, so we deliberately do NOT fail on errsum:
        #if errsum:
        #    final.mark_failed()

    # If we saw a severe failure (assertion failure, kernel panic, or user
    # level panic) in the webserver, fail the test
    if not super(HTTPerfTest, self).passed():
        final.mark_failed('\n'.join(self.server_failures))

    return final
def process_data(self, testdir, raw_iter):
    """Extract per-page timing triples from raw benchmark output.

    Each matching line yields a row (time0, time1, time2, time1-time0,
    time2-time0). The result is marked failed when no line matched at all.
    """
    results = RowResults(['time0', 'time1', 'time2',
                          'time1 - time0', 'time2 - time0'])
    passed = False
    # FIX: raw string (plain '\d'/'\s' are invalid escapes in py3.6+) and
    # compile once instead of re-matching the pattern object per line.
    # NOTE(review): there is no whitespace between a timeN value and the
    # next "timeN" label -- presumably that matches the log format exactly;
    # confirm against the producer before "fixing" it.
    pattern = re.compile(
        r"page\s+(\d+)\s+time0\s+(\d+)time1\s+(\d+)time2\s+(\d+)")
    for line in raw_iter:
        m = pattern.match(line)
        if m:
            passed = True
            time0 = int(m.group(2))
            time1 = int(m.group(3))
            time2 = int(m.group(4))
            results.add_row([time0, time1, time2,
                             time1 - time0, time2 - time0])
    if not passed:
        results.mark_failed()
    return results
def process_data(self, testdir, raw_iter):
    """Extract per-page timing triples from raw benchmark output.

    Each matching line yields a row (time0, time1, time2, time1-time0,
    time2-time0). The result is marked failed when no line matched at all.
    """
    results = RowResults(['time0', 'time1', 'time2',
                          'time1 - time0', 'time2 - time0'])
    passed = False
    # FIX: raw string (plain '\d'/'\s' are invalid escapes in py3.6+) and
    # compile once instead of re-matching the pattern object per line.
    # NOTE(review): no whitespace between a timeN value and the following
    # "timeN" label -- presumably matches the log format; confirm.
    pattern = re.compile(
        r"page\s+(\d+)\s+time0\s+(\d+)time1\s+(\d+)time2\s+(\d+)")
    for line in raw_iter:
        m = pattern.match(line)
        if m:
            passed = True
            time0 = int(m.group(2))
            time1 = int(m.group(3))
            time2 = int(m.group(4))
            results.add_row([time0, time1, time2,
                             time1 - time0, time2 - time0])
    if not passed:
        results.mark_failed()
    return results
def process_data(self, testdir, raw_iter):
    """Collect per-core create/compute times from the raw test output.

    Parses "[harness: run on N/M cores]" banners plus "Createtime" and
    "Computetime" lines, sanity-checks that the set of cores seen is
    consistent, and returns a RowResults of (cores, compute_time,
    create_time) sorted by core count. Any inconsistency or missing
    datum marks the result failed.
    """
    res = RowResults(["cores", "compute_time", "create_time"])
    createtime = {}
    computetime = {}
    seencores = set()
    maxcores = None

    for line in raw_iter:
        m = re.match(r"\[harness: run on (\d+)/(\d+) cores\]", line)
        if m:
            runcores = int(m.group(1))
            thismaxcores = int(m.group(2))
            if maxcores is None:
                maxcores = thismaxcores
            elif maxcores != thismaxcores:
                res.mark_failed()  # inconsistent max #cores in output
            if runcores in seencores or runcores > maxcores:
                res.mark_failed()  # inconsistent #cores for this run
            seencores.add(runcores)
            continue

        m = re.match(r"Createtime\s+(\d+)\s+(\d+)", line)
        if m:
            createtime[int(m.group(1))] = int(m.group(2))
            continue

        m = re.match(r"Computetime\s+(\d+)\s+(\d+)", line)
        if m:
            computetime[int(m.group(1))] = int(m.group(2))

    allcores = set(createtime).union(computetime)
    if not allcores or allcores != seencores:
        res.mark_failed()

    # Missing entries still show up as NaN in the table for inspection.
    nan = float('nan')
    for c in sorted(allcores):
        res.add_row([c, computetime.get(c, nan), createtime.get(c, nan)])
        # BUG FIX: the original compared against nan with '==', which is
        # always False (NaN never compares equal), so a core missing one
        # of the two measurements never failed the test. Check dict
        # membership instead.
        if c not in computetime or c not in createtime:
            res.mark_failed()

    return res
def process_data(self, testdir, rawiter):
    """Validate AHCI benchmark bandwidth against stored reference values.

    Builds a RowResults of (op, buffer, block, bandwidth) rows. Each
    measured bandwidth must fall inside -15%/+20% of the reference value
    in the module-level ``bandwidth`` table; anything outside that window,
    missing reference data, or a missing end-of-test marker fails the
    result.
    """
    self.regex = re.compile(self.REGEX)
    result = RowResults(['op', 'buffer', 'block', 'bandwidth'])
    # BUG FIX: dict.has_key() was removed in Python 3 -- use the `in`
    # operator (identical semantics on Python 2) here and below.
    if self.machine not in bandwidth:
        result.mark_failed(
            'No data about this disk, please set the initial performance values.'
        )
        return result

    matches = 0
    for line in rawiter:
        match = self.regex.match(line)
        if match:
            matches += 1
            buffer_size, bs, bw = match.groups()
            buffer_size = int(buffer_size)
            bs = int(bs)
            bw = float(bw)
            operation = self.OP.lower()
            if operation not in bandwidth[self.machine]:
                result.mark_failed(
                    'No data about this benchmark, please set the initial performance values.'
                )
                return result
            if bs not in bandwidth[self.machine][operation]:
                result.mark_failed('No data for {} with bs {}.'.format(
                    operation, bs))
                return result
            # Acceptance window: -15% .. +20% around the reference value.
            lower_bound = bandwidth[self.machine][operation][bs] * (1 - 0.15)
            upper_bound = bandwidth[self.machine][operation][bs] * (1 + 0.20)
            result.add_row((operation, buffer_size, bs, bw))
            if bw <= lower_bound:
                error = "{} for {} bytes blocks not within expected range (was {}, should be >= {}).".format(
                    operation, bs, bw, lower_bound)
                debug.log(error)
                result.mark_failed(reason=error)
            elif bw >= upper_bound:
                # Unexpectedly good numbers are also a failure: the
                # reference table is probably stale.
                error = "Achieved {} bandwidth for {} bytes blocks was better ({}) than expected ({}).".format(
                    operation, bs, bw, upper_bound)
                debug.log(error)
                debug.log(
                    "This is good, if you can explain it! Adjust the bandwidth numbers in blk_tests.py and re-run the test."
                )
                result.mark_failed(reason=error)
        if line.startswith("AHCI testing completed.") and matches > 0:
            return result

    result.mark_failed(
        'Did not see end of test or got no bandwidth numbers.')
    return result