def process_data(self, testdir, rawiter):
    corestr = None
    lastdata = None
    for line in rawiter:
        m = re.match(r"Running on (\d+) cores\.", line)
        if m:
            ncores = int(m.group(1))
            results = RowResults(["core %d to %d" % (n, (n + 1) % ncores)
                                  for n in range(ncores)])
            corestr = r"\d+: "
            for n in range(ncores):
                corestr += r"(\d+) "
            continue
        if corestr is not None:
            m = re.match(corestr, line)
            if m:
                data = [int(m.group(n)) for n in range(1, ncores + 1)]
                if lastdata is not None:
                    diffs = [data[0] - lastdata]
                else:
                    diffs = [0]
                diffs += [(data[n] - data[n - 1]) for n in range(1, ncores)]
                results.add_row(diffs)
                lastdata = data[ncores - 1]
    return results
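# A hedged sketch of the raw output the parser above expects (values are
# hypothetical, not from a real run). Each data line carries one cumulative
# counter per core, and each must end with a trailing space, since the
# pattern built above appends "(\d+) " per core:
#
#   Running on 2 cores.
#   0: 100 150
#   1: 210 270
#
# This would yield the rows [0, 50] and [60, 60]: the first diff on each line
# is taken against the previous line's last counter, so the counters form one
# cumulative sequence that wraps across cores.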
def process_data(self, testdir, rawiter):
    results = RowResults(['threads', 'delay', 'slowdown'])
    process_sum = 0
    sums = []
    baseline = []
    for line in rawiter:
        m = re.match(r"workcnt (\d+): (\d+)", line)
        if m:
            if int(m.group(1)) != 0:
                process_sum += int(m.group(2))
            continue
        m = re.match(r"number of threads: (\d+), delay: (\d+)", line)
        if m:
            sums.append([m.group(1), m.group(2), process_sum])
            if int(m.group(2)) == 0:
                baseline.append([m.group(1), process_sum])
            process_sum = 0
    for entry in sums:
        for [t, p] in baseline:
            if t == entry[0]:
                basesum = p
                break
        procsum = float(entry[2]) / float(basesum)
        results.add_row([entry[0], entry[1], procsum])
    return results
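# A minimal sketch of the input format assumed above (hypothetical numbers).
# Worker 0's count is deliberately excluded from process_sum, and the delay-0
# run for each thread count serves as the baseline:
#
#   workcnt 0: 500
#   workcnt 1: 480
#   number of threads: 2, delay: 0
#   workcnt 0: 500
#   workcnt 1: 960
#   number of threads: 2, delay: 100
#
# yields the rows ['2', '0', 1.0] and ['2', '100', 2.0], i.e. slowdown is
# reported relative to the delay-0 baseline for the same thread count.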
def process_data(self, testdir, rawiter):
    debug.verbose(">> processing data")
    # the test passed if we can produce results
    results = RowResults(['op', 'cycles/iter', '#iters'])
    # and assertions are disabled
    valid = False
    for line in rawiter:
        if line.startswith('Operating system bug'):
            valid = False
            break
        if 'ASSERTIONS DISABLED' in line:
            valid = True
        #if line.startswith(self.get_finish_string()):
        #    break
        if line.startswith("appel_li:"):
            # found data line: <op cycles/iter #iters>
            elems = line.strip().split(':')
            if len(elems) < 3:
                continue
            _, op, data = elems
            if ' ' not in data:
                continue
            cyc, _, count, _ = data.strip().split(' ')
            results.add_row([op.strip(), cyc, count])
    if not valid:
        return PassFailResult(False)
    return results
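# Hedged example of input this parser would accept (operation name and numbers
# are invented; the exact appel_li output format is assumed here, not
# confirmed). A data line must split on ':' into exactly three parts, and its
# data part into exactly four space-separated fields:
#
#   ASSERTIONS DISABLED
#   appel_li: protect1: 120 cycles 1000 iters
#
# yields the row ['protect1', '120', '1000'], and the ASSERTIONS DISABLED
# line is what makes the result valid.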
def get_row_results(self, name):
    results = RowResults(['nodeid', 'mdbsize', 'oplatency'])
    for nodeid in sorted(self.nodedata.keys()):
        if nodeid not in self.tscperus:
            # skip nodes that aren't running benchmark for some cases
            continue
        for d in sorted(self.nodedata[nodeid].keys()):
            for v in self.nodedata[nodeid][d]:
                results.add_row([nodeid, d, v])
    return results
def process_data(self, testdir, raw_iter):
    cols = ('Requested Throughput,Achieved Throughput,Sent Throughput,'
            'Packet Size,Min,Avg,Max,Standard Deviation,Median')
    results = RowResults(cols.split(','))
    with open(os.path.join(testdir, LOGFILENAME), 'r') as logfile:
        for line in logfile:
            m = re.match(r'(\d+),(\d+),(\d+),(\d+),(\d+),(\d+),(\d+),'
                         r'(\d+\.\d+),(\d+)', line)
            assert m  # line must match, otherwise we have junk output
            vals = [float(s) if '.' in s else int(s) for s in m.groups()]
            results.add_row(vals)
    return results
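# Hedged sketch of one log line the regex above accepts (values hypothetical):
#
#   1000,950,980,64,10,15,30,2.50,14
#
# Every field is parsed as an int except Standard Deviation, the only field
# with a decimal point, which becomes a float.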
def process_data(self, testdir, rawiter):
    results = RowResults(['impl', 'reset', 'measure', 'count', 'ticks'])
    caps = RowResults(['run', 'reset', 'count', 'base', 'bits', 'flags'],
                      name="caps")
    impl = None
    reset = None
    measure = None
    count = None
    dumping = False
    for line in rawiter:
        m = re.match(r"\[mdb_bench of (\w+)\]", line)
        if m:
            impl = m.group(1)
            continue
        m = re.match(r"\[mdb_bench dumping\]", line)
        if m:
            dumping = True
        if self.boot_phase:
            continue
        m = re.match(r"([^:/]+)/(\d+):dump:([^:]+): 0x([0-9a-fA-F]+)/(\d+) ([c.][a.][d.])",
                     line)
        if m:
            caps.add_row([m.group(3), m.group(1), m.group(2),
                          m.group(4), m.group(5), m.group(6)])
            continue
        m = re.match(r"([^:]+):([^:]+): (\d+)/(\d+)", line)
        if m:
            reset = m.group(1)
            measure = m.group(2)
            count = m.group(4)
            ticks = m.group(3)
            results.add_row([impl, reset, measure, count, ticks])
            continue
    results = [results]
    if dumping:
        results.append(caps)
    return results
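# Hedged sketch of the two main line shapes parsed above (implementation and
# benchmark names are invented for illustration):
#
#   [mdb_bench of array]
#   reset_full:query_first: 1234/1000
#
# The measurement line carries <ticks>/<count>, so the row stored is
# ['array', 'reset_full', 'query_first', '1000', '1234']: count before ticks,
# matching the column order declared above.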
def process_data(self, testdir, rawiter):
    results = RowResults(['workload', 'uid', 'operation', 'count'])
    resultline = re.compile(r"\[core \d+\]\[(\d+)\] (\w+)=(\d+)")
    found_bench = False
    for line in rawiter:
        if line.startswith("frequency_bench starting"):
            found_bench = True
        if found_bench:
            m = resultline.match(line)
            if m:
                results.add_row(['procmgmt', m.group(1), m.group(2),
                                 m.group(3)])
    return results
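# Hedged example (uid, operation name and count are invented): once the line
# "frequency_bench starting" has been seen, a line such as
#
#   [core 0][42] spawn=1337
#
# produces the row ['procmgmt', '42', 'spawn', '1337'].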
def process_data(self, testdir, raw_iter):
    self.testdir = testdir
    totals = {}
    for filename in glob.iglob(os.path.join(testdir, "httperf_run*.txt")):
        nrun = int(re.match(r".*/httperf_run(\d+)_", filename).group(1))
        result = self._process_run(nrun)
        totals[nrun] = result
    fields = "run connect_rate request_rate reply_rate bandwidth errors".split()
    final = RowResults(fields)
    for run in sorted(totals.keys()):
        total = totals[run]
        errsum = sum([getattr(total, f) for f in total._err_fields])
        final.add_row([run, total.connect_rate, total.request_rate,
                       total.reply_rate, total.bandwidth, errsum])
    # XXX: often the last run will have errors in it, due to the control
    # algorithm
    #if errsum:
    #    final.mark_failed()
    return final
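# Hedged note on the filename convention assumed above: a log file named, for
# example, httperf_run7_<anything>.txt is parsed as run number 7, and each
# run's per-field error counts (total._err_fields) are summed into the single
# 'errors' column.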
def process_data(self, testdir, raw_iter):
    results = RowResults(['time0', 'time1', 'time2',
                          'time1 - time0', 'time2 - time0'])
    passed = False
    for line in raw_iter:
        m = re.match(r"page\s+(\d+)\s+time0\s+(\d+)time1\s+(\d+)time2\s+(\d+)",
                     line)
        if m:
            passed = True
            time0 = int(m.group(2))
            time1 = int(m.group(3))
            time2 = int(m.group(4))
            results.add_row([time0, time1, time2,
                             time1 - time0, time2 - time0])
    if not passed:
        results.mark_failed()
    return results
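# Hedged sketch of a matching line (timestamps invented). Note the regex
# expects no whitespace between each value and the following label, i.e. the
# numbers run directly into "time1"/"time2":
#
#   page 1 time0 100time1 150time2 200
#
# yields the row [100, 150, 200, 50, 100].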
def process_data(self, testdir, raw_iter): res = RowResults(["cores", "compute_time"]) computetime = {} ct = 0 for line in raw_iter: m = re.match(r" Time in seconds =\s+(\d+.\d+)", line) if m: ct = float(m.group(1)); continue m = re.match(r" Total processes =\s+(\d+)", line) if m: computetime[int(m.group(1))] = ct allcores = computetime.keys() allcores.sort() nan = float('nan') for c in allcores: res.add_row([c, computetime.get(c, nan)]) return res
def process_data(self, testdir, raw_iter):
    self.testdir = testdir
    totals = {}
    for filename in glob.iglob(os.path.join(testdir, 'httperf_run*.txt')):
        nrun = int(re.match(r'.*/httperf_run(\d+)_', filename).group(1))
        result = self._process_run(nrun)
        totals[nrun] = result
    fields = 'run connect_rate request_rate reply_rate bandwidth errors'.split()
    final = RowResults(fields)
    for run in sorted(totals.keys()):
        total = totals[run]
        errsum = sum([getattr(total, f) for f in total._err_fields])
        final.add_row([run, total.connect_rate, total.request_rate,
                       total.reply_rate, total.bandwidth, errsum])
    # XXX: often the last run will have errors in it, due to the control
    # algorithm
    #if errsum:
    #    final.mark_failed()
    # If we saw a severe failure (assertion failure, kernel panic, or user
    # level panic) in the webserver, fail the test
    if not super(HTTPerfTest, self).passed():
        final.mark_failed('\n'.join(self.server_failures))
    return final
def process_data(self, testdir, rawiter):
    self.regex = re.compile(self.REGEX)
    result = RowResults(['op', 'buffer', 'block', 'bandwidth'])
    if self.machine not in bandwidth:
        result.mark_failed(
            'No data about this disk, please set the initial performance values.')
        return result
    matches = 0
    for line in rawiter:
        match = self.regex.match(line)
        if match:
            matches += 1
            buffer_size, bs, bw = match.groups()
            buffer_size = int(buffer_size)
            bs = int(bs)
            bw = float(bw)
            operation = self.OP.lower()
            if operation not in bandwidth[self.machine]:
                result.mark_failed(
                    'No data about this benchmark, please set the initial performance values.')
                return result
            if bs not in bandwidth[self.machine][operation]:
                result.mark_failed('No data for {} with bs {}.'.format(
                    operation, bs))
                return result
            lower_bound = bandwidth[self.machine][operation][bs] * (1 - 0.15)
            upper_bound = bandwidth[self.machine][operation][bs] * (1 + 0.20)
            result.add_row((operation, buffer_size, bs, bw))
            if bw <= lower_bound:
                error = ("{} for {} bytes blocks not within expected range "
                         "(was {}, should be >= {}).").format(
                             operation, bs, bw, lower_bound)
                debug.log(error)
                result.mark_failed(reason=error)
            elif bw >= upper_bound:
                error = ("Achieved {} bandwidth for {} bytes blocks was "
                         "better ({}) than expected ({}).").format(
                             operation, bs, bw, upper_bound)
                debug.log(error)
                debug.log("This is good, if you can explain it! Adjust the "
                          "bandwidth numbers in blk_tests.py and re-run the test.")
                result.mark_failed(reason=error)
        if line.startswith("AHCI testing completed.") and matches > 0:
            return result
    result.mark_failed('Did not see end of test or got no bandwidth numbers.')
    return result
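# Worked example of the tolerance band above (reference value hypothetical):
# if bandwidth[machine][op][4096] == 500.0, a measured bandwidth passes only
# when it falls strictly inside (425.0, 600.0), i.e. no more than 15% below
# or 20% above the recorded reference value.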
def process_data(self, testdir, raw_iter): res = RowResults(["cores", "compute_time", "create_time"]) createtime = {} computetime = {} seencores = set() maxcores = None for line in raw_iter: m = re.match(r"\[harness: run on (\d+)/(\d+) cores\]", line) if m: runcores = int(m.group(1)) thismaxcores = int(m.group(2)) if maxcores is None: maxcores = thismaxcores elif maxcores != thismaxcores: res.mark_failed() # inconsistent max #cores in output if runcores in seencores or runcores > maxcores: res.mark_failed() # inconsistent #cores for this run seencores.add(runcores) continue m = re.match(r"Createtime\s+(\d+)\s+(\d+)", line) if m: createtime[int(m.group(1))] = int(m.group(2)) continue m = re.match(r"Computetime\s+(\d+)\s+(\d+)", line) if m: computetime[int(m.group(1))] = int(m.group(2)) allcores = set(createtime.keys()).union(computetime.keys()) if allcores == set() or allcores != seencores: res.mark_failed() nan = float('nan') allcores = list(allcores) allcores.sort() for c in allcores: a = computetime.get(c, nan) b = createtime.get(c, nan) res.add_row([c, a, b]) if a == nan or b == nan: res.mark_failed() return res