def process_simple(self, ids=None, datas=None, series=None, runfilter=None,
                   datafilter=None, aggr=aggregate.concat):
    """Print a LaTeX table of aggregated metrics: one header+row section per
    series group, with one row per runset (keyed by `ids`) and one column per
    entry in `datas`.

    ids        -- metric names used to key/group the runsets (row labels)
    datas      -- metric names whose aggregated values fill the columns
    series     -- metric names used to split runs into series S1..Sn
    runfilter  -- optional predicate applied to runs before grouping
    datafilter -- optional {metric: value} dict; runs must match all entries
    aggr       -- aggregation applied to each group's metric values
    """
    # Avoid shared mutable default arguments.
    ids = ids if ids is not None else []
    datas = datas if datas is not None else []
    series = series if series is not None else []
    fs = [metrics.metric(data) for data in datas]
    runkey = lambda run: map(lambda m: metrics.evalmetric(m, run), series)
    # BUG FIX: the original called makerunsets() without store=True, which
    # returns a one-shot itertools.groupby iterator; it was consumed while
    # building grouped_runsets below, so the zip() in the row loop saw an
    # empty iterable. Materialize with store=True, as process() (L4) does.
    runsets = list(self.makerunsets(ids, runfilter, datafilter, True))
    runseries = self.makerunsets(series, runfilter, datafilter)
    print('Data:')
    print('\\begin{itemize}')
    for d in datas:
        print('\item %s' % d)
    print('\\end{itemize}')
    print('Series:')
    print('\\begin{itemize}')
    count = 0
    for i, (key, runset) in enumerate(runseries):
        print('\item S%s: %s' % (i + 1, ', '.join(
            ['%s: %s' % (s, k) for s, k in zip(series, key)])))
        count += 1
    print('\\end{itemize}')
    print('\\begin{tabular}{|%s|}' % ('c' * len(ids) + '|' + 'c' * len(datas)))
    # Pre-group every runset once; the groups are indexed per series below.
    grouped_runsets = []
    for key, runset in runsets:
        grouped_runsets.append(list(groupby_sorted(runset, key=runkey)))
    for serieindex in range(count):
        print('\hline')
        # BUG FIX: the original concatenated a stray ' & ' here (an empty
        # header column) and labelled the series 'S0'-based, while the
        # itemize list above is 'S1'-based.
        print('\\multicolumn{%s}{|c|}{Id} & ' % len(ids)
              + '\\multicolumn{%s}{|c|}{S%s}' % (len(datas), serieindex + 1))
        print('\\\\')
        print(' & ' + ' & '.join([str(data) for data in datas]))
        print('\\\\')
        print('\hline')
        for grouped_runset, (key, runset) in zip(grouped_runsets, runsets):
            gk, rungroup = grouped_runset[serieindex]
            print(' & '.join([str(k).replace('\\', '') for k in key]
                  + [str(aggr(map(f, rungroup))).rjust(4) for f in fs])
                  + '\n\\\\')
        print('\hline')
    print('\hline \n \\end{tabular}')
def filterruns(self, runfilter, datafilter):
    """Narrow self.runs in place.

    runfilter  -- optional predicate; runs failing it are dropped
    datafilter -- optional {metric: value} dict; a run survives only if
                  every metric evaluates to the given value
    """
    selected = self.runs
    if runfilter:
        selected = filter(runfilter, self.runs)
    if datafilter:
        matches_all = lambda run: all(
            metrics.evalmetric(name, run) == expected
            for name, expected in datafilter.iteritems())
        selected = filter(matches_all, selected)
    self.runs = selected
def makerunsets(self, series, runfilter, datafilter, store=False):
    """Filter self.runs, then group them by their values on `series`.

    series     -- metric names forming the grouping key
    runfilter  -- optional predicate applied first
    datafilter -- optional {metric: value} dict; runs must match all entries
    store      -- when True, use groupby_store (materialized groups) instead
                  of the one-shot itertools.groupby iterator
    Returns an iterable of (key, runs) pairs; with an empty `series`, a
    single pseudo-group keyed ["run"].
    """
    runkey = lambda run: map(lambda m: metrics.evalmetric(m, run), series)
    selected = self.runs
    if runfilter:
        selected = filter(runfilter, self.runs)
    if datafilter:
        matches_all = lambda run: all([
            metrics.evalmetric(name, run) == expected
            for name, expected in datafilter.iteritems()])
        selected = filter(matches_all, selected)
    if len(series) == 0:
        # No grouping key: everything falls into one group.
        return [(["run"], selected)]
    ordered = sorted(selected, key=runkey)
    if store:
        return groupby_store(ordered, runkey)
    return itertools.groupby(ordered, runkey)
def process(self, ids=None, datas=None, series=None, runfilter=None,
            datafilter=None, aggr=aggregate.concat):
    """Print '#Serie' and '#Runset' summary lines, then delegate each runset
    to self.handle_runset.

    ids        -- metric names used to key the runsets
    datas      -- metric names passed through to handle_runset
    series     -- metric names used to split runs into series
    runfilter  -- optional predicate applied to runs
    datafilter -- optional {metric: value} dict; runs must match all entries
    aggr       -- aggregation passed through to handle_runset
    """
    # NOTE(review): another `process` with the same signature appears later
    # in this file; if both live in the same class, the later definition
    # shadows this one — confirm which is intended.
    ids = ids if ids is not None else []
    datas = datas if datas is not None else []
    series = series if series is not None else []
    # store=True materializes the groups so runsets can be iterated twice.
    runsets = list(self.makerunsets(ids, runfilter, datafilter, True))
    runseries = list(self.makerunsets(series, runfilter, datafilter, True))
    runkey = lambda run: map(lambda m: metrics.evalmetric(m, run), series)
    for i, (key, runserie) in enumerate(runseries):
        print('#Serie %s: %s' % (i + 1, ', '.join(
            ['%s: %s' % (s, k) for s, k in zip(series, key)])))
    for i, (key, runset) in enumerate(runsets):
        print('#Runset %s: %s' % (i + 1, ', '.join(
            ['%s: %s' % (s, k) for s, k in zip(ids, key)])))
    for runset in runsets:
        self.handle_runset(runset, runseries, datas, aggr, runkey)
def process(self, ids=None, datas=None, series=None, runfilter=None,
            datafilter=None, aggr=aggregate.concat):
    """Print a single LaTeX tabular: one row per runset (keyed by `ids`) and
    one column group per series, each group holding one aggregated column per
    entry in `datas`.

    ids        -- metric names used to key the runsets (row labels)
    datas      -- metric names whose aggregated values fill the columns
    series     -- metric names used to split runs into series S1..Sn
    runfilter  -- optional predicate applied to runs before grouping
    datafilter -- optional {metric: value} dict; runs must match all entries
    aggr       -- aggregation applied to each group's metric values
    """
    # NOTE(review): an earlier `process` with the same signature exists in
    # this file; if both live in the same class, this definition shadows it.
    # Avoid shared mutable default arguments.
    ids = ids if ids is not None else []
    datas = datas if datas is not None else []
    series = series if series is not None else []
    fs = [metrics.metric(data) for data in datas]
    runkey = lambda run: map(lambda m: metrics.evalmetric(m, run), series)
    runsets = self.makerunsets(ids, runfilter, datafilter)
    runseries = self.makerunsets(series, runfilter, datafilter)
    print('Data:')
    print('\\begin{itemize}')
    for d in datas:
        print('\item %s' % d)
    print('\\end{itemize}')
    print('Series:')
    print('\\begin{itemize}')
    count = 0
    for i, (key, runset) in enumerate(runseries):
        print('\item S%s: %s' % (i + 1, ', '.join(
            ['%s: %s' % (s, k) for s, k in zip(series, key)])))
        count += 1
    print('\\end{itemize}')
    print('\\begin{tabular}{|%s|}' % (
        'c' * len(ids) + ('|' + 'c' * len(datas)) * count))
    print('\hline')
    print('\\multicolumn{%s}{|c|}{Id} & ' % len(ids) + ' & '.join([
        '\\multicolumn{%s}{|c|}{S%s}' % (len(datas), i + 1)
        for i in range(count)]))
    print('\\\\')
    print(' & ' + ' & '.join([str(data) for data in datas] * count))
    print('\\\\')
    print('\hline')
    # Each rungroup holds the runs of one series within the current runset.
    for key, runset in runsets:
        print(' & '.join([str(k).replace('\\', '') for k in key]
              + flatten([[str(aggr(map(f, rungroup))).rjust(4) for f in fs]
                         for k, rungroup in groupby_sorted(runset, key=runkey)]))
              + '\n\\\\')
    print('\hline \n \\end{tabular}')
def makerow(run):
    """Evaluate each metric in `fields` against `run` and return the
    resulting values as a tuple (one table row).

    NOTE(review): `fields` and `metrics` are free variables resolved from an
    enclosing scope not visible in this chunk.
    """
    return tuple(metrics.evalmetric(field, run) for field in fields)