def eval_dir(path, markdown=False, dprefix=False, evalmethod=None, numlabel=False):
    """Evaluate the '.res' files in a directory and print a score table.

    Files are expected to be named '<xx>_<name>.res', where the two-character
    prefix selects a column and <name> selects a row.

    Arguments:
        path -- path to the directory containing evaluation results
        markdown -- render the table as markdown instead of undecorated text
        dprefix -- derive column prefixes from the first two characters of the
                   filenames instead of using the module-level PREFICES
        evalmethod -- name of a module-level scoring function applied to the
                     rank array of each file; defaults to mrr
        numlabel -- reduce each row label to only the numbers it contains
    """
    # Resolve the scoring function by name; MRR is the default metric.
    if evalmethod is None:
        ef = mrr
    else:
        ef = globals()[evalmethod]

    files = sorted(os.listdir(path))
    # Row names are the filename stems with the 3-char '<xx>_' prefix removed,
    # ordered by (length, lexicographic) so e.g. 'n2' sorts before 'n10'.
    names = sorted({n.rsplit('.', 1)[0][3:] for n in files if n.endswith('.res')},
                   key=lambda item: (len(item), item))
    if dprefix:
        prefices = sorted({n[:2] for n in files})
    else:
        prefices = PREFICES

    table = Texttable(max_width=0)
    if markdown:
        table.set_asmarkdown()
    else:
        # Plain-text mode: strip all table decoration.
        table.set_deco(0)
    # First column is the text label; one float column per prefix.
    table.set_cols_dtype(['t'] + ['f'] * len(prefices))
    table.set_cols_align(['l'] + ['r'] * len(prefices))
    table.set_precision(4)
    table.add_rows([[''] + prefices])

    for n in names:
        scores = list()
        for prefix in prefices:
            try:
                eva = NP.array(list(iterrank(
                    os.path.join(path, '%s_%s.res' % (prefix, n)))),
                    dtype=NP.float64)
                scores.append(ef(eva))
            except IOError:
                # This prefix/name combination has no result file.
                scores.append('N/A')
        if numlabel:
            # Keep only the numeric fragments of the row label.
            n = ' '.join([m.group() for m in NUMBER.finditer(n)])
        table.add_row([n] + scores)
    print(table.draw())