def test_table():
    """latex.table() with no column names uses numeric default headers."""
    expected = (
        "\\begin{tabular}{lr}\n"
        "\\toprule\n"
        " 0 & 1 \\\\\n"
        "\\midrule\n"
        " foo & 1 \\\\\n"
        " bar & 2 \\\\\n"
        "\\bottomrule\n"
        "\\end{tabular}\n"
    )
    actual = latex.table((("foo", 1), ("bar", 2)))
    assert expected == actual
def test_table_columns():
    """latex.table() renders explicit column names in the header row."""
    expected = (
        "\\begin{tabular}{lr}\n"
        "\\toprule\n"
        "type & value \\\\\n"
        "\\midrule\n"
        " foo & 1 \\\\\n"
        " bar & 2 \\\\\n"
        "\\bottomrule\n"
        "\\end{tabular}\n"
    )
    actual = latex.table((("foo", 1), ("bar", 2)), columns=("type", "value"))
    assert expected == actual
def test_table(self):
    """latex.table() with no column names uses numeric default headers."""
    expected = (
        "\\begin{tabular}{lr}\n"
        "\\toprule\n"
        " 0 & 1 \\\\\n"
        "\\midrule\n"
        " foo & 1 \\\\\n"
        " bar & 2 \\\\\n"
        "\\bottomrule\n"
        "\\end{tabular}\n"
    )
    actual = latex.table((("foo", 1), ("bar", 2)))
    self._test(expected, actual)
def test_table_columns(self):
    """latex.table() renders explicit column names in the header row."""
    expected = (
        "\\begin{tabular}{lr}\n"
        "\\toprule\n"
        "type & value \\\\\n"
        "\\midrule\n"
        " foo & 1 \\\\\n"
        " bar & 2 \\\\\n"
        "\\bottomrule\n"
        "\\end{tabular}\n"
    )
    actual = latex.table((("foo", 1), ("bar", 2)), columns=("type", "value"))
    self._test(expected, actual)
def visualise_classification_job(db, job):
    """Generate plots and a LaTeX results table for one classification job.

    Creates the image directory tree under img/classification/<job>/,
    emits aggregate and per-classifier / per-err_fn plots, then writes an
    aggregated results table to <experiment.TAB_ROOT>/<job>.tex.

    Arguments:
        db: database handle; must expose execute() plus the
            classification_classifiers and err_fns properties.
        job: job identifier, used both in output paths and as the SQL
            filter value.
    """
    basedir = "img/classification/{}/".format(job)
    fs.mkdir(basedir)
    fs.mkdir(basedir + "classifiers")
    fs.mkdir(basedir + "err_fns")
    visualise.err_fn_performance(db, basedir + "err_fns.png", job=job)
    # Bar plot of all results.
    visualise.classification(db, "img/classification/{}.png".format(job), job=job)
    # Per-classifier plots.
    for i,classifier in enumerate(db.classification_classifiers):
        visualise.classifier_speedups(db, classifier, basedir + "classifiers/{}.png".format(i), job=job)
    # Per-err_fn plots.
    for err_fn in db.err_fns:
        visualise.err_fn_speedups(db, err_fn, basedir + "err_fns/{}.png".format(err_fn), job=job, sort=True)
    # Results table: one output row per (classifier, err_fn) pair.
    query = db.execute(
        "SELECT classifier,err_fn,Count(*) AS count\n"
        "FROM classification_results\n"
        "WHERE job=? GROUP BY classifier,err_fn", (job,)
    )
    results = []
    for classifier,err_fn,count in query:
        # Transpose per-run rows into per-metric columns via zip(*rows).
        # NOTE(review): raises if the inner query returns no rows — the
        # GROUP BY above should guarantee count >= 1 per pair.
        correct, illegal, refused, performance, speedup = zip(*[
            row for row in db.execute(
                "SELECT correct,illegal,refused,performance,speedup\n"
                "FROM classification_results\n"
                "WHERE job=? AND classifier=? AND err_fn=?",
                (job, classifier, err_fn)
            )
        ])
        # Percentages for accuracy/invalid/refused and performance bounds;
        # raw ratios for speedup bounds.
        results.append([
            classifier,
            err_fn,
            (sum(correct) / count) * 100,
            (sum(illegal) / count) * 100,
            (sum(refused) / count) * 100,
            min(performance) * 100,
            labmath.geomean(performance) * 100,
            max(performance) * 100,
            min(speedup),
            labmath.geomean(speedup),
            max(speedup)
        ])
    # Format all floats to two decimal places in the emitted table.
    str_args = { "float_format": lambda f: "{:.2f}".format(f) }
    # Replace fully-qualified classifier names with short display names.
    for i in range(len(results)):
        results[i][0] = ml.classifier_basename(results[i][0])
    columns=(
        "CLASSIFIER",
        "ERR_FN",
        "ACC %",
        "INV %",
        "REF %",
        "Omin %",
        "Oavg %",
        "Omax %",
        "Smin",
        "Savg",
        "Smax",
    )
    latex.table(results, output=fs.path(experiment.TAB_ROOT, job + ".tex"), columns=columns, **str_args)
def test_table_bad_rows():
    """latex.table() rejects input whose rows have unequal lengths."""
    ragged_rows = (("foo", 1), ("bar", 2), ("car",))
    with pytest.raises(latex.Error):
        latex.table(ragged_rows)
def test_table_bad_columns():
    """latex.table() rejects a columns tuple wider than the data rows."""
    too_many_names = ("type", "value", "too", "many", "values")
    with pytest.raises(latex.Error):
        latex.table((("foo", 1), ("bar", 2)), columns=too_many_names)
def test_table_bad_rows(self):
    """latex.table() rejects input whose rows have unequal lengths."""
    ragged_rows = (("foo", 1), ("bar", 2), ("car",))
    with self.assertRaises(latex.Error):
        latex.table(ragged_rows)
def test_table_bad_columns(self):
    """latex.table() rejects a columns tuple wider than the data rows."""
    too_many_names = ("type", "value", "too", "many", "values")
    with self.assertRaises(latex.Error):
        latex.table((("foo", 1), ("bar", 2)), columns=too_many_names)
def visualise_classification_job(db, job):
    """Generate plots and a LaTeX results table for one classification job.

    Creates the image directory tree under img/classification/<job>/,
    emits aggregate and per-classifier / per-err_fn plots, then writes an
    aggregated results table to <experiment.TAB_ROOT>/<job>.tex.

    Arguments:
        db: database handle; must expose execute() plus the
            classification_classifiers and err_fns properties.
        job: job identifier, used both in output paths and as the SQL
            filter value.
    """
    basedir = "img/classification/{}/".format(job)
    fs.mkdir(basedir)
    fs.mkdir(basedir + "classifiers")
    fs.mkdir(basedir + "err_fns")

    visualise.err_fn_performance(db, basedir + "err_fns.png", job=job)

    # Bar plot of all results.
    visualise.classification(db, "img/classification/{}.png".format(job),
                             job=job)

    # Per-classifier plots.
    for i, classifier in enumerate(db.classification_classifiers):
        visualise.classifier_speedups(db, classifier,
                                      basedir + "classifiers/{}.png".format(i),
                                      job=job)

    # Per-err_fn plots.
    for err_fn in db.err_fns:
        visualise.err_fn_speedups(db, err_fn,
                                  basedir + "err_fns/{}.png".format(err_fn),
                                  job=job, sort=True)

    # Results table: one output row per (classifier, err_fn) pair.
    query = db.execute(
        "SELECT classifier,err_fn,Count(*) AS count\n"
        "FROM classification_results\n"
        "WHERE job=? GROUP BY classifier,err_fn", (job, ))
    results = []
    for classifier, err_fn, count in query:
        # Transpose per-run rows into per-metric columns. FIX: pass the
        # cursor straight to zip(*...) — the original wrapped it in a
        # redundant [row for row in ...] list comprehension.
        correct, illegal, refused, performance, speedup = zip(*db.execute(
            "SELECT correct,illegal,refused,performance,speedup\n"
            "FROM classification_results\n"
            "WHERE job=? AND classifier=? AND err_fn=?",
            (job, classifier, err_fn)))
        # Percentages for accuracy/invalid/refused and performance bounds;
        # raw ratios for speedup bounds.
        results.append([
            classifier, err_fn,
            (sum(correct) / count) * 100,
            (sum(illegal) / count) * 100,
            (sum(refused) / count) * 100,
            min(performance) * 100,
            labmath.geomean(performance) * 100,
            max(performance) * 100,
            min(speedup),
            labmath.geomean(speedup),
            max(speedup)
        ])

    # Replace fully-qualified classifier names with short display names.
    # FIX: iterate rows directly instead of the index-based
    # "for i in range(len(results))" loop.
    for row in results:
        row[0] = ml.classifier_basename(row[0])

    # Format all floats to two decimal places in the emitted table.
    # FIX: the bound method replaces the pointless lambda wrapper.
    str_args = {"float_format": "{:.2f}".format}
    columns = (
        "CLASSIFIER", "ERR_FN",
        "ACC %", "INV %", "REF %",
        "Omin %", "Oavg %", "Omax %",
        "Smin", "Savg", "Smax",
    )
    latex.table(results, output=fs.path(experiment.TAB_ROOT, job + ".tex"),
                columns=columns, **str_args)