import math


def _eval_ds(self, ds, pgrades):
    # Score the resolved predictions against the ground-truth grades.
    kappa = KappaScore(ds.getGrades(), pgrades)
    print("Kappa Score %f" % kappa.quadratic_weighted_kappa())

    # Pick the feature matrix that matches this dataset split.
    if ds.isTrainSet():
        ds_str = "train"
        feat_mat = self.train_feat_mat
    else:
        ds_str = "test"
        feat_mat = self.test_feat_mat

    # Raw (unresolved) model predictions for every essay in the split.
    real_pgrades = [self.model.predict(x) for x in feat_mat]

    lines = ds.getRawText()
    out_path = "output/diffs.set%d.domain%d.%s" % (ds.getEssaySet(), ds.getDomain(), ds_str)
    with open(out_path, "w") as f:
        f.write("#real_diff\tresolved_diff\tgt_grade\tpred_score\tpred_grade\tessay\n")
        for i, grade in enumerate(ds.getGrades()):
            pgrade = pgrades[i]
            real_pgrade = real_pgrades[i]
            f.write("%f\t%d\t%d\t%f\t%d\t%s\n" % (
                math.fabs(real_pgrade - float(grade)),  # raw prediction error
                math.fabs(pgrade - grade),              # resolved prediction error
                grade, real_pgrade, pgrade, lines[i],
            ))
    return kappa
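# A minimal sketch of the quadratic weighted kappa used above, assuming the
# standard definition: kappa = 1 - sum(w * O) / sum(w * E), where O is the
# observed confusion matrix, E the expected matrix from the two raters'
# rating histograms, and w[i][j] = (i - j)^2 / (N - 1)^2. This is an
# illustration, not necessarily KappaScore's exact implementation.
def quadratic_weighted_kappa_sketch(rater_a, rater_b, min_rating, max_rating):
    num_ratings = max_rating - min_rating + 1  # assumes num_ratings > 1
    num_items = float(len(rater_a))
    # Observed agreement matrix and per-rater rating histograms.
    conf_mat = [[0] * num_ratings for _ in range(num_ratings)]
    hist_a = [0] * num_ratings
    hist_b = [0] * num_ratings
    for a, b in zip(rater_a, rater_b):
        conf_mat[a - min_rating][b - min_rating] += 1
        hist_a[a - min_rating] += 1
        hist_b[b - min_rating] += 1
    numerator = 0.0
    denominator = 0.0
    for i in range(num_ratings):
        for j in range(num_ratings):
            weight = (i - j) ** 2 / float((num_ratings - 1) ** 2)
            # Expected count under rater independence, scaled so sum(E) == sum(O).
            expected = hist_a[i] * hist_b[j] / num_items
            numerator += weight * conf_mat[i][j]
            denominator += weight * expected
    return 1.0 - numerator / denominator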
def test_confusion_matrix(self):
    # Rating range inferred from the data: ratings 1..2 give a 2x2 matrix.
    score = KappaScore([1, 2], [1, 2])
    conf_mat = score.confusion_matrix()
    self.assertEqual(conf_mat, [[1, 0], [0, 1]])

    # Explicit range 0..2 widens the matrix to 3x3.
    score = KappaScore([1, 2], [1, 2], 0, 2)
    conf_mat = score.confusion_matrix()
    self.assertEqual(conf_mat, [[0, 0, 0], [0, 1, 0], [0, 0, 1]])

    # Disagreements land off the diagonal: (2,3) twice and (4,5) once.
    score = KappaScore([1, 1, 2, 2, 4], [1, 1, 3, 3, 5])
    conf_mat = score.confusion_matrix()
    self.assertEqual(conf_mat, [[2, 0, 0, 0, 0],
                                [0, 0, 2, 0, 0],
                                [0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 1],
                                [0, 0, 0, 0, 0]])

    # Explicit range 1..4 gives a 4x4 matrix even though only 1 and 2 occur.
    score = KappaScore([1, 2], [1, 2], 1, 4)
    conf_mat = score.confusion_matrix()
    self.assertEqual(conf_mat, [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])
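# A minimal KappaScore sketch consistent with the assertions above (an
# illustration, not the module's actual code): min_rating/max_rating default
# to the observed extremes across both raters, and confusion_matrix() counts
# each (rater_a, rater_b) pair into a square matrix over the rating range.
class KappaScoreSketch(object):
    def __init__(self, rater_a, rater_b, min_rating=None, max_rating=None):
        self.rater_a = rater_a
        self.rater_b = rater_b
        # "is None" checks matter here: min_rating=0 is a valid explicit bound.
        self.min_rating = min(rater_a + rater_b) if min_rating is None else min_rating
        self.max_rating = max(rater_a + rater_b) if max_rating is None else max_rating

    def confusion_matrix(self):
        num_ratings = self.max_rating - self.min_rating + 1
        conf_mat = [[0] * num_ratings for _ in range(num_ratings)]
        for a, b in zip(self.rater_a, self.rater_b):
            conf_mat[a - self.min_rating][b - self.min_rating] += 1
        return conf_mat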