def test_plr(self):
    data = datasets['dumb2']
    clf = PLR()
    clf.train(data)

    # prediction has to be perfect
    self.assertTrue((clf.predict(data.samples) == data.targets).all())
def test_plr_state(self):
    data = datasets['dumb2']
    clf = PLR()
    clf.train(data)

    # Also get "sensitivity".  Was introduced to check a bug with
    # processing a dataset with numeric labels
    sa = clf.get_sensitivity_analyzer()
    sens = sa(data)

    clf.ca.enable('estimates')
    clf.ca.enable('predictions')

    p = clf.predict(data.samples)

    self.assertTrue((p == clf.ca.predictions).all())
    self.assertTrue(np.array(clf.ca.estimates).shape == np.array(p).shape)
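# Note: the tests above rely on module-level imports from the surrounding
# PyMVPA test file.  A minimal sketch of what they assume (the exact module
# paths are assumptions based on mvpa2's usual layout, not taken from this
# snippet):
#
#     import numpy as np
#     from mvpa2.clfs.plr import PLR
#     from mvpa2.testing.datasets import datasets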