def test_smlr_sensitivities(self):
    """Sensitivities of a binary SMLR (without all weights fit) have the expected shape."""
    dataset = normal_feature_dataset(perlabel=10, nlabels=2, nfeatures=4)
    # Binary problem, but deliberately skip fitting the redundant weight set
    classifier = SMLR(fit_all_weights=False)
    classifier.train(dataset)
    # Sensitivities must be obtainable WITHOUT passing the dataset again
    sensitivities = classifier.get_sensitivity_analyzer(force_train=False)(None)
    expected_shape = (len(dataset.UT) - 1, dataset.nfeatures)
    self.assertTrue(sensitivities.shape == expected_shape)
def test_smlr_state(self):
    """Enabled SMLR conditional attributes should mirror predict() output."""
    dataset = datasets["dumb"]
    classifier = SMLR()
    classifier.train(dataset)
    classifier.ca.enable("estimates")
    classifier.ca.enable("predictions")
    predicted = np.asarray(classifier.predict(dataset.samples))
    # Stored predictions must equal what predict() returned
    self.assertTrue((predicted == classifier.ca.predictions).all())
    # One row of estimates per prediction
    self.assertTrue(
        np.array(classifier.ca.estimates).shape[0] == np.array(predicted).shape[0])
def test_smlr_state():
    """Conditional attributes of a trained SMLR track predict() output."""
    ds = datasets['dumb']
    classifier = SMLR()
    classifier.train(ds)
    classifier.ca.enable('estimates')
    classifier.ca.enable('predictions')
    predicted = np.asarray(classifier.predict(ds.samples))
    # Recorded predictions must match the returned ones exactly
    assert_array_equal(predicted, classifier.ca.predictions)
    # As many estimate rows as there are predictions
    assert_equal(np.array(classifier.ca.estimates).shape[0],
                 np.array(predicted).shape[0])
def test_smlr_state(self):
    """SMLR conditional attributes should mirror predict() output.

    Fixed to use ``assertTrue`` instead of the long-deprecated
    ``failUnless`` alias, which was removed in Python 3.12.
    """
    data = datasets['dumb']
    clf = SMLR()
    clf.train(data)
    clf.ca.enable('estimates')
    clf.ca.enable('predictions')
    p = np.asarray(clf.predict(data.samples))
    # Stored predictions must equal the values predict() returned
    self.assertTrue((p == clf.ca.predictions).all())
    # One row of estimates per prediction
    self.assertTrue(np.array(clf.ca.estimates).shape[0] == np.array(p).shape[0])
def test_smlr_state(self):
    """Verify the state recorded by SMLR's conditional attributes is consistent."""
    ds = datasets['dumb']
    learner = SMLR()
    learner.train(ds)
    # Enable both attributes in the same order as before
    for attribute in ('estimates', 'predictions'):
        learner.ca.enable(attribute)
    result = np.asarray(learner.predict(ds.samples))
    self.assertTrue((result == learner.ca.predictions).all())
    n_estimates = np.array(learner.ca.estimates).shape[0]
    self.assertTrue(n_estimates == np.array(result).shape[0])
def train_readout_mnlogit(stimset, samples):
    """Train an SMLR readout on *stimset*/*samples* and score it on validation data.

    Parameters
    ----------
    stimset, samples
        Passed through to ``to_mvpa_dataset`` to produce train/validation splits.

    Returns
    -------
    float
        Fraction of validation samples predicted correctly (a value in
        [0, 1] — NOTE(review): the 'percent' naming suggests 0-100, but
        the computation has always returned a fraction).
    """
    ds_train, ds_valid = to_mvpa_dataset(stimset, samples)
    clf = SMLR()
    clf.train(ds_train)
    preds = clf.predict(ds_valid)
    actual = ds_valid.sa['targets']
    # Element-wise correctness flags; replaces the `zeq == True` comparison
    # anti-pattern and manual `.nonzero()[0]` counting of the original.
    correct = np.array([a == p for a, p in zip(actual, preds)])
    # count_nonzero counts True values directly; float() keeps true division
    # even under Python 2 semantics, matching the original result type.
    return np.count_nonzero(correct) / float(len(preds))
def test_smlr(self):
    """Train SMLR on the trivial 'dumb' dataset and expect perfect predictions."""
    ds = datasets["dumb"]
    classifier = SMLR()
    classifier.train(ds)
    # Prediction has to be perfect here.
    # XXX yoh: who said that?? ;-) There is always a tradeoff between
    # learning and generalization errors, but in this case the problem is
    # more interesting: an absent bias disallows learning this data --
    # there is no solution which would pass through (0,0).
    predicted = classifier.predict(ds.samples)
    self.assertTrue((predicted == ds.targets).all())
def test_smlr(self):
    """SMLR must reproduce the targets exactly on the simple 'dumb' dataset."""
    data = datasets['dumb']
    smlr = SMLR()
    smlr.train(data)
    # "Prediction has to be perfect" -- questioned in review (XXX yoh):
    # learning vs. generalization always trades off; more to the point,
    # with no bias term there is no solution passing through (0,0).
    result = smlr.predict(data.samples)
    self.assertTrue((result == data.targets).all())