def test_lars_sensitivities(self):
    data = datasets['chirp_linear']

    # use LARS on binary problem
    clf = LARS()
    clf.train(data)

    # now ask for the sensitivities WITHOUT having to pass the dataset
    # again
    sens = clf.get_sensitivity_analyzer(force_train=False)(None)

    self.assertTrue(sens.shape == (1, data.nfeatures))
def test_lars_state(self):
    #data = datasets['dumb2']
    # for some reason the R code fails with the dumb data
    data = datasets['chirp_linear']

    clf = LARS()
    clf.train(data)

    clf.ca.enable('predictions')

    p = clf.predict(data.samples)

    self.assertTrue((p == clf.ca.predictions).all())
def test_lars(self):
    # not the perfect dataset with which to test, but
    # it will do for now.
    #data = datasets['dumb2']
    # for some reason the R code fails with the dumb data
    data = datasets['chirp_linear']

    clf = LARS()
    clf.train(data)

    # prediction has to be almost perfect
    # test with a correlation
    pre = clf.predict(data.samples)
    cor = pearsonr(pre, data.targets)
    if cfg.getboolean('tests', 'labile', default='yes'):
        self.assertTrue(cor[0] > .8)
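# A minimal standalone sketch of the workflow the tests above exercise:
# train LARS, predict, and pull per-feature sensitivities. It assumes the
# 'lars' external is available and that 'chirp_linear' is among the testing
# datasets, as in this test module; the function name is illustrative only.
def _example_lars_usage():
    from mvpa2.clfs.lars import LARS
    from mvpa2.testing.datasets import datasets

    data = datasets['chirp_linear']
    clf = LARS()
    clf.train(data)

    # predictions for the (already seen) training samples
    predictions = clf.predict(data.samples)

    # force_train=False reuses the fit above instead of retraining,
    # which is why no dataset needs to be passed to the analyzer call
    sens = clf.get_sensitivity_analyzer(force_train=False)(None)
    return predictions, sens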
    #sg.SVM(svm_impl=impl, kernel_type='RBF',
    #       descr='sg.RBFSVMR()/%s' % impl),
    ]

if len(clfswh['svm', 'linear']) > 0:
    # if any SVM implementation is known, import default ones
    from mvpa2.clfs.svm import *

# lars from R via RPy
if externals.exists('lars'):
    import mvpa2.clfs.lars as lars
    from mvpa2.clfs.lars import LARS

    for model in lars.known_models:
        # XXX create proper repository of classifiers!
        lars_clf = RegressionAsClassifier(
            LARS(descr="LARS(%s)" % model, model_type=model),
            descr='LARS(model_type=%r) classifier' % model)
        clfswh += lars_clf

        # is a regression, too
        lars_regr = LARS(descr="_LARS(%s)" % model, model_type=model)
        regrswh += lars_regr
        # clfswh += MulticlassClassifier(lars,
        #                                descr='Multiclass %s' % lars.descr)

## Still fails unittests battery although overhauled otherwise.
## # enet from R via RPy2
## if externals.exists('elasticnet'):
##     from mvpa2.clfs.enet import ENET
##     clfswh += RegressionAsClassifier(ENET(),
##                                      descr="RegressionAsClassifier(ENET())")
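# A minimal lookup sketch (an assumption, not part of the warehouse itself):
# the warehouse collections support tag-based indexing, as the
# clfswh['svm', 'linear'] query above shows, and the registered LARS
# instances are expected to carry a 'lars' tag. The function name is
# illustrative only.
def _example_lars_from_warehouse():
    # one ready-to-train LARS instance per registered model_type
    lars_regressions = regrswh['lars']
    return [regr.descr for regr in lars_regressions]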