def test_glmnet_r_sensitivities():
    """Sensitivities must be retrievable without re-supplying the dataset."""
    ds = datasets['chirp_linear']
    regr = GLMNET_R()
    regr.train(ds)
    # Ask the trained regression for its sensitivities with force_train
    # disabled -- no dataset is passed in again
    weights = regr.get_sensitivity_analyzer(force_train=False)(None)
    assert_equal(weights.shape, (1, ds.nfeatures))
def test_glmnet_state():
    """The enabled 'predictions' conditional attribute must mirror predict()."""
    #data = datasets['dumb2']
    # for some reason the R code fails with the dumb data, so use chirp_linear
    ds = datasets['chirp_linear']
    regr = GLMNET_R()
    regr.train(ds)
    regr.ca.enable('predictions')
    predicted = regr.predict(ds.samples)
    # What predict() returned and what was stored in the conditional
    # attribute must agree elementwise
    assert_array_equal(predicted, regr.ca.predictions)
def test_glmnet_r():
    """GLMNET regression should fit the chirp_linear dataset nearly perfectly."""
    # Not the perfect dataset with which to test, but it will do for now.
    #data = datasets['dumb2']
    # for some reason the R code fails with the dumb data
    ds = datasets['chirp_linear']
    regr = GLMNET_R()
    regr.train(ds)
    # Prediction has to be almost perfect; judge it via a
    # correlation-based error measure
    predicted = regr.predict(ds.samples)
    err = corr_error(predicted, ds.targets)
    if cfg.getboolean('tests', 'labile', default='yes'):
        assert_true(err < .2)
# clfswh += MulticlassClassifier(lars, # descr='Multiclass %s' % lars.descr) ## Still fails unittests battery although overhauled otherwise. ## # enet from R via RPy2 ## if externals.exists('elasticnet'): ## from mvpa2.clfs.enet import ENET ## clfswh += RegressionAsClassifier(ENET(), ## descr="RegressionAsClassifier(ENET())") ## regrswh += ENET(descr="ENET()") # glmnet from R via RPy if externals.exists('glmnet'): from mvpa2.clfs.glmnet import GLMNET_C, GLMNET_R clfswh += GLMNET_C(descr="GLMNET_C()") regrswh += GLMNET_R(descr="GLMNET_R()") # LDA/QDA clfswh += LDA(descr='LDA()') clfswh += QDA(descr='QDA()') if externals.exists('skl'): _skl_version = externals.versions['skl'] _skl_api09 = _skl_version >= '0.9' def _skl_import(submod, class_): if _skl_api09: submod_ = __import__('sklearn.%s' % submod, fromlist=[submod]) else: submod_ = __import__('scikits.learn.%s' % submod, fromlist=[submod])