def test_ridge_reg_state(self):
    """The 'predictions' conditional attribute must mirror predict()'s output."""
    data = datasets['dumb']
    clf = RidgeReg()
    clf.train(data)
    # enable storage of predictions before calling predict()
    clf.ca.enable('predictions')
    predicted = clf.predict(data.samples)
    # every stored prediction must equal the returned one
    self.assertTrue((predicted == clf.ca.predictions).all())
def test_ridge_reg(self):
    # NOTE(review): a second method with this exact name is defined below in
    # this file; under unittest the later definition shadows this one, so this
    # copy never runs. The two bodies differ only in whitespace — one copy
    # should be removed.
    # not the perfect dataset with which to test, but
    # it will do for now.
    data = datasets['dumb']
    clf = RidgeReg()
    clf.train(data)
    # prediction has to be almost perfect
    # test with a correlation
    pre = clf.predict(data.samples)
    cor = pearsonr(pre,data.targets)
    self.assertTrue(cor[0] > .8)
def test_ridge_reg(self):
    """Ridge regression should almost perfectly reproduce the targets."""
    # not the perfect dataset with which to test, but it will do for now
    dumb = datasets['dumb']
    regressor = RidgeReg()
    regressor.train(dumb)
    # prediction has to be almost perfect; assess it via Pearson correlation
    predictions = regressor.predict(dumb.samples)
    correlation = pearsonr(predictions, dumb.targets)
    self.assertTrue(correlation[0] > .8)
# Tag every sample of this subject's dataset so cross-validation can
# partition by subject, then collect it for stacking.
ds.sa['subject'] = np.repeat(index, len(ds))
#ds.fa['parcel'] = msk_data
ds_all.append(ds)
verbose(2, "subject %i of %i loaded" % (index, nsubs))
fds = vstack(ds_all)  # stack per-subject datasets into one

# Classifier selection.
# BUG FIX: the original compared strings with `is`, which tests object
# identity and only works by accident of CPython small-string interning
# (it raises SyntaxWarning on Python >= 3.8). Use `==` for value equality.
if clf_type == 'SVM':
    clf = LinearCSVMC(tube_epsilon=0.01)
elif clf_type == 'SVM-rbf':
    clf = RbfCSVMC(tube_epsilon=0.01)
elif clf_type == 'ridgeReg':
    clf = RidgeReg()
elif clf_type == 'gpr':
    clf = GPR()

# #feature selection
# fsel = SensitivityBasedFeatureSelection(
#     OneWayAnova(),
#     FractionTailSelector(0.05, mode='select', tail='upper'))
# fclf = FeatureSelectionClassifier(clf, fsel)

# Cross-validation setup: random half-split partitioning across subjects.
if cv_type == 'split_half':
    cv = CrossValidation(clf,
                         HalfPartitioner(count=2,
                                         selection_strategy='random',
                                         attr='subject'),
                         errorfx=mean_match_accuracy)