def evaluateCvOuter(self, X, y, folds):
    """
    Compute the average AUC using stratified k-fold cross validation, with
    the linear or RBF kernel selected by self.kernel.

    :param X: The feature matrix.
    :param y: The label vector used for stratification.
    :param folds: The number of cross validation folds (at least 2).

    :return: A tuple (bestParams, allMetrics, bestMetaDicts) where allMetrics
        is [trainAUCs, trainROCs, testAUCs, testROCs] and the two dicts are
        empty placeholders kept for interface compatibility.
    :raise ValueError: If self.kernel is neither "linear" nor "rbf".
    """
    Parameter.checkInt(folds, 2, float('inf'))
    idx = cross_val.StratifiedKFold(y, folds)
    metricMethods = [Evaluator.auc2, Evaluator.roc]

    if self.kernel == "linear":
        logging.debug("Running linear rank SVM ")
        trainMetrics, testMetrics = AbstractPredictor.evaluateLearn2(X, y, idx, self.modelSelectLinear, self.predict, metricMethods)
    elif self.kernel == "rbf":
        logging.debug("Running RBF rank SVM")
        trainMetrics, testMetrics = AbstractPredictor.evaluateLearn2(X, y, idx, self.modelSelectRBF, self.predict, metricMethods)
    else:
        # Previously an unrecognised kernel fell through both branches and
        # raised a confusing NameError on trainMetrics; fail fast instead.
        raise ValueError("Invalid kernel: " + str(self.kernel))

    bestTrainAUCs = trainMetrics[0]
    bestTrainROCs = trainMetrics[1]
    bestTestAUCs = testMetrics[0]
    bestTestROCs = testMetrics[1]

    bestParams = {}
    bestMetaDicts = {}
    allMetrics = [bestTrainAUCs, bestTrainROCs, bestTestAUCs, bestTestROCs]

    return (bestParams, allMetrics, bestMetaDicts)
def evaluateCvOuter(self, X, y, folds):
    """
    Compute the average AUC using stratified k-fold cross validation, with
    the linear or RBF kernel selected by self.kernel.

    :param X: The feature matrix.
    :param y: The label vector used for stratification.
    :param folds: The number of cross validation folds (at least 2).

    :return: A tuple (bestParams, allMetrics, bestMetaDicts) where allMetrics
        is [trainAUCs, trainROCs, testAUCs, testROCs] and the two dicts are
        empty placeholders kept for interface compatibility.
    :raise ValueError: If self.kernel is neither "linear" nor "rbf".
    """
    Parameter.checkInt(folds, 2, float('inf'))
    idx = cross_val.StratifiedKFold(y, folds)
    metricMethods = [Evaluator.auc2, Evaluator.roc]

    if self.kernel == "linear":
        logging.debug("Running linear rank SVM ")
        trainMetrics, testMetrics = AbstractPredictor.evaluateLearn2(X, y, idx, self.modelSelectLinear, self.predict, metricMethods)
    elif self.kernel == "rbf":
        logging.debug("Running RBF rank SVM")
        trainMetrics, testMetrics = AbstractPredictor.evaluateLearn2(X, y, idx, self.modelSelectRBF, self.predict, metricMethods)
    else:
        # Previously an unrecognised kernel fell through both branches and
        # raised a confusing NameError on trainMetrics; fail fast instead.
        raise ValueError("Invalid kernel: " + str(self.kernel))

    bestTrainAUCs = trainMetrics[0]
    bestTrainROCs = trainMetrics[1]
    bestTestAUCs = testMetrics[0]
    bestTestROCs = testMetrics[1]

    bestParams = {}
    bestMetaDicts = {}
    allMetrics = [bestTrainAUCs, bestTrainROCs, bestTestAUCs, bestTestROCs]

    return (bestParams, allMetrics, bestMetaDicts)
def evaluateCvOuter(self, X, y, folds):
    """
    Compute the average AUC over the folds of a stratified k-fold cross
    validation, running model selection on each training split.

    :param X: The feature matrix.
    :param y: The label vector used for stratification.
    :param folds: The number of cross validation folds (at least 2).

    :return: A tuple (bestParams, allMetrics, bestMetaDicts) where allMetrics
        is [trainAUCs, trainROCs, testAUCs, testROCs] and the two dicts are
        empty placeholders kept for interface compatibility.
    """
    Parameter.checkInt(folds, 2, float('inf'))
    idx = StratifiedKFold(y, folds)
    metricMethods = [Evaluator.auc2, Evaluator.roc]
    trainMetrics, testMetrics = AbstractPredictor.evaluateLearn2(X, y, idx, self.modelSelect, self.predict, metricMethods)

    # Unpack the per-fold AUC values and ROC curves for both splits.
    bestTrainAUCs, bestTrainROCs = trainMetrics[0], trainMetrics[1]
    bestTestAUCs, bestTestROCs = testMetrics[0], testMetrics[1]
    allMetrics = [bestTrainAUCs, bestTrainROCs, bestTestAUCs, bestTestROCs]

    # No parameters or metadata are collected here; return empty dicts to
    # keep the (params, metrics, metaDicts) return shape consistent.
    bestParams = {}
    bestMetaDicts = {}
    return (bestParams, allMetrics, bestMetaDicts)