def run_LDA(X, y, num_components):
    """Fit a linear discriminant analysis projection on (X, y).

    Returns a 2-tuple of the fitted LDA model (with *num_components*
    components) and the projected data array.
    """
    model = sklearn.lda.LDA(n_components=num_components)
    projected = model.fit(X, y).transform(X)
    return model, projected
def _calculate(self, X, y, categorical):
    """Mean accuracy of an LDA classifier under 10-fold cross-validation.

    Single-label targets (1-D y, or 2-D with one column) use
    StratifiedKFold; multilabel targets fall back to plain KFold and a
    one-vs-rest wrapper around LDA.

    Returns NaN (not 0) when LDA cannot be fit (singular covariance or
    otherwise invalid input).
    """
    import sklearn.lda
    if len(y.shape) == 1 or y.shape[1] == 1:
        kf = sklearn.cross_validation.StratifiedKFold(y, n_folds=10)
    else:
        # stratification is undefined for multilabel targets
        kf = sklearn.cross_validation.KFold(y.shape[0], n_folds=10)
    accuracy = 0.
    try:
        for train, test in kf:
            lda = sklearn.lda.LDA()
            if len(y.shape) == 1 or y.shape[1] == 1:
                lda.fit(X[train], y[train])
            else:
                # multilabel: one binary LDA per label
                lda = OneVsRestClassifier(lda)
                lda.fit(X[train], y[train])
            predictions = lda.predict(X[test])
            accuracy += sklearn.metrics.accuracy_score(predictions, y[test])
        return accuracy / 10
    except scipy.linalg.LinAlgError as e:
        # Fixed message: this branch returns NaN, not 0.
        self.logger.warning("LDA failed: %s Returned NaN instead!" % e)
        # np.nan, not np.NaN: the NaN alias was removed in NumPy 2.0.
        return np.nan
    except ValueError as e:
        self.logger.warning("LDA failed: %s Returned NaN instead!" % e)
        return np.nan
def _calculate(self, X, y, categorical):
    """Mean accuracy of an LDA classifier under 10-fold cross-validation.

    Single-label targets (1-D y, or 2-D with one column) use
    StratifiedKFold; multilabel targets fall back to plain KFold and a
    one-vs-rest wrapper around LDA.

    Returns NaN (not 0) when LDA cannot be fit (singular covariance or
    otherwise invalid input).
    """
    import sklearn.lda
    if len(y.shape) == 1 or y.shape[1] == 1:
        kf = sklearn.cross_validation.StratifiedKFold(y, n_folds=10)
    else:
        # stratification is undefined for multilabel targets
        kf = sklearn.cross_validation.KFold(y.shape[0], n_folds=10)
    accuracy = 0.
    try:
        for train, test in kf:
            lda = sklearn.lda.LDA()
            if len(y.shape) == 1 or y.shape[1] == 1:
                lda.fit(X[train], y[train])
            else:
                # multilabel: one binary LDA per label
                lda = OneVsRestClassifier(lda)
                lda.fit(X[train], y[train])
            predictions = lda.predict(X[test])
            accuracy += sklearn.metrics.accuracy_score(
                predictions, y[test])
        return accuracy / 10
    except scipy.linalg.LinAlgError as e:
        # Fixed message: this branch returns NaN, not 0.
        self.logger.warning("LDA failed: %s Returned NaN instead!" % e)
        # np.nan, not np.NaN: the NaN alias was removed in NumPy 2.0.
        return np.nan
    except ValueError as e:
        self.logger.warning("LDA failed: %s Returned NaN instead!" % e)
        return np.nan
def _calculate(self, X, y, categorical):
    """Mean accuracy of a 1-nearest-neighbor classifier under
    10-fold stratified cross-validation on (X, y).
    """
    import sklearn.neighbors
    folds = sklearn.cross_validation.StratifiedKFold(y, n_folds=10)
    score_sum = 0.
    for train_idx, test_idx in folds:
        knn = sklearn.neighbors.KNeighborsClassifier(1)
        knn.fit(X[train_idx], y[train_idx])
        preds = knn.predict(X[test_idx])
        score_sum += sklearn.metrics.accuracy_score(preds, y[test_idx])
    return score_sum / 10
def _calculate(self, X, y, categorical):
    """Mean accuracy of an LDA classifier under 10-fold stratified
    cross-validation; returns 0 if LDA cannot be fit.
    """
    import sklearn.lda
    folds = sklearn.cross_validation.StratifiedKFold(y, n_folds=10)
    score_sum = 0.
    try:
        for train_idx, test_idx in folds:
            clf = sklearn.lda.LDA()
            clf.fit(X[train_idx], y[train_idx])
            preds = clf.predict(X[test_idx])
            score_sum += sklearn.metrics.accuracy_score(preds, y[test_idx])
    except scipy.linalg.LinAlgError as e:
        # degenerate covariance -> best-effort fallback score
        logging.warning("LDA failed: %s Returned 0 instead!" % e)
        return 0
    except ValueError as e:
        logging.warning("LDA failed: %s Returned 0 instead!" % e)
        return 0
    return score_sum / 10
def run_LDA(X, y, num_components):  # 3
    """Fit an LDA projection with *num_components* components and
    project X onto it.

    (Translated from the original Korean comment:) LDA reduces the
    dimensionality better than PCA here because it exploits the class
    labels, whereas PCA is label-agnostic.

    Returns the fitted model and the projected array.
    """
    X = np.array(X)
    y = np.array(y)
    model = sklearn.lda.LDA(n_components=num_components)
    projected = model.fit(X, y).transform(X)
    return model, projected
def run_LDA(X, y, num_components):  # 2
    """Fit an LDA model with *num_components* components on (X, y).

    Returns the fitted model together with X transformed into the
    discriminant space.
    """
    model = sklearn.lda.LDA(n_components=num_components)
    reduced = model.fit(X, y).transform(X)
    return model, reduced
def run_LDA(X, y, num_components):  # 3
    """Fit an LDA projection and transform X.

    (Translated from the original Korean comments:) *num_components*
    sets the target dimensionality; the fit uses both the data and the
    class labels.
    """
    model = sklearn.lda.LDA(n_components=num_components)
    reduced = model.fit(X, y).transform(X)
    return model, reduced