def compute(self, X, y):
    [D, self.W, self.mu] = fisherfaces(asRowMatrix(X), y, self.num_components)
    # store labels
    self.y = y
    # store projections
    for xi in X:
        self.projections.append(project(self.W, xi.reshape(1, -1), self.mu))
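The compute() methods in these snippets only fit the model; the matching prediction step is not shown. Below is a minimal sketch of how such a model is typically queried, assuming the same project() helper, the stored self.projections and self.y, and a Euclidean nearest-neighbour rule (none of which are taken from the snippet itself):

def predict(self, X):
    # project the query image into the learned subspace
    q = project(self.W, X.reshape(1, -1), self.mu)
    # return the label of the nearest stored projection (Euclidean distance)
    minDist = np.finfo('float').max
    minClass = -1
    for i in range(len(self.projections)):
        dist = np.linalg.norm(self.projections[i] - q)
        if dist < minDist:
            minDist = dist
            minClass = self.y[i]
    return minClass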
def compute(self, X, y):
    self.logger.debug("SVM TRAINING (C=%.2f,gamma=%.2f,p=%.2f,nu=%.2f,coef=%.2f,degree=%.2f)" % (
        self.param.C, self.param.gamma, self.param.p, self.param.nu, self.param.coef0, self.param.degree))
    # turn the data into a row matrix (needed for libsvm)
    X = asRowMatrix(X)
    y = np.asarray(y)
    problem = svm_problem(y, X.tolist())
    self.svm = svm_train(problem, self.param)
    self.y = y
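For completeness, a hedged sketch of how the trained libsvm model above might be queried. It assumes svm_predict from libsvm's Python interface (svmutil) and a single query feature vector; the reshape mirrors the compute() step:

def predict(self, X):
    # flatten the query sample to one row; libsvm expects a list of feature lists
    x = np.asarray(X).reshape(1, -1).tolist()
    # svm_predict takes (labels, samples, model); a dummy label is passed for the query
    p_labels, p_acc, p_vals = svm_predict([0.0], x, self.svm)
    return int(p_labels[0])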
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.svm import SVC
from sklearn.metrics import classification_report

def grid_search(model, X, y, tuned_parameters):
    # Check if the classifier in the model is actually an SVM:
    if not isinstance(model.classifier, SVM):
        raise TypeError("classifier must be of type SVM!")
    # First compute the features for this SVM-based model:
    features = model.feature.compute(X, y)
    # Turn the list of features into a matrix with each feature as a row:
    Xrow = asRowMatrix(features)
    # Split the dataset into two equal parts:
    X_train, X_test, y_train, y_test = train_test_split(Xrow, y, test_size=0.5, random_state=0)
    # Scores to tune the hyper-parameters for:
    scores = ['precision', 'recall']
    # Evaluate the model:
    for score in scores:
        print("# Tuning hyper-parameters for %s" % score)
        print()
        clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5, scoring='%s_macro' % score)
        clf.fit(X_train, y_train)
        print("Best parameters set found on development set:")
        print()
        print(clf.best_params_)
        print()
        print("Grid scores on development set:")
        print()
        means = clf.cv_results_['mean_test_score']
        stds = clf.cv_results_['std_test_score']
        for mean, std, params in zip(means, stds, clf.cv_results_['params']):
            print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params))
        print()
        print("Detailed classification report:")
        print()
        print("The model is trained on the full development set.")
        print("The scores are computed on the full evaluation set.")
        print()
        y_true, y_pred = y_test, clf.predict(X_test)
        print(classification_report(y_true, y_pred))
        print()
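A short usage sketch for grid_search(). The parameter grid follows the common scikit-learn example; model is assumed to be a feature/classifier wrapper as implied by the isinstance check above, and X, y are the raw images and labels:

# hypothetical parameter grid; adjust the ranges to your data
tuned_parameters = [
    {'kernel': ['rbf'], 'gamma': [1e-3, 1e-4], 'C': [1, 10, 100, 1000]},
    {'kernel': ['linear'], 'C': [1, 10, 100, 1000]},
]
grid_search(model, X, y, tuned_parameters)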
def compute(self, X, y):
    if not self.W and not self.mu:
        # compute the eigenvalues, eigenvectors and mean
        [D, self.W, self.mu] = fisherfaces(asRowMatrix(X), y, self.num_components)
        print("Eigenvalues, eigenvectors and mean computed...")
    # store labels (the classes to be recognized)
    self.y = y
    self.X = X
    # store projections
    for xi in X:
        # preprocessing: take the dot product of each image with the eigenvectors
        self.projections.append(project(self.W, xi.reshape(1, -1), self.mu))
    print("Preprocessing done...")
def compute(self, X, y):
    [D, self.W, self.mu] = pca(asRowMatrix(X), y, self.num_components)
    # store labels
    self.y = y
    # store projections
    for xi in X:
        self.projections.append(project(self.W, xi.reshape(1, -1), self.mu))
import sys
import numpy as np
from subspace import pca
from util import normalize, asRowMatrix, read_images
from visual import subplot
import matplotlib.cm as cm

[X, y] = read_images('./faces')
print(np.asarray(X).shape)
print(asRowMatrix(X).shape)

# [D, W, mu] = pca(asRowMatrix(X), y)
# E = []
# for i in xrange(min(len(X), 16)):
#     e = W[:, i].reshape(X[0].shape)
#     E.append(normalize(e, 0, 255))
# subplot(title='Eigenface', images=E, rows=4, cols=4, sptitle='Eigenface',
#         colormap=cm.jet, filename='python_pca_eigenfaces.png')
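Every snippet here relies on asRowMatrix() and project(). A minimal sketch of what those helpers are assumed to do follows; the actual util/subspace implementations may differ:

import numpy as np

def asRowMatrix(X):
    # stack each image as one flattened row -> shape (num_samples, num_pixels)
    if len(X) == 0:
        return np.array([])
    mat = np.empty((0, X[0].size), dtype=np.float64)
    for xi in X:
        mat = np.vstack((mat, np.asarray(xi).reshape(1, -1)))
    return mat

def project(W, X, mu=None):
    # project the (row) sample X onto the subspace spanned by the columns of W
    if mu is None:
        return np.dot(X, W)
    return np.dot(X - mu, W)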
def train(self, X, y):
    [D, self.W, self.mu] = PCA(asRowMatrix(X), y)
    # store labels
    self.y = y
    # store projections of the training images
    for xi in X:
        self.projections.append(project(self.W, xi.reshape(1, -1), self.mu))
def compute(self, X, y):
    X = asRowMatrix(X)
    y = np.asarray(y)
    self.svm.fit(X, y)
    self.y = y
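And a matching hedged prediction sketch for this last variant, assuming self.svm is a scikit-learn SVC-style estimator fitted by the compute() above and X is a single query feature vector:

def predict(self, X):
    # reshape the query to the (1, n_features) shape scikit-learn expects
    X = np.asarray(X).reshape(1, -1)
    return self.svm.predict(X)[0]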