def ReduceDimensionality(trainData, testData):
    """Project train/test data into a 50-component PCA space fitted on trainData.

    Returns (projectTraining, projectTest), the row-wise PCA projections of
    the two input matrices.
    """
    # One PCACompute call suffices: the original first called
    # cv2.PCACompute(trainData) without the required `mean` argument (which
    # the Python binding rejects) and then recomputed the same basis again.
    # Keeping ~50 eigenvectors is about the most optimal classification for
    # the ocr10 dataset.
    mean, eigenvecs = cv2.PCACompute(trainData, mean=None, maxComponents=50)
    projectTraining = cv2.PCAProject(trainData, mean, eigenvecs)
    projectTest = cv2.PCAProject(testData, mean, eigenvecs)
    return projectTraining, projectTest
def performPCA(images):
    """Build an eigenface PCA model from a list of grayscale images.

    Returns (mean, eigenVectors, coefficients, mean_coeffs,
    covariance_coeffs). Relies on the module-level `args` namespace for the
    number of eigenfaces and the number of leading eigenfaces to skip.
    """
    # Stack every image as one normalised float32 row vector of length
    # w * h * channels, giving a (numImages, w*h) data matrix.
    numImages = len(images)
    sz = images[0].shape
    channels = 1  # grayscale
    data = np.zeros((numImages, sz[0] * sz[1] * channels), dtype=np.float32)
    for idx in range(numImages):
        # Images stored as 0 -> 1 floating point row vectors.
        data[idx, :] = (np.float32(images[idx]) / 255.0).flatten()

    # Eigen-decomposition of the stacked image vectors.
    mean, eigenVectors = cv2.PCACompute(data, mean=None,
                                        maxComponents=args.eigenfaces)

    # Coordinates of every image in the PCA (eigenface) space.
    coefficients = cv2.PCAProject(data, mean, eigenVectors)

    # Covariance and mean of the PCA-space representation, skipping the
    # first N eigenfaces that often contain mostly illumination variance
    # (default N = 3).
    covariance_coeffs, mean_coeffs = cv2.calcCovarMatrix(
        coefficients[:, args.eigenfaces_to_skip:args.eigenfaces],
        mean=None,
        flags=cv2.COVAR_NORMAL | cv2.COVAR_ROWS,
        ctype=cv2.CV_32F)
    return (mean, eigenVectors, coefficients, mean_coeffs, covariance_coeffs)
def is_line(image):
    """Return True when the nonzero pixels of `image` are nearly collinear."""
    if not have_solid_field(image):
        return False
    # One (row, col) coordinate pair per nonzero pixel.
    coords = np.vstack(image.nonzero()).transpose().astype(np.float32)
    mean, eigenvectors = cv2.PCACompute(coords, mean=None)
    projected = cv2.PCAProject(coords, mean, eigenvectors)
    # A line has almost no spread along the second principal axis.
    return np.std(projected, axis=0)[1] < 1
def doThePCA(dirFaces):
    """Run PCA over every face image found under dirFaces/<person>/<face>.

    Returns the PCA projections of all loaded faces and pickles them
    together with their "<folder>/<face>" labels via picklePCA().
    """
    testMatrix = None
    labels = []
    folders = os.listdir(dirFaces)
    for folder in folders:
        faces = os.listdir(dirFaces + "/" + folder)
        print(dirFaces + folder)
        for face in faces:
            img = cv2.imread(dirFaces + folder + "/" + face)
            if img is None:
                print("¯\_(ツ)_/¯ Unable to load " + dirFaces + folder + "/" + face)
                continue
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            size = gray.shape
            w = size[0]
            h = size[1]
            grayVector = gray.reshape(w * h)
            # Explicit None check instead of the original bare `except:`
            # that both initialised the matrix on the first image and hid
            # any real np.vstack failure.
            if testMatrix is None:
                testMatrix = grayVector
                print(len(grayVector))
            else:
                testMatrix = np.vstack((testMatrix, grayVector))
            labels.append(folder + "/" + face)
    print("Computing mean and Eigen vectors")
    mean, eigenVectors = cv2.PCACompute(testMatrix, mean=None,
                                        maxComponents=len(testMatrix))
    print("Computing weights")
    all = cv2.PCAProject(testMatrix, mean, eigenVectors)
    picklePCA(all, labels)
    return all
def project(self, images_features):
    """Project 4-D feature maps onto the stored PCA basis.

    The last axis is treated as the feature dimension; it is replaced by
    the number of retained eigenvectors in the returned array.
    """
    assert images_features.ndim == 4
    feature_dim = images_features.shape[3]
    flat = np.reshape(images_features, (-1, feature_dim))
    projected = cv2.PCAProject(flat, self.mean, self.eigen_vecs)
    out_shape = list(images_features.shape)
    out_shape[3] = len(self.eigen_vecs)
    return np.reshape(projected, out_shape)
def project_shape_to_param(self, shape_vec):
    """Project a shape vector into the ASM parameter space.

    :type shape_vec: ShapeVector
    :return: vector of parameters
    :rtype: numpy.ndarray
    """
    params = cv.PCAProject(shape_vec, self.pca_shape, self.eigenvectors)
    return params
def create_descriptors_pca(self, dim=90):
    """Compute a PCA reduction of all stored descriptors and persist it.

    Replaces the PcaModel table with the projected descriptors and stores
    the PCA mean/eigenvectors as TrainingResult rows.

    :param dim: number of principal components to keep
    """
    print("start create_descriptors_pca ...")
    query = DB.DescriptorModel.select(
        DB.DescriptorModel.id,
        DB.DescriptorModel.descriptor).tuples().iterator()
    # Under Python 3, numpy.array(map(...)) wraps the map iterator in a
    # 0-d object array; build the list explicitly so this works on both
    # Python 2 and 3. Column 0 is the descriptor id.
    features = numpy.array([[x[0]] + list(x[1]) for x in query])
    print("create_descriptors_pca,count=%d,dim=%d" % (len(features), dim))
    start = time()
    print("build eigenvectors start time %s" % start)
    # PCA runs on every column except the leading id column.
    mean, eigenvectors = cv2.PCACompute(features[:, 1:], None,
                                        maxComponents=dim)
    fitted = cv2.PCAProject(features[:, 1:], mean, eigenvectors)
    print("build eigenvectors cost time %s" % (time() - start))
    print("saving data ...")
    DB.db.connect()
    with DB.db.transaction():
        # Rebuild the PCA table from scratch.
        DB.PcaModel.drop_table(fail_silently=True)
        DB.PcaModel.create_table()
        for i in range(0, len(fitted)):
            model = DB.PcaModel()
            model.pca = fitted[i]
            model.feature = features[i][0]
            model.save()
        # Replace any previously stored PCA model parameters.
        DB.TrainingResult.delete().where(
            DB.TrainingResult.name == "pca_mean").execute()
        DB.TrainingResult.delete().where(
            DB.TrainingResult.name == "pca_eigenvectors").execute()
        tr = DB.TrainingResult()
        tr.name = "pca_mean"
        tr.data = mean
        tr.save()
        tr = DB.TrainingResult()
        tr.name = "pca_eigenvectors"
        tr.data = eigenvectors
        tr.save()
    print("create_descriptors_pca done")
def pca_reduce(lmls, num_comp):
    """Reduce landmark data to `num_comp` PCA components.

    Returns (mean_n, eigenvecs.T, eigenvals, terms) where `terms` are the
    PCA-space coordinates of the input rows.
    """
    data = np.copy(lmls).T
    eigenvals, eigenvecs, mean_n = pca(data, num_comp)
    # Reshape mean and eigenvalues into column vectors.
    mean_n = mean_n.reshape((mean_n.shape[0], 1))
    eigenvals = eigenvals.reshape((eigenvals.shape[0], 1))
    terms = cv2.PCAProject(data.T, mean_n, eigenvecs)
    return mean_n, eigenvecs.T, eigenvals, terms
def calc_descriptor(self, image):
    """Compute the PCA-reduced global descriptor of an image.

    Returns None when no local descriptors can be extracted.
    """
    local_desc = self.local_feature.calc_descriptors(image=image)
    if local_desc is None:
        return None
    global_desc = self.global_feature.calc_descriptor(
        ldescriptors=local_desc)
    # PCAProject expects row vectors, hence the reshape to (1, -1).
    reduced = cv2.PCAProject(data=global_desc.reshape(1, -1),
                             mean=self.pca_mean,
                             eigenvectors=self.pca_eigenvectors)
    return reduced.flatten()
def getPCADescriptors(target, PCABaseFileName, maxComponents):
    """Project the 32x32 descriptors of `target` onto a saved PCA basis.

    `maxComponents` is only used to name the output file; the output
    dimensionality is fixed by the eigenvectors stored in PCABaseFileName.
    """
    Timer.start('getPCADescriptors')
    meanPCA, eigenvectorsPCA = np.load(PCABaseFileName)
    descriptors = np.load(getDir(target, 'resized') + 'descriptors32x32.npy')
    projected = cv2.PCAProject(descriptors, meanPCA, eigenvectorsPCA)
    np.save(getDir(target, 'resized') + str(maxComponents) + 'descriptorsPCA.npy',
            projected)
    Timer.stop('getPCADescriptors')
def extractFeature(self, im):
    """Extract a PCA-reduced classifier feature for an image.

    The feature of the image and of its horizontal mirror are concatenated
    before projection. Returns [] when extraction fails.
    """
    try:
        feature = self.__extracter.extractFeature(im, "classifier")
        mirrored = np.flip(im, axis=1)
        feature_mirrored = self.__extracter.extractFeature(mirrored, "classifier")
        combined = np.hstack([feature, feature_mirrored]).reshape(1, -1)
        reduced = cv2.PCAProject(combined, self.__pca_mean,
                                 self.__pca_eigen_vector)
        return reduced.reshape(-1).tolist()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; extraction errors still yield [].
        return []
def HOG_PCA_Matrix(dataset, pokemon):
    """Build the HOG feature matrix of `dataset`, PCA-reduce it to 240
    dimensions, and persist the PCA mean/eigenvectors for later use."""
    featureSize = computeHOG(dataset[0]).shape[0]
    hogMatrix = np.empty((dataset.shape[0], featureSize))
    for i in range(dataset.shape[0]):
        hist = computeHOG(dataset[i])
        # Each HOG entry is a length-1 array; copy element-wise into the row.
        hogMatrix[i] = [hist[k][0] for k in range(featureSize)]
    # Run PCA to reduce feature dimensions, and save the basis for reuse
    # at prediction time.
    mean, eigenVectors = cv.PCACompute(hogMatrix, mean=None, maxComponents=240)
    hogMatrix = cv.PCAProject(hogMatrix, mean, eigenVectors)
    dump(mean, pcaFolderPath + pokemon + 'PCA_Mean.joblib')
    dump(eigenVectors, pcaFolderPath + pokemon + 'PCA_Eigen.joblib')
    return hogMatrix
def trainInitialSVM():
    """Compute HOG features for pos/neg training images, PCA-reduce them,
    and save labels, eigenvectors and the projected training data to disk."""
    train_data, labels = getTrainData(hog, ".\/img\pos\*.jpg", ".\/img\/neg\*.jpg")
    mean_input = np.mean(train_data, axis=0).reshape(1, -1)
    # The original call passed cv2.PCA_DATA_AS_ROW as the third positional
    # argument, but the Python binding of PCACompute takes
    # (data, mean[, eigenvectors[, maxComponents]]) -- there is no flags
    # parameter, so the flag landed in the eigenvectors slot.
    # Pass maxComponents explicitly instead.
    mean, eigenvectors = cv2.PCACompute(train_data, mean_input,
                                        maxComponents=512)
    np.save("./labels", labels)
    np.save("./pca_eigenvectors", eigenvectors)
    projection = cv2.PCAProject(train_data, mean, eigenvectors)
    np.save('./pca_projection', projection)
def feature_pca(feature, pca_file, train_file): """ get dimension-reduced feature by Principle Component Analysis\n feature: numpy array, each row is a data point """ mean = np.array([]) eigenvectors = np.array([]) if not os.path.isfile(pca_file): mean, eigenvectors = train_pca(train_file) np.savez(pca_file, m=mean, v=eigenvectors) else: data = np.load(pca_file) mean = np.array(data['m'], data['m'].dtype) eigenvectors = np.array(data['v'], data['v'].dtype) data.close() print "load pca model: ", mean.shape, eigenvectors.shape result = cv2.PCAProject(feature, mean, eigenvectors) return result
def get_curve(data, E=0.001, s=500):
    # Fit a PCA frame on `data`, then smooth the points along the first
    # principal axis with a Gaussian-weighted moving average, and map the
    # smoothed curve back to the original space.
    # E: weight threshold bounding the local window; s: Gaussian variance.
    mean, px = cv2.PCACompute(data=data, mean=np.array([data.mean(axis=0)]))
    new_data = cv2.PCAProject(data, mean, px)
    # Sort by the first principal coordinate so windows are contiguous.
    new_data = np.array(sorted(new_data, key=lambda x: x[0]))

    def dist(t1, t2, sigma):
        # Gaussian kernel on the first PCA coordinate of t1 and t2.
        return exp(-(t2[0] - t1[0])**2 / (2 * sigma))

    left_index = 0
    right_index = 0
    index = 0
    all_px = new_data.shape[0]
    curve = []
    while index < all_px:
        is_ = False  # NOTE(review): assigned but never used
        # Grow the right edge while points still carry weight above E.
        while (right_index < all_px) and (dist(new_data[index], new_data[right_index], s) > E):
            right_index += 1
        # Move the left edge past points whose weight decayed below E.
        while (left_index < index) and (dist(new_data[index], new_data[left_index], s) < E):
            left_index += 1
        # Gaussian-weighted average of the window around the current point.
        up = np.sum(np.array([
            x * dist(x, new_data[index], s)
            for x in new_data[left_index:right_index]
        ]), axis=0)
        down = np.sum(np.array([
            dist(x, new_data[index], s)
            for x in new_data[left_index:right_index]
        ]), axis=0)
        res = up / down
        curve.append(res)
        index += 1
    # Back-project the smoothed curve into the original coordinate system.
    return cv2.PCABackProject(np.array(curve), mean, px)
def computeFeatures(img):
    """Compute a bag-of-visual-words histogram feature for a BGR image.

    The image is reduced to its first PCA colour component, trimmed, SIFT
    descriptors are extracted and quantized against a pre-built codebook.
    """
    # Reshape for PCA: rows = pixels, columns = BGR channels.
    copy = img.reshape(-1, 3).copy()
    mean, eigenvectors = cv2.PCACompute(copy, np.array([]), maxComponents=3)
    pca = cv2.PCAProject(copy, mean, eigenvectors)
    pca = pca.reshape(img.shape)
    # Keep only the first (highest-variance) component, rescaled to 0..255.
    pca = pca[:, :, 0]
    pca = (pca - np.min(pca)) / (np.max(pca) - np.min(pca)) * 255
    pca = cv2.convertScaleAbs(pca)
    # Truncate all sides by 5% since they tend to be background.
    pca = pca[round(pca.shape[0] * 0.05):round(pca.shape[0] * 0.95),
              round(pca.shape[1] * 0.05):round(pca.shape[1] * 0.95)]
    # Extract SIFT features and descriptors.
    (frame, desc) = cysift.sift(pca, peak_thresh=10, edge_thresh=20,
                                compute_descriptor=True)
    # CLAHE (applied after SIFT, so it does not affect the descriptors).
    clahe = cv2.createCLAHE(clipLimit=4.0, tileGridSize=(8, 8))
    pca = clahe.apply(pca)
    # Load codebook. NOTE(review): pickle.load on an external file is
    # unsafe if codebook.pkl can come from an untrusted source.
    codebook = pickle.load(open("codebook.pkl", "rb"))
    # Construct a normalized visual-word histogram. The `normed` keyword
    # was removed from np.histogram in NumPy 1.24; `density=True` is the
    # supported equivalent for equal-width bins.
    code, distortion = vq(desc, codebook)
    featvect, bins = np.histogram(code, codebook.shape[0], density=True)
    return featvect
def get_curve(data, E=0.001, s=200):
    # Variant of the PCA curve smoother that samples the first principal
    # axis on a fixed grid (step 10) instead of at every data point.
    # E: weight threshold bounding the window; s: Gaussian variance.
    mean, px = cv2.PCACompute(data=data, mean=np.array([data.mean(axis=0)]))
    new_data = cv2.PCAProject(data, mean, px)
    # Sort by the first principal coordinate so windows are contiguous.
    new_data = np.array(sorted(new_data, key=lambda x: x[0]))

    def dist(t1, t2, sigma):
        # Gaussian kernel on scalar first-axis coordinates.
        return exp(-(t2 - t1)**2 / (2 * sigma))

    left_index = 0
    right_index = 0
    all_px = new_data.shape[0]
    curve = []
    is_ = 0
    # Grid spanning the range of the first principal coordinate.
    min_, max_ = int(min(new_data[:, 0])) - 1, int(max(new_data[:, 0])) + 1
    for index in range(min_, max_, 10):
        is_ += 1
        # Grow the right edge while points still carry weight above E.
        while (right_index < all_px) and (dist(index, new_data[right_index][0], s) > E):
            right_index += 1
        # Move the left edge past points whose weight decayed below E.
        while (left_index < is_) and (dist(index, new_data[left_index][0], s) < E):
            left_index += 1
        # Gaussian-weighted average of the window around the grid point.
        up = np.sum(np.array([
            x * dist(x[0], index, s)
            for x in new_data[left_index:right_index]
        ]), axis=0)
        down = np.sum(np.array(
            [dist(x[0], index, s) for x in new_data[left_index:right_index]]),
            axis=0)
        res = up / down
        curve.append(res)
        index += 1  # NOTE(review): no effect -- `index` is rebound by the for loop
    # Back-project the smoothed curve into the original coordinate system.
    return cv2.PCABackProject(np.array(curve), mean, px)
def __init__(self, image, parent=None):
    """Qt widget visualising per-pixel PCA of a BGR `image`.

    Builds distance / projection / cross-product maps for each of the
    three principal colour components and a table of the PCA statistics.
    """
    super(PcaWidget, self).__init__(parent)
    # --- controls -----------------------------------------------------
    self.component_combo = QComboBox()
    self.component_combo.addItems([self.tr(f"#{i + 1}") for i in range(3)])
    self.distance_radio = QRadioButton(self.tr("Distance"))
    self.distance_radio.setToolTip(self.tr("Distance from the closest point on selected component"))
    self.project_radio = QRadioButton(self.tr("Projection"))
    self.project_radio.setToolTip(self.tr("Projection onto the selected principal component"))
    self.crossprod_radio = QRadioButton(self.tr("Cross product"))
    self.crossprod_radio.setToolTip(self.tr("Cross product between input and selected component"))
    self.distance_radio.setChecked(True)
    self.last_radio = self.distance_radio
    self.invert_check = QCheckBox(self.tr("Invert"))
    self.invert_check.setToolTip(self.tr("Output bitwise complement"))
    self.equalize_check = QCheckBox(self.tr("Equalize"))
    self.equalize_check.setToolTip(self.tr("Apply histogram equalization"))
    # --- PCA over the pixel colours ----------------------------------
    # One row per pixel, one column per colour channel.
    rows, cols, chans = image.shape
    x = np.reshape(image, (rows * cols, chans)).astype(np.float32)
    mu, ev, ew = cv.PCACompute2(x, np.array([]))
    p = np.reshape(cv.PCAProject(x, mu, ev), (rows, cols, chans))
    x0 = image.astype(np.float32) - mu
    # For each eigenvector build the three display modes: distance from
    # the component axis, projection onto it, and the raw cross product.
    self.output = []
    for i, v in enumerate(ev):
        cross = np.cross(x0, v)
        distance = np.linalg.norm(cross, axis=2) / np.linalg.norm(v)
        project = p[:, :, i]
        self.output.extend([norm_mat(distance, to_bgr=True), norm_mat(project, to_bgr=True), norm_img(cross)])
    # --- statistics table (channels listed in R, G, B order) ---------
    table_data = [
        [mu[0, 2], mu[0, 1], mu[0, 0]],
        [ev[0, 2], ev[0, 1], ev[0, 0]],
        [ev[1, 2], ev[1, 1], ev[1, 0]],
        [ev[2, 2], ev[2, 1], ev[2, 0]],
        [ew[2, 0], ew[1, 0], ew[0, 0]],
    ]
    table_widget = QTableWidget(5, 4)
    table_widget.setHorizontalHeaderLabels([self.tr("Element"), self.tr("Red"), self.tr("Green"), self.tr("Blue")])
    table_widget.setItem(0, 0, QTableWidgetItem(self.tr("Mean vector")))
    table_widget.setItem(1, 0, QTableWidgetItem(self.tr("Eigenvector 1")))
    table_widget.setItem(2, 0, QTableWidgetItem(self.tr("Eigenvector 2")))
    table_widget.setItem(3, 0, QTableWidgetItem(self.tr("Eigenvector 3")))
    table_widget.setItem(4, 0, QTableWidgetItem(self.tr("Eigenvalues")))
    for i in range(len(table_data)):
        modify_font(table_widget.item(i, 0), bold=True)
        for j in range(len(table_data[i])):
            table_widget.setItem(i, j + 1, QTableWidgetItem(str(table_data[i][j])))
    # item = QTableWidgetItem()
    # item.setBackgroundColor(QColor(mu[0, 2], mu[0, 1], mu[0, 0]))
    # table_widget.setItem(0, 4, item)
    # table_widget.resizeRowsToContents()
    # table_widget.resizeColumnsToContents()
    table_widget.setEditTriggers(QAbstractItemView.NoEditTriggers)
    table_widget.setSelectionMode(QAbstractItemView.SingleSelection)
    table_widget.setMaximumHeight(190)
    # --- viewer and signal wiring ------------------------------------
    self.viewer = ImageViewer(image, image, None)
    self.process()
    self.component_combo.currentIndexChanged.connect(self.process)
    self.distance_radio.clicked.connect(self.process)
    self.project_radio.clicked.connect(self.process)
    self.crossprod_radio.clicked.connect(self.process)
    self.invert_check.stateChanged.connect(self.process)
    self.equalize_check.stateChanged.connect(self.process)
    # --- layout -------------------------------------------------------
    top_layout = QHBoxLayout()
    top_layout.addWidget(QLabel(self.tr("Component:")))
    top_layout.addWidget(self.component_combo)
    top_layout.addWidget(QLabel(self.tr("Mode:")))
    top_layout.addWidget(self.distance_radio)
    top_layout.addWidget(self.project_radio)
    top_layout.addWidget(self.crossprod_radio)
    top_layout.addWidget(self.invert_check)
    top_layout.addWidget(self.equalize_check)
    top_layout.addStretch()
    bottom_layout = QHBoxLayout()
    bottom_layout.addWidget(table_widget)
    main_layout = QVBoxLayout()
    main_layout.addLayout(top_layout)
    main_layout.addWidget(self.viewer)
    main_layout.addLayout(bottom_layout)
    self.setLayout(main_layout)
# PCA on the training matrix X, keeping the top 3 components.
mu, eig = cv2.PCACompute(X, np.array([]), maxComponents=3)
print(mu)
print(eig)
# Compute eigenvalues (energy) from the covariance of the centered data.
XMedia0 = X - mu
covar = np.dot(np.transpose(XMedia0), XMedia0)
# print(covar.shape)
# cv2.eigen returns (retval, eigenvalues, eigenvectors); drop retval.
eVal, eVec = cv2.eigen(covar)[1:]
print('Eigenvalores')
print(eVal.shape, eVec.shape)
print(eVal, eVec)
# PCA projection
# print(mu[:,:2], eig[:2,:])
X2 = cv2.PCAProject(X, mu, eig)
print(X2.shape)
# 3-D scatter of the first three principal components, coloured by the
# training labels.
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.scatter3D(X2[:, 0], X2[:, 1], X2[:, 2], c=yentrena, cmap=plt.cm.Paired)
# print(np.zeros((90,1)).shape)
# plt.scatter( X2[:,0], X2[:,1], c=yentrena, cmap=plt.cm.Paired)
plt.xlabel("PC 1")
plt.ylabel("PC 2")
ax.set_zlabel('PC 3')
# plt.axis( 'equal' )
plt.show()
x_ = np.vstack((x, y)).T # 按垂直方向(行顺序)堆叠数组构成一个新的数组 import cv2 mu, eig = cv2.PCACompute(x_, np.array([])) import matplotlib.pyplot as plt plt.style.use('ggplot') ''' plt.plot(x, y, 'o', zorder=1) plt.quiver(mean[0], mean[1], eig[:, 0], eig[:, 1], zorder=3, scale=0.2, units='xy') plt.text(mean[0] + 5 * eig[0, 0], mean[1] + 5 * eig[0, 1], 'u1', zorder = 5 , fontsize = 16, bbox = dict(facecolor='white', alpha=0.6)) plt.text(mean[0] + 7 * eig[1, 0], mean[1] + 4 * eig[1, 1], 'u1', zorder = 5 , fontsize = 16, bbox = dict(facecolor='white', alpha=0.6)) plt.axis([0, 40, 0, 40]) plt.xlabel('feature 1') plt.ylabel('feature 2') plt.show() ''' # cv2 旋转 x2 = cv2.PCAProject(x_, mu, eig) # plt.axis([-20, 20, -10, 10]) # sklearn 进行独立成分分析 from sklearn import decomposition ica = decomposition.FastICA() x2 = ica.fit_transform(x_) plt.plot(x2[:, 0], x2[:, 1], 'o') plt.xlabel('feature 1') plt.ylabel('feature 2') plt.axis([-0.2, 0.2, -0.2, 0.2]) plt.show()
def projecao_PCA(vetor_para_PCA, media, auto_vetores):
    """Project the input vector onto the PCA basis (mean + eigenvectors)."""
    return cv.PCAProject(vetor_para_PCA, media, auto_vetores)
def pokePCA(hogMatrix, pokemon):
    """Project a HOG matrix with the PCA basis saved for `pokemon`."""
    mean_basis = load('svms/' + pokemon + 'PCA_Mean.joblib')
    eigen_basis = load('svms/' + pokemon + 'PCA_Eigen.joblib')
    return cv.PCAProject(hogMatrix, mean_basis, eigen_basis)
def main():
    """Train and evaluate a k-NN MNIST classifier, optionally PCA-reducing
    the images first. CLI: mnist_pca_knn.py [-i] PCA_K KNN_K."""
    if len(sys.argv) not in (3, 4):
        print('usage: python mnist_pca_knn.py [-i] PCA_K KNN_K')
        print()
        print('note: set PCA_K to 0 to disable PCA')
        print()
        sys.exit(1)
    args = sys.argv[1:]
    # Optional -i flag switches to interactive demo mode.
    try:
        interactive_idx = args.index('-i')
        args.pop(interactive_idx)
        interactive = True
    except:  # NOTE(review): bare except; ValueError would be more precise
        interactive = False
    assert len(args) == 2
    pca_k = int(args[0])
    assert pca_k >= 0 and pca_k <= MNIST_DIMS
    knn_k = int(args[1])
    assert knn_k > 0 and knn_k < 12
    train_labels, train_images, test_labels, test_images = get_mnist_data()
    train_images = train_images.astype(np.float32)
    train_images = train_images.reshape(-1, MNIST_DIMS)  # make row vectors
    if pca_k > 0:
        # note we could use cv2.PCACompute to do this but
        # instead we use a pre-computed eigen-decomposition
        # of the data if available
        mean, eigenvectors = load_precomputed_pca(train_images, pca_k)
        print('reducing dimensionality of training set...')
        train_vecs = cv2.PCAProject(train_images, mean, eigenvectors)
        print('done\n')
    else:
        # PCA disabled: classify in the raw pixel space.
        mean = None
        eigenvectors = None
        train_vecs = train_images
    print('train_images:', train_images.shape)
    print('train_vecs:', train_vecs.shape)
    print()
    test_images = test_images.astype(np.float32)
    test_images = test_images.reshape(-1, MNIST_DIMS)
    if pca_k > 0:
        print('reducing dimensionality of test set...')
        test_vecs = cv2.PCAProject(test_images, mean, eigenvectors)
        print('done\n')
    else:
        test_vecs = test_images
    print('test_images:', test_images.shape)
    print('test_vecs:', test_vecs.shape)
    print()
    matcher = get_knn_matcher()
    if interactive:
        interactive_demo(mean, eigenvectors, train_images, train_vecs,
                         train_labels, test_images, test_vecs, test_labels,
                         matcher, knn_k)
        return
    # Batch evaluation: accumulate misclassifications over the test set.
    num_test = len(test_images)
    total_errors = 0
    start = datetime.datetime.now()
    print(f'evaluating knn accuracy with k={knn_k}...')
    for start_idx in range(0, num_test, BATCH_SIZE):
        end_idx = min(start_idx + BATCH_SIZE, num_test)
        cur_batch_size = end_idx - start_idx
        idx, labels_pred = match_knn(matcher, knn_k,
                                     test_vecs[start_idx:end_idx],
                                     train_vecs, train_labels)
        labels_true = test_labels[start_idx:end_idx]
        total_errors += (labels_true != labels_pred).sum()
        # Running error rate over all examples evaluated so far.
        error_rate = 100.0 * total_errors / end_idx
        print(
            f'{total_errors:4d} errors after {end_idx:5d} test examples (error rate={error_rate:.2f}%)'
        )
    elapsed = (datetime.datetime.now() - start).total_seconds()
    print(
        f'total time={elapsed:.2f} seconds ({elapsed/end_idx:.4f}s per image)')
trainData_x = validData_x trainData_xmean = (trainData_x.mean(axis=0)).reshape( 1, (trainData_x.shape[1])) meanData, eigenvector = cv.PCACompute(trainData_x, trainData_xmean) saveFile = open(dir_out + '\\' + PCADataName, 'wb') cPickle.dump([meanData, eigenvector], saveFile) saveFile.close() else: saveFile = open(dir_out + '\\' + PCADataName, 'rb') meanData, eigenvector = cPickle.load(saveFile) print 1 trainData_x = cv.PCAProject(trainData_x, meanData, eigenvector) validData_x = cv.PCAProject(validData_x, meanData, eigenvector) testData_x = cv.PCAProject(testData_x, meanData, eigenvector) # num_data,dim = trainData_x.shape # # # dim = 800 # x = trainData_x[0:dim] # x = x.T # mean_x = x.mean(axis = 0) # x0 = x - mean_x #tile 整块扩展矩阵 # sigma = np.dot(x0,x0.T)/num_data # U,S,V = np.linalg.svd(sigma) # epsilon = 0.1 # ZCAWhite = U.dot( np.diag((1.0/np.sqrt(S+epsilon))).dot(U.T)) #U*(1/sqrt(s+epsilon))*U' #
col = (i % ncol) * width eigenvector = W[:, i] # unflatten the vector eigen_img = eigenvector.reshape(img.shape) imgarray[row:(row + height), col:(col + width)] = rescale(eigen_img) big_img[:, :] = np.kron(imgarray, [[1, 1], [1, 1]]) colorized_img = cv2.applyColorMap(big_img, cv2.COLORMAP_JET) cv2.imshow("eigenfaces", colorized_img) cv2.waitKey(0) cv2.destroyWindow("eigenfaces") # Show the array of various projections of the same face. #components = range(10, W.shape[1], (W.shape[1] - 10) / (nrow * ncol)) components = range(25, 400, 25) for i in range(nrow * ncol): row = (i / ncol) * height col = (i % ncol) * width eigenvectors = W[:, :components[i]] data = img.reshape((1, img.size)) projections = cv2.PCAProject(data, mean, eigenvectors.transpose()) proj_img = projections.dot(eigenvectors.transpose()) proj_img = proj_img.reshape(img.shape) imgarray[row:(row + height), col:(col + width)] = rescale(proj_img) big_img[:, :] = np.kron(imgarray, [[1, 1], [1, 1]]) cv2.imshow("projections -- {} to {}".format(components[0], components[-1]), big_img) cv2.waitKey(0) cv2.destroyAllWindows()
# Load 2048-dim query features ("name,v1,...,v2048" per line), project them
# with a PCA model, row-normalize, and print "name v1 ... vK" per feature.
# NOTE(review): `mean` and `eigenvectors` are assumed to be defined earlier
# in this script -- confirm against the surrounding code.
dim_feat = 2048
qfeats_path = 'part-00000-2048_1'
qfeats_file = open(qfeats_path, 'r')
qcontent = qfeats_file.readlines()
num = len(qcontent)
feats = []
names = []
for line in qcontent:
    # Each line: name followed by comma-separated feature values.
    tmplist = line.split(',')
    name = tmplist[0]
    feat = [float(value) for value in tmplist[1:]]
    feats.append(feat)
    names.append(name)
featArray = np.array(feats)
print(mean.shape)
print(eigenvectors.shape)
# print(eigenvectors)
data_compressed = cv2.PCAProject(featArray, mean, eigenvectors)
# NOTE(review): `normalize` presumably performs row-wise normalization
# (e.g. sklearn.preprocessing.normalize) -- verify the import.
data_compressed_normal = normalize(data_compressed, copy=False)
for i, name in enumerate(names):
    print(name + ' ' + ' '.join([str(value) for value in data_compressed_normal[i]]))
import numpy as np
import cv2

# Per-channel PCA / statistics experiment on one Rubik's-cube image.
print('opencv version:', cv2.__version__)
image_path = '/home/zexi/rubiks_cube/LINEMOD/rubikscube/JPEGImages/000001.jpg'
image = cv2.imread(image_path)
b, g, r = cv2.split(image)
row, col = b.shape[0], b.shape[1]
# One row vector holding every blue-channel pixel.
b_linear = (b.reshape(1, row * col)).astype(np.float64)
mean_b = np.mean(b_linear, axis=0)
mean, eigenvectors = cv2.PCACompute(b_linear, mean_b.reshape(1, -1))
# Bug fix: the original called cv2.PCAProject(b_linear, np.array(), None);
# np.array() with no arguments is a TypeError and the PCA model computed
# above was never used. Project with the computed mean/eigenvectors.
points = cv2.PCAProject(b_linear, mean, eigenvectors)
# Bug fix: reshape(1, rows*cols) fails for a 3-channel image, which has
# rows*cols*3 elements; flatten with -1 instead.
image_linear = image.reshape(1, -1)
image_linear = image_linear.astype(np.float64)
mean_b, _ = cv2.meanStdDev(b)  # overwrites the earlier mean_b
g_linear = g.reshape(1, row * col)
r_linear = r.reshape(1, row * col)
row, col = image.shape[0], image.shape[1]
b, g, r = cv2.split(image)
mean_g, _ = cv2.meanStdDev(g)
mean_r, _ = cv2.meanStdDev(r)
"/DCNFS/users/student/ppaulson/coen166/att_faces_10/s" + str(i + 1) + '/' + str(s) + '.pgm', 0) if img is None: print("Error: import image s" + str(i + 1) + "#" + str(s) + " failed.") exit(0) flat = img.flatten() data_matrix.append(flat) #PCA for k in kset: mean, eigenvectors = cv2.PCACompute(np.array(data_matrix), mean=None, maxComponents=k) #Project onto rank-k subspace output = cv2.PCAProject(np.array(data_matrix), mean, eigenvectors) #Train KNN knn = cv2.KNearest() responses = [] for i in range(10): for j in range(6): responses.append([i + 1]) knn.train(output, np.array(responses)) #Evaluate testing images print("K:" + str(k) + "\n") numCorrect = 0 for i in range(10): for s in testingset: img = cv2.imread( "/DCNFS/users/student/ppaulson/coen166/att_faces_10/s" + str(i + 1) + '/' + str(s) + '.pgm', 0)
for (x, y, w, h) in faces:
    # Draw each face bounding box and extract the region of interest (ROI).
    cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
    # Project the detected face into PCA (eigenface) space.
    roi_gray = gray[y:y + h, x:x + w]
    roi_gray = cv2.resize(roi_gray, (args.face_size, args.face_size))
    # Try to compensate for illumination variance.
    roi_gray = cv2.equalizeHist(roi_gray)
    roi_gray = np.float32(roi_gray) / 255.0  # normalise as 0 -> 1
    face_coefficients = cv2.PCAProject(roi_gray.flatten().reshape(
        1, args.face_size * args.face_size), mean, eigenVectors)
    # Measure the distance to the PCA coefficients of each known face and
    # find the best match.
    face_index, face_distance = find_matching_face(
        face_coefficients, coefficients, covariance_coeffs)
    # Show the best match: display the name and Mahalanobis distance just
    # below the bounding box.
    cv2.putText(frame,
                names[face_index] + ": " + str(round(face_distance, 2)),
                (x, y + h + 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
# Build a 64-dim PCA representation for every image in `file_dir`
# (skipping the explain2.txt listing) and pickle the labels that have
# exactly four samples.
pca_source = []
labels = []
for file in files:
    if file == "explain2.txt":
        continue
    labels.append(file)
    im = cv2.imread(file_dir + file, 0)
    # Bug fix: the original called cv2.equalizeHist(im, 0), passing 0 as
    # the optional `dst` output argument, which the Python binding does
    # not accept (dst must be an ndarray or omitted).
    im = cv2.equalizeHist(im)
    size = im.shape  # last image's shape, reused for the mean face below
    pca_source.append(im.flatten())
pca_source = np.asarray(pca_source)
mean, eigvec = cv2.PCACompute(pca_source, mean=None)
mean = np.asarray(mean)
average = np.asarray(mean.reshape(size), 'uint8')
eigvec = np.asarray(eigvec)
vec = cv2.PCAProject(pca_source, mean, eigvec)
# Keep only the first 64 PCA coefficients of each image.
vec = [x[:64] for x in vec]
# Group the vectors by the first two characters of the file name.
temp = dict()
for i in range(len(labels)):
    label = labels[i][:2]
    temp.setdefault(label, []).append(vec[i])
# Keep only labels with exactly four samples.
output = dict()
for label in temp:
    if len(temp[label]) == 4:
        output[label] = temp[label]
pickle.dump(output, open(dump_file, 'wb'))