def eval_band_cv(X, y, times=10):
    """
    Evaluate a band subset with repeated stratified k-fold CV.

    :param X: sample matrix, shape (n_samples, n_selected_bands)
    :param y: label vector, shape (n_samples,)
    :param times: repeat the 3-fold CV this many times
    :return: knn/svm/elm => (OA+std, Kappa+std)
    """
    p = Processor()
    img_ = maxabs_scale(X)
    estimator = [
        KNN(n_neighbors=5),
        SVC(C=1e4, kernel='rbf', gamma=1.),
        ELM_Classifier(200)
    ]
    estimator_pre, y_test_all = [[], [], []], []
    for i in range(times):  # repeat k-fold CV N times
        skf = StratifiedKFold(n_splits=3, shuffle=True)
        for train_index, test_index in skf.split(img_, y):
            # use the scaled data for both training and testing
            X_train, X_test = img_[train_index], img_[test_index]
            y_train, y_test = y[train_index], y[test_index]
            y_test_all.append(y_test)
            for c in range(3):
                estimator[c].fit(X_train, y_train)
                estimator_pre[c].append(estimator[c].predict(X_test))
    clf = ['knn', 'svm', 'elm']
    score = []
    for z in range(3):
        ca, oa, aa, kappa = p.save_res_4kfolds_cv(
            estimator_pre[z], y_test_all,
            file_name=clf[z] + 'score.npz', verbose=True)
        score.append([oa, kappa])
    return score
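# --- usage sketch (illustrative, not repo code) ------------------------------
# Assumes eval_band_cv and its dependencies (Processor, KNN, SVC,
# ELM_Classifier, StratifiedKFold, maxabs_scale) are already in scope; the
# synthetic data only demonstrates the call signature and return layout.
def _demo_eval_band_cv():
    from sklearn.datasets import make_classification
    X_demo, y_demo = make_classification(n_samples=300, n_features=20,
                                         n_informative=10, n_classes=3,
                                         random_state=0)
    scores = eval_band_cv(X_demo, y_demo, times=2)
    for name, (oa, kappa) in zip(['knn', 'svm', 'elm'], scores):
        print('%s: OA=%s, Kappa=%s' % (name, oa, kappa))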
def eval_feature_cv(self, X, y, times=3, test_size=0.95, random_state=None):
    print(X.shape)
    # X = normalize(X)
    p = Processor()
    estimator = [KNN(n_neighbors=5), SVC(C=1e6, kernel='rbf')]
    estimator_pre, y_test_all = [[], []], []
    for i in range(times):  # repeat the random stratified split N times
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=test_size, random_state=random_state,
            shuffle=True, stratify=y)
        # train_index, test_index = p_Cora.stratified_train_test_index(y, test_size)
        # X_train, X_test = X[train_index], X[test_index]
        # y_train, y_test = y[train_index], y[test_index]
        y_test_all.append(y_test)
        for c in range(len(estimator)):
            estimator[c].fit(X_train, y_train)
            y_pre = estimator[c].predict(X_test)
            estimator_pre[c].append(y_pre)
    # score_Cora = []
    score_dic = {
        'knn': {'ca': [], 'oa': [], 'aa': [], 'kappa': []},
        'svm': {'ca': [], 'oa': [], 'aa': [], 'kappa': []}
    }
    key_ = ['knn', 'svm']
    for z in range(len(estimator)):
        ca, oa, aa, kappa = p.save_res_4kfolds_cv(
            estimator_pre[z], y_test_all, file_name=None, verbose=False)
        # score_Cora.append([oa, kappa, aa, ca])
        score_dic[key_[z]]['ca'] = ca
        score_dic[key_[z]]['oa'] = oa
        score_dic[key_[z]]['aa'] = aa
        score_dic[key_[z]]['kappa'] = kappa
    return score_dic
def eval_band_cv(X, y, times=10, test_size=0.95):
    p = Processor()
    estimator = [KNN(n_neighbors=3), SVC(C=1e5, kernel='rbf', gamma=1.)]
    estimator_pre, y_test_all = [[], []], []
    for i in range(times):  # repeat the random stratified split N times
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=test_size, random_state=None, shuffle=True,
            stratify=y)
        # skf = StratifiedKFold(n_splits=20, shuffle=True)
        # for test_index, train_index in skf.split(img_correct, gt_correct):
        #     X_train, X_test = img_correct[train_index], img_correct[test_index]
        #     y_train, y_test = gt_correct[train_index], gt_correct[test_index]
        y_test_all.append(y_test)
        for c in range(len(estimator)):
            estimator[c].fit(X_train, y_train)
            y_pre = estimator[c].predict(X_test)
            estimator_pre[c].append(y_pre)
    # score = []
    score_dic = {
        'knn': {'ca': [], 'oa': [], 'aa': [], 'kappa': []},
        'svm': {'ca': [], 'oa': [], 'aa': [], 'kappa': []}
    }
    key_ = ['knn', 'svm']
    for z in range(len(estimator)):
        ca, oa, aa, kappa = p.save_res_4kfolds_cv(
            estimator_pre[z], y_test_all, file_name=None, verbose=False)
        # score.append([oa, kappa, aa, ca])
        score_dic[key_[z]]['ca'] = ca
        score_dic[key_[z]]['oa'] = oa
        score_dic[key_[z]]['aa'] = aa
        score_dic[key_[z]]['kappa'] = kappa
    return score_dic
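# --- small helper (illustrative, not repo code) -------------------------------
# Flattens the nested score_dic returned by eval_feature_cv / eval_band_cv
# above into one summary line per classifier; it only assumes the
# {'knn'/'svm': {'ca', 'oa', 'aa', 'kappa'}} layout those functions build.
def summarize_score_dic(score_dic):
    for clf_name, metrics in score_dic.items():
        print('%s => OA: %s | AA: %s | Kappa: %s' % (
            clf_name, metrics['oa'], metrics['aa'], metrics['kappa']))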
def train_semi(self, X, Y, mask):
    print('constructing hypergraph...')
    G = self.generate_hypergraph(X, normalize=True)
    print('construction completed. training semi-classification network...')
    self.init_net(self.task_name, X, G, self.n_clz, mask)
    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter('./logs', self.sess.graph)
    saver = tf.train.Saver()
    loss_his = []
    acc_his = {'oa': [], 'aa': [], 'kappa': [], 'ca': []}
    for step_i in range(self.epoch):
        train_feed_dict = {
            self.x_placeholder: X,
            self.y_placeholder: Y,
            self.is_training: True
        }
        _, loss, summary = self.sess.run(
            [self.train_op, self.loss_op, merged],
            feed_dict=train_feed_dict)
        print('epoch %s ==> loss=%s' % (step_i, loss))
        loss_his.append(loss)
        writer.add_summary(summary, step_i)
        # =============== test ================== #
        # print logs every self.verb_per_iter iterations
        if self.verb_per_iter is not None and (
                step_i + 1) % self.verb_per_iter == 0:
            y_pre = self.predict(X, self.task_name)
            p = Processor()
            # evaluate only on the held-out nodes (mask == 0)
            ca, oa, aa, kappa = p.score(
                np.argmax(Y[np.nonzero(mask == 0)], axis=1),
                np.argmax(y_pre[np.nonzero(mask == 0)], axis=1))
            print('epoch %s ==> acc=%s' % (step_i, (oa, aa, kappa)))
            acc_his['oa'].append(oa)
            acc_his['aa'].append(aa)
            acc_his['kappa'].append(kappa)
            acc_his['ca'] = ca
            # saver.save(self.sess, self.model_path, write_meta_graph=False)
            np.savez(self.model_root_dir + '/history.npz',
                     loss=loss_his, acc=acc_his)
    # saver.save(self.sess, self.model_path)
    if self.verb_per_iter is not None:
        return acc_his
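# --- building the `mask` argument (illustrative sketch) -----------------------
# train_semi scores only the entries where mask == 0, so mask appears to flag
# labeled training nodes with 1 and held-out nodes with 0. A minimal way to
# build such a mask with a stratified split; the helper name and the 10% ratio
# are assumptions, not repo code. `y` is the 1-D label vector (use
# np.argmax(Y, axis=1) first if the labels are one-hot, as in train_semi).
import numpy as np
from sklearn.model_selection import train_test_split

def make_semi_mask(y, train_size=0.1):
    idx_train, _ = train_test_split(np.arange(y.shape[0]),
                                    train_size=train_size, stratify=y)
    mask = np.zeros(y.shape[0], dtype=int)
    mask[idx_train] = 1  # 1 = labeled/train, 0 = held out for evaluation
    return mask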
def eval_band(new_img, gt, train_inx, test_idx):
    p = Processor()
    # img_, gt_ = p.get_correct(new_img, gt)
    gt_ = gt
    img_ = maxabs_scale(new_img)
    # X_train, X_test, y_train, y_test = train_test_split(img_, gt_, test_size=0.4, random_state=42)
    X_train, X_test = img_[train_inx], img_[test_idx]
    y_train, y_test = gt_[train_inx], gt_[test_idx]
    knn_classifier = KNN(n_neighbors=5)
    knn_classifier.fit(X_train, y_train)
    # score = cross_val_score(knn_classifier, img_, y=gt_, cv=3)
    y_pre = knn_classifier.predict(X_test)
    score = accuracy_score(y_test, y_pre)
    # score = np.mean(score)
    return score
def train_dim(self, X, y=None):
    print('constructing hypergraph...')
    G = self.generate_hypergraph(X, normalize=True)
    print('construction completed. training dimensionality reduction network...')
    self.init_net(self.task_name, X, G)
    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter('./logs', self.sess.graph)
    saver = tf.train.Saver()
    loss_his = []
    acc_his = []
    for step_i in range(self.epoch):
        train_feed_dict = {self.x_placeholder: X, self.is_training: True}
        _, loss, summary = self.sess.run(
            [self.train_op, self.loss_op, merged],
            feed_dict=train_feed_dict)
        print('epoch %s ==> loss=%s' % (step_i, loss))
        loss_his.append(loss)
        writer.add_summary(summary, step_i)
        # =============== test ================== #
        # print logs every self.verb_per_iter iterations
        if self.verb_per_iter is not None and (
                step_i + 1) % self.verb_per_iter == 0:
            z = self.predict(X, self.task_name)
            p = Processor()
            score = self.eval_feature_cv(z, y, times=3, test_size=0.9,
                                         random_state=331)
            print('epoch %s ==> acc=%s' % (
                step_i, (score['knn']['oa'][0], score['svm']['oa'][0])))
            acc_his.append(score)
            # saver.save(self.sess, self.model_path, write_meta_graph=False)
            np.savez(self.model_root_dir + '/history.npz',
                     loss=loss_his, acc=acc_his, fea=z)
    # saver.save(self.sess, self.model_path)
    if self.verb_per_iter is not None:
        return acc_his
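# --- reading history.npz back (illustrative sketch) ---------------------------
# acc_his is a Python list of score dicts, so np.savez stores it as an object
# array and np.load must be given allow_pickle=True to recover it; the helper
# name is an assumption, not repo code.
import numpy as np

def load_history(history_path):
    hist = np.load(history_path, allow_pickle=True)
    loss_his = hist['loss']
    acc_his = hist['acc'].tolist()  # back to a list of score dicts
    return loss_his, acc_his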
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import minmax_scale
from classes.SNMF import BandSelection_SNMF
from Toolbox.Preprocessing import Processor  # import path as used elsewhere in this repo
import numpy as np

if __name__ == '__main__':
    # root = '/Users/cengmeng/PycharmProjects/python/Deep-subspace-clustering-networks/Data/'
    root = '/content/'
    # im_, gt_ = 'SalinasA_corrected', 'SalinasA_gt'
    im_, gt_ = 'Indian_pines_corrected', 'Indian_pines_gt'
    # im_, gt_ = 'Pavia', 'Pavia_gt'
    # im_, gt_ = 'KSC', 'KSC_gt'
    img_path = root + im_ + '.mat'
    gt_path = root + gt_ + '.mat'
    p = Processor()
    img, gt = p.prepare_data(img_path, gt_path)
    n_row, n_column, n_band = img.shape
    train_inx, test_idx = p.get_tr_tx_index(p.get_correct(img, gt)[1],
                                            test_size=0.9)
    img = minmax_scale(img.reshape(n_row * n_column, n_band)).reshape(
        (n_row, n_column, n_band))
    x_input = img.reshape(n_row * n_column, n_band)
    num_class = 15
    snmf = BandSelection_SNMF(num_class)
    X_new = snmf.predict(x_input).reshape(n_row, n_column, num_class)
    a, b = p.get_correct(X_new, gt)
    b = p.standardize_label(b)
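    # A plausible next step (sketch, not repo code): feed the selected-band
    # samples (a, b) to the repeated-CV evaluator defined earlier, e.g.:
    # score = eval_band_cv(a, b, times=5)
    # print(score)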
root = 'F:\\Python\\HSI_Files\\'
# im_, gt_ = 'SalinasA_corrected', 'SalinasA_gt'
im_, gt_ = 'Indian_pines_corrected', 'Indian_pines_gt'
# im_, gt_ = 'KSC', 'KSC_gt'
img_path = root + im_ + '.mat'
gt_path = root + gt_ + '.mat'

# gt_path = r'F:\Python\HSI_Files\Indian_pines_gt.mat'
# img_path = r'F:\Python\HSI_Files\Indian_pines_corrected.mat'

# gt_path = r'F:\Python\HSI_Files\Pavia_gt.mat'
# img_path = r'F:\Python\HSI_Files\Pavia.mat'

print(img_path)
p = Processor()
img, gt = p.prepare_data(img_path, gt_path)
n_row, n_clo, n_bands = img.shape
print('img=', img.shape)
# pca_img = p.pca_transform(n_comp, img.reshape(n_row * n_clo, n_bands)).reshape(n_row, n_clo, n_comp)
X, y = p.get_correct(img, gt)
print(X.shape)

'''
___________________________________________________________________
Data pre-processing
'''
# remove the classes that have too few samples
classes = np.unique(y)
print('size:', X.shape, 'n_classes:', classes.shape[0])
for c in classes:
    if np.nonzero(y == c)[0].shape[0] < 10:
        X = np.delete(X, np.nonzero(y == c), axis=0)
        y = np.delete(y, np.nonzero(y == c))
'''
__________________________________________________________________
Load HSI data
'''
root = 'F:\\Python\\HSI_Files\\'
# im_, gt_ = 'SalinasA_corrected', 'SalinasA_gt'
# im_, gt_ = 'Salinas_corrected', 'Salinas_gt'
# im_, gt_ = 'Botswana', 'Botswana_gt'
# im_, gt_ = 'Indian_pines_corrected', 'Indian_pines_gt'
# im_, gt_ = 'KSC', 'KSC_gt'
im_, gt_ = 'wuhanTM', 'wuhanTM_gt'

img_path = root + im_ + '.mat'
gt_path = root + gt_ + '.mat'
print(img_path)
p = Processor()
img, gt = p.prepare_data(img_path, gt_path)
n_row, n_clo, n_bands = img.shape
print('img=', img.shape)
if n_bands > 20:
    pca_img = p.pca_transform(20, img.reshape(n_row * n_clo, n_bands)).reshape(n_row, n_clo, 20)
    X, y = p.get_correct(pca_img, gt)
else:
    X, y = p.get_correct(img, gt)
print(X.shape)

'''
___________________________________________________________________
Data pre-processing
'''
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import minmax_scale
from BandSelection.classes.SNMF import BandSelection_SNMF
from Toolbox.Preprocessing import Processor  # import path as used elsewhere in this repo
import numpy as np

if __name__ == '__main__':
    root = '/Users/cengmeng/PycharmProjects/python/Deep-subspace-clustering-networks/Data/'
    # im_, gt_ = 'SalinasA_corrected', 'SalinasA_gt'
    # im_, gt_ = 'Indian_pines_corrected', 'Indian_pines_gt'
    # im_, gt_ = 'Pavia', 'Pavia_gt'
    im_, gt_ = 'KSC', 'KSC_gt'
    img_path = root + im_ + '.mat'
    gt_path = root + gt_ + '.mat'
    print(img_path)
    p = Processor()
    img, gt = p.prepare_data(img_path, gt_path)
    n_row, n_column, n_band = img.shape
    train_inx, test_idx = p.get_tr_tx_index(p.get_correct(img, gt)[1],
                                            test_size=0.9)
    img_train = minmax_scale(img.reshape(n_row * n_column, n_band)).reshape(
        (n_row, n_column, n_band))
    img_train = np.transpose(img_train, axes=(2, 0, 1))
    img_train = np.reshape(img_train, (n_band, n_row, n_column, 1))
    x_input = img.reshape(n_row * n_column, n_band)
    model_path = './pretrain-model-COIL20/model.ckpt'
    num_class = 7
    S = BandSelection_SNMF(x_input=x_input, model_path=model_path)
from __future__ import print_function

import numpy as np
from EMO_ELM.classes.ELM import BaseELM
from Toolbox.Preprocessing import Processor
from sklearn.model_selection import StratifiedKFold
from sklearn.neighbors import KNeighborsClassifier

p = Processor()
# raw strings avoid invalid backslash escapes in Windows paths
path = r'F:\Python\EMO_ELM\demo\experimental_results\KSC-1000iter-50hidden-sparsity_acc_X_proj_differ-rho.npz'
p2 = r'F:\Python\EMO_ELM\demo\experimental_results\KSC-X_projection-10hidden-5000iter.npz'
npz = np.load(path)
X = npz['X']
acc = npz['acc']
y = np.load(p2)['y']
all_aa, all_oa, all_kappa = [], [], []
skf = StratifiedKFold(n_splits=5, random_state=55, shuffle=True)
for j in range(X.shape[0]):
    X_ = X[j]
    y_pres = []
    y_tests = []
    for i in range(1):
        for train_index, test_index in skf.split(X_, y):
            X_train, X_test = X_[train_index], X_[test_index]
            y_train, y_test = y[train_index], y[test_index]
            elm = BaseELM(500, C=1e8)
            y_predicted = elm.fit(X_train, y_train).predict(X_test)
            y_pres.append(y_predicted)
            y_tests.append(y_test)
    ca_, oa, aa, kappa = p.save_res_4kfolds_cv(np.asarray(y_pres),
                                               np.asarray(y_tests),
                                               file_name=None, verbose=True)
    all_oa.append(oa)
from sklearn.decomposition import PCA
import time

root = 'HSI_Datasets/'
im_, gt_ = 'SalinasA_corrected', 'SalinasA_gt'
# im_, gt_ = 'Indian_pines_corrected', 'Indian_pines_gt'
# im_, gt_ = 'PaviaU', 'PaviaU_gt'
img_path = root + im_ + '.mat'
gt_path = root + gt_ + '.mat'
print('\nDataset: ', img_path)

PATCH_SIZE = 9  # 9 by default; normally the bigger the better
nb_comps = 4    # number of PCs; 4 by default, can be moderately increased

# load img and gt
p = Processor()
img, gt = p.prepare_data(img_path, gt_path)

# take a smaller sub-scene for computational efficiency
if im_ == 'SalinasA_corrected':
    REG_Coef_, NEIGHBORING_, RO_ = 1e1, 30, 0.8
    REG_Coef_K, NEIGHBORING_K, RO_K, GAMMA = 1e2, 30, 0.8, 0.2
if im_ == 'Indian_pines_corrected':
    img, gt = img[30:115, 24:94, :], gt[30:115, 24:94]
    PATCH_SIZE = 13
    REG_Coef_, NEIGHBORING_, RO_ = 1e2, 30, 0.4
    REG_Coef_K, NEIGHBORING_K, RO_K, GAMMA = 1e3, 30, 0.8, 10
if im_ == 'PaviaU':
    img, gt = img[150:350, 100:200, :], gt[150:350, 100:200]
    REG_Coef_, NEIGHBORING_, RO_ = 1e3, 20, 0.6
    REG_Coef_K, NEIGHBORING_K, RO_K, GAMMA = 6 * 1e4, 30, 0.8, 100
############################
'''
__________________________________________________________________
Load UCI data sets
'''
# raw string: '\U...' in a normal string literal is a unicode-escape SyntaxError
path = r'F:\Python\UCIDataset-matlab\UCI_25.mat'
mat = loadmat(path)
keys = list(mat.keys())  # list() needed in Python 3: dict views have no remove()
keys.remove('__version__')
keys.remove('__header__')
keys.remove('__globals__')
keys.sort()
save_name = 'result.npz'

# load data
# keys = ['satellite', 'segment', 'soybean', 'vowel', 'wdbc', 'yeast', 'zoo']
p = Processor()
key = 'Iris'
data = mat[key]
X, y = data[:, 1:].astype('float32'), data[:, 0].astype('int8')

'''
___________________________________________________________________
Data pre-processing
'''
# remove the classes that have too few samples
classes = np.unique(y)
print('size:', X.shape, 'n_classes:', classes.shape[0])
for c in classes:
    if np.nonzero(y == c)[0].shape[0] < 10:
        X = np.delete(X, np.nonzero(y == c), axis=0)
        y = np.delete(y, np.nonzero(y == c))
if __name__ == '__main__':
    root = 'D:\\Python\\HSI_Files\\'
    # im_, gt_ = 'SalinasA_corrected', 'SalinasA_gt'
    im_, gt_ = 'Indian_pines_corrected', 'Indian_pines_gt'
    # im_, gt_ = 'Pavia', 'Pavia_gt'
    # im_, gt_ = 'PaviaU', 'PaviaU_gt'
    # im_, gt_ = 'Salinas_corrected', 'Salinas_gt'
    # im_, gt_ = 'Botswana', 'Botswana_gt'
    # im_, gt_ = 'KSC', 'KSC_gt'
    img_path = root + im_ + '.mat'
    gt_path = root + gt_ + '.mat'
    print(img_path)
    p = Processor()
    img, gt = p.prepare_data(img_path, gt_path)
    n_row, n_column, n_band = img.shape
    X_img = minmax_scale(img.reshape(n_row * n_column, n_band).transpose())
    X_img = X_img.transpose().reshape((n_row, n_column, n_band))
    img_correct, gt_correct = p.get_correct(X_img, gt)
    gt_correct = p.standardize_label(gt_correct)
    X_img_2D = X_img.reshape(n_row * n_column, n_band)
    X_img_2D = minmax_scale(X_img_2D.transpose()).transpose()
    n_selected_band = 5
    algorithm = [
        EGCSR_BS_Clustering(n_selected_band, regu_coef=1e4, n_neighbors=3,
                            ro=0.8),
from BandSelection.classes.SpaBS import SpaBS
from BandSelection.classes.utility import eval_band
from sklearn.preprocessing import minmax_scale
from Toolbox.Preprocessing import Processor  # import path as used elsewhere in this repo
import numpy as np

if __name__ == '__main__':
    root = '/Users/cengmeng/PycharmProjects/python/Deep-subspace-clustering-networks/Data/'
    # im_, gt_ = 'SalinasA_corrected', 'SalinasA_gt'
    # im_, gt_ = 'Indian_pines_corrected', 'Indian_pines_gt'
    # im_, gt_ = 'Pavia', 'Pavia_gt'
    im_, gt_ = 'KSC', 'KSC_gt'
    img_path = root + im_ + '.mat'
    gt_path = root + gt_ + '.mat'
    print(img_path)
    p = Processor()
    img, gt = p.prepare_data(img_path, gt_path)
    n_row, n_column, n_band = img.shape
    X_img = minmax_scale(img.reshape(n_row * n_column, n_band)).reshape(
        (n_row, n_column, n_band))
    img_correct, gt_correct = p.get_correct(X_img, gt)
    train_inx, test_idx = p.get_tr_tx_index(gt_correct, test_size=0.4)
    # img_train = np.transpose(img_train, axes=(2, 0, 1))
    # img_train = np.reshape(img_train, (n_band, n_row, n_column, 1))
    # x_input = img.reshape(n_row * n_column, n_band)
    model_path = './pretrain-model-COIL20/model.ckpt'
    n_select_band = 20
from BandSelection.classes.SpaBS import SpaBS
from sklearn.preprocessing import minmax_scale
from Toolbox.Preprocessing import Processor  # import path as used elsewhere in this repo

if __name__ == '__main__':
    root = 'F:\\Python\\HSI_Files\\'
    # root = '/Users/cengmeng/PycharmProjects/python/Deep-subspace-clustering-networks/Data/'
    # im_, gt_ = 'SalinasA_corrected', 'SalinasA_gt'
    im_, gt_ = 'Indian_pines_corrected', 'Indian_pines_gt'
    # im_, gt_ = 'Pavia', 'Pavia_gt'
    # im_, gt_ = 'Botswana', 'Botswana_gt'
    # im_, gt_ = 'KSC', 'KSC_gt'
    img_path = root + im_ + '.mat'
    gt_path = root + gt_ + '.mat'
    print(img_path)
    p = Processor()
    img, gt = p.prepare_data(img_path, gt_path)
    # img, gt = img[:256, :, :], gt[:256, :]
    n_row, n_column, n_band = img.shape
    X_img = minmax_scale(img.reshape(n_row * n_column, n_band)).reshape(
        (n_row, n_column, n_band))
    X_img_2D = X_img.reshape(n_row * n_column, n_band)
    img_correct, gt_correct = p.get_correct(X_img, gt)
    gt_correct = p.standardize_label(gt_correct)
    train_inx, test_idx = p.get_tr_tx_index(gt_correct, test_size=0.4)
    n_input = [n_row, n_column]
    kernel_size = [7]
    n_hidden = [32]
    batch_size = n_band
    model_path = './pretrain-model-COIL20/model.ckpt'
if __name__ == '__main__':
    root = '/content/'
    # root = '/Users/cengmeng/PycharmProjects/python/Deep-subspace-clustering-networks/Data/'
    # im_, gt_ = 'SalinasA_corrected', 'SalinasA_gt'
    im_, gt_ = 'Indian_pines_corrected', 'Indian_pines_gt'
    # im_, gt_ = 'Pavia', 'Pavia_gt'
    # im_, gt_ = 'Botswana', 'Botswana_gt'
    # im_, gt_ = 'KSC', 'KSC_gt'
    img_path = root + im_ + '.mat'
    gt_path = root + gt_ + '.mat'
    print(img_path)
    p = Processor()
    img, gt = p.prepare_data(img_path, gt_path)
    # img, gt = img[:256, :, :], gt[:256, :]
    n_row, n_column, n_band = img.shape
    X_img = minmax_scale(img.reshape(n_row * n_column, n_band)).reshape(
        (n_row, n_column, n_band))
    img_correct, gt_correct = p.get_correct(X_img, gt)
    gt_correct = p.standardize_label(gt_correct)
    X_img_2D = X_img.reshape(n_row * n_column, n_band)
    train_inx, test_idx = p.get_tr_tx_index(gt_correct, test_size=0.4)
    n_input = [n_row, n_column]
    kernel_size = [11]
    n_hidden = [32]
    batch_size = n_band
    model_path = './pretrain-model-COIL20/model.ckpt'
""" computing classification criterion including per-class accuracy, OA, AA, and Kappa using different features """ from __future__ import print_function import numpy as np from sklearn.preprocessing import MinMaxScaler from EMO_ELM.classes.ELM import BaseELM from sklearn.model_selection import StratifiedKFold, cross_val_score from EMO_ELM.classes.ELM_nonlinear_RP import NRP_ELM from Toolbox.Preprocessing import Processor p = Processor() path_1 = 'F:\Python\EMO_ELM\demo\experimental_results\X_projection\X_proj-Indian_pines_corrected-nh=10-iter=5000.npz' path_2 = 'F:\Python\EMO_ELM\demo\experimental_results\Sparsity-dim-X_proj\Indian_pines-sparsity.npz' npz = np.load(path_1) X = np.load(path_2)['X_proj'][2] y = npz['y'] time = npz['time'] print(time.tolist()) # ------------ # remove samples whose number is very small for IndianPines # ------------ for c in np.unique(y): if np.nonzero(y == c)[0].shape[0] < 250: y = np.delete(y, np.nonzero(y == c)) y = p.standardize_label(y) """ ================= following lines add new random mapping =================
algorithm comparison
'''
# keys = ['Iris', 'Wine', 'wdbc', 'cotton', 'vowel', 'yeast', 'soybean']
keys = ['yeast', 'satellite', 'shuttle']

if __name__ == '__main__':
    acc_final = {}
    for key in keys:
        print('processing ', key)
        data = mat[key]
        X, y = data[:, 1:].astype('float32'), data[:, 0].astype('int8')
        # remove the classes that have too few samples
        classes = np.unique(y)
        for c in classes:
            if np.nonzero(y == c)[0].shape[0] < 10:
                X = np.delete(X, np.nonzero(y == c), axis=0)
                y = np.delete(y, np.nonzero(y == c))
        p = Processor()
        y = p.standardize_label(y)
        X = minmax_scale(X)
        print('num classes:', len(np.unique(y)), X.shape)
        # execute classification
        # X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.6, stratify=y)
        # he_elm = HE_ELM([[BaseELM(10, dropout_prob=0.9)] * 5, ] * 1,
        #                 KELM(C=1e-5, kernel='rbf'), is_nonmlize=True)
        accs_outer = []
        n_learner = 30
        n_hidden = 20
        # repeat fewer rounds on larger data sets
        # range_ = range(10) if X.shape[0] >= 1000 else range(20)
        for j in (range(10) if X.shape[0] >= 2000 else range(20)):
            X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                                train_size=0.6,
img_path = r'F:\Python\HSI_Files\SalinasA_corrected.mat'

# gt_path = r'F:\Python\HSI_Files\wuhanTM_gt.mat'
# img_path = r'F:\Python\HSI_Files\wuhanTM.mat'

# gt_path = r'F:\Python\HSI_Files\Indian_pines_gt.mat'
# img_path = r'F:\Python\HSI_Files\Indian_pines_corrected.mat'

# gt_path = r'F:\Python\HSI_Files\Pavia_gt.mat'
# img_path = r'F:\Python\HSI_Files\Pavia.mat'

# gt_path = r'F:\Python\HSI_Files\KSC_gt.mat'
# img_path = r'F:\Python\HSI_Files\KSC.mat'

print(img_path)
p = Processor()
img, gt = p.prepare_data(img_path, gt_path)
n_row, n_clo, n_bands = img.shape
print('img=', img.shape)
# pca_img = p.pca_transform(n_comp, img.reshape(n_row * n_clo, n_bands)).reshape(n_row, n_clo, n_comp)
X, y = p.get_correct(img, gt)
print(X.shape)

'''
___________________________________________________________________
Data pre-processing
'''
# remove the classes that have too few samples
classes = np.unique(y)
for c in classes:
    if np.nonzero(y == c)[0].shape[0] < 10:
        X = np.delete(X, np.nonzero(y == c), axis=0)
        y = np.delete(y, np.nonzero(y == c))