def transform(self, X, y=None):
    import numpy as np
    from sklearn.decomposition import PCA
    import pysptools.noise as ns
    X = X.astype('float32')
    # Apply brightness normalization (unit vector norm along the band axis)
    if self.BrightnessNormalization == True:
        def norm(r):
            return r / np.sqrt(np.sum(r**2, 0))
        if len(X.shape) == 3:  # raster cube (rows, cols, bands)
            X = np.apply_along_axis(norm, 2, X)
        if len(X.shape) == 2:  # 2D array
            X = np.apply_along_axis(norm, 0, X)
    # Noise-whiten the data, then run a PCA on the flattened pixels
    wdata = ns.Whiten().apply(X)
    h, w, numBands = wdata.shape
    X = np.reshape(wdata, (w * h, numBands))
    pca = PCA()
    mnf = pca.fit_transform(X)
    mnf = np.reshape(mnf, (h, w, numBands))
    # Keep the first n_components MNF bands and the cumulative explained variance (%)
    mnf = mnf[:, :, :self.n_components]
    var = np.cumsum(np.round(pca.explained_variance_ratio_, decimals=4) * 100)
    return mnf, var
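# --- Usage sketch (assumption, not part of the original script) ---
# How transform() might be called from an instance of its (unshown) enclosing
# class. The class name "MNF" and its constructor arguments are hypothetical;
# transform() only relies on the n_components and BrightnessNormalization
# attributes set there.
#
#   import numpy as np
#   model = MNF(n_components=5, BrightnessNormalization=True)  # hypothetical constructor
#   cube = np.random.rand(100, 100, 20).astype('float32')      # (rows, cols, bands) cube
#   components, cum_var = model.transform(cube)                 # MNF bands + cumulative variance (%)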
def explained_variance(img):
    import numpy as np
    from sklearn.decomposition import PCA
    import pysptools.noise as ns
    # Whiten the image, run a full PCA and print the cumulative explained variance (%)
    wdata = ns.Whiten().apply(img)
    h, w, numBands = wdata.shape
    X = np.reshape(wdata, (w * h, numBands))
    pca = PCA()
    pca.fit_transform(X)
    print(np.cumsum(np.round(pca.explained_variance_ratio_, decimals=4) * 100))
def whiten(data):
    import pysptools.noise as ns
    # Noise-whiten a (rows, cols, bands) cube with pysptools
    return ns.Whiten().apply(data)
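# --- Demo sketch (assumption, not part of the original script) ---
# A minimal, self-contained check of the two helpers above on a small
# synthetic cube; the function name and the random data are illustrative only.
def _demo_whiten_and_variance():
    import numpy as np
    cube = np.random.rand(50, 50, 10).astype('float32')  # synthetic (rows, cols, bands) cube
    wcube = whiten(cube)        # noise-whitened cube, same shape as the input
    explained_variance(cube)    # prints cumulative explained variance (%) per component
    return wcube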
    explained_variance(img)
else:
    if args['method'] == 1:
        # Load raster and convert it to ndarray format
        name = os.path.basename(inImage)
        r = rasterio.open(inImage)
        r2 = r.read()
        # Apply Brightness Normalization if the option -p is added
        if args["preprop"] == True:
            r2 = np.apply_along_axis(BrigthnessNormalization, 0, r2)
        img = reshape_as_image(r2)
        # Apply MNF -m 1
        print("Creating MNF components of " + name)
        # Apply MNF manually according to pysptools: whiten, then PCA
        wdata = ns.Whiten().apply(img)
        h, w, numBands = wdata.shape
        X = np.reshape(wdata, (w * h, numBands))
        pca = PCA()
        mnf = pca.fit_transform(X)
        mnf = np.reshape(mnf, (h, w, numBands))
        # Optional Savitzky-Golay denoising of MNF band index 1 (window 15, order 2)
        if args["SavitzkyGolay"] == True:
            dn = ns.SavitzkyGolay()
            mnf[:, :, 1:2] = dn.denoise_bands(mnf[:, :, 1:2], 15, 2)
        # Keep the first n_components bands and write them to disk
        mnf = mnf[:, :, :n_components]
        saveMNF(mnf, r)
        # Apply MNF coefficients to the other images
        for i in range(len(imageList)):