def predict(model, X, X_test, y_test, target_names, classes_authorized, spy_colors, label_dictionary):
    classification, confusion, test_loss, test_accuracy = reports(model, X_test, y_test, target_names)
    print(classification)
    plt.figure(figsize=(13, 10))
    plot_confusion_matrix(confusion, classes=target_names, title='Confusion matrix, without normalization')
    X_garbage, train_data, test_data = pp.load_data()
    y = np.add(train_data, test_data)
    y = pp.delete_useless_classes(y, classes_authorized)
    outputs = create_predicted_image(X, y, model, 5, y.shape[0], y.shape[1])

    print("PREDICTED IMAGE:")
    predict_image = spectral.imshow(classes=outputs.astype(int), figsize=(5, 5))
    label_patches = [patches.Patch(color=spy_colors[x] / 255., label=label_dictionary[x])
                     for x in np.unique(y)]
    plt.legend(handles=label_patches, ncol=2, fontsize='medium',
               loc='upper center', bbox_to_anchor=(0.5, -0.05))
    plt.show()

    ground_truth = spectral.imshow(classes=y, figsize=(5, 5))
    print("IDEAL IMAGE: ")
    label_patches = [patches.Patch(color=spy_colors[x] / 255., label=label_dictionary[x])
                     for x in np.unique(y)]
    plt.legend(handles=label_patches, ncol=2, fontsize='medium',
               loc='upper center', bbox_to_anchor=(0.5, -0.05))
    plt.show()
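# The legend-building idiom above (patches.Patch + spy_colors) recurs in several of
# these examples. A minimal standalone sketch of that pattern, assuming `class_map`
# is a 2D integer label array and `label_dictionary` maps each class index to a
# display name (both hypothetical here):
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from spectral import spy_colors

def legend_for_classes(class_map, label_dictionary):
    # One patch per class actually present in the map, colored with spectral's defaults.
    handles = [patches.Patch(color=spy_colors[c] / 255., label=label_dictionary[c])
               for c in np.unique(class_map)]
    plt.legend(handles=handles, ncol=2, fontsize='medium',
               loc='upper center', bbox_to_anchor=(0.5, -0.05))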
def draw_part(GT_Label, ES_Label, train_map, test_map):
    fig = plt.figure(figsize=(12, 6))
    p = plt.subplot(1, 4, 1)
    v = spy.imshow(classes=GT_Label, fignum=fig.number)
    p.set_title('Ground Truth')
    p.set_xticklabels([])
    p.set_yticklabels([])

    p = plt.subplot(1, 4, 2)
    spy.imshow(classes=train_map, fignum=fig.number)
    p.set_title('Training Map')
    p.set_xticklabels([])
    p.set_yticklabels([])

    p = plt.subplot(1, 4, 3)
    v = spy.imshow(classes=test_map, fignum=fig.number)
    p.set_title('Testing Map')
    p.set_xticklabels([])
    p.set_yticklabels([])

    p = plt.subplot(1, 4, 4)
    v = spy.imshow(classes=ES_Label * (GT_Label != 0), fignum=fig.number)
    p.set_title('Classification Map')
    p.set_xticklabels([])
    p.set_yticklabels([])
def plot_spectra(img, wave_lengths=(29, 112, 226), grid_step=20):
    sp.imshow(img, wave_lengths, aspect="auto")
    ax = plt.gca()
    ax.set_xticks(np.arange(0, img.shape[1], int(img.shape[1] / grid_step)))
    ax.set_yticks(np.arange(0, img.shape[0], int(img.shape[0] / grid_step)))
    plt.xticks(rotation=75)
    ax.grid(color='k', linestyle=':', linewidth=1)
    plt.xlabel("y", fontsize=15)
    plt.ylabel("x", fontsize=15)
def plotrho(self):
    """Plot the intercept (rho) using imshow."""
    rhonumber = self.rhofilelist[0][3].index(self.rhoband.get())
    print("Plotting intercept (rho) layer " + self.rhoband.get() + " [" + str(rhonumber) + "]")
    rhonumber = [rhonumber]
    rho_memmap = self.rhofilelist[0][2]
    spectral.imshow(rho_memmap, rhonumber, stretch=0.98)
def plot_predicted_output(model, path):
    classes = ['alfalfa', 'corn-no-till', 'corn-min-till', 'corn-clean',
               'grass/pasture', 'grass/trees', 'grass/pasture-mowed',
               'hay-windrowed', 'oats', 'soybean-no-till', 'soybean-min-till',
               'soybean-clean', 'wheat', 'woods',
               'buildings/grass/trees/drives', 'stone-steel towers']

    # plot legend
    plt.clf()
    fig = plt.gcf()
    fig.set_size_inches(5, 6.5)
    ax = plt.subplot()   # create the axes
    ax.set_axis_off()    # turn off the axis
    labelPatches = [Patch(color=spy_colors[x] / 255., label=classes[x - 1])
                    for x in range(1, 17)]
    plt.legend(handles=labelPatches, ncol=1, fontsize=18)
    plt.tight_layout()
    plt.savefig(path + '/legend.png')

    # ground truth
    plt.clf()
    spectral.imshow(classes=Y.reshape(145, 145))
    plt.axis('off')
    plt.tight_layout()
    plt.savefig(path + '/ground_truth.png')

    # predicted results
    plt.clf()
    xx = X.reshape(-1, 200)
    xx = (xx - np.mean(xx, 1, keepdims=True)) / np.std(xx, 1, keepdims=True)
    y_pred = model.predict(xx.reshape(-1, 200))
    y_pred = np.argmin(y_pred, -1) + 1
    y_pred[not_labeled] = 0
    spectral.imshow(classes=y_pred.reshape(145, 145))
    plt.axis('off')
    plt.tight_layout()
    plt.savefig(path + '/predicted.png')
def hyperspectral(image):
    img = sp.open_image(image)
    view = sp.imshow(img, (4, 3, 2))
    sp.save_rgb('false_color.jpg', img, (4, 3, 2))
    print(img.shape)
    print(view)
    print(img)
    red = img[:, :, 2]
    nir = img[:, :, 3]
    ndvi = (nir - red) / (nir + red + 0.00001)
    sp.imshow(ndvi)
    sp.save_rgb('ndvi.jpg', ndvi)
    sp.imshow(img, (6, 6, 0))
def mapping(model):
    X, y = loadTiff()
    X, _scaler = standartizeData(X)
    height = y.shape[0]
    width = y.shape[1]
    PATCH_SIZE = windowSize
    outputs = np.zeros((height, width))
    time_start = time.time()
    for i in range(height - PATCH_SIZE + 1):
        # print(i / (height - PATCH_SIZE + 1))
        patch1 = Patch(X, 1, 1, PATCH_SIZE)
        pred_line = np.zeros((width - PATCH_SIZE + 1, patch1.shape[0], patch1.shape[1], patch1.shape[2], 1))
        for j in range(width - PATCH_SIZE + 1):
            target = int(y[i + PATCH_SIZE // 2, j + PATCH_SIZE // 2])
            # Should unlabeled regions be predicted as well?
            # if target == 0:
            #     continue
            # else:
            image_patch = Patch(X, i, j, PATCH_SIZE)
            # print(image_patch.shape)
            X_test_image = image_patch.reshape(1, image_patch.shape[0], image_patch.shape[1],
                                               image_patch.shape[2], 1).astype('float32')
            pred_line[j, :, :, :, :] = X_test_image
        prediction = model.predict_classes(pred_line)
        # print(prediction)
        outputs[i + PATCH_SIZE // 2][PATCH_SIZE // 2:width - PATCH_SIZE // 2] = prediction + 1
    end_time = time.time()
    print("Prediction Time", end_time - time_start)

    ground_truth = spectral.imshow(classes=y, figsize=(5, 5))
    spectral.save_rgb("ground_truth.png", y, colors=spectral.spy_colors)
    predict_image = spectral.imshow(classes=outputs.astype(int), figsize=(5, 5))
    results_name = ('3D' + 'INSize' + str(windowSize) +
                    'testRatio' + str(testRatio) + 'kdepth' + str(kdepth) +
                    'vol_num' + str(vol_num) + '.png')
    if is_1d:
        results_name = ('3D-1d' + 'INSize' + str(windowSize) +
                        'testRatio' + str(testRatio) + 'kdepth' + str(kdepth) +
                        'vol_num' + str(vol_num) + '.png')
    spectral.save_rgb("results/" + results_name, outputs.astype(int), colors=spectral.spy_colors)
def __init__(self, image, fcc=True):
    """
    Plots the image; make sure the image has been loaded into memory with img.load_image().
    fcc=True displays bands (98, 56, 36) as a false color composite.
    """
    try:
        if fcc == False:
            imshow(image)
        else:
            imshow(image, (98, 56, 36))
    except:
        print('Error: load the image first and try again.')
def draw(GT_Label, ES_Label):
    fig = plt.figure(figsize=(12, 6))
    p = plt.subplot(1, 2, 1)
    v = spy.imshow(classes=GT_Label, fignum=fig.number)
    p.set_title('Ground Truth')
    p.set_xticklabels([])
    p.set_yticklabels([])

    p = plt.subplot(1, 2, 2)
    v = spy.imshow(classes=ES_Label * (GT_Label != 0), fignum=fig.number)
    p.set_title('Classification Map')
    p.set_xticklabels([])
    p.set_yticklabels([])
def open_file():
    open_file.has_been_called = True
    filedialog.askopenfilename.has_been_called = True
    file_name = filedialog.askopenfilename(initialdir=os.path.expanduser("/"),
                                           filetypes=(("ENVI", "*.envi"), ("All files", "*")))
    if str(file_name).endswith(".envi"):
        img = envi.open(file_name + ".hdr", file_name + ".envi")
    else:
        img = envi.open(file_name + ".hdr", file_name)
    file_save = file_name.split('/')
    file_save = file_save[:-1]
    save_dir = ""
    for string in file_save:
        save_dir = save_dir + string + "/"
    print(save_dir)
    filedialog.askopenfilename.has_been_called = False
    imshow.has_been_called = True
    imshow(img, bands=(55, 32, 20), aspect=0.45, stretch=0.25)
    print(img)
    print(file_name)

    noisy_bands_info = hy.noise_removal(file_name, min_threshold=0, max_threshold=0.55)
    noisy_bands_info.reflectance_plot()
    print("--------- List of noisy bands ---------")
    x = noisy_bands_info.show_noisy_bands_with_min_max()
    print(len(x))
    for values in x:
        print(values)
    nb = noisy_bands_info.show_noisy_bands()

    pre = hy.preprocessing(img_path=file_name.split(".")[0] + ".hdr",
                           save_directory=save_dir,
                           available_memory_gb=8)
    pre.perform(ndvi_threshold=125, data_ignore_value=-9999.0, NIR=90, RED=55,
                min_threshold=0, max_threshold=0.55, noisy_bands=nb)
    file2_header = file_name + "_part_1"
    pre_image = envi.open(file2_header + ".hdr", file2_header)
    imshow(pre_image, bands=(55, 32, 20), aspect=0.45, stretch=0.25)
    imshow.has_been_called = True
    print(pre_image)
    if imshow.has_been_called == False and filedialog.askopenfilename.has_been_called == False:
        open_file.has_been_called = False
def Draw_Classification_Map(label, name: str, scale: float = 4.0, dpi: int = 400):
    """
    Draw a classification map and save it to the given path.
    :param label: classification label, 2D
    :param name: saving path and file name
    :param scale: scale of the image; if 1, the saved size equals the label size
    :param dpi: the default is usually fine
    :return: None
    """
    fig, ax = plt.subplots()
    numlabel = np.array(label)
    v = spy.imshow(classes=numlabel.astype(np.int16), fignum=fig.number)
    ax.set_axis_off()
    ax.xaxis.set_visible(False)
    ax.yaxis.set_visible(False)
    fig.set_size_inches(label.shape[1] * scale / dpi, label.shape[0] * scale / dpi)
    foo_fig = plt.gcf()  # 'get current figure'
    plt.gca().xaxis.set_major_locator(plt.NullLocator())
    plt.gca().yaxis.set_major_locator(plt.NullLocator())
    plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
    foo_fig.savefig(name + '.png', format='png', transparent=True, dpi=dpi, pad_inches=0)
def getClassificationImages(model, X, y, filename, PATCH_SIZE=5, isSave=True, isShow=False):
    height = y.shape[0]
    width = y.shape[1]
    outputs = np.zeros((height, width))
    for i in range(height - PATCH_SIZE + 1):
        for j in range(width - PATCH_SIZE + 1):
            target = int(y[i + int(PATCH_SIZE / 2), j + int(PATCH_SIZE / 2)])
            if target == 0:
                continue
            else:
                image_patch = Patch(X, i, j)
                X_test_image = image_patch.reshape(
                    1, image_patch.shape[2], image_patch.shape[0],
                    image_patch.shape[1]).astype('float32')
                prediction = model.predict_classes(X_test_image)
                outputs[i + int(PATCH_SIZE / 2)][j + int(PATCH_SIZE / 2)] = prediction + 1
    if isSave == True:
        spectral.save_rgb(filename, data=outputs.astype(int), colors=spectral.spy_colors)
    if isShow == True:
        predict_image = spectral.imshow(classes=outputs.astype(int), figsize=(5, 5))
def plot_band(data, band, title):
    """Draw a specific band of a datacube.

    Args:
        data (ndarray): datacube
        band (int): band selection
        title (str): title
    """
    spectral.imshow(
        data=data,
        bands=(band, ),
        classes=None,
        source=None,
        colors=None,
        figsize=None,
        fignum=None,
        title=title,
    )
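# A short usage sketch for plot_band above. The header path 'scene.hdr' and the band
# index are placeholders; spectral.open_image can read an ENVI image via its header.
import spectral

cube = spectral.open_image('scene.hdr').load()  # load the full cube into memory
plot_band(cube, band=42, title='Band 42')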
def open_band():
    file_name = filedialog.askopenfilename(initialdir=os.path.expanduser("/"),
                                           filetypes=(("ENVI", "*.envi"), ("All files", "*")))
    if str(file_name).endswith(".envi"):
        img = envi.open(file_name + ".hdr", file_name + ".envi")
    else:
        img = envi.open(file_name + ".hdr", file_name)

    def ret(x, y, z, s, a):
        return x, y, z, s, a

    x, y, z, stretch, aspect = ret(int(input("x: ")), int(input("y: ")), int(input("z: ")),
                                   int(input("stretch:")), int(input("aspect: ")))
    open_band.has_been_called = True
    imshow(img, bands=(x, y, z), aspect=aspect, stretch=stretch)
    print(img)
def view_clz_map_spyversion4single_img(self, gt, y_test_index, y_predicted, save_path=None,
                                       show_error=False, show_axis=False):
    """
    View HSI classification results.
    :param gt: ground-truth label map
    :param y_test_index: test indices, excluding the 0th (background) class
    :param y_predicted: predicted labels for the test samples
    :param show_error: if True, show only the misclassified pixels
    :return:
    """
    n_row, n_column = gt.shape
    gt_1d = gt.reshape(-1).copy()
    nonzero_index = gt_1d.nonzero()
    gt_corrected = gt_1d[nonzero_index]
    if show_error:
        t = y_predicted.copy()
        correct_index = np.nonzero(y_predicted == gt_corrected[y_test_index])
        t[correct_index] = 0  # keep only the errors
        gt_corrected[:] = 0
        gt_corrected[y_test_index] = t
        gt_1d[nonzero_index] = t
    else:
        gt_corrected[y_test_index] = y_predicted
        gt_1d[nonzero_index] = gt_corrected
    gt_map = gt_1d.reshape((n_row, n_column)).astype('uint8')
    spy.imshow(classes=gt_map)
    if save_path is not None:
        import matplotlib.pyplot as plt
        spy.save_rgb('temp.png', gt_map, colors=spy.spy_colors)
        if show_axis:
            plt.savefig(save_path, format='eps', bbox_inches='tight')
        else:
            plt.axis('off')
            plt.savefig(save_path, format='eps', bbox_inches='tight')
        # self.classification_map(gt_map, gt, 24, save_path)
        print('the figure is saved in ', save_path)
def Comparison_draw(dataset):
    DATA_PATH1 = os.path.join(os.getcwd(), "image_show")
    DATA_PATH = os.path.join(DATA_PATH1, dataset)
    filename_list = os.listdir(DATA_PATH)
    h = []
    for filename in filename_list:
        if filename.startswith(dataset):
            h.append(filename)
    # new_h = sorted(h, key=lambda i: len(i), reverse=False)
    new_h = sorted(h, key=lambda x: os.path.getmtime(os.path.join(DATA_PATH, x)), reverse=False)
    # new_h = sorted(h, key=lambda x: time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(os.path.getctime(x))), reverse=False)
    GT_Label = scipy.io.loadmat(os.path.join(DATA_PATH, new_h[0]))[dataset + "_gt"]

    fig = plt.figure(figsize=(12, 12))
    p = plt.subplot(3, 4, 1)
    v = spy.imshow(classes=GT_Label, fignum=fig.number)
    p.set_title('Ground Truth')
    p.set_xticklabels([])
    p.set_yticklabels([])

    for i in range(len(new_h) - 1):
        file_name = new_h[i + 1]
        file_name_split = file_name.split('_')
        ES_Label = scipy.io.loadmat(os.path.join(DATA_PATH, file_name))["Seg_Map"]
        title = file_name_split[1] + "+" + file_name_split[2]
        seg_acc = float('%.4f' % float(file_name_split[-2]))
        p = plt.subplot(3, 4, i + 2)
        v = spy.imshow(classes=ES_Label * (GT_Label != 0), fignum=fig.number)
        p.set_title(title + "(" + '%.2f' % (100 * seg_acc) + "%" + ")")
        p.set_xticklabels([])
        p.set_yticklabels([])
def overlay_on_raw_img(self, path_to_raw_img):
    """
    Args:
        path_to_raw_img (string): Path to the actual image used for classification.
            Use %matplotlib tk to open the image in a separate window.

    Returns:
        None: Opens the classification map on top of the original image.
    """
    raw_img = read_image(path_to_raw_img)
    v = imshow(raw_img.sub_image()[:, :, :],
               classes=self._helper(np.copy(self._classified_img)),
               colors=self._color_rgb)
def _load_preprocessed_test():
    base_path_dataset = "/media/disk2/datasets/anna/Messungen/Current_UV_Gerste"
    base_path_dataset_parsed = "/media/disk2/datasets/anna/Messungen/Current_UV_Gerste/parsed_data"
    current_path = os.path.join(base_path_dataset_parsed, "*.p")
    filenames = sorted(list(set(glob.glob(current_path))))
    bbox_obj_dict = pickle.load(open(filenames[0], "rb"))
    """
    bbox_obj_dict["id"] = "_"
    bbox_obj_dict["label_genotype"] = current_label_genotype_
    bbox_obj_dict["label_dai"] = current_label_dai
    bbox_obj_dict["label_inoculated"] = label_inoculated
    bbox_obj_dict["label_obj"] = {"label": obj_label, "idx": obj_label_idx}
    bbox_obj_dict["label_running"] = filename_idx
    bbox_obj_dict["filename"] = os.path.basename(os.path.dirname(fp_img))
    [(min_y, min_x), (max_y, max_x)] = bbox_pixels[obj_label_idx]
    bbox_obj_dict["bbox"] = [(min_y, min_x), (max_y, max_x)]
    bbox_obj_dict["mask"] = labeledimg[min_x:max_x, min_y:max_y]
    bbox_obj_dict["image"] = img[min_x:max_x, min_y:max_y]
    """
    [(min_y, min_x), (max_y, max_x)] = bbox_obj_dict["bbox"]
    # TODO load HS and extract labeled
    img = spectral.open_image(
        os.path.join(
            os.path.join(base_path_dataset, "{}dai".format(bbox_obj_dict["label_dai"])),
            bbox_obj_dict["filename"] + "/data.hdr"))
    classes = np.zeros_like(img[:, :, 0]).squeeze()
    print([(min_y, min_x), (max_y, max_x)])
    print(classes.shape)
    classes[min_x:max_x, min_y:max_y] = bbox_obj_dict["mask"]
    view = spectral.imshow(img, classes=classes)
    view.set_display_mode('overlay')
    view.class_alpha = 0.5
    input("")
    print(bbox_obj_dict["bbox"])
    print(bbox_obj_dict["filename"])
    """plt.figure(figsize=(20, 10))
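# The overlay pattern used above, reduced to its essentials: display a cube with a
# class mask drawn semi-transparently on top. 'data.hdr' and the all-zero mask are
# placeholders for whatever image and labels are at hand.
import numpy as np
import spectral

img = spectral.open_image('data.hdr')
mask = np.zeros(img.shape[:2], dtype=int)   # 0 = background, >0 = class labels
view = spectral.imshow(img, classes=mask)
view.set_display_mode('overlay')
view.class_alpha = 0.5                      # blend factor for the class overlay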
            image_patch = patch_margin(i, j)
            prediction = sess.run(softmax, feed_dict={
                x_placeholder: image_patch,
                is_training: False
            })
            outputs[i, j] = np.argmax(prediction) + 1
        print('Now progress: %.2f ' % (float(i) / height * 100) + '%')
    return outputs


# Calculate the mean of each channel for normalization
MEAN_ARRAY = np.ndarray(shape=(Utils.bands, ), dtype=float)
for i in range(Utils.bands):
    MEAN_ARRAY[i] = np.mean(input_image[:, :, i])

# Prediction & show image
predicted_image = decoder()

# Save result
ground_truth = spectral.imshow(classes=output_image, figsize=(5, 5))
plt.savefig('gt.png')
ground_truth_mirror = spectral.imshow(classes=mirror(output_image), figsize=(5, 5))
plt.savefig('gt_mirror.png')
predict_image = spectral.imshow(classes=predicted_image.astype(int), figsize=(5, 5))
plt.savefig('predict.png')
from sklearn import metrics
from matplotlib import pyplot as plt
import numpy as np
import spectral
import joblib
from scipy.io import loadmat

input_image = loadmat(r'H:\data\Pavia.mat')['pavia']
output_image = loadmat(r'H:\data\Pavia_gt.mat')['pavia_gt']
testdata = np.genfromtxt(r'H:\data\pavia.csv', delimiter=',')
data_test = testdata[:, :-1]
label_test = testdata[:, -1]

clf = joblib.load('pavia.m')
predict_label = clf.predict(data_test)
accuracy = metrics.accuracy_score(label_test, predict_label) * 100
kappa = metrics.cohen_kappa_score(label_test, predict_label)
print(accuracy)
print(kappa)

# Scatter the predicted labels back onto the ground-truth layout.
new_show = np.zeros((output_image.shape[0], output_image.shape[1]))
k = 0
for i in range(output_image.shape[0]):
    for j in range(output_image.shape[1]):
        if output_image[i][j] != 0:
            new_show[i][j] = predict_label[k]
            k += 1

ground_truth = spectral.imshow(classes=output_image.astype(int), figsize=(5, 5))
ground_predict = spectral.imshow(classes=new_show.astype(int), figsize=(5, 5))
plt.show()
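# The double loop above can be replaced by a boolean-mask assignment, assuming
# predict_label is ordered row-major over the nonzero ground-truth pixels
# (which is exactly the order the loop visits them in):
new_show = np.zeros(output_image.shape)
new_show[output_image != 0] = predict_label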
# Output image
envi.save_image(config['log_dir'] + "ps" + str(patch_size) + ".hdr", raw,
                dtype='uint8', force=True, interleave='BSQ', ext='raw')
output = Decoder.output_image(input, raw)
# view = imshow(output)
# plt.savefig(config['log_dir'] + 'img/' + str(patch_size) + '.png')

# Image with legend
labelPatches = [patches.Patch(color=input.color_scale.colorTics[x + 1] / 255.,
                              label=input.class_names[x])
                for x in range(input.num_classes)]
fig = plt.figure(2)
lgd = plt.legend(handles=labelPatches, ncol=1, fontsize='small', loc=2, bbox_to_anchor=(1, 1))
imshow(output, fignum=2)
# fig.savefig(config['log_dir'] + 'img/' + str(patch_size) + '_lgd.png',
#             bbox_extra_artists=(lgd,), bbox_inches='tight')
# save_rgb('ps' + str(patch_size) + '.png', output, format='png')

file.close()
# Median filter
img_median = cv2.medianBlur(data_IN, 5)

# Bilateral filter
# 9 --- diameter of the filtering neighborhood
# The last two numbers: sigma of the spatial Gaussian, sigma of the gray-value similarity
# data_IN.convertTo(data_IN, cv2.CV_32FC3, 1.0 / 255.0)
# cv2.error: OpenCV(3.4.3) D:\Build\OpenCV\opencv-3.4.3\modules\imgproc\src\smooth.cpp:5809: error: (-215:Assertion failed) (src.type() == CV_32FC1 || src.type() == CV_32FC3) && src.data != dst.data in function 'cv::bilateralFilter_32f'
img_bilater = cv2.bilateralFilter(data_IN, 9, 75, 75)

# Show the different images
titles = ['srcImg', 'mean', 'Gaussian', 'median', 'bilateral']
imgs = [data_IN, img_mean, img_Guassian, img_median, img_bilater]
for i in range(5):
    # plt.subplot(2, 3, i + 1)
    spectral.imshow(imgs[i])
    plt.savefig('image' + str(i) + '.png')
    # plt.imshow(imgs[i])
    # plt.title(titles[i])
plt.show()

# input_image = spectral.imshow(data_IN)
# plt.savefig('image.png')
#
# ground_truth = spectral.imshow(classes=gt_IN)
# plt.savefig('gt.png')
import numpy as np
# import tensorflow as tf
import pickle as pkl
import time
from random import shuffle
import pandas as pd
import spectral
import matplotlib.pyplot as plt
import pylab as pl
import scipy
# import seaborn as sns
from collections import Counter
# import Spatial_dataset as input_data
# import patch_size
import os
import scipy.io as io

DATA_PATH = os.path.join(os.getcwd(), "Data")
input_image = scipy.io.loadmat('salinas_in.mat')
output_image = scipy.io.loadmat('salinas_gt.mat')
model_name = 'sample'
# input_image = np.rot90(input_image)
# output_image = np.rot90(output_image)
height = output_image.shape[0]
width = output_image.shape[1]
ground_truth = spectral.imshow(classes=output_image, figsize=(5, 5))
import sys

from spectral import open_image, imshow

if __name__ == "__main__":
    img = open_image(sys.argv[1]).load()
    view = imshow(img, (79, 69, 57))
    x = input()
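# A possible follow-up to the minimal viewer above: write the same band combination
# to a PNG so it can be inspected without a GUI. The file name 'quicklook.png' is an
# arbitrary choice for this sketch.
import sys
from spectral import open_image, save_rgb

img = open_image(sys.argv[1]).load()
save_rgb('quicklook.png', img, (79, 69, 57))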
height = y.shape[0]
width = y.shape[1]
PATCH_SIZE = 5
numComponents = 30
outputs = np.zeros((height, width))
for i in range(height - PATCH_SIZE + 1):
    for j in range(width - PATCH_SIZE + 1):
        target = int(y[i + PATCH_SIZE // 2, j + PATCH_SIZE // 2])
        if target == 0:
            continue
        else:
            image_patch = Patch(X, i, j)
            # print(image_patch.shape)
            X_test_image = image_patch.reshape(
                1, image_patch.shape[2], image_patch.shape[0],
                image_patch.shape[1]).astype('float32')
            prediction = model.predict_classes(X_test_image)
            outputs[i + PATCH_SIZE // 2][j + PATCH_SIZE // 2] = prediction + 1

ground_truth = spectral.imshow(classes=y, figsize=(5, 5))
plt.show()
plt.savefig('./plot/ground_truth.png')
predict_image = spectral.imshow(classes=outputs.astype(int), figsize=(5, 5))
plt.show()
plt.savefig('./plot/predict_image.png')
import matplotlib.pyplot as plt
import numpy as np
import spectral as spy
from scipy.io import loadmat

if __name__ == '__main__':
    data = loadmat('Indian_pines_corrected.mat')
    data = data['indian_pines_corrected']
    gt = loadmat('Indian_pines_gt.mat')
    gt = gt['indian_pines_gt']

    ntopics = 17  # number of topics to generate
    (kmeans_classes, c) = spy.kmeans(data, nclusters=ntopics, max_iterations=100)
    kmeans_classes += 1

    fig = plt.figure(figsize=(12, 6))
    p = plt.subplot(2, 1, 1)
    v = spy.imshow(classes=gt, fignum=fig.number)
    p.set_title('Ground Truth')
    p = plt.subplot(2, 1, 2)
    v = spy.imshow(classes=kmeans_classes, fignum=fig.number)
    p.set_title('k-means classes')
    plt.show()
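# A small follow-up to the k-means example above (reusing its gt and kmeans_classes
# arrays): persist both class maps with spectral's default class colors so they can
# be compared outside an interactive session. The output file names are arbitrary.
spy.save_rgb('indian_pines_gt.png', gt, colors=spy.spy_colors)
spy.save_rgb('indian_pines_kmeans.png', kmeans_classes, colors=spy.spy_colors)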
# raw = np.pad(raw, ((0, 0), (0, 270)), 'constant', constant_values=0)

# Output image
envi.save_image(log_dir + "ps" + str(patch_size) + ".hdr", raw,
                dtype='uint8', force=True, interleave='BSQ', ext='raw')

output = Decoder.output_image(input, raw)
view = imshow(output)
plt.savefig(log_dir + str(patch_size) + '.png')

# Image with legend
# labelPatches = [patches.Patch(color=input.color_scale.colorTics[x + 1] / 255., label=input.class_names[x]) for x in
#                 range(input.num_classes)]
# fig = plt.figure(2)
# lgd = plt.legend(handles=labelPatches, ncol=1, fontsize='small', loc=2, bbox_to_anchor=(1, 1))
# imshow(output, fignum=2)
# fig.savefig(config['log_dir'] + 'img/' + str(patch_size) + '_lgd.png',
#             bbox_extra_artists=(lgd,), bbox_inches='tight')

# save_rgb('ps' + str(patch_size) + '.png', output, format='png')
    predict = np.reshape(predict, (21025,))
    classi_report = classification_report(gt, predict, target_names=target_names)
    cf_mat = confusion_matrix(gt, predict)
    return classi_report, cf_mat


if __name__ == "__main__":
    testdatasets = HsiDataset("./data", type='out', oversampling=False, removeZeroLabels=False)
    testdataloader = DataLoader(testdatasets, batch_size=1, shuffle=False)
    model = HsiNet(num_class=17)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    ckpt_file = "./ckpt/best_model_zero.pt"
    model_dict = torch.load(ckpt_file, map_location=device)
    model.load_state_dict(model_dict['model_state_dict'])
    model.eval()

    predict_label = output(model, testdataloader)
    gt_label = loadmat(os.path.join('./data/Indian_pines_gt.mat'))['indian_pines_gt']
    gt_img = spectral.imshow(classes=gt_label, figsize=(5, 5))
    plt.show()
    predict_img = spectral.imshow(classes=predict_label, figsize=(5, 5))
    plt.show()

    classi_report, cf_mat = reports(gt_label, predict_label)
    print(classi_report)
    plt.figure(figsize=(15, 15))
    # plot_confusion_matrix(cf_mat, classes=target_names, title="Confusion matrix")
    # plt.show()
    plt.show()
    n = train_indices[k]
    # print(n)
    # print(new_show.shape[1])
    i = int(n / new_show.shape[1])
    j = n - i * new_show.shape[1]
    new_show[i][j] = gt_train[k] + 1

color = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1],
                  [1, 0, 0], [1, 0, 1], [1, 1, 0], [0.5, 0.5, 1],
                  [0.65, 0.35, 1], [0.75, 0.5, 0.75], [0.75, 1, 0.5],
                  [0.5, 1, 0.65], [0.65, 0.65, 0], [0.75, 1, 0.65],
                  [0, 0, 0.5], [0, 1, 0.75], [0.5, 0.75, 1]])
color = color * 255

gt = spectral.imshow(classes=gt_IN.astype(int), figsize=(9, 9), colors=color)
bar = pyplot.colorbar()
bar.set_ticks(np.linspace(0, 16, 17))
bar.set_ticklabels(
    ('', 'Alfalfa', 'Corn-notill', 'Corn-mintill', 'Corn', 'Grass-pasture',
     'Grass-tree', 'Grass-pasture-mowed', 'Hay-windrowed', 'Oats',
     'Soybean-notill', 'Soybean-mintill', 'Soybean-clean', 'Wheat', 'Woods',
     'Buildings-Grass-Trees-Drives', 'Stone-Steel-Towers'))
pyplot.show()

pre = spectral.imshow(classes=new_show.astype(int), figsize=(9, 9), colors=color)
def overlay_on_raw_img(self, path_to_raw_img):
    raw_img = read_image(path_to_raw_img)
    v = imshow(raw_img.sub_image()[:, :, :],
               classes=self._helper(np.copy(self._classified_img)),
               colors=self._color_rgb)
def mode_filter(img):
    return ndimage.generic_filter(img, modal, size=5)


def output_image(input, output):
    return get_rgb(output, color_scale=input.color_scale)


labelPatches = [
    patches.Patch(color=input.color_scale.colorTics[x + 1] / 255., label=input.class_names[x])
    for x in range(input.num_classes)
]

view = output_image(input, img)
imshow(view)

print("---------------")
print("Modal filter")
filt_img = img
for n in range(5):
    print("---------------")
    print("Iteration " + str(n))
    filt_img = mode_filter(filt_img)
    view = output_image(input, filt_img)
    fig = plt.figure(2)
    lgd = plt.legend(handles=labelPatches, ncol=1, fontsize='x-small',