Example 1
    def generateCompositeProducts(self, dataset, out_path):
        """
        Generate composite RGB products and decorrelation-stretched variants, writing them under out_path.
        """

        # create product path
        product_path = os.path.join(out_path, 'composite')
        if not os.path.exists(product_path):
            os.makedirs(product_path, 0o755)

        # get channel images pertaining to product
        print('Creating composite products: {}'.format(product_path))
        for product in self._products['composite']:

            rgb = []
            for index in product['channels']:
                rgb.append(self.getChannelData(dataset, index))

            # save rgb image
            rgb_pathname = os.path.join(product_path, product['name'] + '.jpg')
            save_rgb(rgb_pathname, np.dstack(rgb), stretch=(0.02, 0.98))

            # save decorrelation stretch version of rgb image
            dcs_pathname = rgb_pathname.replace('.jpg', '-dcs.jpg')
            execute(
                os.path.join(os.path.dirname(sys.path[0]), '../bin/dstretch'),
                [rgb_pathname, dcs_pathname])

            # copy dcs image into geotiff
            self.writeGeoImage(dcs_pathname, dataset['srs'])

        return
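A minimal, self-contained sketch of the same save pattern, using random arrays in place of real channel data (the array sizes and output name are illustrative only):

import numpy as np
from spectral import save_rgb

# three placeholder channel images standing in for getChannelData() output
red, green, blue = (np.random.rand(256, 256) for _ in range(3))

rgb = np.dstack([red, green, blue])                        # (rows, cols, 3)
save_rgb('composite_demo.jpg', rgb, stretch=(0.02, 0.98))  # 2%/98% percentile stretch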
Example 2
def getClassificationImages(model,
                            X,
                            y,
                            filename,
                            PATCH_SIZE=5,
                            isSave=True,
                            isShow=False):
    height = y.shape[0]
    width = y.shape[1]
    outputs = np.zeros((height, width))
    for i in range(height - PATCH_SIZE + 1):
        for j in range(width - PATCH_SIZE + 1):
            target = int(y[i + int(PATCH_SIZE / 2), j + int(PATCH_SIZE / 2)])
            if target == 0:
                continue
            else:
                image_patch = Patch(X, i, j)
                X_test_image = image_patch.reshape(
                    1, image_patch.shape[2], image_patch.shape[0],
                    image_patch.shape[1]).astype('float32')
                prediction = (model.predict_classes(X_test_image))
                outputs[i + int(PATCH_SIZE / 2)][j + int(PATCH_SIZE /
                                                         2)] = prediction + 1
    if isSave:
        spectral.save_rgb(filename,
                          data=outputs.astype(int),
                          colors=spectral.spy_colors)
    if isShow:
        predict_image = spectral.imshow(classes=outputs.astype(int),
                                        figsize=(5, 5))
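Note that Sequential.predict_classes (used above) has been removed from recent Keras/TensorFlow releases; if the model outputs per-class probabilities, a small drop-in helper such as the following sketch gives the same result:

import numpy as np

def predict_classes(model, x):
    # assumes model.predict(x) returns per-class probabilities/scores
    return np.argmax(model.predict(x), axis=-1)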
Example 3
def avg_accuracy():
    total = 0
    loss = 0
    model = load_model(os.getcwd()+'/mymodel.h5')
    data_path = os.path.join(os.getcwd(),'test_sar')
    label_path = os.path.join(os.getcwd(),'test_optical')
    input_images = os.listdir(data_path)
    label_images = os.listdir(label_path)
    i = 0
    for image in input_images:
        #print(image.type)
        X_test = imageio.imread(os.getcwd()+'/test_sar/{}'.format(image))
        y_test = imageio.imread(os.getcwd()+'/test_optical/{}'.format(image))
        #print(y)
        y_test = label_creator(y_test)

        X_test,y_test= createPatches(X_test, y_test, windowSize=5)

        #X_test  = np.reshape(X_test, (X_test.shape[0], X_test.shape[3], X_test.shape[1], X_test.shape[2]))
        y_test = to_categorical(y_test)

        os.chdir(os.getcwd())

        classification, confusion, Test_loss, Test_accuracy = reports(X_test,y_test)
        
        Y_pred = model.predict(X_test)
        y_pred = np.argmax(Y_pred, axis=1)

        img = np.zeros((256,256))
        
        j = 0
        for col in range(256):
            for row in range(256):
                img[col][row] = (y_pred[j])
                j+=1
                
        # build an 8-bit image from the class map and save it directly
        res = Image.fromarray(img.astype(np.uint8))
        res.save(os.path.join(os.getcwd() + '/predicts', '{}'.format(image)))
            
        #predict_image = spectral.imshow(classes = img.astype(int),figsize =(5,5))
        spectral.save_rgb(os.getcwd()+'/pred/{}'.format(image),img)
        
        if i==0:
            acc = Test_accuracy
            loss = Test_loss
        acc = float((acc+Test_accuracy)/2)
        loss = float((loss+Test_loss)/2)
        print(i,'--------------')
        print(image)
        print('Average Test Accuracy: ' + str(acc))
        print('Average Test Loss: ' + str(loss))
        i+=1
    #acc = float(total/200)
    #avg_Loss = float(loss/200)
    return acc,loss
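One caveat on avg_accuracy: because acc and loss are halved against the newest score on every iteration, they are recency-weighted running values rather than true means. A plain arithmetic mean over per-image scores (the values below are made up) would look like this:

accuracies = [0.91, 0.88, 0.93]            # per-image Test_accuracy values
losses = [0.30, 0.42, 0.27]                # per-image Test_loss values
avg_acc = sum(accuracies) / len(accuracies)
avg_loss = sum(losses) / len(losses)
print('Average Test Accuracy:', avg_acc)
print('Average Test Loss:', avg_loss)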
Example 4
def process2(index1,index2,index3):
    global m
    l=[]
    print(b[index1],b[index2],b[index3])
    l.append(b[index1])
    l.append(b[index2])
    l.append(b[index3])
    m+=1
    Raster_data = HSI[:,:,:]
    hsi_rgb_file_name = 'G:/Hyperspectral/slice/D7_triple_band/d7_{}_{}_{}.jpg'.format(b[index1],b[index2],b[index3])  # name of the RGB image file to save
    spectral.save_rgb(hsi_rgb_file_name, Raster_data,l)
    l.clear()
Example 5
def select_roi(input_path, output_path, output_path_rgb, rgb_bands):
    img = envi.open(input_path)

    # Make RGB image
    spectral.save_rgb(output_path_rgb, img, bands=rgb_bands)

    # Select ROI with cv2
    im = cv2.imread(output_path_rgb)
    from_center = False  # We use this variable for parameter clarification in cv2.selectROI()
    r = cv2.selectROI("Image", im, from_center)

    return input_path, output_path, output_path_rgb, rgb_bands, r
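A usage sketch for select_roi (file names are placeholders): the last returned value is the (x, y, w, h) rectangle from cv2.selectROI, which can be used to crop the loaded cube spatially:

import spectral.io.envi as envi

in_hdr, out_hdr, rgb_path, bands, (x, y, w, h) = select_roi(
    'scene.hdr', 'scene_roi.hdr', 'scene_rgb.jpg', rgb_bands=(29, 19, 9))

cube = envi.open(in_hdr).load()          # full image as an in-memory array
roi_cube = cube[y:y + h, x:x + w, :]     # the interactively selected region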
Example 6
def process1(index1,index2):
    global n
    l=[]
    print(a[index1],a[index2])
    l.append(a[index1])
    l.append(a[index2])
    l.append(0)
    n+=1
    Raster_data = HSI[:,:,:]
    hsi_rgb_file_name = 'G:/Hyperspectral/slice/D7_double_band/d7_{}_{}.jpg'.format(a[index1],a[index2])  # name of the RGB image file to save
    spectral.save_rgb(hsi_rgb_file_name, Raster_data,l)
    l.clear()
Example 7
def hyperspectral(image):
    img = sp.open_image(image)
    view = sp.imshow(img, (4, 3, 2))
    sp.save_rgb('false_color.jpg', img, (4, 3, 2))
    print(img.shape)
    print(view)
    print(img)
    red = img[:, :, 2]
    nir = img[:, :, 3]
    ndvi = ((nir - red) / (nir + red + 0.00001))
    sp.imshow(ndvi)
    sp.save_rgb('ndvi.jpg', ndvi)
    sp.imshow(img, (6, 6, 0))
Example 8
    def mapping(model):
        X, y = loadTiff()
        X, _sclaer = standartizeData(X)
        height = y.shape[0]
        width = y.shape[1]
        PATCH_SIZE = windowSize
        outputs = np.zeros((height, width))
        time_start = time.time()
        for i in range(height - PATCH_SIZE + 1):
            # print(i / (height - PATCH_SIZE + 1))

            patch1 = Patch(X, 1, 1, PATCH_SIZE)
            pred_line = np.zeros((width - PATCH_SIZE + 1, patch1.shape[0],
                                  patch1.shape[1], patch1.shape[2], 1))

            for j in range(width - PATCH_SIZE + 1):
                target = int(y[i + PATCH_SIZE // 2, j + PATCH_SIZE // 2])
                # Should the unlabeled regions be predicted as well?
                # if target == 0:
                #     continue
                # else:
                image_patch = Patch(X, i, j, PATCH_SIZE)

                # print (image_patch.shape)
                X_test_image = image_patch.reshape(1, image_patch.shape[0],
                                                   image_patch.shape[1],
                                                   image_patch.shape[2],
                                                   1).astype('float32')
                pred_line[j, :, :, :, :] = X_test_image

            prediction = model.predict_classes(pred_line)
            # print(prediction)
            outputs[i + PATCH_SIZE // 2][PATCH_SIZE // 2:width -
                                         PATCH_SIZE // 2] = prediction + 1
        end_time = time.time()
        print("Prediction Time", end_time - time_start)
        ground_truth = spectral.imshow(classes=y, figsize=(5, 5))
        spectral.save_rgb("ground_truth.png", y, colors=spectral.spy_colors)
        predict_image = spectral.imshow(classes=outputs.astype(int),
                                        figsize=(5, 5))
        results_name = '3D' + 'INSize' + str(windowSize) + \
                       'testRatio' + str(testRatio) + 'kdepth' + str(kdepth) + 'vol_num' + str(vol_num) + \
                       '.png'
        if is_1d:
            results_name = '3D-1d' + 'INSize' + str(windowSize) + \
                           'testRatio' + str(testRatio) + 'kdepth' + str(kdepth) + 'vol_num' + str(vol_num) + \
                           '.png'
        spectral.save_rgb("results/" + results_name,
                          outputs.astype(int),
                          colors=spectral.spy_colors)
Example 9
 def view_clz_map_spyversion4single_img(self,
                                        gt,
                                        y_test_index,
                                        y_predicted,
                                        save_path=None,
                                        show_error=False,
                                        show_axis=False):
     """
     view HSI classification results
     :param gt: 2-D ground-truth label map
     :param y_test_index: indices of the test samples (0th/background class excluded)
     :param y_predicted: predicted labels for the test samples
     :param show_error: if True, display only the misclassified pixels
     :return:
     """
     n_row, n_column = gt.shape
     gt_1d = gt.reshape(-1).copy()
     nonzero_index = gt_1d.nonzero()
     gt_corrected = gt_1d[nonzero_index]
     if show_error:
         t = y_predicted.copy()
         correct_index = np.nonzero(
             y_predicted == gt_corrected[y_test_index])
         t[correct_index] = 0  # leave error
         gt_corrected[:] = 0
         gt_corrected[y_test_index] = t
         gt_1d[nonzero_index] = t
     else:
         gt_corrected[y_test_index] = y_predicted
         gt_1d[nonzero_index] = gt_corrected
     gt_map = gt_1d.reshape((n_row, n_column)).astype('uint8')
     spy.imshow(classes=gt_map)
     if save_path is not None:
         import matplotlib.pyplot as plt
         spy.save_rgb('temp.png', gt_map, colors=spy.spy_colors)
         if show_axis:
             plt.savefig(save_path, format='eps', bbox_inches='tight')
         else:
             plt.axis('off')
             plt.savefig(save_path, format='eps', bbox_inches='tight')
         # self.classification_map(gt_map, gt, 24, save_path)
         print('the figure is saved in ', save_path)
Example 10
def classify(model_path='my_model.h5',
             data_path='data',
             ground_path='ground_truth.jpg',
             classification_path='classification.jpg',
             patch_size=5,
             numComponents=30):
    model = loadModel(path=model_path)
    X, y = loadIndianPinesData(data_path=data_path)

    outputs = classifyModel(model,
                            X,
                            y,
                            PATCH_SIZE=patch_size,
                            numComponents=numComponents)

    spectral.save_rgb(ground_path, y, colors=spectral.spy_colors)
    spectral.save_rgb(classification_path,
                      outputs.astype(int),
                      colors=spectral.spy_colors)

    return ground_path, classification_path
Example 11
def classify(datasetname, data_process_method=1, model_method=1):
    if datasetname == "Indian_pines":
        dataset = Indian_pines
        labelset = Indian_pines_gt
    elif datasetname == "PaviaU":
        dataset = PaviaU
        labelset = PaviaU_gt
    else:
        print("输入参数错误,程序即将退出")
        return
    row = labelset.shape[0]
    col = labelset.shape[1]
    # Data preprocessing
    new_data_set, training_data, test_data, training_label, test_label, feature_num, num_class = data_process(dataset, labelset, method=data_process_method)
    # Prediction
    predict_label,classes=set_model(datasetname, new_data_set,
                            training_data, test_data,
                            training_label, test_label,
                            num_class, feature_num, method=model_method)
    # Plot the result
    result = np.reshape(predict_label, (row, col))
    #result = result.astype(int)
    image = Image.fromarray(result)
    image.save(datasetname+"_predict.tif")
    print("预测结果已保存为:"+datasetname+"_predict.tif")
    sp.save_rgb(datasetname+"_predict.jpg", result, colors=sp.spy_colors)
    if datasetname=="Indian_pines":
        sp.save_rgb(datasetname+".jpg", Indian_modify_gt, colors=sp.spy_colors)
        unit.performence_get(datasetname+"_predict.tif",dataset_id=0)
    else:
        sp.save_rgb(datasetname+".jpg", Pavia_modify_gt, colors=sp.spy_colors)
        unit.performence_get(datasetname+"_predict.tif",dataset_id=1)
    print("预测效果可查看图片:"+datasetname+"_predict.jpg")
Example 12
def save_image(image_numpy, image_path, aspect_ratio=1.0):
    """Save a numpy image to the disk

    Parameters:
        image_numpy (numpy array) -- input numpy array
        image_path (str)          -- the path of the image
        aspect_ratio (float)      -- aspect ratio used when resizing 3-channel images
    """
    h, w, c = image_numpy.shape
    if c == 1:
        spectral.save_rgb(image_path,
                          image_numpy.astype(int),
                          colors=spectral.spy_colors)
    else:
        image_pil = Image.fromarray(image_numpy)

        if aspect_ratio > 1.0:
            image_pil = image_pil.resize((h, int(w * aspect_ratio)),
                                         Image.BICUBIC)
        if aspect_ratio < 1.0:
            image_pil = image_pil.resize((int(h / aspect_ratio), w),
                                         Image.BICUBIC)
        image_pil.save(image_path)
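A short usage sketch for save_image (the array is synthetic): a 3-channel uint8 array takes the PIL branch, and a non-unit aspect_ratio triggers the resize:

import numpy as np

fake_rgb = (np.random.rand(128, 128, 3) * 255).astype(np.uint8)
save_image(fake_rgb, 'demo_rgb.jpg', aspect_ratio=1.0)   # saved as-is
save_image(fake_rgb, 'demo_wide.jpg', aspect_ratio=1.5)  # resized before saving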
Example 13
    def generatePrincipalComponentProducts(self, dataset, out_path):
        """
        Generate principal-component RGB products and decorrelation-stretched variants, writing them under out_path.
        """

        # create product path
        product_path = os.path.join(out_path, 'pca')
        if not os.path.exists(product_path):
            os.makedirs(product_path, 0o755)

        # get channel images pertaining to product
        print('Creating principal component products: {}'.format(product_path))
        for product in self._products['pca']:

            channels = []
            for index in product['channels']:
                channels.append(self.getChannelData(dataset, index))

            img = np.dstack(channels)

            # compute pca transformation
            pc = principal_components(img)
            img_pc = pc.transform(img)

            # save rgb image
            rgb_pathname = os.path.join(product_path, product['name'] + '.jpg')
            save_rgb(rgb_pathname, img_pc[:, :, :3], stretch=(0.05, 0.95))

            # save decorrelation stretch version of rgb image
            dcs_pathname = rgb_pathname.replace('.jpg', '-dcs.jpg')
            execute(
                os.path.join(os.path.dirname(sys.path[0]), '../bin/dstretch'),
                [rgb_pathname, dcs_pathname])

            # copy dcs image into geotiff
            self.writeGeoImage(dcs_pathname, dataset['srs'])

        return
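A sketch of the SPy PCA calls used above, on a synthetic cube (array size and component count are illustrative): principal_components fits the rotation, and reduce() keeps only the leading components before transforming:

import numpy as np
from spectral import principal_components

cube = np.random.rand(64, 64, 10).astype('float32')    # fake 10-band image
pc = principal_components(cube)
img_pc = pc.transform(cube)                 # full PC rotation, all 10 bands
top3 = pc.reduce(num=3).transform(cube)     # keep only the first 3 components
print(img_pc.shape, top3.shape)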
Example 14
confusion_matrix_mss=metrics.confusion_matrix(gt_test[:-VAL_SIZE],pred_test)
print(confusion_matrix_mss)

average_acc=averageAccuracy.AA_andEachClassAccuracy(confusion_matrix_mss)
kappa_value=Kappa.kappa(confusion_matrix_mss)


print("training finished.")
print('Training Time: ', toc6 - tic6)
print('Test time:', toc7 - tic7)

print('each_acc', each_acc_res4)
print("aa", average_acc_res4)
print("oa", overall_acc)
print("kappa", kappa)

gt[test_indices[:-VAL_SIZE]] = pred_test + 1
gt = gt.reshape(145, 145)
save_rgb('IN-DBDA.jpg', gt, colors=spy_colors)
#
color = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [0.5, 0.5, 1], [0.65, 0.35, 1],
                  [0.75, 0.5, 0.75], [0.75, 1, 0.5], [0.5, 1, 0.65], [0.65, 0.65, 0], [0.75, 1, 0.65], [0, 0, 0.5], [0, 1, 0.75], [0.5, 0.75, 1]])
# color = color*255
newcmap = ListedColormap(color)

view = pyplot.imshow(gt.astype(int), cmap=newcmap)
bar = pyplot.colorbar()
bar.set_ticks(np.linspace(0, 16, 17))
bar.set_ticklabels(('', 'Alfalfa', 'Corn-notill', 'Corn-mintill', 'Corn', 'Grass-pasture', 'Grass-tree', 'Grass-pasture-mowed', 'Hay-windrowed',
                    'Oats', 'Soybean-notill', 'Soybean-mintill', 'Soybean-clean', 'Wheat', 'Woods', 'Buildings-Grass-Trees-Drives', 'Stone-Steel-Towers'))
pyplot.show()
Example 15
height = y.shape[0]
width = y.shape[1]
PATCH_SIZE = windowSize
X = padWithZeros(X, PATCH_SIZE // 2)
# calculate the predicted image
outputs = np.zeros((height, width))
for i in range(height):
    for j in range(width):
        target = int(y[i, j])
        if target == 0:
            continue
        else:
            image_patch = Patch(X, i, j)
            X_test_image = image_patch.reshape(
                1, image_patch.shape[0], image_patch.shape[1],
                image_patch.shape[2]).astype('float32')
            np.save('WholePic.npy', X_test_image)
            Datapath = 'WholePic.npy'
            Labelpath = 'WholePic.npy'
            prediction = predict(encoder, model_SVM, Datapath, Labelpath)
            prediction = int(prediction[0])
            outputs[i][j] = prediction + 1
ground_truth = spectral.imshow(classes=y, figsize=(7, 7))
predict_image = spectral.imshow(classes=outputs.astype(int), figsize=(7, 7))
spectral.save_rgb("predictions.jpg",
                  outputs.astype(int),
                  colors=spectral.spy_colors)
spectral.save_rgb(str(dataset) + "_ground_truth.jpg",
                  y,
                  colors=spectral.spy_colors)
torch.cuda.empty_cache()
Example 16
def patchify_folds(
    folds_folders: list,
    destination: str,
    patch_size: int = 64,
):
    """
    Performs image patching on the folds.

    Parameters
    ----------
    folds_folders: list
        The image folders of the folds
    destination: str
        Destination path to save the patches.
    patch_size: int, optional, default = 64
        The size of each patch (`patch_size * patch_size`)
    """

    # If already exists, delete entire tree
    if os.path.exists(destination):
        # Force delete
        shutil.rmtree(destination, ignore_errors=True)

    for fold_folder in folds_folders:
        # Grab name of current fold
        current_fold = fold_folder.split("\\")[1]
        # Search for folder contents
        fold_images = glob.glob(fold_folder + "/*")

        # Iterate over each image folder present in the current fold
        for image_folder in fold_images:
            # Get file name
            file_name = image_folder.split("\\")[-1].split(".hdr")[0]
            print(f"File {file_name}:")
            # Search folder contents
            image_folder_contents = sorted(glob.glob(image_folder + "/*"))
            # Get the .hdr file
            image_file = list(
                filter(lambda file: ".hdr" in file, image_folder_contents)
            )[0]
            # Load the .hdr file
            image = spectral.open_image(image_file).load()
            # Generate patches
            image_patches = _patchify(image, patch_size)
            print(
                f"\tGenerated {len(image_patches)} patches of size ({patch_size}, {patch_size})"
            )
            for i, patch in enumerate(image_patches):
                # Generate new-format name, e.g. "IMAGE_1_PATCH_2_NORMAL"
                new_image_name = f"{file_name}_PATCH_{i+1}"
                # Generate path
                path = f"{destination}/{current_fold}/{new_image_name}/"
                # Create folders
                os.makedirs(path)
                # Create final path
                final_path = f"{path}{new_image_name}"
                # Save the patch
                spectral.envi.save_image(final_path + ".hdr", patch, dtype=np.float32)
                # Save rgb version
                spectral.save_rgb(final_path + ".jpg", patch, [29, 19, 9])
            print("**" * 30)
Example 17
    def Rnn(self, best_parameters, data, X_train, y_train, X, accuracy):
        model = Sequential()
        model.add(
            LSTM(data[0][best_parameters[0]],
                 input_shape=self.input_shape,
                 return_sequences=False))
        model.add(Dropout(0.25))

        model.add(Dense(self.number_of_classes, activation='softmax'))
        model.compile(loss=self.loss, optimizer='adam', metrics=self.metrics)
        model.summary()

        model.fit(X_train,
                  np.array(y_train),
                  validation_data=(self.xx_val, np.array(self.yy_val)),
                  epochs=50,
                  verbose=1)

        prediction = model.predict(X)

        if not self.state.one_hot_encoding:
            self.y_test = [x + 1 for x in self.y_test]
            self.y_train = [x + 1 for x in self.y_train]

        if len(prediction.shape) == 2:
            predicted_gt_1 = np.argmax(prediction, axis=1)
            predicted_gt_1_list = list(predicted_gt_1)
            predicted_gt_1_list = [x + 1 for x in predicted_gt_1_list]
        else:
            predicted_gt_1_list = prediction

        for i in self.zero_data:
            predicted_gt_1_list.insert(i, 0)

        self.predicted_gt_1_list = np.array(predicted_gt_1_list).reshape(
            self.image_shape[0], self.image_shape[1])
        sp.save_rgb('predicted_gt.jpg',
                    self.predicted_gt_1_list,
                    colors=sp.spy_colors)

        self.predicted_gt = Image.open('predicted_gt.jpg')
        self.predicted_gt = self.predicted_gt.resize((250, 250),
                                                     Image.ANTIALIAS)
        self.predicted_gt = ImageTk.PhotoImage(self.predicted_gt)

        self.output_detail_frame = tk.Frame(self.output_frame)
        self.output_detail_frame.grid(row=0, column=0, sticky='nsew')

        self.accuracy = tk.Label(self.output_detail_frame,
                                 text='Accuracy: ' + str(accuracy),
                                 background='green')
        self.accuracy_0 = tk.Label(self.output_detail_frame,
                                   background='green')
        self.accuracy.grid(row=0, column=0, sticky='nsew')
        self.accuracy_0.grid(row=0, column=1, sticky='nsew')

        for count, i in enumerate(model.layers):
            name = i.output.name.split('/')[0].split('_')
            if len(name) > 2:
                name = name[0] + '_' + name[1]
            else:
                name = name[0]

            self.label1 = tk.Label(self.output_detail_frame, text=str(name))
            self.label2 = tk.Label(self.output_detail_frame,
                                   text=str(i.output.shape))

            self.label1.grid(row=count + 1, column=0, sticky='nsew')
            self.label2.grid(row=count + 1, column=1, sticky='nsew')

        self.output_detail_frame.columnconfigure((0, 1), weight=1)
Example 18
def clean(dataset_folder, masks_folder):
    """
    Cleans the dataset.
    """
    # Grab the image folders dirs
    image_folders = sorted(glob.glob(dataset_folder + "/*", ))

    # Grab the mask dirs
    mask_dirs = glob.glob(
        masks_folder + "/**/mask.*",
        recursive=True,
    )

    # Mask dirs contains .psd files. Use list comprehension to remove them. Sort them by number (ascending).
    # This is important so mask order matches image folder order
    mask_dirs = list(filter(lambda path: ".psd" not in path, mask_dirs))
    mask_dirs = sorted(
        mask_dirs, key=lambda file: int(file.split("\\")[-2].split("_")[1]))

    base_dir = "cleaned_dataset/"
    # Ensure that new directory is created
    if os.path.exists(base_dir):
        shutil.rmtree(base_dir, ignore_errors=True)

    # Lambda function to process mask (Make it binary)
    process_mask = lambda mask: np.where(mask > 0, 255, 0) / 255

    for i, folder in enumerate(image_folders):
        # Get the folder name
        current_folder_name = folder.split("\\")[-1]
        # Extract the label. 1 is 'pos' in folder name, 0 otherwise
        label = "INFECTED" if "pos" in current_folder_name else "NORMAL"
        # Build new folder name
        new_folder_name = f"{current_folder_name.split('_')[0]}_{label}"

        # If folder does not exist, create it
        os.makedirs(base_dir + new_folder_name)
        # Find the contents of the folder
        folder_files = glob.glob(folder + "/**")
        # Filter for the .hdr file
        image_file = list(filter(lambda file: ".hdr" in file, folder_files))[0]
        # Load the .hdr file
        image = spectral.open_image(image_file).load()
        # Load the mask and process it
        mask = process_mask(cv2.imread(mask_dirs[i], 0)).astype(np.int8)
        # Apply mask to image
        print(f"Image {current_folder_name}:")
        print(f"\tApplying binary mask ({mask_dirs[i]})")
        image = cv2.bitwise_and(image, image, mask=mask)
        # Get final path
        path = base_dir + new_folder_name
        # Save .hdr image in final path
        print(f"\tSaving raw version ({path})")
        spectral.envi.save_image(path + "/" + new_folder_name + ".hdr",
                                 image,
                                 dtype=np.float32)
        # Save .jpg version with 3 random bands
        print(f"\tSaving jpg version ({path})")
        spectral.save_rgb(path + "/" + new_folder_name + ".jpg", image,
                          [29, 19, 9])
        print("**" * 50)
Example 19
else:
    pass
margin = int((opt.WINDOW_SIZE - 1) / 2)
raw_data_padded = pad_zeros(raw_data, margin=margin)
'''calculate the predicted image'''
pred_map = np.zeros((raw_label.shape[0], raw_label.shape[1]))
for row in range(raw_label.shape[0]):
    for col in range(raw_label.shape[1]):
        target = int(raw_label[row, col])
        if target == 0:
            continue
        else:
            img_patch = patch(raw_data_padded, row, col)
            data_te_img = img_patch.reshape(1, img_patch.shape[0],
                                            img_patch.shape[1],
                                            img_patch.shape[2],
                                            1).astype('float32')
            _, prediction = classifier.predict(data_te_img)
            prediction = np.argmax(prediction, axis=1)
            pred_map[row][col] = prediction + 1

spectral.save_rgb(os.path.join(
    opt.RESULT,
    str(opt.DATASET) + "_" + str(opt.ACQUISITION) + "_predictions.jpg"),
                  pred_map.astype(int),
                  colors=spectral.spy_colors)
spectral.save_rgb(os.path.join(opt.RESULT,
                               str(opt.DATASET) + "_groundtruth.jpg"),
                  raw_label,
                  colors=spectral.spy_colors)
Example 20
    def __init__(self, master):
        self.master = master
        self.state = State()
        self.population = int(self.state.population)
        self.generation = int(self.state.generation)
        self.mutation = float(self.state.mutation)
        self.crossover = float(self.state.crossover)

        self.image = np.array(self.load_data(self.state.image_file))
        self.image_shape = self.image.shape
        self.gt = np.array(self.load_data(self.state.gt_file))

        sp.save_rgb('image.jpg', self.image, [43, 21, 11])
        sp.save_rgb('gt.jpg', self.gt, colors=sp.spy_colors)
        self.display_input_image()

        resize_data = self.resize_data(self.image, self.gt)
        cleaned_data = self.drop_if_gt_zero(resize_data)
        self.X, y = self.feature_target(cleaned_data)

        self.number_of_classes = len(np.unique(y))

        if self.state.feature_selection:
            self.X = self.feature_selection(self.X, y)

        if self.state.one_hot_encoding:
            y = self.one_hot_encoding(y)

        if self.state.normalization:
            self.X = self.standardizing(self.X)

        if self.state.feature_extraction:
            self.X = self.feature_extraction(self.X)

        self.X_train, self.X_test, self.y_train, self.y_test = self.split_data(
            self.X, y, float(self.state.test_set))

        if self.state.model == 'Convolutional Neural Network':
            number = int(self.X_test.shape[0] / 2)

            self.xx_test = self.X_test[:number, :]
            self.xx_val = self.X_test[number:, :]

            if self.state.one_hot_encoding:
                self.loss = tensorflow.keras.losses.categorical_crossentropy
                self.metrics = ['accuracy']
                self.yy_test = self.y_test[:number, :]
                self.yy_val = self.y_test[number:, :]
            else:
                self.loss = tensorflow.keras.losses.sparse_categorical_crossentropy
                self.metrics = ['sparse_categorical_accuracy']
                self.y_test = [x - 1 for x in self.y_test]
                self.y_train = [x - 1 for x in self.y_train]
                self.yy_test = self.y_test[:number]
                self.yy_val = self.y_test[number:]

            feature = self.X.shape[1]

            if K.image_data_format() == 'channels_first':
                self.X = self.X.reshape(self.X.shape[0], 1, feature)
                print('x is reshaped to', self.X.shape)
                self.X_train = self.X_train.reshape(self.X_train.shape[0], 1,
                                                    feature)
                self.xx_test = self.xx_test.reshape(self.xx_test.shape[0], 1,
                                                    feature)
                self.xx_val = self.xx_val.reshape(self.xx_val.shape[0], 1,
                                                  feature)
                self.input_shape = (1, feature)
            else:
                self.X = self.X.reshape(self.X.shape[0], feature, 1)
                print('x is reshaped to', self.X.shape)
                self.X_train = self.X_train.reshape(self.X_train.shape[0],
                                                    feature, 1)
                self.xx_test = self.xx_test.reshape(self.xx_test.shape[0],
                                                    feature, 1)
                self.xx_val = self.xx_val.reshape(self.xx_val.shape[0],
                                                  feature, 1)
                self.input_shape = (feature, 1)

        if self.state.model == 'Recurrent Neural Network':
            number = int(self.X_test.shape[0] / 2)

            self.xx_test = self.X_test[:number, :]
            self.xx_val = self.X_test[number:, :]

            if self.state.one_hot_encoding:
                self.loss = tensorflow.keras.losses.categorical_crossentropy
                self.metrics = ['accuracy']
                self.yy_test = self.y_test[:number, :]
                self.yy_val = self.y_test[number:, :]
            else:
                self.loss = tensorflow.keras.losses.sparse_categorical_crossentropy
                self.metrics = ['sparse_categorical_accuracy']
                self.y_test = [x - 1 for x in self.y_test]
                self.y_train = [x - 1 for x in self.y_train]
                self.yy_test = self.y_test[:number]
                self.yy_val = self.y_test[number:]

            feature = self.X.shape[1]

            self.X = self.X.reshape(self.X.shape[0], 1, feature)
            print('x is reshaped to', self.X.shape)
            self.X_train = self.X_train.reshape(self.X_train.shape[0], 1,
                                                feature)
            self.xx_test = self.xx_test.reshape(self.xx_test.shape[0], 1,
                                                feature)
            self.xx_val = self.xx_val.reshape(self.xx_val.shape[0], 1, feature)
            self.input_shape = (1, feature)

        self.model = self.genetic_algorithm(population=self.population,
                                            generation=self.generation,
                                            crossover=self.crossover,
                                            mutation=self.mutation)
        self.my_thread = threading.Thread(target=self.run_in_thread,
                                          args=(self.X_train, self.y_train,
                                                self.X))
        self.my_thread.start()
Example 21
        gt_corrected = gt_1d[nonzero_index]
        if show_error:
            t = y_predicted.copy()
            correct_index = np.nonzero(y_predicted == gt_corrected[y_test_index])
            t[correct_index] = 0  # leave error
            gt_corrected[:] = 0
            gt_corrected[y_test_index] = t
            gt_1d[nonzero_index] = t
        else:
            gt_corrected[y_test_index] = y_predicted
            gt_1d[nonzero_index] = gt_corrected
        gt_map = gt_1d.reshape((n_row, n_column)).astype('uint8')
        spy.imshow(classes=gt_map)
        if save_path is not None:
            import matplotlib.pyplot as plt
            spy.save_rgb('temp.png', gt_map, colors=spy.spy_colors)
            if show_axis:
                plt.savefig(save_path, format='eps', bbox_inches='tight')
            else:
                plt.axis('off')
                plt.savefig(save_path, format='eps', bbox_inches='tight')
            # self.classification_map(gt_map, gt, 24, save_path)
            print('the figure is saved in ', save_path)

    def classification_map(self, map, groundTruth, dpi, savePath):
        import matplotlib.pyplot as plt
        fig = plt.figure(frameon=False)
        fig.set_size_inches(groundTruth.shape[1] * 2.0 / dpi, groundTruth.shape[0] * 2.0 / dpi)
        ax = plt.Axes(fig, [0., 0., 1., 1.])
        ax.set_axis_off()
        ax.xaxis.set_visible(False)
Example 22
        'each_acc',
        each_acc_res4,
    )
    print("oa", overall_acc_res4)
    print("aa", average_acc_res4)
    print("kappa", kappa)

    # modelStatsRecord.outputStats(KAPPA_RES_SS4, OA_RES_SS4, AA_RES_SS4, ELEMENT_ACC_RES_SS4,
    #                              TRAINING_TIME_RES_SS4, TESTING_TIME_RES_SS4,
    #                              history_res4_SS_BN, loss_and_metrics_res4_SS_BN, CATEGORY,
    #                              '/home/zilong/SSRN/records/IN_train_SS_10.txt',
    #                              '/home/zilong/SSRN/records/IN_train_SS_element_10.txt')

    gt1[test_indices[:-VAL_SIZE]] = pred_test_res4 + 1
    gt1 = gt1.reshape(349, 1905)
    save_rgb('houston-SSRN.jpg', gt1, colors=spy_colors)
    #
    color = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0],
                      [1, 0, 1], [1, 1, 0], [0.5, 0.5, 1], [0.65, 0.35, 1],
                      [0.75, 0.5, 0.75], [0.75, 1, 0.5], [0.5, 1, 0.65],
                      [0.65, 0.65, 0], [0.75, 1, 0.65], [0, 0, 0.5],
                      [0, 1, 0.75]])
    # color = color*255
    newcmap = ListedColormap(color)

    view = pyplot.imshow(gt1.astype(int), cmap=newcmap)
    bar = pyplot.colorbar()
    bar.set_ticks(np.linspace(0, 15, 16))

    pyplot.show()
Example 23
def create_test(config):

    # Configurable parameters
    # config = {}
    # config['patch_size'] = 9
    # config['kernel_size'] = 3
    # config['conv1_channels'] = 32
    # config['conv2_channels'] = 64
    # config['fc1_units'] = 1024
    # config['batch_size'] = 64
    # config['max_epochs'] = 100
    # config['train_dropout'] = 0.5
    # config['initial_learning_rate'] = 0.01
    # config['decaying_lr'] = True

    log_dir = 'app/static/data/models/' + config['image_name'] + "/"
    config['log_dir'] = log_dir

    # Input data
    if config['image_name'] == "indianpines":
        input = IndianPines_Input.IndianPines_Input()
    else:
        input = Salinas_Input.Salinas_Input()

    X, y = input.read_data(config['patch_size'])

    tf.reset_default_graph()

    with tf.Graph().as_default():

        # Size of input
        input_size = len(X)
        num_batches_per_epoch = int(input_size / config['batch_size'])
        test_size = len(y)

        # Create placeholders
        images_pl, labels_pl = CNNModel_2D.placeholder_inputs(
            config['patch_size'], input.input_channels)

        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits, keep_prob = CNNModel_2D.inference(
            images_pl, input.input_channels, config['patch_size'],
            config['kernel_size'], config['conv1_channels'],
            config['conv2_channels'], config['fc1_units'], input.num_classes)

        # Calculate loss.
        loss = CNNModel_2D.loss(logits, labels_pl)

        # Build a Graph that trains the model with one batch of examples and
        # updates the model parameters.

        # Create a variable to track the global step.
        global_step = tf.Variable(0, name='global_step', trainable=False)

        # Define learning rate
        # Decay once per epoch , using an exponential schedule starting at initial_learning_rate
        if config['decaying_lr']:
            learning_rate = tf.train.exponential_decay(
                config['initial_learning_rate'],  # Base learning rate.
                global_step,  # Current index into the dataset.
                num_batches_per_epoch,  # Decay step.
                0.96,  # Decay rate.
                staircase=True)
        else:
            learning_rate = config['initial_learning_rate']

        train_step = CNNModel_2D.training(loss, learning_rate, global_step)

        # Add the Op to compare the logits to the labels during evaluation.
        predictions, correct_predictions, accuracy = CNNModel_2D.evaluation(
            logits, labels_pl)

        # Add the variable initializer Op.
        init = tf.global_variables_initializer()

        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()

        # Create a session for running Ops on the Graph.
        sess = tf.InteractiveSession()

        # Run the Op to initialize the variables.
        sess.run(init)

        # Create DataBuffer for managing batches of train set
        data_buffer = DataBuffer.DataBuffer(images=X,
                                            labels=y,
                                            batch_size=config['batch_size'])

        # TensorBoard
        train_writer = tf.summary.FileWriter(
            log_dir + str(config['id']) + '/train', sess.graph)
        test_writer = tf.summary.FileWriter(log_dir + str(config['id']) +
                                            '/test')

        with tf.name_scope("accuracy"):
            acc_var = tf.Variable(0.0)
            tf.summary.scalar("accuracy",
                              acc_var,
                              collections=['train', 'test'])

        with tf.name_scope("xent"):
            xent_var = tf.Variable(0.0)
            tf.summary.scalar("xent", xent_var, collections=['train', 'test'])

        merged_summ_training = tf.summary.merge_all('train')
        merged_summ_test = tf.summary.merge_all('test')

        # Code for testing the test set in batches (normally too large)
        test_batch_size = 1000
        test_data_buffer = DataBuffer.DataBuffer(images=X,
                                                 labels=y,
                                                 batch_size=test_batch_size)
        test_batch_num = int(math.ceil(len(y) / test_batch_size))
        test_eval_freq = 5

        def eval_test_set(step, conf_matrix=False):
            final_test_accuracy, test_loss = 0, 0
            y_pred, y_true = [], []
            for i in range(test_batch_num):
                images_batch, labels_batch = test_data_buffer.next_batch(
                    shuffle_data=False)
                feed_dict_test = {
                    images_pl: images_batch,
                    labels_pl: labels_batch,
                    keep_prob: 1
                }
                batch_loss,batch_correct_predictions,batch_predictions = \
                    sess.run([loss,tf.reduce_sum(correct_predictions),predictions],feed_dict= feed_dict_test)
                test_loss += batch_loss
                final_test_accuracy += batch_correct_predictions

            final_test_accuracy /= test_size
            test_loss /= test_batch_num
            summ_test = sess.run(merged_summ_test, {
                xent_var: test_loss,
                acc_var: final_test_accuracy
            })
            test_writer.add_summary(summ_test, step)

            return final_test_accuracy

        start_time = time.time()  # Start time

        session['epoch'] = 0
        flash(0)

        for epoch in range(config['max_epochs']):

            session['epoch'] = epoch
            print("Session epoch:" + str(session['epoch']))
            flash(epoch)

            for batch_index in range(num_batches_per_epoch + 1):

                step = tf.train.global_step(sess, global_step)

                images_batch, labels_batch = data_buffer.next_batch()

                feed_dict_train_dropout = {
                    images_pl: images_batch,
                    labels_pl: labels_batch,
                    keep_prob: config['train_dropout']
                }
                feed_dict_train_eval = {
                    images_pl: images_batch,
                    labels_pl: labels_batch,
                    keep_prob: 1
                }

                # Evaluate next batch before train
                train_accuracy = accuracy.eval(feed_dict_train_eval) * 100

                if batch_index % 10 == 0:

                    train_loss, train_accuracy = sess.run([loss, accuracy],
                                                          feed_dict_train_eval)
                    feed_dict_train_eval.update({
                        xent_var: train_loss,
                        acc_var: train_accuracy
                    })
                    summ_train = sess.run(merged_summ_training,
                                          feed_dict_train_eval)
                    train_writer.add_summary(summ_train, step)

                    #percent_advance = str(batch_index * 100 / num_batches_per_epoch)
                    print(
                        'Time: ',
                        str(
                            time.strftime(
                                "%Hh%Mm%Ss",
                                time.gmtime((time.time() - start_time)))))
                    print('Epoch %d. Batch index %d, training accuracy %g' %
                          (epoch, batch_index, train_accuracy))
                    print('---------------\n')

                # Train model
                train_step.run(feed_dict_train_dropout)

            if (epoch % test_eval_freq == 0):
                test_accuracy = eval_test_set(step)
                print('---------------')
                print('Epoch %d. test accuracy %g' %
                      (epoch, test_accuracy * 100))
                print('---------------\n')

        train_writer.close()
        test_writer.close()
        save_path = saver.save(
            sess, log_dir + 'model-' + str(config['id']) + '.ckpt')

    predicted_image, final_accuracy = Decoder.decode(input, config, save_path)

    image_path = 'app/static/data/images/outputmap_' + str(
        config['image_name']) + "_" + str(config['id'])

    #ground_truth = spectral.imshow(classes = input.target_data,figsize =(9,9))
    #predict_image = spectral.imshow(classes = predicted_image.astype(int),figsize =(9,9))
    #spectral.save_rgb('gt.png', input.target_data,colors=spectral.spy_colors, format='png')
    spectral.save_rgb(image_path + ".png",
                      predicted_image,
                      colors=spectral.spy_colors)

    img = Image.open(image_path + ".png")
    img = img.resize((700, 700), resample=Image.ANTIALIAS)
    img.save(image_path + "Big.png")

    return predicted_image, final_accuracy
Example 24
                print(time.time() - a)
                y_train, y_test = np.take(y_reduced, train_index,
                                          axis=0), np.take(y_reduced,
                                                           test_index,
                                                           axis=0)

                train_positions = np.take(selected_positions,
                                          train_index,
                                          axis=0)
                test_positions = np.take(selected_positions,
                                         test_index,
                                         axis=0)

                img_train, img_test = input.train_test_images(
                    train_positions, test_positions)
                save_rgb(fold_dir + "train.png", img_train, format='png')
                save_rgb(fold_dir + "test.png", img_test, format='png')

                if rotation_oversampling:
                    X_train, y_train = input.rotation_oversampling(
                        X_train, y_train)

                print("Size training set", len(X_train))
                print("Size test set", len(X_test))

                if fold_num == 1:
                    file.write("Size training set: %d\n" % len(X_train))
                    file.write("Size test set: %d\n" % len(X_test))
                    file.write("Class distribution:\n")
                    file.write("Train;Test\n")
                    dtrain = Counter(y_train)
Example 25
    def Mlp(self, best_parameters, data, X_train, y_train, X, accuracy):
        model = MLPClassifier(
            hidden_layer_sizes=tuple(data[0][best_parameters[0]]),
            activation='relu',
            solver='adam',
            alpha=data[1][best_parameters[1]],
            batch_size=data[2][best_parameters[2]],
            learning_rate='constant',
            learning_rate_init=data[3][best_parameters[3]],
            power_t=0.5,
            max_iter=500,
            shuffle=True,
            random_state=1,
            tol=0.0001,
            verbose=False,
            warm_start=True,
            momentum=0.9,
            nesterovs_momentum=True,
            early_stopping=False,
            validation_fraction=0.18,  # 0.33 0.18
            beta_1=0.9,
            beta_2=0.999,
            epsilon=1e-08,
            n_iter_no_change=data[4][best_parameters[4]],
            max_fun=15000)
        model.fit(X_train, y_train)
        prediction = model.predict(X)

        if len(prediction.shape) == 2:
            predicted_gt_1 = np.argmax(prediction, axis=1)
            predicted_gt_1_list = list(predicted_gt_1)
            predicted_gt_1_list = [x + 1 for x in predicted_gt_1_list]
        else:
            predicted_gt_1_list = prediction

        for i in self.zero_data:
            predicted_gt_1_list = np.insert(predicted_gt_1_list, i, 0)

        # predicted_gt_size = int(math.sqrt(predicted_gt_1_list.shape[0]))

        self.predicted_gt_1_list = predicted_gt_1_list.reshape(
            self.image_shape[0], self.image_shape[1])

        sp.save_rgb('predicted_gt.jpg',
                    self.predicted_gt_1_list,
                    colors=sp.spy_colors)

        self.predicted_gt = Image.open('predicted_gt.jpg')
        self.predicted_gt = self.predicted_gt.resize((250, 250),
                                                     Image.ANTIALIAS)
        self.predicted_gt = ImageTk.PhotoImage(self.predicted_gt)

        self.output_detail_frame = tk.Frame(self.output_frame)
        self.output_detail_frame.grid(row=0, column=0, sticky='nsew')

        self.accuracy = tk.Label(self.output_detail_frame,
                                 text='Accuracy: ' + str(accuracy))

        self.hidden_layer = tk.Label(self.output_detail_frame,
                                     text='hidden_layer_sizes: ' +
                                     str(data[0][best_parameters[0]]))
        self.alpha = tk.Label(self.output_detail_frame,
                              text='alpha: ' +
                              str(data[1][best_parameters[1]]))
        self.degree = tk.Label(self.output_detail_frame,
                               text='batch_size: ' +
                               str(data[2][best_parameters[2]]))
        self.learning_rate_init = tk.Label(self.output_detail_frame,
                                           text='learning_rate_init: ' +
                                           str(data[3][best_parameters[3]]))
        self.n_iter_no_change = tk.Label(self.output_detail_frame,
                                         text='n_iter_no_change: ' +
                                         str(data[4][best_parameters[4]]))

        self.accuracy.grid(row=0, column=0, sticky='nsew')
        self.hidden_layer.grid(row=1, column=0, sticky='nsew')
        self.alpha.grid(row=2, column=0, sticky='nsew')
        self.degree.grid(row=3, column=0, sticky='nsew')
        self.learning_rate_init.grid(row=4, column=0, sticky='nsew')
        self.n_iter_no_change.grid(row=5, column=0, sticky='nsew')

        self.output_detail_frame.grid_rowconfigure((0, 1, 2, 3, 4, 5),
                                                   weight=1)
        self.output_detail_frame.columnconfigure(0, weight=1)
Example 26
    def Svm(self, best_parameters, data, X_train, y_train, X, accuracy):
        model = sklearn.svm.SVC(
            C=data[0][best_parameters[0]],
            kernel=data[1][best_parameters[1]],
            degree=data[2][best_parameters[2]],
            gamma=data[3][best_parameters[3]],
            coef0=0.0,
            shrinking=data[4][best_parameters[4]],
            probability=data[5][best_parameters[5]],
            tol=0.001,
            cache_size=200,
            class_weight=None,
            verbose=True,
            max_iter=-1,
            decision_function_shape=data[6][best_parameters[6]],
            break_ties=False,
            random_state=None)

        model.fit(X_train, y_train)
        prediction = model.predict(X)
        predicted_gt_1_list = prediction

        for i in self.zero_data:
            predicted_gt_1_list = np.insert(predicted_gt_1_list, i, 0)

        self.predicted_gt_1_list = predicted_gt_1_list.reshape(
            self.image_shape[0], self.image_shape[1])

        sp.save_rgb('predicted_gt.jpg',
                    self.predicted_gt_1_list,
                    colors=sp.spy_colors)

        self.predicted_gt = Image.open('predicted_gt.jpg')
        self.predicted_gt = self.predicted_gt.resize((250, 250),
                                                     Image.ANTIALIAS)
        self.predicted_gt = ImageTk.PhotoImage(self.predicted_gt)

        self.output_detail_frame = tk.Frame(self.output_frame)
        self.output_detail_frame.grid(row=0, column=0, sticky='nsew')

        self.accuracy = tk.Label(self.output_detail_frame,
                                 text='Accuracy: ' + str(accuracy))

        self.C = tk.Label(self.output_detail_frame,
                          text='C: ' + str(data[0][best_parameters[0]]))
        self.kernel = tk.Label(self.output_detail_frame,
                               text='kernel: ' +
                               str(data[1][best_parameters[1]]))
        self.degree = tk.Label(self.output_detail_frame,
                               text='degree: ' +
                               str(data[2][best_parameters[2]]))
        self.gamma = tk.Label(self.output_detail_frame,
                              text='gamma: ' +
                              str(data[3][best_parameters[3]]))
        self.shrinking = tk.Label(self.output_detail_frame,
                                  text='shrinking: ' +
                                  str(data[4][best_parameters[4]]))
        self.probability = tk.Label(self.output_detail_frame,
                                    text='probability: ' +
                                    str(data[5][best_parameters[5]]))
        self.decision_function_shape = tk.Label(
            self.output_detail_frame,
            text='decision_function_shape: ' +
            str(data[6][best_parameters[6]]))

        self.accuracy.grid(row=0, column=0, sticky='nsew')
        self.C.grid(row=1, column=0, sticky='nsew')
        self.kernel.grid(row=2, column=0, sticky='nsew')
        self.degree.grid(row=3, column=0, sticky='nsew')
        self.gamma.grid(row=4, column=0, sticky='nsew')
        self.shrinking.grid(row=5, column=0, sticky='nsew')
        self.probability.grid(row=6, column=0, sticky='nsew')
        self.decision_function_shape.grid(row=7, column=0, sticky='nsew')

        self.output_detail_frame.grid_rowconfigure((0, 1, 2, 3, 4, 5, 6, 7),
                                                   weight=1)
        self.output_detail_frame.columnconfigure(0, weight=1)
Example 27
        kappa_all[0][num] = kappa

    KAPPA_RES_SS4.append(kappa)
    OA_RES_SS4.append(overall_acc_mss)
    AA_RES_SS4.append(average_acc_mss)

    print("training finished.")
    # print('Training Time: ', toc6 - tic6)
    # print('Test time:', toc7 - tic7)
    print("# %d Iteration" % (index_iter + 1))
    print('each_acc', each_acc_mss)
    print("oa", overall_acc_mss)
    print("aa", average_acc_mss)
    print("kappa", kappa)
    gt1[test_indices[:-VAL_SIZE]] = pred_test + 1
    gt1 = gt1.reshape(349, 1905)
    save_rgb('houston-DBDA.jpg', gt1, colors=spy_colors)
    #
    color = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0],
                      [1, 0, 1], [1, 1, 0], [0.5, 0.5, 1], [0.65, 0.35, 1],
                      [0.75, 0.5, 0.75], [0.75, 1, 0.5], [0.5, 1, 0.65],
                      [0.65, 0.65, 0], [0.75, 1, 0.65], [0, 0, 0.5],
                      [0, 1, 0.75]])
    # color = color*255
    newcmap = ListedColormap(color)

    view = pyplot.imshow(gt1.astype(int), cmap=newcmap)
    bar = pyplot.colorbar()
    bar.set_ticks(np.linspace(0, 15, 16))

    pyplot.show()
Example 28
    def Cnn(self, best_parameters, data, X_train, y_train, X, accuracy):
        model = Sequential()
        model.add(
            Conv1D(filters=data[0][best_parameters[0]],
                   kernel_size=data[4][best_parameters[4]],
                   activation='relu',
                   input_shape=self.input_shape))
        model.add(
            Conv1D(filters=data[0][best_parameters[0]],
                   kernel_size=data[4][best_parameters[4]],
                   activation='relu',
                   input_shape=self.input_shape))
        model.add(BatchNormalization())
        model.add(MaxPooling1D(pool_size=2))
        model.add(
            Conv1D(filters=data[1][best_parameters[1]],
                   kernel_size=data[5][best_parameters[5]],
                   activation='relu'))
        model.add(
            Conv1D(filters=data[1][best_parameters[1]],
                   kernel_size=data[5][best_parameters[5]],
                   activation='relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling1D(pool_size=2))
        model.add(
            Conv1D(filters=data[2][best_parameters[2]],
                   kernel_size=data[5][best_parameters[5]],
                   activation='relu'))
        model.add(
            Conv1D(filters=data[2][best_parameters[2]],
                   kernel_size=data[5][best_parameters[5]],
                   activation='relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling1D(pool_size=2))
        model.add(
            Conv1D(filters=data[3][best_parameters[3]],
                   kernel_size=data[6][best_parameters[6]],
                   activation='relu'))
        model.add(
            Conv1D(filters=data[3][best_parameters[3]],
                   kernel_size=data[6][best_parameters[6]],
                   activation='relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling1D(pool_size=2))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(64, activation='relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.25))
        model.add(Dense(32, activation='relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.25))
        model.add(Dense(16, activation='relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.25))
        model.add(Dense(self.number_of_classes, activation='softmax'))

        model.compile(loss=self.loss,
                      optimizer=tensorflow.keras.optimizers.Adadelta(),
                      metrics=self.metrics)

        model.fit(X_train,
                  np.array(y_train),
                  batch_size=128,
                  epochs=10,
                  verbose=1,
                  validation_data=(self.xx_val, np.array(self.yy_val)))

        # score = model.evaluate(self.xx_test, self.yy_test, verbose=1)[0]
        prediction = model.predict(X)

        if not self.state.one_hot_encoding:
            self.y_test = [x + 1 for x in self.y_test]
            self.y_train = [x + 1 for x in self.y_train]

        if len(prediction.shape) == 2:
            predicted_gt_1 = np.argmax(prediction, axis=1)
            predicted_gt_1_list = list(predicted_gt_1)
            predicted_gt_1_list = [x + 1 for x in predicted_gt_1_list]
        else:
            predicted_gt_1_list = prediction

        for i in self.zero_data:
            predicted_gt_1_list.insert(i, 0)
            # predicted_gt_1_list = np.insert(np.array(predicted_gt_1_list), i, 0)

        # predicted_gt_size = int(math.sqrt(predicted_gt_1_list.shape[0]))

        self.predicted_gt_1_list = np.array(predicted_gt_1_list).reshape(
            self.image_shape[0], self.image_shape[1])
        sp.save_rgb('predicted_gt.jpg',
                    self.predicted_gt_1_list,
                    colors=sp.spy_colors)

        self.predicted_gt = Image.open('predicted_gt.jpg')
        self.predicted_gt = self.predicted_gt.resize((250, 250),
                                                     Image.ANTIALIAS)
        self.predicted_gt = ImageTk.PhotoImage(self.predicted_gt)

        self.output_detail_frame = tk.Frame(self.output_frame)
        self.output_detail_frame.grid(row=0, column=0, sticky='nsew')

        self.accuracy = tk.Label(self.output_detail_frame,
                                 text='Accuracy: ' + str(accuracy),
                                 background='green')
        self.accuracy_0 = tk.Label(self.output_detail_frame,
                                   background='green')
        self.accuracy.grid(row=0, column=0, sticky='nsew')
        self.accuracy_0.grid(row=0, column=1, sticky='nsew')

        for count, i in enumerate(model.layers):
            name = i.output.name.split('/')[0].split('_')
            if len(name) > 2:
                name = name[0] + '_' + name[1]
            else:
                name = name[0]

            self.label1 = tk.Label(self.output_detail_frame, text=str(name))
            self.label2 = tk.Label(self.output_detail_frame,
                                   text=str(i.output.shape))

            self.label1.grid(row=count + 1, column=0, sticky='nsew')
            self.label2.grid(row=count + 1, column=1, sticky='nsew')

        self.output_detail_frame.columnconfigure((0, 1), weight=1)
Example 29
    OA_RES_SS4.append(overall_acc_mss)
    AA_RES_SS4.append(average_acc_mss)

    print("training finished.")
    print('Training Time: ', toc6 - tic6)
    print('Test time:', toc7 - tic7)
    print("# %d Iteration" % (index_iter + 1))
    print('each_acc', each_acc_mss)
    print("oa", overall_acc_mss)
    print("aa", average_acc_mss)
    print("kappa", kappa)

    gt1[test_indices[:-VAL_SIZE]] = pred_test + 1
    gt1 = gt1.reshape(349, 1905)

    gt_IN1 = gt_IN1.reshape(349, 1905)
    save_rgb('houston.jpg', gt1, colors=spy_colors)
    save_rgb('houston-gt.jpg', gt_IN1, colors=spy_colors)
    #
    color = np.array(
        [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [0.5, 0.5, 1], [0.65, 0.35, 1],
         [0.75, 0.5, 0.75], [0.75, 1, 0.5], [0.5, 1, 0.65], [0.65, 0.65, 0], [0.75, 1, 0.65], [0, 0, 0.5], [0, 1, 0.75]])
    # color = color*255
    newcmap = ListedColormap(color)

    view = pyplot.imshow(gt1.astype(int), cmap=newcmap)
    bar = pyplot.colorbar()
    bar.set_ticks(np.linspace(0, 15, 16))

    pyplot.show()
X, y = loadData()

height = y.shape[0]
width = y.shape[1]
PATCH_SIZE = windowSize
numComponents = K

X,pca = applyPCA(X, numComponents=numComponents)

X = padWithZeros(X, PATCH_SIZE//2)

# calculate the predicted image
outputs = np.zeros((height,width))
for i in range(height):
    for j in range(width):
        target = int(y[i, j])
        if target == 0:
            continue
        else:
            image_patch = Patch(X, i, j)
            X_test_image = image_patch.reshape(1, image_patch.shape[0],
                                               image_patch.shape[1],
                                               image_patch.shape[2],
                                               1).astype('float32')
            prediction = model.predict(X_test_image)
            prediction = np.argmax(prediction, axis=1)
            outputs[i][j] = prediction + 1

ground_truth = spectral.imshow(classes = y,figsize =(7,7))

predict_image = spectral.imshow(classes = outputs.astype(int),figsize =(7,7))

spectral.save_rgb("predictions.jpg", outputs.astype(int), colors=spectral.spy_colors)