Example no. 1
def constrain_height(results_folder, label_folder, out_folder):

    if not os.path.exists(out_folder):
        os.mkdir(out_folder)

    glob_path = os.path.join(results_folder, '*AGL.tif')
    files = glob.glob(glob_path)
    height_range = [[-0.49, 0.49], [0, 31], [0, 80], [-0.49, 0.49], [0, 100]]  # per-class [min, max] AGL bounds, indexed by training label
    for img in files:

        image_name = os.path.split(img)[-1]
        site_name = image_name[0:-7]
        result_my = load_img(img)
        #result_my=convert_labels(result_my,params,toLasStandard=False)
        label_file = image_name.replace('AGL', 'CLS')
        label = load_img(os.path.join(label_folder, label_file))
        label = convert_labels(label, params, toLasStandard=False)

        for i in range(5):
            lds = np.where((result_my < height_range[i][0]) & (label == i))
            result_my[lds] = height_range[i][0]

            lds = np.where((result_my > height_range[i][1]) & (label == i))
            result_my[lds] = height_range[i][1]

        #     result_merge=np.zeros([label.shape[0],label.shape[1]], dtype = 'float32')

        # result_my[label==0]=0.0
        # result_my[label==3]=0.0

        out_path = os.path.join(out_folder, image_name)
        tifffile.imsave(out_path, result_my, compress=6)
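A minimal usage sketch (the folder paths below are hypothetical; constrain_height only assumes that each *AGL.tif prediction has a matching *CLS.tif label with the same file name):

# Hypothetical paths: clip predicted AGL heights to per-class plausible ranges.
constrain_height(results_folder='./results',
                 label_folder='./labels',
                 out_folder='./results_constrained')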
Example no. 2
def GetInputData(img_path,
                 extra_path,
                 work_region=(),
                 convertLab=False,
                 resize2size=(),
                 pad_edge=1,
                 normalize_dsm=False):
    from grss_data import convertMSI2watertreedata
    if img_path[-7:] == 'MSI.tif':
        is_msi = True
    elif img_path[-7:] == 'RGB.tif':
        is_msi = False
    else:
        raise ValueError('wrong input img_path: ' + img_path)

    if is_msi:
        img = convertMSI2watertreedata(img_path)
    else:
        img = load_img(img_path)
    if len(extra_path) > 3:  # a non-empty path means an extra raster (label or DSM) was supplied
        extra_input = True
        extra_data = load_img(extra_path)
    else:
        extra_input = False
        extra_data = []
    if extra_input:
        if convertLab:
            extra_data = convertLas2Train(extra_data,
                                          params.LABEL_MAPPING_LAS2TRAIN)
        elif normalize_dsm:
            nan_data = np.isnan(extra_data)
            extra_data[nan_data] = 99999
            min_t = extra_data.min()
            extra_data = extra_data - min_t
            extra_data[nan_data] = 0

    if len(work_region) > 0:
        img = img[:, work_region[0]:work_region[1],
                  work_region[2]:work_region[3]]
        if extra_input:
            if (extra_data.shape[0] > work_region[1] - work_region[0]
                    or extra_data.shape[1] > work_region[3] - work_region[2]):
                extra_data = extra_data[:, work_region[0]:work_region[1],
                                        work_region[2]:work_region[3]]

    if len(resize2size) > 0:
        img = cv2.resize(img, resize2size)
        if extra_input:
            extra_data = cv2.resize(extra_data,
                                    resize2size,
                                    interpolation=cv2.INTER_NEAREST)
    return img, extra_data
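A hedged usage sketch (file names are illustrative; the function infers MSI vs. RGB input from the trailing 'MSI.tif'/'RGB.tif' suffix):

# Hypothetical call: load an RGB tile plus its AGL raster, shifted so the minimum height is zero.
img, agl = GetInputData('JAX_004_RGB.tif', 'JAX_004_AGL.tif', normalize_dsm=True)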
Example no. 3
def GetSmallTreeLabel(label_folder, out_folder):
    from skimage import data, util, color, measure
    import matplotlib.pyplot as plt
    #from skimage.measure import label as m_label
    if not os.path.exists(out_folder):
        os.mkdir(out_folder)
    MAX_NUMBER = 4000  # pixel-count threshold; components at least this large are re-labelled
    glob_path = os.path.join(label_folder, '*CLS*.tif')
    files = glob.glob(glob_path)

    for img in files:

        image_name = os.path.split(img)[-1]
        site_name = image_name[0:-7]
        label_data = load_img(img)
        bw_img = np.zeros(label_data.shape[:2], np.uint8)
        bw_img[label_data == 1] = 1

        # 4-connectivity (connectivity=1 replaces the deprecated neighbors=4 argument)
        labeled_img, num = measure.label(bw_img,
                                         connectivity=1,
                                         background=0,
                                         return_num=True)

        for i in range(1, num + 1):  # component labels run from 1 to num; 0 is the background
            total_num = np.sum(labeled_img == i)
            if total_num >= MAX_NUMBER:
                bw_img[labeled_img == i] = 3

        # dst = color.label2rgb(bw_img)
        # plt.imshow(dst)
        # plt.show()
        out_path = os.path.join(out_folder, image_name)
        tifffile.imsave(out_path, bw_img, compress=6)
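The per-component loop above rescans the whole image once per label; a vectorised sketch of the same re-labelling step (it would replace the for loop inside GetSmallTreeLabel and assumes component labels 1..num from measure.label):

# Vectorised alternative (sketch): count pixels per component once,
# then re-label every component whose size reaches MAX_NUMBER.
counts = np.bincount(labeled_img.ravel())       # counts[k] = number of pixels with label k
big = np.flatnonzero(counts >= MAX_NUMBER)      # sufficiently large component ids
big = big[big != 0]                             # never touch the background label 0
bw_img[np.isin(labeled_img, big)] = 3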
Example no. 4
def load_img_label_seg(rgb_file, gts_file, extra_file):
    inputs = []
    labels = []
    img_data = load_img(rgb_file)
    label_data = load_img(gts_file)

    img_data = img_data.astype(float)  # np.float was removed in NumPy 1.20+; plain float is the same dtype
    if len(extra_file) > 3:
        #print('sfd')
        extra_data = load_img(extra_file)
        if extra_file[-9:-6] == 'AGL':
            extra_data[np.isnan(extra_data)] = params.IGNORE_VALUE
        extra_data = extra_data.astype(float)
        extra_data = extra_data[:, :, np.newaxis]
        img_data = np.concatenate((img_data, extra_data), axis=-1)
    if gts_file[-9:-6] == 'AGL':
        label_data[np.isnan(label_data)] = params.IGNORE_VALUE
    elif gts_file[-9:-6] == 'CLS':
        label_data = label_data.astype(float)
        currLabel = np.array(label_data, float)
        currLabel = to_categorical(currLabel, num_classes=NUM_class + 1)
        label_data = currLabel[:, :, 0:-1]
    # label_data[label_data==1] = 0
    # label_data[label_data==2] = 1
    # label_data[label_data==3] = 0
    # label_data[label_data==4] = 2
    # label_data[label_data==5] = 0
    ###currLabel=convertLas2Train(label_data, params.LABEL_MAPPING_LAS2TRAIN)
    if 0:
        import matplotlib.pyplot as plt
        plt.subplot(121)  # subplot(121): 1 row, 2 columns, position 1
        plt.imshow(img_data)
        plt.title('org')
        plt.subplot(122)
        plt.imshow(label_data)
        plt.title('rote90')  # add a title
        plt.show()

    imageMedium, labelMedium = image_augmentation(img_data, label_data)
    inputs.append(img_data)
    labels.append(label_data)
    inputs.append(imageMedium)
    labels.append(labelMedium)
    return inputs, labels
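A hedged usage sketch (file names are illustrative but keep the repo's patch-naming pattern, since the helper reads characters [-9:-6] of each path to detect 'AGL'/'CLS'; NUM_class and params are assumed to be defined at module level):

# Hypothetical call: RGB patch + CLS ground truth, with an AGL patch as an extra input channel.
inputs, labels = load_img_label_seg('JAX_004_RGB_0.tif',
                                    'JAX_004_CLS_0.tif',
                                    'JAX_004_AGL_0.tif')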

Example no. 5
def calculate_confusion_matrix(yp_folder, yt_folder):
    from sklearn.metrics import confusion_matrix

    glob_path = os.path.join(yp_folder, '*CLS.tif')
    files = glob.glob(glob_path)
    All_confus_matrix = np.zeros((6, 6))
    for img in files:
        yp = load_img(img)
        yp = convert_labels(yp, params, toLasStandard=True)
        image_name = os.path.split(img)[-1]
        yt = load_img(os.path.join(yt_folder, image_name))
        yp_num = np.array(yp).flatten()
        yt_num = np.array(yt).flatten()
        # LAS-standard class codes
        c_matrix = confusion_matrix(yt_num, yp_num, labels=[2, 5, 6, 9, 17, 65])
        All_confus_matrix = All_confus_matrix + c_matrix
    np.savetxt('confusion_matrix.txt', All_confus_matrix)
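The accumulated 6x6 matrix can be turned into per-class scores; a minimal sketch (rows are ground truth and columns are predictions, which is sklearn's confusion_matrix convention):

# Hypothetical post-processing of the saved confusion matrix.
cm = np.loadtxt('confusion_matrix.txt')
tp = np.diag(cm)
recall = tp / np.maximum(cm.sum(axis=1), 1)                      # per-class recall
iou = tp / np.maximum(cm.sum(axis=1) + cm.sum(axis=0) - tp, 1)   # per-class IoU
print('mean IoU:', iou.mean())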
Example no. 6
def convertMSI2watertreedata(msi_path):
    data_folder = os.path.split(msi_path)[-2]
    image_name = os.path.split(msi_path)[-1]
    msi = load_img(msi_path)
    # Bands 6-7 ~ NIR, band 4 ~ red, bands 0-1 ~ coastal/blue (WorldView-3 ordering assumed).
    avg_r = (msi[:, :, 6] + msi[:, :, 7]) / 2
    ndvi = (avg_r - msi[:, :, 4]) / (avg_r + msi[:, :, 4])
    avg_w = (msi[:, :, 0] + msi[:, :, 1]) / 2
    ndwi1 = (avg_w - msi[:, :, 6]) / (avg_w + msi[:, :, 6])

    #ndwi2 = (avg_w - msi[:, :, 7]) / (avg_w + msi[:, :, 7])

    rgb = load_img(os.path.join(data_folder, image_name.replace('MSI', 'RGB')))
    itens = np.mean(rgb, axis=-1) / 125 - 1
    data = np.zeros((msi.shape[0], msi.shape[1], 3), np.float32)
    data[:, :, 0] = itens  # ndwi2#itens
    data[:, :, 1] = ndvi
    data[:, :, 2] = ndwi1
    return data
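The band ratios above can divide by zero on dark pixels; Example no. 7 below cleans the resulting NaN/Inf values after computing the same indices, and the same guard could be added here, e.g.:

# Optional guard (sketch): zero out NaN/Inf produced by zero denominators, in place.
for band in (ndvi, ndwi1):
    band[np.isnan(band)] = 0
    band[np.isinf(band)] = 0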
Example no. 7
def GenerateTreeWaterLabelandData(data_folder, label_path, labelout_path,
                                  dataout_path):
    if not os.path.exists(labelout_path):
        os.mkdir(labelout_path)
    if not os.path.exists(dataout_path):
        os.mkdir(dataout_path)
    label_folder = os.path.join(data_folder, 'label_patch')
    msi_folder = os.path.join(data_folder, 'msi_patch')
    rgb_folder = os.path.join(data_folder, 'img_patch')
    label_list = os.path.join(data_folder, 'label_list.txt')
    image_names = []
    fp = open(label_list)
    lines = fp.readlines()
    fp.close()
    for line in lines:
        line = line.strip('\n')
        image_names.append(line)

    tree_list_file = os.path.join(data_folder, 'tree_list.txt')
    water_list_file = os.path.join(data_folder, 'water_list.txt')
    text_files = [tree_list_file, water_list_file]

    all_ids = []
    if os.path.exists(tree_list_file) and os.path.exists(water_list_file):
        for i in range(len(text_files)):
            fp = open(text_files[i])
            lines = fp.readlines()
            fp.close()
            ids = []
            for line in lines:
                line = line.strip('\n')
                ids.append(int(line))
            all_ids.extend(ids)
        all_ids = set(all_ids)

        for id in all_ids:
            i = id - 1
            label = load_img(os.path.join(label_path, image_names[i]))
            bw_img = np.zeros(label.shape[:2], np.uint8)
            bw_img[label == 1] = 1
            bw_img[label == 3] = 2
            bw_img[label == 5] = 3
            out_path = os.path.join(labelout_path, image_names[i])
            tifffile.imsave(out_path, bw_img, compress=6)

            msi = load_img(
                os.path.join(msi_folder, image_names[i].replace('CLS', 'MSI')))
            avg_r = (msi[:, :, 6] + msi[:, :, 7]) / 2
            ndvi = (avg_r - msi[:, :, 4]) / (avg_r + msi[:, :, 4])
            avg_w = (msi[:, :, 0] + msi[:, :, 1]) / 2
            ndwi1 = (avg_w - msi[:, :, 6]) / (avg_w + msi[:, :, 6])

            #ndwi2=(avg_w-msi[:,:,7])/(avg_w+msi[:,:,7])

            rgb = load_img(
                os.path.join(rgb_folder, image_names[i].replace('CLS', 'RGB')))
            itens = np.mean(rgb, axis=-1) / 125 - 1
            data = np.zeros((label.shape[0], label.shape[1], 3), np.float32)
            data[:, :, 0] = itens  #ndwi2#itens
            find_nan = np.where(ndvi == ndvi, 1, 0)  # NaN != NaN, so zeros mark NaN pixels
            if np.min(find_nan) == 0:
                print('NAN in image:', image_names[i])
            # if len(np.isinf(ndvi))>0:
            #     ccc=0
            ndvi[np.isnan(ndvi)] = 0
            ndvi[np.isinf(ndvi)] = 0
            ndwi1[np.isnan(ndwi1)] = 0
            ndwi1[np.isinf(ndwi1)] = 0
            data[:, :, 1] = ndvi
            data[:, :, 2] = ndwi1
            out_path = os.path.join(dataout_path,
                                    image_names[i].replace('CLS', 'RGB'))
            tifffile.imsave(out_path, data, compress=6)
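A hedged usage sketch (all paths hypothetical; data_folder is expected to contain label_patch/, msi_patch/, img_patch/ and the label/tree/water list files):

# Hypothetical call: build tree/water training labels and 3-channel index rasters.
GenerateTreeWaterLabelandData('./train_patches',
                              './train_patches/label_patch',
                              './train_patches/treewater_labels',
                              './train_patches/treewater_data')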
Example no. 8
def _load_batch_helper(inputDict):
    """
    Helper for load_cnn_batch that actually loads imagery and supports parallel processing
    :param inputDict: dict containing the data and metadataStats that will be used to load imagery
    :return currOutput: dict with image data, metadata, and the associated label
    """
    #print("fsf")
    #return

    rgb_file = inputDict['rgb']
    gts_file = inputDict['gts']
    extra_file = inputDict['extra']
    num_class = inputDict['num_class']
    inputs = []
    labels = []
    img_data = load_img(rgb_file)
    label_data = load_img(gts_file)
    if gts_file[-9:-6] == 'AGL':
        label_data[np.isnan(label_data)] = params.IGNORE_VALUE
        #label_data[label_data==0]=0.001
    img_data = img_data.astype(float)
    if len(extra_file) > 3:
        #print('sfd')
        extra_data = load_img(extra_file)
        if extra_file[-9:-6] == 'AGL':
            extra_data[np.isnan(extra_data)] = 0.001
            #extra_data[extra_data==0]=0.001
        extra_data = extra_data.astype(float)

    label_data = label_data.astype(float)
    # label_data[label_data==1] = 0
    # label_data[label_data==2] = 1
    # label_data[label_data==3] = 0
    # label_data[label_data==4] = 2
    # label_data[label_data==5] = 3
    ###currLabel=convertLas2Train(label_data, params.LABEL_MAPPING_LAS2TRAIN)
    if 0:
        import matplotlib.pyplot as plt
        plt.subplot(121)  # subplot(121): 1 row, 2 columns, position 1
        plt.imshow(img_data)
        plt.title('org')
        plt.subplot(122)
        plt.imshow(label_data)
        plt.title('rote90')  # add a title
        plt.show()

    if gts_file[-9:-6] == 'CLS':
        currLabel = np.array(label_data, float)
        currLabel = to_categorical(currLabel, num_classes=int(num_class) + 1)
        label_data = currLabel[:, :, 0:-1]
    if len(extra_file) > 3:
        extra_data = extra_data[:, :, np.newaxis]
        label_data = np.concatenate((label_data, extra_data), axis=-1)
    from dataFunctions import image_augmentation, image_augmentation_test
    imageMedium, labelMedium = image_augmentation_test(img_data, label_data)

    if len(extra_file) > 3:
        extra_data_m = labelMedium[:, :, -1]
        extra_data_m = extra_data_m[:, :, np.newaxis]
        img_data = np.concatenate((img_data, extra_data), axis=-1)
        imageMedium = np.concatenate((imageMedium, extra_data_m), axis=-1)
        label_data = label_data[:, :, :-1]
        labelMedium = labelMedium[:, :, :-1]
    inputs.append(img_data)
    labels.append(label_data)
    inputs.append(imageMedium)
    labels.append(labelMedium)
    return inputs, labels
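A hedged sketch of how this helper's output might be assembled into a training batch (load_cnn_batch itself is not part of this excerpt; the dict contents and file names are illustrative):

# Illustrative only: one inputDict per sample, then stack everything the helper returns.
input_dicts = [{'rgb': 'JAX_004_RGB_0.tif',
                'gts': 'JAX_004_CLS_0.tif',
                'extra': 'JAX_004_AGL_0.tif',
                'num_class': 5}]
batch_inputs, batch_labels = [], []
for d in input_dicts:
    ins, labs = _load_batch_helper(d)
    batch_inputs.extend(ins)
    batch_labels.extend(labs)
x = np.stack(batch_inputs).astype(np.float32)
y = np.stack(batch_labels).astype(np.float32)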
Example no. 9
def track3_Merge_temparal_results_new(result_folder,
                                      out_folder,
                                      track='track3',
                                      new_merge=True,
                                      if_convert_labels=False,
                                      offset=False,
                                      offset_folder=''):
    site_images = []
    site_names = []
    #back_folder='G:/programs/dfc2019-master/track1/data/validate/track2-beforMerge/'
    if not os.path.exists(out_folder):
        os.makedirs(out_folder)
    else:
        glob_path = os.path.join(out_folder, '*.tif')
        files = glob.glob(glob_path)
        for file in files:
            os.remove(file)
    glob_path = os.path.join(result_folder, '*.tif')
    files = glob.glob(glob_path)
    for img in files:
        image_name = os.path.split(img)[-1]
        if track == 'track3':
            site_name = image_name[0:7]
        elif track == 'track1':
            site_name = image_name[0:11]
        else:
            site_name = image_name[0:-9]

        new_site = True
        for i in range(len(site_names)):
            if site_name == site_names[i]:
                new_site = False
                site_images[i].append(img)
        if new_site:
            site_names.append(site_name)
            site_images.append([img])

    NUM_CATEGORIES = params.NUM_CATEGORIES
    for m in range(len(site_names)):
        imgs = site_images[m]
        im = load_img(imgs[0])

        vote_map = np.zeros((im.shape[0], im.shape[1], NUM_CATEGORIES))
        for img_p in imgs:
            im = load_img(img_p)

            #image_name=os.path.split(img_p)[-1]
            #pred_img=np.argmax(im,axis=-1).astype('uint8')
            #out_path=os.path.join(back_folder,image_name)
            #pred_img=convertLas2Train(pred_img, params.LABEL_MAPPING_TRAIN2LAS)
            #tifffile.imsave(out_path,pred_img,compress=6)
            if if_convert_labels:
                im = convertLas2Train(im, params.LABEL_MAPPING_LAS2TRAIN)
            im = to_categorical(im, NUM_CATEGORIES)  ###different merge
            for i in range(vote_map.shape[-1]):
                vote_map[:, :, i] = vote_map[:, :, i] + im[:, :, i]
        #new_merge=False
        if new_merge:
            pred = VoteStrategeMapping(vote_map)
        else:
            pred = np.argmax(vote_map, axis=-1).astype('uint8')

        if offset:
            offset_file = os.path.join(offset_folder,
                                       site_names[m] + '_DSM.txt')
            # use a separate name so the boolean `offset` flag is not overwritten for later sites
            offset_vals = np.loadtxt(offset_file).astype('int')
            pred = pred[offset_vals[1]:offset_vals[1] + 512,
                        offset_vals[0]:offset_vals[0] + 512]

        pred = convert_labels(pred, params, toLasStandard=True).astype('uint8')
        out_path = os.path.join(out_folder, site_names[m] + '_CLS.tif')
        tifffile.imsave(out_path, pred, compress=6)
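A hedged usage sketch (paths hypothetical; for track='track3' the site id is taken from the first 7 characters of each result file name):

# Hypothetical call: merge the temporal CLS predictions of each site by per-pixel voting.
track3_Merge_temparal_results_new('./results_track3',
                                  './results_track3_merged',
                                  track='track3',
                                  new_merge=True)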
Example no. 10
def normalize_image_to_path(img_path, label_path, path_size, overlap_ratio,
                            work_region=[], resize2size=[], extra_input=False,
                            convertLab=False, pad_edge=1, normalize_dsm=1,
                            image_order=0):

    img = load_img(img_path)
    if label_path:
        couple_input = True
        label = load_img(label_path)
    else:
        couple_input = False
    if couple_input:
        if convertLab:
            label = convertLas2Train(label, params.LABEL_MAPPING_LAS2TRAIN)
        elif normalize_dsm:
            nan_data = np.isnan(label)
            label[nan_data] = 99999
            min_t = label.min()
            label = label - min_t
            label[nan_data] = 0

    if len(work_region) > 0:
        img = img[:, work_region[0]:work_region[1],
                  work_region[2]:work_region[3]]
        if couple_input:
            if (label.shape[0] > work_region[1] - work_region[0]
                    or label.shape[1] > work_region[3] - work_region[2]):
                label = label[:, work_region[0]:work_region[1],
                              work_region[2]:work_region[3]]

    if len(resize2size) > 0:
        img = cv2.resize(img, resize2size)
        if couple_input:
            label = cv2.resize(label, resize2size,
                               interpolation=cv2.INTER_NEAREST)

    imgs = []
    labels = []

    if img.shape[0] == path_size[0] and img.shape[1] == path_size[1]:
        imgs.append(img)
        if couple_input:
            labels.append(label)
        return imgs, labels, [path_size[0], path_size[1]]
    else:
        if img.shape[0] < path_size[0]:
            padded_img = np.zeros((path_size[0], img.shape[1], img.shape[2]),
                                  dtype=img.dtype)
            padding_step = round((path_size[0] - img.shape[0]) / 2)
            padded_img[padding_step:padding_step + img.shape[0], :, :] = img
            if couple_input:  # pad the label only when one was supplied
                padded_label = np.ones((path_size[0], img.shape[1], img.shape[2]),
                                       dtype=label.dtype) * facade_params.train_background
                padded_label[padding_step:padding_step + img.shape[0], :, :] = label
                label = padded_label
            img = padded_img
        if img.shape[1] < path_size[1]:
            padded_img = np.zeros((img.shape[0], path_size[1], img.shape[2]),
                                  dtype=img.dtype)
            padding_step = round((path_size[1] - img.shape[1]) / 2)
            padded_img[:, padding_step:padding_step + img.shape[1], :] = img
            if couple_input:
                padded_label = np.ones((img.shape[0], path_size[1], img.shape[2]),
                                       dtype=label.dtype) * facade_params.train_background
                padded_label[:, padding_step:padding_step + img.shape[1], :] = label
                label = padded_label
            img = padded_img

    rows = img.shape[0]
    cols = img.shape[1]

    patch_ranges = calculate_cut_range([rows, cols], patch_size=path_size,
                                       overlap=overlap_ratio)
    for inds in range(len(patch_ranges)):
        y_s = int(round(patch_ranges[inds][0]))
        y_e = int(round(patch_ranges[inds][1]))
        x_s = int(round(patch_ranges[inds][2]))
        x_e = int(round(patch_ranges[inds][3]))
        img_patch = img[y_s:y_e, x_s:x_e]
        imgs.append(img_patch)
        if couple_input:
            label_patch = label[y_s:y_e, x_s:x_e]
            labels.append(label_patch)
    return imgs, labels, [rows, cols]
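calculate_cut_range is not included in this excerpt; below is a minimal sketch of what it is assumed to do, consistent with how its output is indexed above: return a list of [y_start, y_end, x_start, x_end] windows of size patch_size that cover the image with roughly the requested overlap ratio.

# Hypothetical sketch of the assumed helper; not the repo's actual implementation.
def calculate_cut_range(img_size, patch_size, overlap=0.5):
    def starts(length, patch, step):
        s = list(range(0, max(length - patch, 0) + 1, step))
        if length > patch and s[-1] != length - patch:
            s.append(length - patch)   # make sure the last window touches the border
        return s

    step_y = max(1, int(patch_size[0] * (1 - overlap)))
    step_x = max(1, int(patch_size[1] * (1 - overlap)))
    ranges = []
    for y in starts(img_size[0], patch_size[0], step_y):
        for x in starts(img_size[1], patch_size[1], step_x):
            ranges.append([y, y + patch_size[0], x, x + patch_size[1]])
    return ranges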