def skeletonize(path_input: str, path_output: str) -> str:
    """Reduce a binary raster to its one-pixel-wide centreline.

    The thick features of the input raster are replaced by a single-pixel
    skeleton that represents their central line.

    Input:
        path_input: Path of the binary raster to be processed
        path_output: Path where the skeletonized raster is saved

    Output:
        path_output (the skeleton is automatically saved to this location)
    """
    filter = config.skeletonize_filter
    geotransform, geoprojection, size, arr = io.read_tif(path_input)

    # Input array must be binary; the output array is also binary
    arr[arr > 0] = 1
    dilate_kernel = np.ones((filter, filter), np.uint8)
    arr = cv2.dilate(arr, dilate_kernel)
    skeleton = skt(arr)

    logging.info('Saving skeleton to %s' % (path_output))
    io.write_tif(path_output, skeleton * 255, geotransform, geoprojection, size)
    return path_output
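# Hedged usage sketch for skeletonize() (not part of the original module): the
# path names below are hypothetical placeholders, and the call assumes this
# module's `io` and `config` helpers are importable.
def _example_skeletonize_run():
    path_mask = 'data/building_mask.tif'            # assumed binary raster (0 / >0)
    path_centreline = 'data/building_skeleton.tif'  # assumed output location
    return skeletonize(path_mask, path_centreline)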
def erosion(path_input: str, filter: int, path_output: str) -> str:
    """Clean a binary raster with erosion, dilation and small-object removal.

    Input:
        path_input: Path of the TIF to be processed
        filter: Size of the erosion filter used to clean the image
        path_output: Path where the cleaned TIF is saved

    Output:
        path_output (the cleaned raster is automatically saved to this location)
    """
    erode_kernel = np.ones((filter, filter), np.uint8)
    geotransform, geoprojection, size, arr = io.read_tif(path_input)

    # Image erosion
    erode = cv2.erode(arr, erode_kernel)

    # Image dilation
    dilate_kernel = np.ones((filter, filter), np.uint8)
    dilate = cv2.dilate(erode, dilate_kernel)

    # Removing objects smaller than min_area (converted to a pixel count)
    cell_size = geotransform[1]
    min_area = 9  # sq. metres
    num_pixel = int(min_area / (cell_size * cell_size))

    dilate = np.asarray(dilate, dtype=int)
    dilate = np.absolute(dilate)
    cleaned = remove_small_objects(dilate, min_size=num_pixel, connectivity=2)

    logging.info('Saving erosion to %s' % (path_output))
    io.write_tif(path_output, cleaned, geotransform, geoprojection, size)
    return path_output
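# Hedged illustration of the area-to-pixel conversion used above (not part of
# the original module): for an assumed 0.5 m raster, the 9 sq. metre minimum
# area corresponds to 36 pixels.
def _example_min_pixel_count(cell_size: float = 0.5, min_area: float = 9.0) -> int:
    cell_area = cell_size * cell_size   # sq. metres covered by one pixel
    return int(min_area / cell_area)    # 36 for the assumed 0.5 m cell size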
def watershedSegmentation(path_input: str, filter: int, path_output: str) -> None:
    """Split touching objects in a binary raster using watershed segmentation.

    Input:
        path_input: Path of the TIF to be processed
        filter: Pixel connectivity (4 or 8) used when labelling markers
        path_output: Path where the segmented TIF is saved

    Output:
        None (the segmentation is automatically saved to the given path_output location)
    """
    geotransform, geoprojection, size, array = io.read_tif(path_input)

    # Minimum distance between two objects, converted from metres to pixels:
    # distance = config.minimum_distance_watershed / cell_size
    dim_array = array.shape
    if len(dim_array) > 2:
        depth = dim_array[2]
    else:
        depth = 1

    labels = np.zeros(array.shape)
    for i in range(depth):
        try:
            arr = array[:, :, i]
        except Exception:
            arr = array[:, :]

        distance = int(config.minimum_distance_watershed / geotransform[1])
        D = ndi.distance_transform_edt(arr)
        localMax = peak_local_max(D, indices=False, min_distance=distance,
                                  labels=arr)

        # 4-connected or 8-connected structuring element for marker labelling
        if int(filter) == 4:
            structure = [[0, 1, 0],
                         [1, 1, 1],
                         [0, 1, 0]]
        elif int(filter) == 8:
            structure = [[1, 1, 1],
                         [1, 1, 1],
                         [1, 1, 1]]
        structure = np.asarray(structure)
        markers = ndi.label(localMax, structure=structure)[0]

        try:
            labels[:, :, i] = watershed(-D, markers, mask=arr)
        except Exception:
            labels = watershed(-D, markers, mask=arr)

    logging.info('Saving watershed segmentation to %s' % (path_output))
    io.write_tif(path_output, labels, geotransform, geoprojection, size)
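# Hedged sketch of chaining the three post-processing steps above (not part of
# the original module); the file names and filter sizes are assumptions, not
# values taken from the project configuration.
def _example_postprocessing_chain():
    path_pred = 'predictions/merged.tif'
    path_clean = 'predictions/merged_clean.tif'
    path_segments = 'predictions/merged_segments.tif'
    path_skeleton = 'predictions/merged_skeleton.tif'

    erosion(path_pred, filter=3, path_output=path_clean)                     # remove speckle
    watershedSegmentation(path_clean, filter=4, path_output=path_segments)   # split touching objects
    skeletonize(path_clean, path_skeleton)                                   # centreline of the cleaned mask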
def skeletonize(path_image, path_output):
    filter = 5
    geotransform, geoprojection, size, arr = io.read_tif(path_image)

    # Input array must be binary; the output array is also binary
    arr[arr > 0] = 1
    dilate_kernel = np.ones((filter, filter), np.uint8)
    arr = cv2.dilate(arr, dilate_kernel)
    skeleton = skt(arr)

    print('Saving skeleton to %s' % (path_output))
    io.write_tif(path_output, skeleton * 255, geotransform, geoprojection, size)
    return path_output
def waterseg(path_image, filter, path_output):
    geotransform, geoprojection, size, array = io.read_tif(path_image)

    # Minimum distance between two objects is 7.5 m:
    # distance = 7.5 / cell_size
    dim_array = array.shape
    if len(dim_array) > 2:
        depth = dim_array[2]
    else:
        depth = 1

    labels = np.zeros(array.shape)
    for i in range(depth):
        try:
            arr = array[:, :, i]
        except Exception:
            arr = array[:, :]

        distance = int(7.5 / geotransform[1])
        D = ndi.distance_transform_edt(arr)
        localMax = peak_local_max(D, indices=False, min_distance=distance,
                                  labels=arr)

        # 4-connected or 8-connected structuring element for marker labelling
        if int(filter) == 4:
            structure = [[0, 1, 0],
                         [1, 1, 1],
                         [0, 1, 0]]
        elif int(filter) == 8:
            structure = [[1, 1, 1],
                         [1, 1, 1],
                         [1, 1, 1]]
        structure = np.asarray(structure)
        markers = ndi.label(localMax, structure=structure)[0]

        try:
            labels[:, :, i] = watershed(-D, markers, mask=arr)
        except Exception:
            labels = watershed(-D, markers, mask=arr)

    print('Saving watershed segmentation to %s' % (path_output))
    io.write_tif(path_output, labels, geotransform, geoprojection, size)
    return path_output
def erosion(path_image, filter, path_output):
    erode_kernel = np.ones((filter, filter), np.uint8)
    geotransform, geoprojection, size, arr = io.read_tif(path_image)

    # Image erosion
    erode = cv2.erode(arr, erode_kernel)

    # Image dilation
    dilate_kernel = np.ones((filter, filter), np.uint8)
    dilate = cv2.dilate(erode, dilate_kernel)

    # Removing objects smaller than min_area (converted to a pixel count)
    cell_size = geotransform[1]
    min_area = 9  # sq. metres
    num_pixel = int(min_area / (cell_size * cell_size))

    dilate = np.asarray(dilate, dtype=int)
    dilate = np.absolute(dilate)
    cleaned = remove_small_objects(dilate, min_size=num_pixel, connectivity=2)

    print('Saving erosion to %s' % (path_output))
    io.write_tif(path_output, cleaned, geotransform, geoprojection, size)
    return path_output
print('Saving Prediction...')
predict_image = []
for i in range(predict_result.shape[0]):
    # im = train_images[i]
    lb = predict_result[i, :, :, :]
    lb = np.round(lb, decimals=0)
    path_im = join(
        path_predict,
        basename(normpath(dirname(train_set.image_part_list[k][i]))),
        basename(data['name'][i]))
    checkdir(os.path.dirname(path_im))
    predict_image.append(path_im)

    # Saving data to disk
    current_process.append('saving_prediction')
    io.write_tif(path_im, lb * 255, data['geotransform'][i],
                 data['geoprojection'][i], data['size'][i])

current_process.append('saving_prediction')

# Flushing all the memory
train_image = []
predict_result = []
lb = []

timing['Processing'] = mtime.time() - st_time

# Merging tiled dataset to single tif
time = mtime.time()
print('Merging and compressing %s tiled dataset. This may take a while' %
      (str(train_set.count)))
current_process.append('merging')
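# Hedged illustration of the path_im construction above (not part of the
# original script): the path_predict, tile_path and tile_name values are
# hypothetical stand-ins for path_predict, train_set.image_part_list[k][i]
# and data['name'][i].
def _example_prediction_path(path_predict='/output/predictions',
                             tile_path='/data/tiles/area_1/tile_0003.tif',
                             tile_name='tile_0003.tif'):
    from os.path import basename, dirname, join, normpath
    # Keeps the parent folder name of the tile, so predictions land in a
    # per-area subfolder: '/output/predictions/area_1/tile_0003.tif'
    return join(path_predict, basename(normpath(dirname(tile_path))),
                basename(tile_name))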
predict_image = []

# Iterating over predictions and saving them to georeferenced TIF files
temp_listPrediction = []
for i in range(len(testing_list_ids)):
    file_name = os.path.basename(testing_geoMap[i]['path'])
    labelPrediction = predictResult[i, :, :, :]

    # Setting 0.5 as threshold
    labelPrediction = np.round(labelPrediction, decimals=0)
    temp_path_output = os.path.join(temp_path_predict, file_name)

    # Saving data to disk
    io.write_tif(temp_path_output, labelPrediction * 255,
                 testing_geoMap[i]['geoTransform'],
                 testing_geoMap[i]['geoProjection'],
                 testing_geoMap[i]['size'])
    temp_listPrediction.append(temp_path_output)

timing['Processing'] = mtime.time() - st_time

# Merging gridded dataset to single TIF
time = mtime.time()
logging.info('Merging and compressing gridded dataset: {}. \
Total number of files: {}. This may take a while'.format(
    temp_path_data, len(temp_listPrediction)))

temp_merged_output = os.path.join(path_merged_prediction, file)
io.mergeTile(listTIF=temp_listPrediction, path_output=temp_merged_output)
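# Hedged illustration of the "0.5 as threshold" comment above (not part of the
# original script): rounding probabilities in [0, 1] to zero decimals behaves
# like an explicit > 0.5 test, except that values of exactly 0.5 round to 0
# under NumPy's round-half-to-even rule.
def _example_round_as_threshold():
    import numpy as np
    probs = np.array([0.12, 0.49, 0.51, 0.97])
    rounded = np.round(probs, decimals=0)        # [0., 0., 1., 1.]
    thresholded = (probs > 0.5).astype(float)    # [0., 0., 1., 1.]
    return rounded, thresholded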