def assess_radiometric_quality(metadata, calc_quality='high', score=False):
    """Compute a per-pixel radiometric quality surface for a Sentinel-2 tile.

    Parameters
    ----------
    metadata : dict
        Per-image metadata; must contain 'path' entries for the 20m/60m bands
        (SCL, AOT, B02, B12) and the QI cloud-probability rasters, plus
        'time_difference' (seconds) and 'SUN_ELEVATION'.
        (Exact schema comes from the project's metadata builder — not visible here.)
    calc_quality : str
        'high' uses the 20m rasters (finer but slower); anything else uses 60m.
    score : bool
        If True, return only the scalar combined score from
        ``radiometric_quality`` instead of the per-pixel quality raster.

    Returns
    -------
    combined_score : float, when ``score`` is True.
    (quality_blurred, scl) : tuple, otherwise — Gaussian-smoothed quality
        raster (double) and the scene-classification array.
    """
    if calc_quality == 'high':
        # 20m resolution inputs; larger Gaussian kernel to match pixel size.
        scl = raster_to_array(metadata['path']['20m']['SCL']).astype('intc')
        aot = raster_to_array(metadata['path']['20m']['AOT']).astype('intc')
        band_02 = raster_to_array(
            metadata['path']['20m']['B02']).astype('intc')
        band_12 = raster_to_array(
            metadata['path']['20m']['B12']).astype('intc')
        band_cldprb = raster_to_array(metadata['path']['QI']['CLDPRB_20m'])
        distance = 63  # Gaussian kernel size (must be odd for cv2)
    else:
        # 60m resolution inputs; smaller kernel for the coarser grid.
        scl = raster_to_array(metadata['path']['60m']['SCL']).astype('intc')
        aot = raster_to_array(metadata['path']['60m']['AOT']).astype('intc')
        band_cldprb = raster_to_array(metadata['path']['QI']['CLDPRB_60m'])
        band_02 = raster_to_array(
            metadata['path']['60m']['B02']).astype('intc')
        band_12 = raster_to_array(
            metadata['path']['60m']['B12']).astype('intc')
        distance = 21

    kernel_nodata = create_kernel(201,
                                  weighted_edges=False,
                                  weighted_distance=False,
                                  normalise=False).astype('uint8')

    # Dilate nodata values by 1km each side
    nodata_dilated = cv2.dilate((scl == 0).astype('uint8'),
                                kernel_nodata).astype('intc')

    # Build a "dark area" probability layer from the scene classification:
    # SCL 2 = dark-area pixels (55%), SCL 3 = cloud shadow (45%).
    # NOTE(review): the first np.zeros assignment is immediately overwritten
    # by the np.where below and is effectively dead code.
    darkprb = np.zeros(scl.shape)
    darkprb = np.where(scl == 2, 55, 0)
    darkprb = np.where(scl == 3, 45, darkprb).astype('uint8')
    darkprb = cv2.GaussianBlur(darkprb, (distance, distance),
                               0).astype(np.double)

    band_cldprb = cv2.GaussianBlur(band_cldprb, (distance, distance),
                                   0).astype(np.double)

    quality = np.zeros(scl.shape, dtype=np.double)
    # Time difference in days; zeroed when only the scalar score is wanted.
    td = 0.0 if score is True else metadata['time_difference'] / 86400

    # OBS: the radiometric_quality functions mutates the quality input.
    combined_score = radiometric_quality(scl, band_02, band_12, band_cldprb,
                                         darkprb, aot, nodata_dilated,
                                         quality, td,
                                         metadata['SUN_ELEVATION'])

    if score is True:
        return combined_score

    # Smooth the (mutated) quality raster before returning it.
    blur_dist = 31
    quality_blurred = cv2.GaussianBlur(quality, (blur_dist, blur_dist),
                                       0).astype(np.double)

    return quality_blurred, scl
def super_sample_s2(B04_link, B08_link, B05_link=None, B06_link=None, B07_link=None, B8A_link=None, out_folder='../raster/', prefix='', suffix=''):
    """Super-sample (pansharpen) Sentinel-2 20m red-edge/narrow-NIR bands to 10m.

    B05 is sharpened against B04 (spectrally adjacent); B06/B07/B8A are
    sharpened against B08. Writes '{prefix}{band}{suffix}_float.tif' files to
    ``out_folder`` and removes the intermediate pseudo/resampled rasters.

    Parameters
    ----------
    B04_link, B08_link : str
        Paths to the 10m reference bands (required).
    B05_link, B06_link, B07_link, B8A_link : str or None
        Paths to the 20m bands to sharpen; at least one must be given.
    out_folder, prefix, suffix : str
        Output location and file-name decoration.

    Returns
    -------
    None. Side effect: writes pansharpened GeoTIFFs to ``out_folder``.
    """
    assert (isinstance(B05_link, str) or isinstance(B06_link, str)
            or isinstance(B07_link, str)
            or isinstance(B8A_link, str)), 'At least one 20m band is required.'

    paths = {
        'B04': B04_link,
        'B05': B05_link,
        'B06': B06_link,
        'B07': B07_link,
        'B08': B08_link,
        'B8A': B8A_link,
    }

    # False marks bands that were not supplied (kept for backward-compat with
    # the original sentinel value).
    bands = {
        'B04': raster_to_array(B04_link).astype('float32'),
        'B05': raster_to_array(B05_link).astype('float32') if B05_link is not None else False,
        'B06': raster_to_array(B06_link).astype('float32') if B06_link is not None else False,
        'B07': raster_to_array(B07_link).astype('float32') if B07_link is not None else False,
        'B08': raster_to_array(B08_link).astype('float32'),
        'B8A': raster_to_array(B8A_link).astype('float32') if B8A_link is not None else False,
    }

    bands_to_pansharpen = []
    for band_name in ('B05', 'B06', 'B07', 'B8A'):
        if bands[band_name] is not False:
            bands_to_pansharpen.append(band_name)

    for band_x in bands_to_pansharpen:
        # BUGFIX: the original used "band_x is 'B05'" — identity comparison on
        # a string literal is implementation-dependent; use equality instead.
        if band_x == 'B05':
            pseudo_band = 'B04'
        else:
            pseudo_band = 'B08'

        # Write the 10m "panchromatic" reference for this band.
        pseudo_path = os.path.join(out_folder, f'{prefix}{band_x}{suffix}_pseudo.tif')
        array_to_raster(bands[pseudo_band],
                        reference_raster=paths[pseudo_band],
                        out_raster=pseudo_path)

        # Nearest/base resample of the 20m band onto the 10m grid.
        low_res_10m = raster_to_array(
            resample(paths[band_x],
                     reference_raster=paths[pseudo_band])).astype('float32')
        resampled_path = os.path.join(out_folder, f'{prefix}{band_x}{suffix}_resampled.tif')
        array_to_raster(low_res_10m,
                        reference_raster=paths[pseudo_band],
                        out_raster=resampled_path)
        low_res_10m = None  # free memory before pansharpening

        pansharpened_path = os.path.join(out_folder, f'{prefix}{band_x}{suffix}_float.tif')
        pansharpen(pseudo_path, resampled_path, pansharpened_path)

        # Clean up intermediates; only the *_float.tif output is kept.
        os.remove(resampled_path)
        os.remove(pseudo_path)
"""Render a Sentinel-1 composite raster to a transparent PNG figure."""
import sys
sys.path.append('..')
from matplotlib import pyplot as plt
from lib.raster_io import raster_to_array

folder = 'C:\\Users\\caspe\\Desktop\\Data\\Sentinel1\\'
in_raster = f'{folder}accra_s1.tif'
out_figure = f'{folder}accra_s1_figure.png'

figure, axis = plt.subplots()
axis.axis('off')

# vmin/vmax clamp the colour scale to [0, 1]; interpolation=None means
# matplotlib's default image resampling is used.
image = axis.imshow(raster_to_array(in_raster),
                    cmap='viridis',
                    interpolation=None,
                    vmin=0,
                    vmax=1)
plt.colorbar(image, shrink=0.68)

figure.tight_layout()
# NOTE(review): 'papertype' is deprecated/removed in newer matplotlib
# releases — confirm the pinned matplotlib version before upgrading.
figure.savefig(out_figure,
               transparent=True,
               dpi=300,
               papertype=None,
               bbox_inches='tight',
               pad_inches=0)
def mosaic_tile(
    list_of_SAFE_images,
    out_dir,
    out_name='mosaic',
    dst_projection=None,
    feather=True,
    target_quality=100,
    threshold_change=0.5,
    threshold_quality=10.0,
    feather_dist=21,
    feather_scl=5,
    filter_tracking=True,
    match_mean=True,
    allow_nodata=False,
    max_days=120,
    max_images_include=15,
    max_images_search=25,
    output_scl=True,
    output_tracking=True,
    output_quality=False,
    verbose=True,
):
    """Synthesize a cloud-free Sentinel-2 mosaic from a stack of SAFE images.

    Pipeline: (1) rank images and pick the radiometrically best one as the
    base; (2) iteratively splice in pixels from other images wherever they
    improve per-pixel quality (tracked in ``tracking_array``); (3) optionally
    harmonise the spliced layers' medians/MAD-stds per SCL class
    (``match_mean``); (4) optionally feather seams between layers
    (``feather``); (5) write the merged B02/B03/B04/B08 10m rasters plus
    optional SCL/tracking/quality side-products to ``out_dir``.

    NOTE(review): ``threshold_quality`` is accepted but never read in this
    body — confirm whether it is vestigial or consumed elsewhere.

    Returns
    -------
    None. Side effect: writes '{band}_{out_name}.tif' (and optional
    'tracking_', 'scl_', 'quality_' rasters) into ``out_dir``.
    """
    start_time = time()

    # Verify input
    assert isinstance(
        list_of_SAFE_images, list
    ), "list_of_SAFE_images is not a list. [path_to_safe_file1, path_to_safe_file2, ...]"
    assert isinstance(out_dir, str), f"out_dir is not a string: {out_dir}"
    assert isinstance(out_name, str), f"out_name is not a string: {out_name}"
    assert len(list_of_SAFE_images
               ) > 1, "list_of_SAFE_images is empty or only a single image."

    if verbose:
        print('Selecting best image..')

    metadata = prepare_metadata(list_of_SAFE_images)

    # Sorted by best, so 0 is the best one.
    best_image = metadata[0]
    best_image_name = best_image['name']

    if verbose:
        print(f'Selected: {best_image_name} {out_name}')
    if verbose:
        print('Preparing base image..')

    master_quality, master_scl = assess_radiometric_quality(best_image)
    # tracking_array records, per pixel, the index (into metadata) of the
    # image that contributed that pixel. 0 = the base image.
    tracking_array = np.zeros(master_quality.shape, dtype='uint8')

    if match_mean is True:
        # Keep the base SCL in memory for the harmonisation step below.
        metadata[0]['scl'] = np.copy(master_scl)

    time_limit = (max_days * 86400)  # max allowed capture-time gap, seconds
    master_quality_avg = (master_quality.sum() / master_quality.size)

    i = 1  # The 0 index is for the best image
    processed_images_indices = [0]

    # Loop the images and update the tracking array (SYNTHESIS)
    if verbose:
        print(
            f'Initial. tracking array: (quality {round(master_quality_avg, 2)}%) (0/{max_days} days) (goal {target_quality}%)'
        )

    while ((master_quality_avg < target_quality) and i < len(metadata) - 1
           and len(processed_images_indices) <= max_images_include):
        # Skip candidates captured too far from the base image in time.
        if (metadata[i]['time_difference'] > time_limit):
            i += 1
            continue

        if (i >= max_images_search):
            # Past the search budget: only keep going if nodata holes remain.
            if (master_scl == 0).sum() == 0 or allow_nodata is True:
                break
            if verbose:
                print(
                    'Continuing dispite reaching max_images_search as there is still nodata in tile..'
                )

        # Time difference
        td = int(round(metadata[i]['time_difference'] / 86400, 0))

        # Assess quality of current image
        quality, scl = assess_radiometric_quality(metadata[i])

        # Calculate changes. Always update nodata.
        change_mask = (quality > master_quality) | ((master_scl == 0) &
                                                    (scl != 0))
        percent_change = (change_mask.sum() / change_mask.size) * 100

        # Calculate the global change in quality
        quality_global = np.where(change_mask, quality, master_quality)
        quality_global_avg = quality_global.sum() / quality_global.size
        quality_global_change = quality_global_avg - master_quality_avg

        # Accept the candidate only if both the changed area and the global
        # quality gain exceed the threshold.
        if ((percent_change > threshold_change)
                and (quality_global_change > threshold_change)):
            # Update the trackers
            tracking_array = np.where(change_mask, i,
                                      tracking_array).astype('uint8')
            master_scl = np.where(change_mask, scl, master_scl).astype('intc')
            master_quality = np.where(change_mask, quality,
                                      master_quality).astype(np.double)
            master_quality_avg = quality_global_avg

            # Save the scene classification in memory. This cost a bit of RAM but makes harmonisation much faster..
            metadata[i]['scl'] = scl.astype('uint8')

            # Append to the array that keeps track on which images are used in the synth process..
            processed_images_indices.append(i)

            img_name = metadata[i]['name']
            if verbose:
                print(
                    f'Updating tracking array: (quality {round(master_quality_avg, 2)}%) ({td}/{max_days} days) (goal {target_quality}%) (name {img_name})'
                )
        else:
            if verbose:
                print(
                    f'Skipping image due to low change.. ({round(threshold_change, 3)}% threshold) ({td}/{max_days} days)'
                )

        i += 1

        # Free memory
        change_mask = None
        change_mask_inv = None
        quality_global = None
        quality = None
        scl = None

    # Only merge images if there are more than one.
    multiple_images = len(processed_images_indices) > 1

    if match_mean is True and multiple_images is True:
        if verbose:
            print('Harmonising layers..')

        # Weight each contributing image by its pixel share of the mosaic.
        total_counts = 0
        counts = []
        weights = []
        for i in processed_images_indices:
            metadata[i]['stats'] = {'B02': {}, 'B03': {}, 'B04': {}, 'B08': {}}
            pixel_count = (tracking_array == i).sum()
            total_counts += pixel_count
            counts.append(pixel_count)

        for i in range(len(processed_images_indices)):
            w = counts[i] / total_counts
            weights.append(w)

        # Per-band median/MAD-std, overall and per SCL class
        # (4 = vegetation, 5 = not-vegetated, 6 = water).
        medians = {'B02': [], 'B03': [], 'B04': [], 'B08': []}
        medians_4 = {'B02': [], 'B03': [], 'B04': [], 'B08': []}
        medians_5 = {'B02': [], 'B03': [], 'B04': [], 'B08': []}
        medians_6 = {'B02': [], 'B03': [], 'B04': [], 'B08': []}
        madstds = {'B02': [], 'B03': [], 'B04': [], 'B08': []}
        madstds_4 = {'B02': [], 'B03': [], 'B04': [], 'B08': []}
        madstds_5 = {'B02': [], 'B03': [], 'B04': [], 'B08': []}
        madstds_6 = {'B02': [], 'B03': [], 'B04': [], 'B08': []}

        for v, i in enumerate(processed_images_indices):
            # Masks are True where the pixel does NOT belong to the class, so
            # np.ma.array hides everything outside that class.
            layer_mask_4 = metadata[i]['scl'] != 4
            layer_mask_4_sum = (layer_mask_4 == False).sum()
            layer_mask_5 = metadata[i]['scl'] != 5
            layer_mask_5_sum = (layer_mask_5 == False).sum()
            layer_mask_6 = metadata[i]['scl'] != 6
            layer_mask_6_sum = (layer_mask_6 == False).sum()
            # "rest": pixels in class 4, 5, 6 or 7 (unclassified).
            layer_mask = (layer_mask_4 | layer_mask_5 | layer_mask_6 |
                          (metadata[i]['scl'] == 7)) == False

            for band in ['B02', 'B03', 'B04', 'B08']:
                if band == 'B08':
                    # B08 is 10m; resample it onto the 20m grid of B02.
                    array = raster_to_array(
                        resample(metadata[i]['path']['10m'][band],
                                 reference_raster=metadata[i]['path']['20m']
                                 ['B02']))
                else:
                    array = raster_to_array(metadata[i]['path']['20m'][band])

                calc_array = np.ma.array(array, mask=layer_mask)
                calc_array_4 = np.ma.array(array, mask=layer_mask_4)
                calc_array_5 = np.ma.array(array, mask=layer_mask_5)
                calc_array_6 = np.ma.array(array, mask=layer_mask_6)

                med, mad = madstd(calc_array)

                # Per-class stats need >1000 pixels to be trusted; otherwise
                # fall back to the combined-class stats.
                if layer_mask_4_sum > 1000:
                    med_4, mad_4 = madstd(calc_array_4)
                else:
                    med_4, mad_4 = madstd(calc_array)
                if layer_mask_5_sum > 1000:
                    med_5, mad_5 = madstd(calc_array_5)
                else:
                    med_5, mad_5 = madstd(calc_array)
                if layer_mask_6_sum > 1000:
                    med_6, mad_6 = madstd(calc_array_6)
                else:
                    med_6, mad_6 = madstd(calc_array)

                # Guard against degenerate (zero) stats.
                if med == 0 or mad == 0:
                    med, mad = madstd(array)
                if med_4 == 0 or mad_4 == 0:
                    med_4, mad_4 = (med, mad)
                if med_5 == 0 or mad_5 == 0:
                    med_5, mad_5 = (med, mad)
                if med_6 == 0 or mad_6 == 0:
                    med_6, mad_6 = (med, mad)

                medians[band].append(med)
                medians_4[band].append(med_4)
                medians_5[band].append(med_5)
                medians_6[band].append(med_6)
                madstds[band].append(mad)
                madstds_4[band].append(mad_4)
                madstds_5[band].append(mad_5)
                madstds_6[band].append(mad_6)

        # Pixel-share-weighted target statistics per band and class.
        targets_median = {'B02': None, 'B03': None, 'B04': None, 'B08': None}
        targets_median_4 = {'B02': None, 'B03': None, 'B04': None, 'B08': None}
        targets_median_5 = {'B02': None, 'B03': None, 'B04': None, 'B08': None}
        targets_median_6 = {'B02': None, 'B03': None, 'B04': None, 'B08': None}
        targets_madstd = {'B02': None, 'B03': None, 'B04': None, 'B08': None}
        targets_madstd_4 = {'B02': None, 'B03': None, 'B04': None, 'B08': None}
        targets_madstd_5 = {'B02': None, 'B03': None, 'B04': None, 'B08': None}
        targets_madstd_6 = {'B02': None, 'B03': None, 'B04': None, 'B08': None}

        for band in ['B02', 'B03', 'B04', 'B08']:
            targets_median[band] = np.average(medians[band], weights=weights)
            targets_median_4[band] = np.average(medians_4[band],
                                                weights=weights)
            targets_median_5[band] = np.average(medians_5[band],
                                                weights=weights)
            targets_median_6[band] = np.average(medians_6[band],
                                                weights=weights)
            targets_madstd[band] = np.average(madstds[band], weights=weights)
            targets_madstd_4[band] = np.average(madstds_4[band],
                                                weights=weights)
            targets_madstd_5[band] = np.average(madstds_5[band],
                                                weights=weights)
            targets_madstd_6[band] = np.average(madstds_6[band],
                                                weights=weights)

        # Store per-image source stats and shared targets; zero/degenerate
        # source stats fall back to the target value (identity transform).
        for v, i in enumerate(processed_images_indices):
            for band in ['B02', 'B03', 'B04', 'B08']:
                metadata[i]['stats'][band]['src_median'] = medians[band][
                    v] if medians[band][v] > 0 else targets_median[band]
                metadata[i]['stats'][band]['src_median_4'] = medians_4[band][
                    v] if medians_4[band][v] > 0 else targets_median_4[band]
                metadata[i]['stats'][band]['src_median_5'] = medians_5[band][
                    v] if medians_5[band][v] > 0 else targets_median_5[band]
                metadata[i]['stats'][band]['src_median_6'] = medians_6[band][
                    v] if medians_6[band][v] > 0 else targets_median_6[band]
                metadata[i]['stats'][band]['src_madstd'] = madstds[band][
                    v] if madstds[band][v] > 0 else targets_madstd[band]
                metadata[i]['stats'][band]['src_madstd_4'] = madstds_4[band][
                    v] if madstds_4[band][v] > 0 else targets_madstd_4[band]
                metadata[i]['stats'][band]['src_madstd_5'] = madstds_5[band][
                    v] if madstds_5[band][v] > 0 else targets_madstd_5[band]
                metadata[i]['stats'][band]['src_madstd_6'] = madstds_6[band][
                    v] if madstds_6[band][v] > 0 else targets_madstd_6[band]
                metadata[i]['stats'][band]['target_median'] = targets_median[
                    band]
                metadata[i]['stats'][band][
                    'target_median_4'] = targets_median_4[band]
                metadata[i]['stats'][band][
                    'target_median_5'] = targets_median_5[band]
                metadata[i]['stats'][band][
                    'target_median_6'] = targets_median_6[band]
                metadata[i]['stats'][band]['target_madstd'] = targets_madstd[
                    band]
                metadata[i]['stats'][band][
                    'target_madstd_4'] = targets_madstd_4[band]
                metadata[i]['stats'][band][
                    'target_madstd_5'] = targets_madstd_5[band]
                metadata[i]['stats'][band][
                    'target_madstd_6'] = targets_madstd_6[band]

    # Clear memory of scl images
    for j in range(len(metadata)):
        metadata[j]['scl'] = None

    if output_tracking is True:
        array_to_raster(tracking_array.astype('uint8'),
                        reference_raster=best_image['path']['20m']['B04'],
                        out_raster=os.path.join(out_dir,
                                                f"tracking_{out_name}.tif"),
                        dst_projection=dst_projection)

    if output_scl is True:
        array_to_raster(master_scl.astype('uint8'),
                        reference_raster=best_image['path']['20m']['B04'],
                        out_raster=os.path.join(out_dir,
                                                f"scl_{out_name}.tif"),
                        dst_projection=dst_projection)

    if output_quality is True:
        array_to_raster(master_quality.astype('float32'),
                        reference_raster=best_image['path']['20m']['B04'],
                        out_raster=os.path.join(out_dir,
                                                f"quality_{out_name}.tif"),
                        dst_projection=dst_projection)

    # Resample scl and tracking array (20m -> 10m grid of the base image).
    tracking_array = raster_to_array(
        resample(array_to_raster(
            tracking_array, reference_raster=best_image['path']['20m']['B04']),
                 reference_raster=best_image['path']['10m']['B04']))
    master_scl = raster_to_array(
        resample(array_to_raster(
            master_scl, reference_raster=best_image['path']['20m']['B04']),
                 reference_raster=best_image['path']['10m']['B04']))

    # Run a mode filter on the tracking array
    if filter_tracking is True and multiple_images is True:
        if verbose:
            print('Filtering tracking array..')
        tracking_array = mode_filter(tracking_array, 7).astype('uint8')

    # Feather the edges between joined images (ensure enough valid pixels are on each side..)
    if feather is True and multiple_images is True:
        feathers = {}

        print('Precalculating classification feathers..')
        # Per-SCL-class blending weights; "rest" covers everything that is not
        # vegetation (4), bare (5) or water (6).
        feather_rest = feather_s2_filter(
            master_scl, np.array([0, 1, 2, 3, 7, 8, 9, 10, 11], dtype='intc'),
            feather_scl).astype('float32')
        feather_4 = feather_s2_filter(master_scl, np.array([4], dtype='intc'),
                                      feather_scl).astype('float32')
        feather_5 = feather_s2_filter(master_scl, np.array([5], dtype='intc'),
                                      feather_scl).astype('float32')
        feather_6 = feather_s2_filter(master_scl, np.array([6], dtype='intc'),
                                      feather_scl).astype('float32')

        if verbose:
            print('Precalculating inter-layer feathers..')
        # Per-image blending weights along the tracking-array seams.
        for i in processed_images_indices:
            feathers[str(i)] = feather_s2_filter(
                tracking_array, np.array([i], dtype='intc'),
                feather_dist).astype('float32')

    if match_mean is True and feather is False and len(
            processed_images_indices) > 1:
        # Hard (non-feathered) per-class masks used during harmonisation.
        mask_4 = (master_scl == 4)
        mask_5 = (master_scl == 5)
        mask_6 = (master_scl == 6)
        mask_rest = (master_scl != 4) & (master_scl != 5) & (master_scl != 6)

    bands_to_output = ['B02', 'B03', 'B04', 'B08']
    if verbose:
        print('Merging band data..')

    for band in bands_to_output:
        if verbose:
            print(f'Writing: {band}..')

        base_image = raster_to_array(
            metadata[0]['path']['10m'][band]).astype('float32')

        for i in processed_images_indices:
            if match_mean and len(processed_images_indices) > 1:
                src_med = metadata[i]['stats'][band]['src_median']
                src_med_4 = metadata[i]['stats'][band]['src_median_4']
                src_med_5 = metadata[i]['stats'][band]['src_median_5']
                src_med_6 = metadata[i]['stats'][band]['src_median_6']
                src_mad = metadata[i]['stats'][band]['src_madstd']
                src_mad_4 = metadata[i]['stats'][band]['src_madstd_4']
                src_mad_5 = metadata[i]['stats'][band]['src_madstd_5']
                src_mad_6 = metadata[i]['stats'][band]['src_madstd_6']
                target_med = metadata[i]['stats'][band]['target_median']
                target_med_4 = metadata[i]['stats'][band]['target_median_4']
                target_med_5 = metadata[i]['stats'][band]['target_median_5']
                target_med_6 = metadata[i]['stats'][band]['target_median_6']
                target_mad = metadata[i]['stats'][band]['target_madstd']
                target_mad_4 = metadata[i]['stats'][band]['target_madstd_4']
                target_mad_5 = metadata[i]['stats'][band]['target_madstd_5']
                target_mad_6 = metadata[i]['stats'][band]['target_madstd_6']

            if i == 0:
                if match_mean and len(processed_images_indices) > 1:
                    # Linear median/MAD matching: x' = (x - med) * tmad/smad + tmed,
                    # applied per SCL class, blended by the class feathers (or
                    # hard masks when feathering is off).
                    dif = base_image - src_med
                    dif_4 = base_image - src_med_4
                    dif_5 = base_image - src_med_5
                    dif_6 = base_image - src_med_6

                    if feather is True and len(processed_images_indices) > 1:
                        base_image = (((dif * target_mad) / src_mad) +
                                      target_med) * feather_rest
                        base_image = np.add(
                            base_image,
                            (((dif_4 * target_mad_4) / src_mad_4) +
                             target_med_4) * feather_4)
                        base_image = np.add(
                            base_image,
                            (((dif_5 * target_mad_5) / src_mad_5) +
                             target_med_5) * feather_5)
                        base_image = np.add(
                            base_image,
                            (((dif_6 * target_mad_6) / src_mad_6) +
                             target_med_6) * feather_6)
                    else:
                        base_image_rest = (
                            (dif * target_mad) / src_mad) + target_med
                        base_image_4 = (
                            (dif_4 * target_mad_4) / src_mad_4) + target_med_4
                        base_image_5 = (
                            (dif_5 * target_mad_5) / src_mad_5) + target_med_5
                        base_image_6 = (
                            (dif_6 * target_mad_6) / src_mad_6) + target_med_6

                        base_image = np.where(mask_rest, base_image_rest,
                                              base_image)
                        base_image = np.where(mask_4, base_image_4, base_image)
                        base_image = np.where(mask_5, base_image_5, base_image)
                        base_image = np.where(mask_6, base_image_6, base_image)

                    # Clamp negatives introduced by the linear transform.
                    base_image = np.where(base_image >= 0, base_image, 0)

                if feather is True and len(processed_images_indices) > 1:
                    base_image = base_image * feathers[str(i)]
            else:
                add_band = raster_to_array(
                    metadata[i]['path']['10m'][band]).astype('float32')

                if match_mean:
                    dif = add_band - src_med
                    dif_4 = add_band - src_med_4
                    dif_5 = add_band - src_med_5
                    dif_6 = add_band - src_med_6

                    if feather is True:
                        add_band = (((dif * target_mad) / src_mad) +
                                    target_med) * feather_rest
                        add_band = np.add(
                            add_band, (((dif_4 * target_mad_4) / src_mad_4) +
                                       target_med_4) * feather_4)
                        add_band = np.add(
                            add_band, (((dif_5 * target_mad_5) / src_mad_5) +
                                       target_med_5) * feather_5)
                        add_band = np.add(
                            add_band, (((dif_6 * target_mad_6) / src_mad_6) +
                                       target_med_6) * feather_6)
                    else:
                        add_band_rest = (
                            (dif * target_mad) / src_mad) + target_med
                        add_band_4 = (
                            (dif_4 * target_mad_4) / src_mad_4) + target_med_4
                        add_band_5 = (
                            (dif_5 * target_mad_5) / src_mad_5) + target_med_5
                        add_band_6 = (
                            (dif_6 * target_mad_6) / src_mad_6) + target_med_6

                        add_band = np.where(mask_rest, add_band_rest, add_band)
                        add_band = np.where(mask_4, add_band_4, add_band)
                        add_band = np.where(mask_5, add_band_5, add_band)
                        add_band = np.where(mask_6, add_band_6, add_band)

                    add_band = np.where(add_band >= 0, add_band, 0)

                if feather is True:
                    # Weighted blend along seams.
                    base_image = np.add(base_image,
                                        (add_band * feathers[str(i)]))
                else:
                    # Hard replacement where this image owns the pixel.
                    base_image = np.where(tracking_array == i, add_band,
                                          base_image).astype('float32')

        array_to_raster(np.rint(base_image).astype('uint16'),
                        reference_raster=best_image['path']['10m'][band],
                        out_raster=os.path.join(out_dir,
                                                f"{band}_{out_name}.tif"),
                        dst_projection=dst_projection)

    if verbose:
        print(f'Completed mosaic in: {round((time() - start_time) / 60, 1)}m')
"""Apply the CDEF filter to the max-z raster of the SATF preprocess data."""
import sys; sys.path.append('..')
import numpy as np
from lib.raster_io import raster_to_array, array_to_raster
from lib.stats_filters import mean_filter, median_filter, standard_deviation_filter, cdef_filter

folder = '/mnt/c/users/caspe/desktop/data/satf_preprocess/'
b16_path = folder + '16-02-2019_crop.tif'
b22_path = folder + '22-02-2019_crop.tif'

# The two acquisition dates are loaded for the (currently disabled)
# z-score experiment below.
b16 = raster_to_array(b16_path)
b22 = raster_to_array(b22_path)

maxz = raster_to_array(folder + '__max_z.tif')
array_to_raster(cdef_filter(maxz),
                reference_raster=b16_path,
                out_raster=folder + '__max_z_cdf.tif')

# Disabled experiment (kept for reference): compute mean/median/std filters on
# both dates, take the absolute median difference, and derive per-date
# z-scores |mean - mean_after_difference| / std with zero-std pixels set to 0.
"""Weighted multi-criteria overlay for the Vejdirektorat analysis.

Each input raster is scaled by (rank * weight) and the stack is summed into
a single suitability raster.
"""
import sys; sys.path.append('..')
import numpy as np
from lib.raster_io import raster_to_array, array_to_raster

base = '/mnt/c/Users/caspe/Desktop/Projects/multicriteriaAnalysis_vejdirektorat/'

# (filename, rank, weight) — multiplication order is kept as rank then weight.
criteria = [
    ('andrekomplan2.tif', 4, 0.013),
    ('unesco_area2.tif', 8, 0.169),
    ('unesco_buf2.tif', 1, 0.086),
    ('besnatur2.tif', 6, 0.038),
    ('besnat_sammen2.tif', 8, 0.049),
    ('fundarealbesk2.tif', 5, 0.019),
    ('fundfortid2.tif', 9, 0.089),
    ('fundbesk_sam2.tif', 7, 0.026),
    ('bygn_fred2.tif', 6, 0.067),
    ('boligo500100012.tif', 4, 0.010),
    ('foreneligfred2.tif', 2, 0.011),
    ('boligomr_komplan000500mbuff_v2.tif', 8, 0.182),
    ('fred_fredforslag_v2.tif', 9, 0.084),
    ('natura2000_korr_v2.tif', 10, 0.237),
]

# Nodata is filled with 0 so absent criteria contribute nothing to the sum.
layers = np.array([
    raster_to_array(f'{base}{name}', fill_value=0, src_nodata=0, filled=True) * rank * weight
    for name, rank, weight in criteria
])

array_to_raster(
    np.sum(layers, axis=0),
    out_raster=f'{base}multicriteria_victor2.tif',
    reference_raster=f'{base}andrekomplan2.tif',
    src_nodata=0,
    dst_nodata=None,
)
def super_sample_bands(self, bands=('B05', 'B06', 'B07', 'B8A')):
    """Super-sample the 20m bands to 10m and stage all *_ss_10m.tif outputs.

    B05-B07 are pansharpened against a spectrally-weighted B04/B08 ratio
    image; B8A is pansharpened directly against B08; B11/B12 are resampled;
    B02/B03/B04/B08 are copied through unchanged.

    Parameters
    ----------
    bands : sequence of str
        Which 20m bands to pansharpen (default: B05, B06, B07, B8A).
        Changed from a mutable list default to a tuple (never mutated, so
        backward compatible).

    Returns
    -------
    None. Side effect: writes '{basename}_{band}_ss_10m.tif' files into
    self.folders['custom'] and refreshes the custom-image index.
    """
    # Create the temp dir BEFORE the try-block: the original created it
    # inside, so a failure in _make_temp_dir() raised NameError in finally.
    temp_dir = self._make_temp_dir()
    try:
        B4 = self.get_raw_image('B04', 10)
        B8 = self.get_raw_image('B08', 10)

        B4_arr = raster_to_array(B4)
        B8_arr = raster_to_array(B8)

        if 'B8A' in bands:
            # Reuse an existing resampled version if one is already staged.
            band_potential_path = self.get_custom_image(
                f'{self.metadata["basename"]}_B8A_rs_10m')
            # Check the sentinel before touching the filesystem (the original
            # called os.path.exists on a possible False).
            if band_potential_path is not False and os.path.exists(
                    band_potential_path):
                resampled_name = band_potential_path
            else:
                resampled_name = os.path.join(
                    temp_dir, f'{self.metadata["basename"]}_B8A_rs_10m.tif')
                resample(self.get_raw_image('B8A', 20),
                         reference_raster=B8,
                         out_raster=resampled_name)

            pansharpened_name = os.path.join(
                self.folders['custom'],
                f'{self.metadata["basename"]}_B8A_ss_10m.tif')
            # B8A sharpens directly against B08 (spectrally adjacent).
            pansharpen(B8, resampled_name, pansharpened_name,
                       out_datatype='uint16')

        for band in bands:
            # B8A was handled above.
            if band == 'B8A':
                continue

            band_path = self.get_raw_image(band, 20)

            # Weight B04/B08 by spectral proximity to this band: the closer
            # reference band gets the larger weight.
            B4_distance = self.s2_spectral_profile[band][
                'edge_bot'] - self.s2_spectral_profile['B04']['edge_top']
            B8_distance = self.s2_spectral_profile['B08'][
                'edge_bot'] - self.s2_spectral_profile[band]['edge_top']
            distance_sum = B4_distance + B8_distance
            B4_weight = 1 - (B4_distance / distance_sum)
            B8_weight = 1 - (B8_distance / distance_sum)

            ratio = np.add(np.multiply(B4_arr, B4_weight),
                           np.multiply(B8_arr, B8_weight))

            # Reuse an existing resampled version if one is already staged.
            band_potential_path = self.get_custom_image(
                f'{self.metadata["basename"]}_{band}_rs_10m')
            if band_potential_path is not False and os.path.exists(
                    band_potential_path):
                resampled_name = band_potential_path
            else:
                resampled_name = os.path.join(
                    temp_dir, f'{self.metadata["basename"]}_{band}_rs_10m.tif')
                resample(band_path,
                         reference_raster=B8,
                         out_raster=resampled_name)

            ratio_name = os.path.join(
                temp_dir, f'{self.metadata["basename"]}_{band}_ratio_10m.tif')
            array_to_raster(ratio, reference_raster=B8, out_raster=ratio_name)

            pansharpened_name = os.path.join(
                self.folders['custom'],
                f'{self.metadata["basename"]}_{band}_ss_10m.tif')
            pansharpen(ratio_name, resampled_name, pansharpened_name,
                       out_datatype='uint16')

        # SWIR bands are only resampled, not pansharpened.
        resample(self.get_raw_image('B11', 20),
                 reference_raster=B8,
                 out_raster=os.path.join(
                     self.folders['custom'],
                     f'{self.metadata["basename"]}_B11_ss_10m.tif'))
        resample(self.get_raw_image('B12', 20),
                 reference_raster=B8,
                 out_raster=os.path.join(
                     self.folders['custom'],
                     f'{self.metadata["basename"]}_B12_ss_10m.tif'))

        # Native 10m bands are copied through unchanged.
        shutil.copyfile(
            self.get_raw_image('B02', 10),
            os.path.join(self.folders['custom'],
                         f'{self.metadata["basename"]}_B02_ss_10m.tif'))
        shutil.copyfile(
            self.get_raw_image('B03', 10),
            os.path.join(self.folders['custom'],
                         f'{self.metadata["basename"]}_B03_ss_10m.tif'))
        shutil.copyfile(
            self.get_raw_image('B04', 10),
            os.path.join(self.folders['custom'],
                         f'{self.metadata["basename"]}_B04_ss_10m.tif'))
        # BUGFIX: the original copied B04 into the B08 output file.
        shutil.copyfile(
            self.get_raw_image('B08', 10),
            os.path.join(self.folders['custom'],
                         f'{self.metadata["basename"]}_B08_ss_10m.tif'))
    finally:
        self.update_custom()
        shutil.rmtree(temp_dir)
def texture_variance(self):
    """Compute a radius-3 local-variance texture layer from the SWIR PCA.

    The live path converts the SWIR PCA raster to uint32, runs ``local_stats``
    (radius 3, band 2) on it, and writes the resulting variance band to
    '{basename}_swir_pca_var_texture_10m_rad3.tif' in the custom folder.
    The VIS/NIR variants and the multi-radius sqrt-averaging are disabled
    (kept below as commented-out experiments).

    Returns
    -------
    None. Side effect: writes the texture raster and refreshes the
    custom-image index; prints elapsed time.
    """
    before = time.time()
    try:
        temp_dir = self._make_temp_dir()
        # vis/nir are currently only consumed by the disabled experiments.
        vis = self.get_custom_image(
            f'{self.metadata["basename"]}_vis_pca_10m')
        nir = self.get_custom_image(
            f'{self.metadata["basename"]}_nir_pca_10m')
        swir = self.get_custom_image(
            f'{self.metadata["basename"]}_swir_pca_10m')

        # Disabled experiment (VIS): local_stats at radii 3/2/1 on the VIS
        # PCA, mean of the sqrt of the three variance bands, written to
        # '{basename}_vis_pca_var_texture_10m.tif'.

        # Disabled experiment (NIR): same multi-radius sqrt-averaged variance
        # for the NIR PCA, written to '{basename}_nir_pca_var_texture_10m.tif'.

        # local_stats appears to require an integer input raster, hence the
        # uint32 round-trip — TODO confirm against local_stats' contract.
        swir_arr = raster_to_array(swir).astype('uint32')
        swir_rast = array_to_raster(
            swir_arr,
            reference_raster=swir,
            out_raster=os.path.join(
                temp_dir,
                f'{self.metadata["basename"]}_swir_pca_uint32_10m.tif'))

        swir_stats_3 = local_stats(
            swir_rast,
            os.path.join(
                temp_dir,
                f'{self.metadata["basename"]}_swir_pca_uint32_stats_10m.tif'
            ),
            options={'radius': 3},
            band=2)

        # NOTE(review): this joins temp_dir with local_stats' return value.
        # If local_stats returns the absolute output path (as the second
        # argument above suggests), os.path.join would discard temp_dir;
        # if it returns a bare filename the join is required — verify
        # local_stats' return convention.
        swir3_variance_arr = raster_to_array(
            os.path.join(temp_dir, swir_stats_3))

        array_to_raster(
            swir3_variance_arr,
            reference_raster=swir,
            out_raster=os.path.join(
                self.folders['custom'],
                f'{self.metadata["basename"]}_swir_pca_var_texture_10m_rad3.tif'
            ))

        # Disabled experiment (SWIR multi-radius): sqrt-average of the
        # radius 3/2/1 variance bands, written to
        # '{basename}_swir_pca_var_texture_10m.tif'.
    finally:
        self.update_custom()
        shutil.rmtree(temp_dir)
    print(f'execution took: {round(time.time() - before, 2)}s')
"""Phase-4 urban density scratch script (active part only loads the reference)."""
import sys
sys.path.append('..')
import numpy as np
import geopandas as gpd
from time import time
from glob import glob
from pathlib import Path
from lib.raster_io import raster_to_array, array_to_raster
from lib.stats_filters import fast_sum_filter, highest_filter

phase4_folder = '/mnt/c/users/caspe/desktop/Analysis/'
ref_path = phase4_folder + 'Data/s2_b04_10m_dry.tif'

# Reference band; its mask is reused by the disabled density experiment.
ref = raster_to_array(ref_path)

# Disabled experiment 1: distance-weighted fast_sum_filter (201px, 'power')
# over the masked urban raster, re-masked with ref.mask and written to
# 'Phase4/urban_raw_density.tif'.
# Disabled experiment 2: merge = (density + 1) / sqrt(proximity + 1) from the
# suburban density/proximity rasters.
"""Aggregate clipped VIIRS nightlight rasters: total coverage count and a
coverage-weighted average radiance base layer."""
# BUGFIX: numpy and glob were used below but never imported, so this script
# crashed with NameError at the first np.add / glob call.
import numpy as np
from glob import glob

from lib.raster_reproject import reproject
from lib.raster_io import raster_to_array, array_to_raster
from lib.stats_filters import threshold_filter, median_filter, median_deviation_filter

clip_to = '/mnt/c/Users/caspe/Desktop/Analysis/Data/vector/ghana_5km_buffer_wgs84.shp'

src_folder = '/mnt/c/users/caspe/Desktop/Analysis/Data/nightlights/'
dst_folder = '/mnt/c/users/caspe/Desktop/Analysis/Data/nightlights/clipped/'

# One-off clipping step (already run; kept for provenance):
# files = glob(src_folder + '*.tif')
# for f in files:
#     clip_raster(f, out_raster=dst_folder + f.rsplit('/', 1)[1], cutline=clip_to, cutline_all_touch=True, crop_to_cutline=True)

# Sum the per-image cloud-free coverage counts into a total count raster.
files = glob(dst_folder + '*cf_cvg*.tif')
base = raster_to_array(files[0])
for i, f in enumerate(files):
    if i == 0:
        continue
    base = np.add(base, raster_to_array(f))
array_to_raster(base,
                out_raster=dst_folder + 'count.tif',
                reference_raster=files[0])

count = raster_to_array(dst_folder + 'count.tif')

# Weight the first average-radiance image by its share of total coverage.
files = glob(dst_folder + '*avg_rade9h*.tif')
base = files[0]
# Matching coverage raster: swap the '.avg_rade9h' suffix for '.cf_cvg'.
base_count = files[0].rsplit('.', 2)[0] + '.cf_cvg.tif'
base_weight = raster_to_array(base_count) / count
base = raster_to_array(base) * base_weight
import sys; sys.path.append('..') import numpy as np from time import time import cv2 from lib.raster_io import raster_to_array, array_to_raster from lib.stats_filters import threshold_filter, median_filter, median_deviation_filter folder = '/mnt/c/Users/caspe/Desktop/Analysis/Data/mosaic/aligned/' b4_path = folder + 'B04.tif' b8_path = folder + 'B08.tif' bs_path = folder + 'BS.tif' blur_path = folder + 'bs_blur.tif' B4 = raster_to_array(b4_path) B8 = raster_to_array(b8_path) # bs_blur = cv2.GaussianBlur(raster_to_array(bs_path), (11, 11), 0) # array_to_raster(np.sqrt(raster_to_array(blur_path)).astype('float32'), out_raster=folder + 'bs_blur_sqrt.tif', reference_raster=b4_path) array_to_raster((B4 - B8) / (B4 + B8), out_raster=folder + 'inv_ndvi.tif', reference_raster=b4_path) adding some new code here
import sys sys.path.append('..') import numpy as np from time import time from glob import glob from pathlib import Path from lib.raster_io import raster_to_array, array_to_raster from lib.stats_filters import median_filter, mean_filter, threshold_array, close_filter, truncate_filter, sum_filter, fast_sum_filter, median_deviation_filter, standardise_filter folder = 'c:/users/caspe/desktop/Analysis/data/' prox = folder + 's1_wet_perm-proximity.tif' dens = folder + 's1_wet_mask-density_5km.tif' merge = folder + 's1_wet_perm.tif' prox_arr = np.ma.add(raster_to_array(prox), 1) dens_arr = np.ma.add(raster_to_array(dens), 1) merge_arr = dens_arr / np.ma.sqrt(prox_arr) array_to_raster(merge_arr, reference_raster=prox, out_raster=merge) array_to_raster(standardise_filter(merge_arr.astype('float32')), reference_raster=prox, out_raster=folder + '/standardized/s1_wet_perm.tif') # bands = [ # 's2_b04_10m_dry', # 's2_b04_10m_wet', # 's2_b08_10m_dry', # 's2_b08_10m_wet', # 's2_b12_10m_dry', # 's2_b12_10m_wet',
def calc_zonal(in_vector, in_rasters=[], prefixes=[], stats=['mean', 'med', 'std']): # Translate stats to integers stats_translated = enumerate_stats(stats) # Read the raster: raster_origin = gdal.Open(in_rasters[0]) # Read the vector vector = ogr.Open(in_vector, 1) vector_layer = vector.GetLayer(0) # Check that projections match vector_projection = vector_layer.GetSpatialRef() raster_projection = raster_origin.GetProjection() raster_projection_osr = osr.SpatialReference(raster_projection) vector_projection_osr = osr.SpatialReference() vector_projection_osr.ImportFromWkt(str(vector_projection)) if not vector_projection_osr.IsSame(raster_projection_osr): print('Vector projection: ', vector_projection_osr) print('Raster projection: ', raster_projection_osr) raise Exception('Projections do not match!') # Read raster data in overlap raster_transform = np.array(raster_origin.GetGeoTransform(), dtype=np.float64) raster_size = np.array( [raster_origin.RasterXSize, raster_origin.RasterYSize], dtype=np.int32) raster_extent = get_extent(raster_transform, raster_size) vector_extent = np.array(vector_layer.GetExtent(), dtype=np.float64) overlap_extent = get_intersection(raster_extent, vector_extent) if overlap_extent is False: print('raster_extent: ', raster_extent) print('vector_extent: ', vector_extent) raise Exception('Vector and raster do not overlap!') overlap_aligned_extent, overlap_aligned_rasterized_size, overlap_aligned_offset = align_extent( raster_transform, overlap_extent, raster_size) overlap_transform = np.array([ overlap_aligned_extent[0], raster_transform[1], 0, overlap_aligned_extent[3], 0, raster_transform[5], ], dtype=np.float64) overlap_size = overlap_size_calc(overlap_aligned_extent, raster_transform) # Loop the features vector_driver = ogr.GetDriverByName('Memory') vector_feature_count = vector_layer.GetFeatureCount() vector_layer.StartTransaction() # Create fields vector_layer_defn = vector_layer.GetLayerDefn() vector_field_counts = 
vector_layer_defn.GetFieldCount() vector_current_fields = [] # Get current fields for i in range(vector_field_counts): vector_current_fields.append(vector_layer_defn).GetFieldDefn( i).GetName() # Add fields where missing for stat in stats: for i in range(len(in_rasters)): field_name = f'{prefixes[i]}{stat}' if field_name not in vector_current_fields: field_defn = ogr.FieldDefn(field_name, ogr.OFTReal) vector_layer.CreateField(field_defn) rasterized_features = [] offsets = [] sizes = [] for raster_index, raster_value in enumerate(in_rasters): columns = {} for stat in stats: columns[prefixes[raster_index] + stat] = [] raster_data = raster_to_array(raster_value, crop=[ overlap_aligned_offset[0], overlap_aligned_offset[1], overlap_aligned_rasterized_size[0], overlap_aligned_rasterized_size[1], ]).astype(np.double) for n in range(vector_feature_count): vector_feature = vector_layer.GetNextFeature() if raster_index == 0: try: vector_geom = vector_feature.GetGeometryRef() except: vector_geom.Buffer(0) Warning('Invalid geometry at : ', n) if vector_geom is None: raise Exception('Invalid geometry. 
Could not fix.') feature_extent = vector_geom.GetEnvelope() # Create temp layer temp_vector_datasource = vector_driver.CreateDataSource( f'vector_{n}') temp_vector_layer = temp_vector_datasource.CreateLayer( 'temp_polygon', vector_projection, ogr.wkbPolygon) temp_vector_layer.CreateFeature(vector_feature.Clone()) feature_aligned_extent, feature_aligned_rasterized_size, feature_aligned_offset = align_extent( overlap_transform, feature_extent, overlap_size) rasterized_features.append( rasterize_vector(temp_vector_layer, feature_aligned_extent, feature_aligned_rasterized_size, raster_projection)) offsets.append(feature_aligned_offset) sizes.append(feature_aligned_rasterized_size) cropped_raster = raster_data[offsets[n][1]:offsets[n][1] + sizes[n][1], offsets[n][0]:offsets[n][0] + sizes[n][0], ] if rasterized_features[n] is None: for stat in stats: field_name = f'{prefixes[raster_index]}{stat}' vector_feature.SetField(field_name, None) elif cropped_raster is None: for stat in stats: field_name = f'{prefixes[raster_index]}{stat}' vector_feature.SetField(field_name, None) else: raster_data_masked = np.ma.masked_array( cropped_raster, mask=rasterized_features[n]).compressed() zonal_stats = global_statistics( raster_data_masked, translated_stats=stats_translated) nodata = np.isnan(zonal_stats).any() for index, stat in enumerate(stats): field_name = f'{prefixes[raster_index]}{stat}' if nodata is True: vector_feature.SetField(field_name, None) else: vector_feature.SetField( f'{prefixes[raster_index]}{stat}', float(zonal_stats[index])) vector_layer.SetFeature(vector_feature) progress(n, vector_feature_count, name=prefixes[raster_index]) vector_layer.ResetReading() vector_layer.CommitTransaction()