def super_sample_s2(B04_link, B08_link, B05_link=None, B06_link=None,
                    B07_link=None, B8A_link=None, out_folder='../raster/',
                    prefix='', suffix=''):
    """Super-sample Sentinel-2 20 m bands (B05-B07, B8A) to 10 m.

    Each requested 20 m band is pansharpened against a spectrally adjacent
    10 m band: B05 against B04, and B06/B07/B8A against B08. For every band a
    '<prefix><band><suffix>_float.tif' raster is written to out_folder;
    intermediate rasters are deleted afterwards.

    Args:
        B04_link: Path to the 10 m B04 raster.
        B08_link: Path to the 10 m B08 raster.
        B05_link, B06_link, B07_link, B8A_link: Optional paths to the 20 m
            bands to super-sample. At least one must be provided.
        out_folder: Destination folder for the output rasters.
        prefix: Prepended to the band name in output filenames.
        suffix: Appended to the band name in output filenames.
    """
    assert (isinstance(B05_link, str) or isinstance(B06_link, str)
            or isinstance(B07_link, str) or isinstance(B8A_link, str))

    paths = {
        'B04': B04_link,
        'B05': B05_link,
        'B06': B06_link,
        'B07': B07_link,
        'B08': B08_link,
        'B8A': B8A_link,
    }

    # Only the two 10 m anchor bands are ever written out as pseudo rasters.
    # (The original also read every supplied 20 m band into memory without
    # using the arrays — that wasted RAM and I/O, so those loads are gone.)
    bands = {
        'B04': raster_to_array(B04_link).astype('float32'),
        'B08': raster_to_array(B08_link).astype('float32'),
    }

    # A band is pansharpened exactly when its path was supplied.
    bands_to_pansharpen = [band for band in ('B05', 'B06', 'B07', 'B8A')
                           if paths[band] is not None]

    for band_x in bands_to_pansharpen:
        # B05 is spectrally closest to B04; the others sit next to B08.
        # ('==' instead of the original 'is': identity comparison on string
        # literals relies on CPython interning and is not guaranteed.)
        pseudo_band = 'B04' if band_x == 'B05' else 'B08'

        # Write the 10 m anchor band out as the pansharpening "pseudo" raster.
        pseudo_path = os.path.join(out_folder, f'{prefix}{band_x}{suffix}_pseudo.tif')
        array_to_raster(bands[pseudo_band], reference_raster=paths[pseudo_band], out_raster=pseudo_path)

        # Resample the 20 m band onto the anchor band's 10 m grid.
        low_res_10m = raster_to_array(resample(paths[band_x], reference_raster=paths[pseudo_band])).astype('float32')
        resampled_path = os.path.join(out_folder, f'{prefix}{band_x}{suffix}_resampled.tif')
        array_to_raster(low_res_10m, reference_raster=paths[pseudo_band], out_raster=resampled_path)
        low_res_10m = None  # Release the array before the pansharpen call.

        pansharpened_path = os.path.join(out_folder, f'{prefix}{band_x}{suffix}_float.tif')
        pansharpen(pseudo_path, resampled_path, pansharpened_path)

        # Clean up intermediates; only the *_float.tif output is kept.
        os.remove(resampled_path)
        os.remove(pseudo_path)
def mosaic_tile(
    list_of_SAFE_images,
    out_dir,
    out_name='mosaic',
    dst_projection=None,
    feather=True,
    target_quality=100,
    threshold_change=0.5,
    threshold_quality=10.0,
    feather_dist=21,
    feather_scl=5,
    filter_tracking=True,
    match_mean=True,
    allow_nodata=False,
    max_days=120,
    max_images_include=15,
    max_images_search=25,
    output_scl=True,
    output_tracking=True,
    output_quality=False,
    verbose=True,
):
    """Synthesise a (mostly) cloud-free Sentinel-2 mosaic for a single tile.

    Starts from the radiometrically best image and greedily patches in pixels
    from other acquisitions of the same tile until ``target_quality`` is
    reached or the search limits are exhausted. Optionally harmonises the
    statistics of the contributing images (``match_mean``) and feathers the
    seams between them (``feather``). Writes B02/B03/B04/B08 mosaics — plus
    optional SCL / tracking / quality rasters — into ``out_dir``.

    Args:
        list_of_SAFE_images: Paths to .SAFE products of the same tile (>= 2).
        out_dir: Output directory for all written rasters.
        out_name: Suffix embedded in every output filename.
        dst_projection: Optional target projection for the outputs.
        feather: Blend seams between contributing images.
        target_quality: Stop once the average quality (%) reaches this value.
        threshold_change: Minimum percentage change (and minimum global
            quality gain) an image must contribute to be included.
        threshold_quality: NOTE(review) — accepted but never referenced in
            this function body; confirm whether it should gate inclusion.
        feather_dist: Feather distance (pixels) between image layers.
        feather_scl: Feather distance (pixels) between SCL classes.
        filter_tracking: Mode-filter the tracking array to remove speckle.
        match_mean: Harmonise per-band statistics across contributing images.
        allow_nodata: Permit nodata to remain once max_images_search is hit.
        max_days: Maximum allowed time difference from the best image.
        max_images_include: Maximum number of images blended into the mosaic.
        max_images_search: Maximum number of candidate images inspected.
        output_scl: Write the merged scene-classification raster.
        output_tracking: Write the per-pixel source-image index raster.
        output_quality: Write the per-pixel quality raster.
        verbose: Print progress messages.
    """
    start_time = time()

    # Verify input
    assert isinstance(
        list_of_SAFE_images, list
    ), "list_of_SAFE_images is not a list. [path_to_safe_file1, path_to_safe_file2, ...]"
    assert isinstance(out_dir, str), f"out_dir is not a string: {out_dir}"
    assert isinstance(out_name, str), f"out_name is not a string: {out_name}"
    assert len(list_of_SAFE_images) > 1, "list_of_SAFE_images is empty or only a single image."

    if verbose:
        print('Selecting best image..')
    metadata = prepare_metadata(list_of_SAFE_images)

    # Sorted by best, so 0 is the best one.
    best_image = metadata[0]
    best_image_name = best_image['name']

    if verbose:
        print(f'Selected: {best_image_name} {out_name}')
    if verbose:
        print('Preparing base image..')

    # Per-pixel quality score and scene classification (SCL) of the base image.
    master_quality, master_scl = assess_radiometric_quality(best_image)

    # tracking_array[y, x] = index into metadata of the image that supplied
    # that pixel. uint8 caps this at 255 images; max_images_include bounds it.
    tracking_array = np.zeros(master_quality.shape, dtype='uint8')

    if match_mean is True:
        # Keep the SCL in memory for the harmonisation step later.
        metadata[0]['scl'] = np.copy(master_scl)

    time_limit = (max_days * 86400)  # max_days in seconds.
    master_quality_avg = (master_quality.sum() / master_quality.size)

    i = 1  # The 0 index is for the best image
    processed_images_indices = [0]

    # Loop the images and update the tracking array (SYNTHESIS)
    if verbose:
        print(
            f'Initial. tracking array: (quality {round(master_quality_avg, 2)}%) (0/{max_days} days) (goal {target_quality}%)'
        )

    # NOTE(review): the bound `i < len(metadata) - 1` means the final entry of
    # metadata is never considered — confirm this is intentional.
    while ((master_quality_avg < target_quality)
           and i < len(metadata) - 1
           and len(processed_images_indices) <= max_images_include):
        # Skip images acquired too far from the base image in time.
        if (metadata[i]['time_difference'] > time_limit):
            i += 1
            continue

        # Past the search limit we only continue while nodata remains.
        if (i >= max_images_search):
            if (master_scl == 0).sum() == 0 or allow_nodata is True:
                break
            if verbose:
                print(
                    'Continuing dispite reaching max_images_search as there is still nodata in tile..'
                )

        # Time difference (whole days) from the base image.
        td = int(round(metadata[i]['time_difference'] / 86400, 0))

        # Assess quality of current image
        quality, scl = assess_radiometric_quality(metadata[i])

        # Calculate changes. Always update nodata (master_scl == 0).
        change_mask = (quality > master_quality) | ((master_scl == 0) & (scl != 0))
        percent_change = (change_mask.sum() / change_mask.size) * 100

        # Calculate the global change in quality if this image were merged.
        quality_global = np.where(change_mask, quality, master_quality)
        quality_global_avg = quality_global.sum() / quality_global.size
        quality_global_change = quality_global_avg - master_quality_avg

        # Include only images that change enough pixels AND raise the
        # overall quality by more than the threshold.
        if ((percent_change > threshold_change) and (quality_global_change > threshold_change)):
            # Update the trackers.
            tracking_array = np.where(change_mask, i, tracking_array).astype('uint8')
            master_scl = np.where(change_mask, scl, master_scl).astype('intc')
            master_quality = np.where(change_mask, quality, master_quality).astype(np.double)
            master_quality_avg = quality_global_avg

            # Save the scene classification in memory. This cost a bit of RAM but makes harmonisation much faster..
            metadata[i]['scl'] = scl.astype('uint8')

            # Append to the array that keeps track on which images are used in the synth process..
            processed_images_indices.append(i)

            img_name = metadata[i]['name']
            if verbose:
                print(
                    f'Updating tracking array: (quality {round(master_quality_avg, 2)}%) ({td}/{max_days} days) (goal {target_quality}%) (name {img_name})'
                )
        else:
            if verbose:
                print(
                    f'Skipping image due to low change.. ({round(threshold_change, 3)}% threshold) ({td}/{max_days} days)'
                )

        i += 1

    # Free memory
    change_mask = None
    change_mask_inv = None
    quality_global = None
    quality = None
    scl = None

    # Only merge images if there are more than one.
    multiple_images = len(processed_images_indices) > 1

    # ------------------------------------------------------------------
    # Harmonisation: per-band median/MAD statistics for each contributing
    # image, computed per SCL class (4 = vegetation, 5 = bare soil,
    # 6 = water, "rest" = everything else), weighted by pixel contribution.
    # ------------------------------------------------------------------
    if match_mean is True and multiple_images is True:
        if verbose:
            print('Harmonising layers..')

        # Contribution weight of each image = share of mosaic pixels it supplies.
        total_counts = 0
        counts = []
        weights = []
        for i in processed_images_indices:
            metadata[i]['stats'] = {'B02': {}, 'B03': {}, 'B04': {}, 'B08': {}}
            pixel_count = (tracking_array == i).sum()
            total_counts += pixel_count
            counts.append(pixel_count)

        for i in range(len(processed_images_indices)):
            w = counts[i] / total_counts
            weights.append(w)

        # Per-image statistics, overall and per SCL class (suffix _4/_5/_6).
        medians = {'B02': [], 'B03': [], 'B04': [], 'B08': []}
        medians_4 = {'B02': [], 'B03': [], 'B04': [], 'B08': []}
        medians_5 = {'B02': [], 'B03': [], 'B04': [], 'B08': []}
        medians_6 = {'B02': [], 'B03': [], 'B04': [], 'B08': []}
        madstds = {'B02': [], 'B03': [], 'B04': [], 'B08': []}
        madstds_4 = {'B02': [], 'B03': [], 'B04': [], 'B08': []}
        madstds_5 = {'B02': [], 'B03': [], 'B04': [], 'B08': []}
        madstds_6 = {'B02': [], 'B03': [], 'B04': [], 'B08': []}

        for v, i in enumerate(processed_images_indices):
            # Masks are True where the pixel should be EXCLUDED (np.ma semantics).
            layer_mask_4 = metadata[i]['scl'] != 4
            layer_mask_4_sum = (layer_mask_4 == False).sum()  # pixels of class 4
            layer_mask_5 = metadata[i]['scl'] != 5
            layer_mask_5_sum = (layer_mask_5 == False).sum()
            layer_mask_6 = metadata[i]['scl'] != 6
            layer_mask_6_sum = (layer_mask_6 == False).sum()
            # "rest": keep classes 4, 5, 6 and 7 for the overall statistic.
            layer_mask = (layer_mask_4 | layer_mask_5 | layer_mask_6 | (metadata[i]['scl'] == 7)) == False

            for band in ['B02', 'B03', 'B04', 'B08']:
                if band == 'B08':
                    # B08 is 10 m; resample to the 20 m grid the SCL masks use.
                    array = raster_to_array(
                        resample(metadata[i]['path']['10m'][band],
                                 reference_raster=metadata[i]['path']['20m']['B02']))
                else:
                    array = raster_to_array(metadata[i]['path']['20m'][band])

                calc_array = np.ma.array(array, mask=layer_mask)
                calc_array_4 = np.ma.array(array, mask=layer_mask_4)
                calc_array_5 = np.ma.array(array, mask=layer_mask_5)
                calc_array_6 = np.ma.array(array, mask=layer_mask_6)

                med, mad = madstd(calc_array)

                # Per-class stats need a minimum sample size; otherwise fall
                # back to the overall statistic.
                if layer_mask_4_sum > 1000:
                    med_4, mad_4 = madstd(calc_array_4)
                else:
                    med_4, mad_4 = madstd(calc_array)
                if layer_mask_5_sum > 1000:
                    med_5, mad_5 = madstd(calc_array_5)
                else:
                    med_5, mad_5 = madstd(calc_array)
                if layer_mask_6_sum > 1000:
                    med_6, mad_6 = madstd(calc_array_6)
                else:
                    med_6, mad_6 = madstd(calc_array)

                # Degenerate statistics (zero) fall back to the unmasked
                # array / the overall statistic so later scaling never
                # divides by zero.
                if med == 0 or mad == 0:
                    med, mad = madstd(array)
                if med_4 == 0 or mad_4 == 0:
                    med_4, mad_4 = (med, mad)
                if med_5 == 0 or mad_5 == 0:
                    med_5, mad_5 = (med, mad)
                if med_6 == 0 or mad_6 == 0:
                    med_6, mad_6 = (med, mad)

                medians[band].append(med)
                medians_4[band].append(med_4)
                medians_5[band].append(med_5)
                medians_6[band].append(med_6)
                madstds[band].append(mad)
                madstds_4[band].append(mad_4)
                madstds_5[band].append(mad_5)
                madstds_6[band].append(mad_6)

        # Harmonisation targets: contribution-weighted averages across images.
        targets_median = {'B02': None, 'B03': None, 'B04': None, 'B08': None}
        targets_median_4 = {'B02': None, 'B03': None, 'B04': None, 'B08': None}
        targets_median_5 = {'B02': None, 'B03': None, 'B04': None, 'B08': None}
        targets_median_6 = {'B02': None, 'B03': None, 'B04': None, 'B08': None}
        targets_madstd = {'B02': None, 'B03': None, 'B04': None, 'B08': None}
        targets_madstd_4 = {'B02': None, 'B03': None, 'B04': None, 'B08': None}
        targets_madstd_5 = {'B02': None, 'B03': None, 'B04': None, 'B08': None}
        targets_madstd_6 = {'B02': None, 'B03': None, 'B04': None, 'B08': None}

        for band in ['B02', 'B03', 'B04', 'B08']:
            targets_median[band] = np.average(medians[band], weights=weights)
            targets_median_4[band] = np.average(medians_4[band], weights=weights)
            targets_median_5[band] = np.average(medians_5[band], weights=weights)
            targets_median_6[band] = np.average(medians_6[band], weights=weights)
            targets_madstd[band] = np.average(madstds[band], weights=weights)
            targets_madstd_4[band] = np.average(madstds_4[band], weights=weights)
            targets_madstd_5[band] = np.average(madstds_5[band], weights=weights)
            targets_madstd_6[band] = np.average(madstds_6[band], weights=weights)

        # Store source and target statistics on each image's metadata; any
        # non-positive source statistic falls back to the target (no-op scale).
        for v, i in enumerate(processed_images_indices):
            for band in ['B02', 'B03', 'B04', 'B08']:
                metadata[i]['stats'][band]['src_median'] = medians[band][v] if medians[band][v] > 0 else targets_median[band]
                metadata[i]['stats'][band]['src_median_4'] = medians_4[band][v] if medians_4[band][v] > 0 else targets_median_4[band]
                metadata[i]['stats'][band]['src_median_5'] = medians_5[band][v] if medians_5[band][v] > 0 else targets_median_5[band]
                metadata[i]['stats'][band]['src_median_6'] = medians_6[band][v] if medians_6[band][v] > 0 else targets_median_6[band]
                metadata[i]['stats'][band]['src_madstd'] = madstds[band][v] if madstds[band][v] > 0 else targets_madstd[band]
                metadata[i]['stats'][band]['src_madstd_4'] = madstds_4[band][v] if madstds_4[band][v] > 0 else targets_madstd_4[band]
                metadata[i]['stats'][band]['src_madstd_5'] = madstds_5[band][v] if madstds_5[band][v] > 0 else targets_madstd_5[band]
                metadata[i]['stats'][band]['src_madstd_6'] = madstds_6[band][v] if madstds_6[band][v] > 0 else targets_madstd_6[band]
                metadata[i]['stats'][band]['target_median'] = targets_median[band]
                metadata[i]['stats'][band]['target_median_4'] = targets_median_4[band]
                metadata[i]['stats'][band]['target_median_5'] = targets_median_5[band]
                metadata[i]['stats'][band]['target_median_6'] = targets_median_6[band]
                metadata[i]['stats'][band]['target_madstd'] = targets_madstd[band]
                metadata[i]['stats'][band]['target_madstd_4'] = targets_madstd_4[band]
                metadata[i]['stats'][band]['target_madstd_5'] = targets_madstd_5[band]
                metadata[i]['stats'][band]['target_madstd_6'] = targets_madstd_6[band]

    # Clear memory of scl images
    for j in range(len(metadata)):
        metadata[j]['scl'] = None

    # Optional auxiliary outputs (still on the 20 m grid here).
    if output_tracking is True:
        array_to_raster(tracking_array.astype('uint8'),
                        reference_raster=best_image['path']['20m']['B04'],
                        out_raster=os.path.join(out_dir, f"tracking_{out_name}.tif"),
                        dst_projection=dst_projection)

    if output_scl is True:
        array_to_raster(master_scl.astype('uint8'),
                        reference_raster=best_image['path']['20m']['B04'],
                        out_raster=os.path.join(out_dir, f"scl_{out_name}.tif"),
                        dst_projection=dst_projection)

    if output_quality is True:
        array_to_raster(master_quality.astype('float32'),
                        reference_raster=best_image['path']['20m']['B04'],
                        out_raster=os.path.join(out_dir, f"quality_{out_name}.tif"),
                        dst_projection=dst_projection)

    # Resample scl and tracking array to the 10 m grid used for band merging.
    tracking_array = raster_to_array(
        resample(array_to_raster(
            tracking_array,
            reference_raster=best_image['path']['20m']['B04']),
            reference_raster=best_image['path']['10m']['B04']))

    master_scl = raster_to_array(
        resample(array_to_raster(
            master_scl,
            reference_raster=best_image['path']['20m']['B04']),
            reference_raster=best_image['path']['10m']['B04']))

    # Run a mode filter on the tracking array
    if filter_tracking is True and multiple_images is True:
        if verbose:
            print('Filtering tracking array..')
        tracking_array = mode_filter(tracking_array, 7).astype('uint8')

    # Feather the edges between joined images (ensure enough valid pixels are on each side..)
    if feather is True and multiple_images is True:
        feathers = {}

        # NOTE(review): this print is not guarded by `verbose`, unlike the others.
        print('Precalculating classification feathers..')

        # Per-SCL-class feather weights (classes other than 4/5/6 are "rest").
        feather_rest = feather_s2_filter(
            master_scl, np.array([0, 1, 2, 3, 7, 8, 9, 10, 11], dtype='intc'),
            feather_scl).astype('float32')
        feather_4 = feather_s2_filter(master_scl, np.array([4], dtype='intc'),
                                      feather_scl).astype('float32')
        feather_5 = feather_s2_filter(master_scl, np.array([5], dtype='intc'),
                                      feather_scl).astype('float32')
        feather_6 = feather_s2_filter(master_scl, np.array([6], dtype='intc'),
                                      feather_scl).astype('float32')

        if verbose:
            print('Precalculating inter-layer feathers..')

        # Per-image feather weights over the tracking array.
        for i in processed_images_indices:
            feathers[str(i)] = feather_s2_filter(
                tracking_array, np.array([i], dtype='intc'),
                feather_dist).astype('float32')

    # Without feathering, harmonisation is applied per SCL class via hard masks.
    if match_mean is True and feather is False and len(processed_images_indices) > 1:
        mask_4 = (master_scl == 4)
        mask_5 = (master_scl == 5)
        mask_6 = (master_scl == 6)
        mask_rest = (master_scl != 4) & (master_scl != 5) & (master_scl != 6)

    # ------------------------------------------------------------------
    # Merge the 10 m bands image by image and write the mosaics.
    # ------------------------------------------------------------------
    bands_to_output = ['B02', 'B03', 'B04', 'B08']
    if verbose:
        print('Merging band data..')
    for band in bands_to_output:
        if verbose:
            print(f'Writing: {band}..')
        base_image = raster_to_array(metadata[0]['path']['10m'][band]).astype('float32')

        for i in processed_images_indices:
            if match_mean and len(processed_images_indices) > 1:
                # Per-image harmonisation statistics computed above.
                src_med = metadata[i]['stats'][band]['src_median']
                src_med_4 = metadata[i]['stats'][band]['src_median_4']
                src_med_5 = metadata[i]['stats'][band]['src_median_5']
                src_med_6 = metadata[i]['stats'][band]['src_median_6']
                src_mad = metadata[i]['stats'][band]['src_madstd']
                src_mad_4 = metadata[i]['stats'][band]['src_madstd_4']
                src_mad_5 = metadata[i]['stats'][band]['src_madstd_5']
                src_mad_6 = metadata[i]['stats'][band]['src_madstd_6']
                target_med = metadata[i]['stats'][band]['target_median']
                target_med_4 = metadata[i]['stats'][band]['target_median_4']
                target_med_5 = metadata[i]['stats'][band]['target_median_5']
                target_med_6 = metadata[i]['stats'][band]['target_median_6']
                target_mad = metadata[i]['stats'][band]['target_madstd']
                target_mad_4 = metadata[i]['stats'][band]['target_madstd_4']
                target_mad_5 = metadata[i]['stats'][band]['target_madstd_5']
                target_mad_6 = metadata[i]['stats'][band]['target_madstd_6']

            if i == 0:
                # Base image: harmonise in place (no pixels are replaced).
                if match_mean and len(processed_images_indices) > 1:
                    dif = base_image - src_med
                    dif_4 = base_image - src_med_4
                    dif_5 = base_image - src_med_5
                    dif_6 = base_image - src_med_6

                    if feather is True and len(processed_images_indices) > 1:
                        # Blend the per-class scaled versions with the
                        # class feather weights.
                        base_image = (((dif * target_mad) / src_mad) + target_med) * feather_rest
                        base_image = np.add(
                            base_image,
                            (((dif_4 * target_mad_4) / src_mad_4) + target_med_4) * feather_4)
                        base_image = np.add(
                            base_image,
                            (((dif_5 * target_mad_5) / src_mad_5) + target_med_5) * feather_5)
                        base_image = np.add(
                            base_image,
                            (((dif_6 * target_mad_6) / src_mad_6) + target_med_6) * feather_6)
                    else:
                        # Hard per-class masks instead of feathered blending.
                        base_image_rest = ((dif * target_mad) / src_mad) + target_med
                        base_image_4 = ((dif_4 * target_mad_4) / src_mad_4) + target_med_4
                        base_image_5 = ((dif_5 * target_mad_5) / src_mad_5) + target_med_5
                        base_image_6 = ((dif_6 * target_mad_6) / src_mad_6) + target_med_6
                        base_image = np.where(mask_rest, base_image_rest, base_image)
                        base_image = np.where(mask_4, base_image_4, base_image)
                        base_image = np.where(mask_5, base_image_5, base_image)
                        base_image = np.where(mask_6, base_image_6, base_image)

                    # Clamp harmonised reflectances at zero.
                    base_image = np.where(base_image >= 0, base_image, 0)

                if feather is True and len(processed_images_indices) > 1:
                    # Weight the base layer by its inter-layer feather.
                    base_image = base_image * feathers[str(i)]
            else:
                add_band = raster_to_array(metadata[i]['path']['10m'][band]).astype('float32')

                if match_mean:
                    dif = add_band - src_med
                    dif_4 = add_band - src_med_4
                    dif_5 = add_band - src_med_5
                    dif_6 = add_band - src_med_6

                    if feather is True:
                        add_band = (((dif * target_mad) / src_mad) + target_med) * feather_rest
                        add_band = np.add(
                            add_band,
                            (((dif_4 * target_mad_4) / src_mad_4) + target_med_4) * feather_4)
                        add_band = np.add(
                            add_band,
                            (((dif_5 * target_mad_5) / src_mad_5) + target_med_5) * feather_5)
                        add_band = np.add(
                            add_band,
                            (((dif_6 * target_mad_6) / src_mad_6) + target_med_6) * feather_6)
                    else:
                        add_band_rest = ((dif * target_mad) / src_mad) + target_med
                        add_band_4 = ((dif_4 * target_mad_4) / src_mad_4) + target_med_4
                        add_band_5 = ((dif_5 * target_mad_5) / src_mad_5) + target_med_5
                        add_band_6 = ((dif_6 * target_mad_6) / src_mad_6) + target_med_6
                        add_band = np.where(mask_rest, add_band_rest, add_band)
                        add_band = np.where(mask_4, add_band_4, add_band)
                        add_band = np.where(mask_5, add_band_5, add_band)
                        add_band = np.where(mask_6, add_band_6, add_band)

                    # Clamp harmonised reflectances at zero.
                    add_band = np.where(add_band >= 0, add_band, 0)

                if feather is True:
                    # Feathered sum: layer feathers act as blend weights.
                    base_image = np.add(base_image, (add_band * feathers[str(i)]))
                else:
                    # Hard replacement of the pixels this image supplies.
                    base_image = np.where(tracking_array == i, add_band, base_image).astype('float32')

        array_to_raster(np.rint(base_image).astype('uint16'),
                        reference_raster=best_image['path']['10m'][band],
                        out_raster=os.path.join(out_dir, f"{band}_{out_name}.tif"),
                        dst_projection=dst_projection)

    if verbose:
        print(f'Completed mosaic in: {round((time() - start_time) / 60, 1)}m')
import sys; sys.path.append('..')
import numpy as np
from lib.raster_io import raster_to_array, array_to_raster

base = '/mnt/c/Users/caspe/Desktop/Projects/multicriteriaAnalysis_vejdirektorat/'

# Multicriteria analysis: each criterion raster is scored as
# rating * weight, the scored layers are stacked and summed per pixel.
# Table of (filename, rating, weight).
criteria = [
    ('andrekomplan2.tif', 4, 0.013),
    ('unesco_area2.tif', 8, 0.169),
    ('unesco_buf2.tif', 1, 0.086),
    ('besnatur2.tif', 6, 0.038),
    ('besnat_sammen2.tif', 8, 0.049),
    ('fundarealbesk2.tif', 5, 0.019),
    ('fundfortid2.tif', 9, 0.089),
    ('fundbesk_sam2.tif', 7, 0.026),
    ('bygn_fred2.tif', 6, 0.067),
    ('boligo500100012.tif', 4, 0.010),
    ('foreneligfred2.tif', 2, 0.011),
    ('boligomr_komplan000500mbuff_v2.tif', 8, 0.182),
    ('fred_fredforslag_v2.tif', 9, 0.084),
    ('natura2000_korr_v2.tif', 10, 0.237),
]

# Nodata (0) is filled with 0 so it contributes nothing to the sum.
layers = np.array([
    raster_to_array(f'{base}{name}', fill_value=0, src_nodata=0, filled=True) * rating * weight
    for name, rating, weight in criteria
])

array_to_raster(
    np.sum(layers, axis=0),
    out_raster=f'{base}multicriteria_victor2.tif',
    reference_raster=f'{base}andrekomplan2.tif',
    src_nodata=0,
    dst_nodata=None,
)
def reproject(in_raster, out_raster=None, reference_raster=None,
              target_projection=None, resampling=0, output_format='MEM',
              quiet=True, compress=True):
    """Reproject a raster to the projection of a reference raster or to an
    explicitly given target projection.

    Args:
        in_raster: Path, gdal.Dataset, or numpy array (an array additionally
            requires reference_raster for georeferencing).
        out_raster: Output path; required unless output_format is 'MEM'.
        reference_raster: Raster whose projection becomes the target.
            Mutually exclusive with target_projection.
        target_projection: EPSG code, WKT string, or pyproj CRS object.
        resampling: GDAL resampling algorithm id (0 = nearest neighbour).
        output_format: GDAL driver name; 'MEM' keeps the result in memory.
        quiet: Unused; kept for interface compatibility.
        compress: Apply DEFLATE compression to non-MEM outputs.

    Returns:
        gdal.Dataset when output_format == 'MEM', otherwise out_raster.
    """
    # Is the output format correct?
    if out_raster is None and output_format != 'MEM':
        raise AttributeError(
            "If output_format is not MEM, out_raster must be defined")

    # If out_raster is specified, default to GTiff output format.
    if out_raster is not None and output_format == 'MEM':
        output_format = 'GTiff'

    if out_raster is None:
        out_raster = 'ignored'  # GDAL expects a string no matter what.
    else:
        assert os.path.isdir(os.path.dirname(
            out_raster)), f'Output folder does not exists: {out_raster}'

    # Fixed message: it previously referred to a nonexistent 'target_epsg'.
    assert reference_raster is None or target_projection is None, 'reference_raster and target_projection cannot be applied at the same time.'

    # Resolve the input to a GDAL dataset.
    if isinstance(in_raster, gdal.Dataset):  # Already a GDAL dataset.
        source_raster = in_raster
    elif isinstance(in_raster, np.ndarray):
        # Arrays must be georeferenced via the reference raster first.
        try:
            source_raster = array_to_raster(
                in_raster, reference_raster=reference_raster)
        except Exception:
            raise Exception('Unable to transform in_raster.')
    else:
        try:
            source_raster = gdal.Open(in_raster, gdal.GA_ReadOnly)
        except Exception:
            raise Exception('Unable to read in_raster.')
        # gdal.Open returns None (instead of raising) on unreadable input.
        if source_raster is None:
            raise Exception('Unable to read in_raster.')

    # Resolve the target projection to a pyproj CRS.
    if reference_raster is not None:
        if isinstance(reference_raster, gdal.Dataset):  # Already a GDAL dataset.
            target_projection = CRS.from_wkt(reference_raster.GetProjection())
        else:
            try:
                target_projection = CRS.from_wkt(
                    gdal.Open(reference_raster,
                              gdal.GA_ReadOnly).GetProjection())
            except Exception:
                raise Exception('Unable to read reference_raster.')
    elif isinstance(target_projection, CRS):
        pass  # Already a CRS object; nothing to convert.
    else:
        # Try EPSG code first, then WKT.
        try:
            target_projection = CRS.from_epsg(target_projection)
        except Exception:
            try:
                target_projection = CRS.from_wkt(target_projection)
            except Exception:
                raise Exception('Unable to read target_projection')

    driver = gdal.GetDriverByName(output_format)
    datatype = source_raster.GetRasterBand(1).DataType

    # If the output is not memory, set compression options.
    creation_options = []
    if compress is True:
        if output_format != 'MEM':
            if datatype_is_float(datatype) is True:
                predictor = 3  # Float predictor
            else:
                predictor = 2  # Integer predictor
            creation_options = [
                'COMPRESS=DEFLATE', f'PREDICTOR={predictor}',
                'NUM_THREADS=ALL_CPUS', 'BIGTIFF=YES'
            ]

    og_projection_osr = osr.SpatialReference()
    og_projection_osr.ImportFromWkt(source_raster.GetProjection())

    dst_projection_osr = osr.SpatialReference()
    dst_projection_osr.ImportFromWkt(target_projection.to_wkt())

    og_transform = source_raster.GetGeoTransform()
    og_x_size = source_raster.RasterXSize
    og_y_size = source_raster.RasterYSize

    coord_transform = osr.CoordinateTransformation(og_projection_osr,
                                                   dst_projection_osr)

    o_ulx, xres, xskew, o_uly, yskew, yres = og_transform
    o_lrx = o_ulx + (og_x_size * xres)
    o_lry = o_uly + (og_y_size * yres)

    # Extent of the source raster in its own projection.
    og_col = (o_lrx - o_ulx)
    og_row = (o_uly - o_lry)

    # Transform all four corners: the reprojected bounding box is the
    # envelope of the transformed corners.
    ulx, uly, ulz = coord_transform.TransformPoint(float(o_ulx), float(o_uly))
    urx, ury, urz = coord_transform.TransformPoint(float(o_lrx), float(o_uly))
    lrx, lry, lrz = coord_transform.TransformPoint(float(o_lrx), float(o_lry))
    llx, lly, llz = coord_transform.TransformPoint(float(o_ulx), float(o_lry))

    dst_col = max(lrx, urx) - min(llx, ulx)
    dst_row = max(ury, uly) - min(lry, lly)

    # Keep roughly the same pixel count; derive the new pixel size from it.
    cols = int((dst_col / og_col) * og_x_size)
    rows = int((dst_row / og_row) * og_y_size)

    dst_pixel_width = dst_col / cols
    dst_pixel_height = dst_row / rows

    dst_transform = (min(ulx, llx), dst_pixel_width, -0.0, max(uly, ury), 0.0,
                     -dst_pixel_height)

    destination_dataframe = driver.Create(out_raster, cols, rows, 1, datatype,
                                          creation_options)
    destination_dataframe.SetProjection(target_projection.to_wkt())
    destination_dataframe.SetGeoTransform(dst_transform)

    gdal.ReprojectImage(source_raster, destination_dataframe,
                        og_projection_osr.ExportToWkt(),
                        target_projection.to_wkt(), resampling)

    destination_dataframe.FlushCache()

    if output_format == 'MEM':
        return destination_dataframe
    else:
        destination_dataframe = None  # Close the dataset to flush to disk.
        return out_raster
def texture_variance(self): before = time.time() try: temp_dir = self._make_temp_dir() vis = self.get_custom_image( f'{self.metadata["basename"]}_vis_pca_10m') nir = self.get_custom_image( f'{self.metadata["basename"]}_nir_pca_10m') swir = self.get_custom_image( f'{self.metadata["basename"]}_swir_pca_10m') # vis_stats_3 = local_stats(vis, os.path.join(temp_dir, f'{self.metadata["basename"]}_vis_pca_stats_rad3_10m.tif'), options={'radius': 3}, band=2) # vis_stats_2 = local_stats(vis, os.path.join(temp_dir, f'{self.metadata["basename"]}_vis_pca_stats_rad2_10m.tif'), options={'radius': 2}, band=2) # vis_stats_1 = local_stats(vis, os.path.join(temp_dir, f'{self.metadata["basename"]}_vis_pca_stats_rad1_10m.tif'), options={'radius': 1}, band=2) # vis3_variance_arr = raster_to_array(os.path.join(temp_dir, f'{self.metadata["basename"]}_vis_pca_stats_rad3_10m.tif')) # vis2_variance_arr = raster_to_array(os.path.join(temp_dir, f'{self.metadata["basename"]}_vis_pca_stats_rad2_10m.tif')) # vis1_variance_arr = raster_to_array(os.path.join(temp_dir, f'{self.metadata["basename"]}_vis_pca_stats_rad1_10m.tif')) # with np.errstate(divide='ignore', invalid='ignore'): # vis_mean_variance = np.true_divide( # np.add( # np.sqrt(vis3_variance_arr), # np.sqrt(vis2_variance_arr), # np.sqrt(vis1_variance_arr), # ), 3) # array_to_raster(vis_mean_variance, reference_raster=vis, out_raster=os.path.join(self.folders['custom'], f'{self.metadata["basename"]}_vis_pca_var_texture_10m.tif')) # nir_stats_3 = local_stats(nir, os.path.join(temp_dir, f'{self.metadata["basename"]}_nir_pca_stats_rad3_10m.tif'), options={'radius': 3}, band=2) # nir_stats_2 = local_stats(nir, os.path.join(temp_dir, f'{self.metadata["basename"]}_nir_pca_stats_rad2_10m.tif'), options={'radius': 2}, band=2) # nir_stats_1 = local_stats(nir, os.path.join(temp_dir, f'{self.metadata["basename"]}_nir_pca_stats_rad1_10m.tif'), options={'radius': 1}, band=2) # nir3_variance_arr = raster_to_array(os.path.join(temp_dir, 
f'{self.metadata["basename"]}_nir_pca_stats_rad3_10m.tif')) # nir2_variance_arr = raster_to_array(os.path.join(temp_dir, f'{self.metadata["basename"]}_nir_pca_stats_rad2_10m.tif')) # nir1_variance_arr = raster_to_array(os.path.join(temp_dir, f'{self.metadata["basename"]}_nir_pca_stats_rad1_10m.tif')) # with np.errstate(divide='ignore', invalid='ignore'): # nir_mean_variance = np.true_divide( # np.add( # np.sqrt(nir3_variance_arr), # np.sqrt(nir2_variance_arr), # np.sqrt(nir1_variance_arr), # ), 3) # array_to_raster(nir_mean_variance, reference_raster=nir, out_raster=os.path.join(self.folders['custom'], f'{self.metadata["basename"]}_nir_pca_var_texture_10m.tif')) swir_arr = raster_to_array(swir).astype('uint32') swir_rast = array_to_raster( swir_arr, reference_raster=swir, out_raster=os.path.join( temp_dir, f'{self.metadata["basename"]}_swir_pca_uint32_10m.tif')) swir_stats_3 = local_stats( swir_rast, os.path.join( temp_dir, f'{self.metadata["basename"]}_swir_pca_uint32_stats_10m.tif' ), options={'radius': 3}, band=2) # swir_stats_2 = local_stats(swir, os.path.join(temp_dir, f'{self.metadata["basename"]}_swir_pca_stats_rad2_10m.tif'), options={'radius': 2}, band=2) # swir_stats_1 = local_stats(swir, os.path.join(temp_dir, f'{self.metadata["basename"]}_swir_pca_stats_rad1_10m.tif'), options={'radius': 1}, band=2) swir3_variance_arr = raster_to_array( os.path.join(temp_dir, swir_stats_3)) # swir2_variance_arr = raster_to_array(os.path.join(temp_dir, swir_stats_2)) # swir1_variance_arr = raster_to_array(os.path.join(temp_dir, swir_stats_1)) array_to_raster( swir3_variance_arr, reference_raster=swir, out_raster=os.path.join( self.folders['custom'], f'{self.metadata["basename"]}_swir_pca_var_texture_10m_rad3.tif' )) # with np.errstate(divide='ignore', invalid='ignore'): # swir_mean_sqrt3 = np.sqrt(swir3_variance_arr) # swir_mean_sqrt2 = np.sqrt(swir2_variance_arr) # swir_mean_sqrt1 = np.sqrt(swir1_variance_arr) # swir_mean_sqrt_sum = np.add(swir_mean_sqrt3, 
swir_mean_sqrt2) # swir_mean_sqrt_sum = np.add(swir_mean_sqrt_sum, swir_mean_sqrt1) # swir_mean_variance = np.true_divide(swir_mean_sqrt_sum, 3).astype('float32') # swir_mean_variance = np.true_divide( # np.add( # np.sqrt(swir3_variance_arr), # np.sqrt(swir2_variance_arr), # np.sqrt(swir1_variance_arr), # ), 3).astype('float32') # array_to_raster(swir_mean_variance, reference_raster=swir, out_raster=os.path.join(self.folders['custom'], f'{self.metadata["basename"]}_swir_pca_var_texture_10m.tif')) finally: self.update_custom() shutil.rmtree(temp_dir) print(f'execution took: {round(time.time() - before, 2)}s')
import sys; sys.path.append('..')
import numpy as np
from lib.raster_io import raster_to_array, array_to_raster
from lib.stats_filters import mean_filter, median_filter, standard_deviation_filter, cdef_filter

# Apply the cdef filter to the precomputed max z-score raster and write the
# result next to the inputs. (The commented-out z-score experiment and the
# unused b16/b22 array loads were removed — they did work without output.)
folder = '/mnt/c/users/caspe/desktop/data/satf_preprocess/'
b16_path = folder + '16-02-2019_crop.tif'
b22_path = folder + '22-02-2019_crop.tif'

maxz = raster_to_array(folder + '__max_z.tif')

# b16_path only supplies the georeference for the output raster.
array_to_raster(cdef_filter(maxz),
                reference_raster=b16_path,
                out_raster=folder + '__max_z_cdf.tif')
def super_sample_bands(self, bands=None):
    """Super-sample the requested 20m Sentinel-2 bands to 10m.

    B05/B06/B07 are pan-sharpened against a synthetic high-resolution
    band built as a spectral-distance-weighted blend of B04 and B08;
    B8A is pan-sharpened directly against B08. B11/B12 are plainly
    resampled to 10m, and the native 10m bands (B02/B03/B04/B08) are
    copied into the custom folder with the same naming scheme.

    :param bands: Iterable of 20m band names to super-sample.
        Defaults to ['B05', 'B06', 'B07', 'B8A'].
        (Was a mutable default argument — now a None sentinel.)
    :returns: None. Writes '*_ss_10m.tif' rasters into
        self.folders['custom'] as a side effect.
    """
    if bands is None:
        bands = ['B05', 'B06', 'B07', 'B8A']

    # Create the temp dir BEFORE the try so the finally-block's rmtree
    # never sees an unbound temp_dir if creation itself fails.
    temp_dir = self._make_temp_dir()
    try:
        B4 = self.get_raw_image('B04', 10)
        B8 = self.get_raw_image('B08', 10)
        B4_arr = raster_to_array(B4)
        B8_arr = raster_to_array(B8)

        if 'B8A' in bands:
            # Reuse a previously resampled version when one exists.
            # Guard the is-not-False check BEFORE os.path.exists():
            # exists(False) treats False as file descriptor 0.
            band_potential_path = self.get_custom_image(
                f'{self.metadata["basename"]}_B8A_rs_10m')
            if band_potential_path is not False and os.path.exists(
                    band_potential_path):
                resampled_name = band_potential_path
            else:
                resampled_name = os.path.join(
                    temp_dir, f'{self.metadata["basename"]}_B8A_rs_10m.tif')
                resample(self.get_raw_image('B8A', 20),
                         reference_raster=B8,
                         out_raster=resampled_name)

            # B8A overlaps B08 spectrally, so sharpen directly against B08.
            pansharpened_name = os.path.join(
                self.folders['custom'],
                f'{self.metadata["basename"]}_B8A_ss_10m.tif')
            pansharpen(B8,
                       resampled_name,
                       pansharpened_name,
                       out_datatype='uint16')

        for band in bands:
            # B8A was handled above.
            if band == 'B8A':
                continue

            band_path = self.get_raw_image(band, 20)

            # Weight B04/B08 by spectral proximity to this band: the
            # closer edge gets the larger weight.
            B4_distance = self.s2_spectral_profile[band][
                'edge_bot'] - self.s2_spectral_profile['B04']['edge_top']
            B8_distance = self.s2_spectral_profile['B08'][
                'edge_bot'] - self.s2_spectral_profile[band]['edge_top']
            distance_sum = B4_distance + B8_distance
            B4_weight = 1 - (B4_distance / distance_sum)
            B8_weight = 1 - (B8_distance / distance_sum)

            # Synthetic 10m panchromatic surrogate for this band.
            ratio = np.add(np.multiply(B4_arr, B4_weight),
                           np.multiply(B8_arr, B8_weight))

            # Reuse a previously resampled version when one exists
            # (same is-not-False-first guard as above).
            band_potential_path = self.get_custom_image(
                f'{self.metadata["basename"]}_{band}_rs_10m')
            if band_potential_path is not False and os.path.exists(
                    band_potential_path):
                resampled_name = band_potential_path
            else:
                resampled_name = os.path.join(
                    temp_dir, f'{self.metadata["basename"]}_{band}_rs_10m.tif')
                resample(band_path,
                         reference_raster=B8,
                         out_raster=resampled_name)

            ratio_name = os.path.join(
                temp_dir, f'{self.metadata["basename"]}_{band}_ratio_10m.tif')
            array_to_raster(ratio, reference_raster=B8, out_raster=ratio_name)

            pansharpened_name = os.path.join(
                self.folders['custom'],
                f'{self.metadata["basename"]}_{band}_ss_10m.tif')
            pansharpen(ratio_name,
                       resampled_name,
                       pansharpened_name,
                       out_datatype='uint16')

        # SWIR bands are only resampled, not pan-sharpened.
        resample(self.get_raw_image('B11', 20),
                 reference_raster=B8,
                 out_raster=os.path.join(
                     self.folders['custom'],
                     f'{self.metadata["basename"]}_B11_ss_10m.tif'))
        resample(self.get_raw_image('B12', 20),
                 reference_raster=B8,
                 out_raster=os.path.join(
                     self.folders['custom'],
                     f'{self.metadata["basename"]}_B12_ss_10m.tif'))

        # Native 10m bands are copied through unchanged.
        shutil.copyfile(
            self.get_raw_image('B02', 10),
            os.path.join(self.folders['custom'],
                         f'{self.metadata["basename"]}_B02_ss_10m.tif'))
        shutil.copyfile(
            self.get_raw_image('B03', 10),
            os.path.join(self.folders['custom'],
                         f'{self.metadata["basename"]}_B03_ss_10m.tif'))
        shutil.copyfile(
            self.get_raw_image('B04', 10),
            os.path.join(self.folders['custom'],
                         f'{self.metadata["basename"]}_B04_ss_10m.tif'))
        # FIX: the B08 output was previously copied from B04 (copy-paste
        # bug) — it must come from the B08 source image.
        shutil.copyfile(
            B8,
            os.path.join(self.folders['custom'],
                         f'{self.metadata["basename"]}_B08_ss_10m.tif'))
    finally:
        self.update_custom()
        shutil.rmtree(temp_dir)
# --- earlier per-class merge experiments, kept for reference ---
# path_prox = phase4_folder + 'Phase4/urban_proximity.tif'
# dens_arr = np.ma.add(raster_to_array(path_dens), 1)
# prox_arr = np.ma.add(raster_to_array(path_prox), 1)
# merge = dens_arr / np.sqrt(prox_arr)
# merge = np.ma.masked_where(ref.mask, merge)
# out = phase4_folder + 'Phase4/urban_merge_1km.tif'
# array_to_raster(merge, reference_raster=ref_path, out_raster=out)

# dense_urban = raster_to_array(phase4_folder + 'Phase4/dens_urban_merge_1km.tif')
# urban = raster_to_array(phase4_folder + 'Phase4/urban_merge_1km.tif')
# suburban = raster_to_array(phase4_folder + 'Phase4/suburban_merge_1km.tif')
# rural = raster_to_array(phase4_folder + 'Phase4/rural_outskirts_merge_1km.tif')
# hinterlands = raster_to_array(phase4_folder + 'Phase4/hinterlands_proximity_merge_1km.tif')

# One density raster per urbanisation class, densest first; the weights
# below follow the same order.
_density_layers = [
    'Phase4/dense_urban_density.tif',
    'Phase4/urban_density.tif',
    'Phase4/suburban_density.tif',
    'Phase4/rural_outskirts_density.tif',
    'Phase4/hinterlands_density.tif',
]
weights = np.array([1.0, 0.9, 0.8, 0.7, 0.5], dtype=np.double)

# Stack the class layers into one masked 3D array and keep the weighted
# "highest" class response per pixel.
merged = np.ma.array(
    [raster_to_array(phase4_folder + layer) for layer in _density_layers])
highest = highest_filter(merged, weights)

array_to_raster(highest,
                reference_raster=ref_path,
                out_raster=phase4_folder + 'Phase4/merged_06.tif')
# Nightlights preprocessing: sum the per-scene cloud-free coverage counts
# (cf_cvg) into a single 'count.tif', then begin building a coverage-weighted
# average of the radiance (avg_rade9h) rasters.
src_folder = '/mnt/c/users/caspe/Desktop/Analysis/Data/nightlights/'
dst_folder = '/mnt/c/users/caspe/Desktop/Analysis/Data/nightlights/clipped/'

# One-off clipping step, already executed:
# files = glob(src_folder + '*.tif')
# for f in files:
#     clip_raster(f, out_raster=dst_folder + f.rsplit('/', 1)[1], cutline=clip_to, cutline_all_touch=True, crop_to_cutline=True)

# Sum every cloud-free-coverage raster into one total observation count.
files = glob(dst_folder + '*cf_cvg*.tif')
base = raster_to_array(files[0])
for i, f in enumerate(files):
    if i == 0:
        continue  # files[0] seeded the accumulator above.
    base = np.add(base, raster_to_array(f))

array_to_raster(base, out_raster=dst_folder + 'count.tif', reference_raster=files[0])
count = raster_to_array(dst_folder + 'count.tif')

# Weight each radiance raster by its share of the total coverage count.
files = glob(dst_folder + '*avg_rade9h*.tif')
base = files[0]
# Matching coverage raster: swap the '.avg_rade9h' suffix for '.cf_cvg'.
base_count = files[0].rsplit('.', 2)[0] + '.cf_cvg.tif'
base_weight = raster_to_array(base_count) / count
base = raster_to_array(base) * base_weight
# # import pdb; pdb.set_trace()
# NOTE(review): the loop body appears truncated in this view — f_count is
# computed but never used here; presumably the weighted accumulation of the
# remaining rasters continues below. Confirm against the full file.
for i, f in enumerate(files):
    if i == 0:
        continue
    f_count = f.rsplit('.', 2)[0] + '.cf_cvg.tif'
import sys; sys.path.append('..')
import numpy as np
from time import time
import cv2
from lib.raster_io import raster_to_array, array_to_raster
from lib.stats_filters import threshold_filter, median_filter, median_deviation_filter

# Aligned 10m mosaic bands (red/NIR) plus backscatter rasters.
folder = '/mnt/c/Users/caspe/Desktop/Analysis/Data/mosaic/aligned/'
b4_path = folder + 'B04.tif'
b8_path = folder + 'B08.tif'
bs_path = folder + 'BS.tif'
blur_path = folder + 'bs_blur.tif'

B4 = raster_to_array(b4_path)
B8 = raster_to_array(b8_path)

# bs_blur = cv2.GaussianBlur(raster_to_array(bs_path), (11, 11), 0)
# array_to_raster(np.sqrt(raster_to_array(blur_path)).astype('float32'), out_raster=folder + 'bs_blur_sqrt.tif', reference_raster=b4_path)

# Inverted NDVI: (red - nir) / (red + nir).
# NOTE(review): pixels where B4 + B8 == 0 divide by zero; assumed to be
# handled by the rasters' nodata masking — confirm.
array_to_raster((B4 - B8) / (B4 + B8),
                out_raster=folder + 'inv_ndvi.tif',
                reference_raster=b4_path)

# FIX: the trailing bare words "adding some new code here" were not valid
# Python and made this script fail with a SyntaxError; kept as a TODO note.
# TODO: adding some new code here
# FIX: this script uses np.ma below but never imported numpy.
import numpy as np
from glob import glob
from pathlib import Path
from lib.raster_io import raster_to_array, array_to_raster
from lib.stats_filters import median_filter, mean_filter, threshold_array, close_filter, truncate_filter, sum_filter, fast_sum_filter, median_deviation_filter, standardise_filter

folder = 'c:/users/caspe/desktop/Analysis/data/'
prox = folder + 's1_wet_perm-proximity.tif'
dens = folder + 's1_wet_mask-density_5km.tif'
merge = folder + 's1_wet_perm.tif'

# +1 keeps the subsequent division/sqrt away from zero-valued pixels.
prox_arr = np.ma.add(raster_to_array(prox), 1)
dens_arr = np.ma.add(raster_to_array(dens), 1)

# Density boosted where proximity is low (dividing by sqrt of distance).
merge_arr = dens_arr / np.ma.sqrt(prox_arr)

array_to_raster(merge_arr, reference_raster=prox, out_raster=merge)
array_to_raster(standardise_filter(merge_arr.astype('float32')),
                reference_raster=prox,
                out_raster=folder + '/standardized/s1_wet_perm.tif')

# --- pending band-filtering experiment, kept for reference ---
# bands = [
#     's2_b04_10m_dry',
#     's2_b04_10m_wet',
#     's2_b08_10m_dry',
#     's2_b08_10m_wet',
#     's2_b12_10m_dry',
#     's2_b12_10m_wet',
# ]
# for band in bands:
#     arr = median_filter(np.ma.abs(median_deviation_filter(raster_to_array(folder + band + '.tif'), 3, distance_calc='power')), 3, distance_calc='power', iterations=2)