def filter_video_dem_by_nmad(ds_list, min_count=2, max_nmad=5):
    """
    Filter Video DEM composites using NMAD and count stats

    This function will look for and eliminate pixels in the median DEM where fewer than
    <min_count> pairwise DEMs contributed and their vertical variability (NMAD) is higher than <max_nmad>

    Parameters
    -----------
    ds_list: list
        list of gdal datasets, containing median, count and nmad composites in order
    min_count: numeric
        minimum count to use in filtering
    max_nmad: numeric
        maximum NMAD variability to filter, if count is also <= min_count

    Returns
    -----------
    dem_filt: masked array
        filtered DEM
    count_filt_c: masked array
        filtered count map
    nmad_filt_c: masked array
        filtered NMAD map
    """
    dem = iolib.ds_getma(ds_list[0])
    count = iolib.ds_getma(ds_list[1])
    nmad = iolib.ds_getma(ds_list[2])
    #Use the function arguments rather than hardcoded thresholds
    nmad_filt = np.ma.masked_where(nmad > max_nmad, nmad)
    count_filt = np.ma.masked_where(count <= min_count, count)
    #Only mask pixels that fail both the NMAD and count criteria
    invalid_mask = np.logical_and(np.ma.getmaskarray(nmad_filt), np.ma.getmaskarray(count_filt))
    nmad_filt_c = np.ma.array(nmad_filt, mask=invalid_mask)
    count_filt_c = np.ma.array(count_filt, mask=invalid_mask)
    dem_filt = np.ma.array(dem, mask=invalid_mask)
    return dem_filt, count_filt_c, nmad_filt_c
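#Minimal usage sketch for filter_video_dem_by_nmad (hypothetical filenames; assumes the same
#pygeotools iolib/warplib modules used above). The three composites are warped to a common grid first.
from pygeotools.lib import iolib, warplib
fn_list = ['video_median.tif', 'video_count.tif', 'video_nmad.tif']
ds_list = warplib.memwarp_multi_fn(fn_list, res='first', extent='first', t_srs='first')
dem_filt, count_filt, nmad_filt = filter_video_dem_by_nmad(ds_list, min_count=2, max_nmad=5)
iolib.writeGTiff(dem_filt, 'video_median_nmadfilt.tif', ds_list[0])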
def dz_fltr(dem_fn, refdem_fn, perc=None, abs_dz_lim=(0, 30), smooth=True):
    """Absolute elevation difference range filter using values from a source raster file and a reference raster file
    """
    try:
        open(refdem_fn)
    except IOError:
        sys.exit('Unable to open reference DEM: %s' % refdem_fn)
    dem_ds, refdem_ds = warplib.memwarp_multi_fn([dem_fn, refdem_fn], res='first', extent='first', t_srs='first')
    dem = iolib.ds_getma(dem_ds)
    refdem = iolib.ds_getma(refdem_ds)
    out = dz_fltr_ma(dem, refdem, perc, abs_dz_lim, smooth)
    return out
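#Hedged usage sketch for dz_fltr (hypothetical filenames): filter a DEM against a reference,
#keeping only pixels within abs_dz_lim of the reference, then write the result out.
from pygeotools.lib import iolib
dem_fn = 'dem.tif'
refdem_fn = 'refdem.tif'
dem_filt = dz_fltr(dem_fn, refdem_fn, abs_dz_lim=(0, 30))
iolib.writeGTiff(dem_filt, 'dem_dzfilt.tif', iolib.fn_getds(dem_fn))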
def main():
    parser = getparser()
    args = parser.parse_args()
    src_fn = args.src_fn
    new_ndv = args.new_ndv
    #Input argument is a string, which is not recognized by set_fill_value
    #Must use np.nan object
    if new_ndv == 'nan' or new_ndv == 'np.nan':
        new_ndv = np.nan
    else:
        new_ndv = float(new_ndv)
    #Output filename will have ndv appended
    if args.overwrite:
        out_fn = src_fn
    else:
        out_fn = os.path.splitext(src_fn)[0] + '_ndv.tif'
    ds = gdal.Open(src_fn)
    b = ds.GetRasterBand(1)
    #Extract old ndv
    old_ndv = iolib.get_ndv_b(b)
    print(src_fn)
    print("Replacing old ndv %s with new ndv %s" % (old_ndv, new_ndv))
    #Load masked array
    bma = iolib.ds_getma(ds)
    #Set new fill value
    bma.set_fill_value(new_ndv)
    #Fill ma with new value and write out
    iolib.writeGTiff(bma.filled(), out_fn, ds, ndv=new_ndv)
def raster_shpclip(r_fn, shp_fn, extent='raster'):
    r_ds = iolib.fn_getds(r_fn)
    r_srs = geolib.get_ds_srs(r_ds)
    r_extent = geolib.ds_extent(r_ds)
    shp_ds = ogr.Open(shp_fn)
    lyr = shp_ds.GetLayer()
    shp_srs = lyr.GetSpatialRef()
    shp_extent = lyr.GetExtent()
    #Define the output - can set to either raster or shp
    #Accept as cl arg
    out_srs = r_srs
    if extent == 'raster':
        out_extent = r_extent
    elif extent == 'shp':
        out_extent = shp_extent
    #r = iolib.ds_getma(r_ds)
    r_ds = warplib.memwarp(r_ds, extent=out_extent, t_srs=out_srs, r='cubic')
    r = iolib.ds_getma(r_ds)
    mask = geolib.shp2array(shp_fn, r_ds)
    r = np.ma.array(r, mask=mask)
    return r
def main(): parser = argparse.ArgumentParser(description="Utility to compute hypsometry for input DEM") parser.add_argument('-mask_fn', type=str, default=None, help='Glacier Polygon filename (mask.shp)') parser.add_argument('-bin_width', type=float, default=100.0, help='Elevation bin with (default: %(default)s)') parser.add_argument('dem_fn', type=str, help='Input DEM filename') args = parser.parse_args() #Input DEM dem_fn = args.dem_fn #Extract GDAL dataset from input dem_fn dem_ds = iolib.fn_getds(dem_fn) #Extract NumPy masked array from dem_ds print("Loading input DEM: %s" % args.dem_fn) dem = iolib.ds_getma(dem_ds) #Fill dem? #Extract DEM resolution (m) dem_res = geolib.get_res(dem_ds, square=True)[0] #Generate glacier mask from shp if args.mask_fn is not None: print("Masking input DEM using: %s" % args.mask_fn) #This calls gdal_rasterize with parameters of dem_ds mask = geolib.shp2array(args.mask_fn, r_ds=dem_ds) #Apply mask to DEM dem = np.ma.array(dem, mask=mask) #Generate aed print("Generating AED") bin_centers, bin_areas = aed(dem, dem_res, args.bin_width) #Write out to csv csv_fn = os.path.splitext(dem_fn)[0]+'_aed.csv' write_aed(bin_centers, bin_areas, csv_fn) #Generate plot plot_dem_aed(dem, bin_centers, bin_areas)
def gen_ts_fn(fn, dt_ref=None, ma=False):
    from osgeo import gdal
    from pygeotools.lib import iolib
    print("Generating timestamp for: %s" % fn)
    fn_ts = os.path.splitext(fn)[0] + '_ts.tif'
    if not os.path.exists(fn_ts) or dt_ref is not None:
        ds = gdal.Open(fn)
        #Should be ok with float ordinals here
        a = iolib.ds_getma(ds)
        ts = fn_getdatetime(fn)
        #Want to check that dt_ref is valid datetime object
        if dt_ref is not None:
            t = ts - dt_ref
            t = t.total_seconds() / 86400.
            fn_ts = os.path.splitext(fn)[0] + '_ts_rel.tif'
        else:
            t = dt2o(ts)
        a[~np.ma.getmaskarray(a)] = t
        #Probably want to be careful about ndv here - could be 0 for rel
        #ndv = 1E20
        ndv = -9999.0
        a.set_fill_value(ndv)
        iolib.writeGTiff(a, fn_ts, ds)
    if ma:
        return a
    else:
        return fn_ts
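#Hedged usage sketch (hypothetical filename): write a per-pixel timestamp raster for a DEM whose
#acquisition date is encoded in the filename (YYYYMMDD_HHMM, as expected by fn_getdatetime).
ts_fn = gen_ts_fn('20150911_0442_dem.tif')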
def abs_range_fltr_lowresDEM(dem_fn, refdem_fn, pad=30):
    try:
        open(refdem_fn)
    except IOError:
        sys.exit('Unable to open reference DEM: %s' % refdem_fn)
    dem_ds, refdem_ds = warplib.memwarp_multi_fn([dem_fn, refdem_fn], res='first', extent='first', t_srs='first')
    dem = iolib.ds_getma(dem_ds)
    refdem = iolib.ds_getma(refdem_ds)
    rangelim = (refdem.min(), refdem.max())
    rangelim = (rangelim[0] - pad, rangelim[1] + pad)
    print('Excluding values outside of padded ({0:0.1f} m) lowres DEM range: {1:0.1f} to {2:0.1f} m'.format(pad, *rangelim))
    out = range_fltr(dem, rangelim)
    return out
def main():
    parser = getparser()
    args = parser.parse_args()
    ras_fn = args.ras_fn
    min = args.min
    max = args.max
    print("Loading dz raster into masked array")
    ras_ds = iolib.fn_getds(ras_fn)
    ras = iolib.ds_getma(ras_ds, 1)
    #Cast input ma as float32 so np.nan filling works
    ras = ras.astype(np.float32)
    ras_fltr = ras
    #Absolute range filter
    ras_fltr = filtlib.range_fltr(ras_fltr, (min, max))
    if args.stats:
        print("Input dz raster stats:")
        malib.print_stats(ras)
        print("Filtered dz raster stats:")
        malib.print_stats(ras_fltr)
    #Output filename will have 'filt' appended
    dst_fn = os.path.splitext(ras_fn)[0] + '_filt.tif'
    print("Writing out filtered dz raster: %s" % dst_fn)
    #Note: writeGTiff writes ras_fltr.filled()
    iolib.writeGTiff(ras_fltr, dst_fn, ras_ds)
def get_toa_mask(toa_ds, min_toa):
    print("\nApplying TOA filter (masking values < %0.4f)" % min_toa)
    toa = iolib.ds_getma(toa_ds)
    toa_mask = np.ma.masked_less(toa, min_toa)
    #This should be True for valid surfaces, False for removed surfaces
    toa_mask = ~(np.ma.getmaskarray(toa_mask))
    return toa_mask
def proc_modscag(fn_list, extent=None, t_srs=None):
    """Process the MODSCAG products for full date range, create composites and reproject
    """
    #Use cubic spline here for improved upsampling
    ds_list = warplib.memwarp_multi_fn(fn_list, res='min', extent=extent, t_srs=t_srs, r='cubicspline')
    stack_fn = os.path.splitext(fn_list[0])[0] + '_' + os.path.splitext(os.path.split(fn_list[-1])[1])[0] + '_stack_%i' % len(fn_list)
    #Create stack here - no need for most of the ma stack machinery, just make 3D array
    #Mask values greater than 100% (clouds, bad pixels, etc)
    ma_stack = np.ma.array([np.ma.masked_greater(iolib.ds_getma(ds), 100) for ds in ds_list], dtype=np.uint8)
    stack_count = np.ma.masked_equal(ma_stack.count(axis=0), 0).astype(np.uint8)
    stack_count.set_fill_value(0)
    stack_min = ma_stack.min(axis=0).astype(np.uint8)
    stack_min.set_fill_value(0)
    stack_max = ma_stack.max(axis=0).astype(np.uint8)
    stack_max.set_fill_value(0)
    stack_med = np.ma.median(ma_stack, axis=0).astype(np.uint8)
    stack_med.set_fill_value(0)
    out_fn = stack_fn + '_count.tif'
    iolib.writeGTiff(stack_count, out_fn, ds_list[0])
    out_fn = stack_fn + '_max.tif'
    iolib.writeGTiff(stack_max, out_fn, ds_list[0])
    out_fn = stack_fn + '_min.tif'
    iolib.writeGTiff(stack_min, out_fn, ds_list[0])
    out_fn = stack_fn + '_med.tif'
    iolib.writeGTiff(stack_med, out_fn, ds_list[0])
    ds = gdal.Open(out_fn)
    return ds
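#Hedged usage sketch for proc_modscag (hypothetical filenames; assumes warplib accepts the
#'union'/'first' extent and projection keywords used elsewhere in pygeotools).
from pygeotools.lib import iolib
fn_list = ['modscag_doy245.tif', 'modscag_doy246.tif', 'modscag_doy247.tif']
med_ds = proc_modscag(fn_list, extent='union', t_srs='first')
med = iolib.ds_getma(med_ds)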
def get_toa_mask(toa_ds, toa_thresh=0.4):
    print("Applying TOA filter (masking values >= %0.2f)" % toa_thresh)
    toa = iolib.ds_getma(toa_ds)
    toa_mask = np.ma.masked_greater(toa, toa_thresh)
    #This should be 1 for valid surfaces, 0 for snowcovered surfaces
    toa_mask = ~(np.ma.getmaskarray(toa_mask))
    return toa_mask
def main():
    parser = getparser()
    args = parser.parse_args()

    mask_list = []
    if args.toa:
        mask_list.append('toa')
    if args.snodas:
        mask_list.append('snodas')
    if args.modscag:
        mask_list.append('modscag')
    if args.bareground:
        mask_list.append('bareground')
    if args.glaciers:
        mask_list.append('glaciers')
    if args.nlcd:
        mask_list.append('nlcd')
    if not mask_list:
        parser.print_help()
        sys.exit("Must specify at least one mask type")

    #This directory should or will contain the relevant data products
    #if args.datadir is None:
    #    datadir = iolib.get_datadir()

    dem_fn = args.dem_fn
    dem_ds = gdal.Open(dem_fn)
    print(dem_fn)

    #Get DEM masked array
    dem = iolib.ds_getma(dem_ds)
    print("%i valid pixels in original input tif" % dem.count())

    #Set up cascading mask preparation
    #True (1) represents "valid" unmasked pixel, False (0) represents "invalid" pixel to be masked
    #Initialize the mask
    #newmask = ~(np.ma.getmaskarray(dem))

    #Basename for output files
    if args.outdir is not None:
        if not os.path.exists(args.outdir):
            os.makedirs(args.outdir)
    else:
        args.outdir = os.path.split(dem_fn)[0]

    newmask = get_mask(dem_ds, mask_list, dem_fn=dem_fn, writeout=args.writeout, outdir=args.outdir, args=args)

    #Apply mask to original DEM - use these surfaces for co-registration
    newdem = np.ma.array(dem, mask=newmask)

    #Check that we have enough pixels, good distribution
    min_validpx_count = 100
    min_validpx_std = 10
    validpx_count = newdem.count()
    validpx_std = newdem.std()
    print("%i valid pixels in masked output tif to be used as ref" % validpx_count)
    print("%0.2f std in masked output tif to be used as ref" % validpx_std)
    #if (validpx_count > min_validpx_count) and (validpx_std > min_validpx_std):
    if (validpx_count > min_validpx_count):
        out_fn = os.path.join(args.outdir, os.path.splitext(dem_fn)[0]+'_ref.tif')
        print("Writing out %s" % out_fn)
        iolib.writeGTiff(newdem, out_fn, src_ds=dem_ds)
    else:
        print("Not enough valid pixels!")
def get_tri_mask(dem_ds, min_tri):
    print("Applying TRI filter (masking smooth values < %0.4f)" % min_tri)
    dem = iolib.ds_getma(dem_ds)
    tri = geolib.gdaldem_mem_ds(dem_ds, 'TRI', returnma=True)
    tri_mask = np.ma.masked_less(tri, min_tri)
    #This should be True for valid surfaces, False for removed surfaces
    tri_mask = ~(np.ma.getmaskarray(tri_mask))
    return tri_mask
def get_slope_mask(dem_ds, max_slope):
    print("Applying DEM slope filter (masking values > %0.1f)" % max_slope)
    dem = iolib.ds_getma(dem_ds)
    slope = geolib.gdaldem_mem_ds(dem_ds, 'slope', returnma=True)
    slope_mask = np.ma.masked_greater(slope, max_slope)
    #This should be True for valid surfaces, False for removed surfaces
    slope_mask = ~(np.ma.getmaskarray(slope_mask))
    return slope_mask
def get_rough_mask(dem_ds, max_rough):
    print("Applying DEM roughness filter (masking values > %0.4f)" % max_rough)
    dem = iolib.ds_getma(dem_ds)
    rough = geolib.gdaldem_mem_ds(dem_ds, 'Roughness', returnma=True)
    rough_mask = np.ma.masked_greater(rough, max_rough)
    #This should be True for valid surfaces, False for removed surfaces
    rough_mask = ~(np.ma.getmaskarray(rough_mask))
    return rough_mask
def get_dark_mask(toa_ds, min_toa):
    print("\nApplying TOA filter to remove dark areas (water and shadows) using pan TOA (masking values < %0.4f)" % min_toa)
    toa = iolib.ds_getma(toa_ds)
    dark_mask = np.ma.masked_less(toa, min_toa)
    #This should be True for valid surfaces, False for removed surfaces
    dark_mask = ~(np.ma.getmaskarray(dark_mask))
    return dark_mask
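#Hedged sketch of combining the boolean masks above (True = valid). The threshold values are
#hypothetical, and the warped inputs (dem_ds, toa_ds) are assumed to already share a common grid.
import numpy as np
from pygeotools.lib import iolib
valid = np.logical_and.reduce((get_slope_mask(dem_ds, max_slope=20),
                               get_rough_mask(dem_ds, max_rough=0.75),
                               get_dark_mask(toa_ds, min_toa=0.15)))
#Invert so True marks pixels to mask, then apply to the DEM
dem_masked = np.ma.array(iolib.ds_getma(dem_ds), mask=~valid)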
def main():
    parser = getparser()
    args = parser.parse_args()

    dem_fn = args.dem_fn
    ndv = args.ndv
    max_slope = args.max_slope
    reduce_pct = args.reduce_pct
    #slope_res = args.slope_res
    slopelim = (0.1, max_slope)

    out_base = os.path.splitext(dem_fn)[0]

    print("\n\tGetting a slopemasked dem...\n")

    if reduce_pct != 100:
        print("\tCoarsening by %s.." % (reduce_pct))
        out_fn = out_base + '_pct' + str(reduce_pct) + '.tif'
        if not os.path.exists(out_fn):
            red_pct_str = str(reduce_pct) + "% " + str(reduce_pct) + "%"
            cmdStr = " ".join(["gdal_translate", "-r", "cubic", "-outsize", red_pct_str, dem_fn, out_fn])
            print(cmdStr)
            #cmdStr = "gdal_translate -r cubic -outsize " + str(reduce_pct) + "% " + str(reduce_pct) + "%"
            #cmdStr += dem_fn + out_fn
            cmd = subprocess.Popen(cmdStr, stdout=subprocess.PIPE, shell=True)
            stdOut, err = cmd.communicate()
        dem_fn = out_fn

    # Get slope as a masked array
    dem_slope_ma = geolib.gdaldem_wrapper(dem_fn, product='slope', returnma=True)
    # Get reduced dem ds
    dem_ds = iolib.fn_getds(dem_fn)
    dem_ma = iolib.ds_getma(dem_ds)

    # Get a new slope mask using max slope
    slopemask = (dem_slope_ma > max_slope)
    # Get whichever dem mask was there in the first place
    demmask = (dem_ma == ndv)
    newmask = np.logical_or(demmask, slopemask)

    # Apply the combined mask to the DEM
    newdem = np.ma.array(dem_ma, mask=newmask)

    # Save the new masked DEM
    dst_fn = out_base + '_slopemasked.tif'
    iolib.writeGTiff(newdem, dst_fn, dem_ds, ndv=ndv)
    return dst_fn
def slope_fltr_ds(dem_ds, slopelim=(0, 40)):
    print("Slope filter: %0.2f - %0.2f" % slopelim)
    from pygeotools.lib import geolib
    dem = iolib.ds_getma(dem_ds)
    dem_slope = geolib.gdaldem_mem_ds(dem_ds, processing='slope', returnma=True, computeEdges=True)
    print("Initial count: %i" % dem_slope.count())
    dem_slope = range_fltr(dem_slope, slopelim)
    print("Final count: %i" % dem_slope.count())
    return np.ma.array(dem, mask=np.ma.getmaskarray(dem_slope))
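#Hedged usage sketch (hypothetical filename): mask pixels steeper than 40 degrees and write out.
from pygeotools.lib import iolib
dem_ds = iolib.fn_getds('dem.tif')
dem_slopefilt = slope_fltr_ds(dem_ds, slopelim=(0, 40))
iolib.writeGTiff(dem_slopefilt, 'dem_slopefilt.tif', dem_ds)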
def get_lidar_mask(dem_ds, lidar_ds, max_thresh):
    print("Applying ground mask using Lidar CHM (masking values > %0.4f)" % max_thresh)
    #If you want to feed in the file
    #chm_ds = warplib.memwarp_multi_fn([chm_file,], res=dem_ds, extent=dem_ds, t_srs=dem_ds)[0]
    lidar_array = iolib.ds_getma(lidar_ds)
    lidar_mask = np.ma.masked_greater(lidar_array, max_thresh)
    lidar_mask = ~(np.ma.getmaskarray(lidar_mask))
    return lidar_mask
def iv_ds(ds, full=False, return_ma=False, **kwargs):
    if full:
        a = iolib.ds_getma(ds)
    else:
        a, ds = iolib.ds_getma_sub(ds, return_ds=True)
    ax = iv(a, ds=ds, **kwargs)
    if return_ma:
        out = (ax, a)
    else:
        out = ax
    return out
def get_t_factor_fn(fn1, fn2, ds=None):
    t_factor = None
    #Extract timestamps from input filenames
    t1 = fn_getdatetime(fn1)
    t2 = fn_getdatetime(fn2)
    t_factor = get_t_factor(t1, t2)
    #Attempt to load timestamp arrays (for mosaics with variable timestamps)
    t1_fn = os.path.splitext(fn1)[0]+'_ts.tif'
    t2_fn = os.path.splitext(fn2)[0]+'_ts.tif'
    if os.path.exists(t1_fn) and os.path.exists(t2_fn) and ds is not None:
        print("Preparing timestamp arrays")
        from pygeotools.lib import warplib
        t1_ds, t2_ds = warplib.memwarp_multi_fn([t1_fn, t2_fn], extent=ds, res=ds)
        print("Loading timestamps into masked arrays")
        from pygeotools.lib import iolib
        t1 = iolib.ds_getma(t1_ds)
        t2 = iolib.ds_getma(t2_ds)
        #This is a new masked array
        t_factor = (t2 - t1) / 365.25
    return t_factor
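#Hedged usage sketch (hypothetical filenames): annualized elevation change rate using the scalar
#or per-pixel time interval returned above (in years, per the /365.25 conversion). Assumes the two
#DEMs have already been warped to a common grid as dem1_ds and dem2_ds.
from pygeotools.lib import iolib
t_factor = get_t_factor_fn('20150911_dem.tif', '20170830_dem.tif', ds=dem1_ds)
dhdt = (iolib.ds_getma(dem2_ds) - iolib.ds_getma(dem1_ds)) / t_factor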
def get_date_a(ds, date_shp_lyr, glac_geom_mask, datefield):
    date_r_ds = iolib.mem_drv.CreateCopy('', ds)
    #Shapefile order should be sorted by time, but might want to think about sorting here
    #Can automatically search for datefield
    gdal.RasterizeLayer(date_r_ds, [1], date_shp_lyr, options=["ATTRIBUTE=%s" % datefield])
    date_a = np.ma.array(iolib.ds_getma(date_r_ds), mask=glac_geom_mask)
    #Note: NED dates are in integer years, assume source imagery was flown in late summer for mountains
    if datefield == 'S_DATE_CLN':
        date_a += 0.75
    return date_a
def domask(tile_fn):
    prefix = '-'.join(tile_fn.split('-')[:-1])
    print("\nLoading: %s" % tile_fn)
    #dem_fn = prefix+'-median.tif'
    dem_fn = tile_fn
    dem_ds = iolib.fn_getds(dem_fn)
    dem = iolib.ds_getma(dem_ds)
    #Get original mask, True where masked
    mask = np.ma.getmaskarray(dem)
    valid_px_count = (~mask).sum()
    if valid_px_count < 1:
        print("No valid pixels remain")
    else:
        print("Valid pixel count: %i" % valid_px_count)

        min_count = 2
        count_fn = prefix + '-count.tif'
        print("Loading: %s" % count_fn)
        count = iolib.fn_getma(count_fn)
        print("min: %i max: %i" % (count.min(), count.max()))
        print("Masking: (count < %i)" % min_count)
        mask = np.logical_or(mask, (count < min_count))
        valid_px_count = (~mask).sum()
        if valid_px_count < 1:
            print("No valid pixels remain")
        else:
            print("Valid pixel count: %i" % valid_px_count)

            max_std = 3.0
            #std_fn = prefix+'-std.tif'
            std_fn = prefix + '-nmad.tif'
            print("Loading: %s" % std_fn)
            std = iolib.fn_getma(std_fn)
            print("min: %0.2f max: %0.2f" % (std.min(), std.max()))
            print("Masking: (std/nmad >= %0.1f)" % max_std)
            mask = np.logical_or(mask, (std >= max_std))
            valid_px_count = (~mask).sum()
            if valid_px_count < 1:
                print("No valid pixels remain")
            else:
                print("Valid pixel count: %i" % valid_px_count)

    #Modified so we always write out, even if empty tif
    #Easier for tracking progress
    print("Applying mask")
    dem_masked = np.ma.array(dem, mask=mask)
    out_fn = os.path.splitext(dem_fn)[0] + '_masked.tif'
    print("Writing: %s" % out_fn)
    iolib.writeGTiff(dem_masked, out_fn, dem_ds)
def plot_composite_fig(ortho, dem, count, nmad, outfn, product='triplet'):
    """
    Plot the gallery figure for final DEM products

    Parameters
    ------------
    ortho: str
        path to orthoimage
    dem: str
        path to dem
    count: str
        path to count map
    nmad: str
        path to NMAD
    outfn: str
        path to save output figure
    product: str
        product to plot (triplet/video)
    """
    if product == 'triplet':
        figsize = (10, 8)
    else:
        figsize = (10, 3)
    f, ax = plt.subplots(1, 4, figsize=figsize)
    ds_list = warplib.memwarp_multi_fn([ortho, dem, count, nmad], res='max')
    ortho, dem, count, nmad = [iolib.ds_getma(x) for x in ds_list]
    pltlib.iv(ortho, ax=ax[0], cmap='gray', scalebar=True, cbar=False, ds=ds_list[0], skinny=False)
    pltlib.iv(dem, ax=ax[1], hillshade=True, scalebar=False, ds=ds_list[1], label='Elevation (m WGS84)', skinny=False)
    pltlib.iv(count, ax=ax[2], cmap='YlOrRd', label='DEM count', skinny=False)
    pltlib.iv(nmad, ax=ax[3], cmap='inferno', clim=(0, 10), label='Elevation NMAD (m)', skinny=False)
    plt.tight_layout()
    f.savefig(outfn, dpi=300, bbox_inches='tight', pad_inches=0.1)
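#Hedged usage sketch (hypothetical filenames): render the four-panel gallery figure for a video DEM composite.
plot_composite_fig('ortho.tif', 'dem_median.tif', 'dem_count.tif', 'dem_nmad.tif',
                   'composite_fig.jpg', product='video')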
def main():
    parser = getparser()
    args = parser.parse_args()
    src_fn = args.src_fn
    if not iolib.fn_check(src_fn):
        sys.exit("Unable to find src_fn: %s" % src_fn)
    #This is a wrapper around gdal.Open()
    src_ds = iolib.fn_getds(src_fn)
    src_gt = src_ds.GetGeoTransform()
    print("Loading input raster into masked array")
    bma = iolib.ds_getma(src_ds)
    print("Computing min/max indices for mask")
    edge_env = malib.edgefind2(bma, intround=True)
    print("Updating output geotransform")
    out_gt = list(src_gt)
    #This should be OK, as edge_env values are integer multiples, and the initial gt values are upper left pixel corner
    #Update UL_X
    out_gt[0] = src_gt[0] + src_gt[1] * edge_env[2]
    #Update UL_Y, note src_gt[5] is negative
    out_gt[3] = src_gt[3] + src_gt[5] * edge_env[0]
    out_gt = tuple(out_gt)
    #debug
    #print([0, bma.shape[0], 0, bma.shape[1]])
    #print(edge_env)
    #print(src_gt)
    #print(out_gt)
    out_fn = os.path.splitext(src_fn)[0] + '_trim.tif'
    print("Writing out: %s" % out_fn)
    #Extract valid subsection from input array
    #indices+1 are necessary to include valid row/col on right and bottom edges
    iolib.writeGTiff(bma[edge_env[0]:edge_env[1] + 1, edge_env[2]:edge_env[3] + 1], out_fn, src_ds, gt=out_gt)
    bma = None
def make_dem_mosaic_index_ts(index_tif_fn):
    index_txt_fn = index_tif_fn + '-index-map.txt'
    out_fn = os.path.splitext(index_tif_fn)[0] + '_ts.tif'
    if not os.path.exists(index_tif_fn):
        print("Unable to find input file: %s" % index_tif_fn)
        return False
    if not os.path.exists(index_txt_fn):
        print("Unable to find input file: %s" % index_txt_fn)
        return False
    if os.path.exists(out_fn):
        print("Existing ts output found: %s" % out_fn)
        return True
    #Load dem_mosaic index tif
    index_tif_ds = iolib.fn_getds(index_tif_fn)
    index_tif = iolib.ds_getma(index_tif_ds)
    #Create output array
    #index_ts_tif = np.zeros(index_tif.shape, dtype=np.float32)
    #index_ts_tif = np.ma.masked_all_like(index_tif)
    index_ts_tif = np.ma.masked_all(index_tif.shape, dtype=np.float32)
    #Read in dem_mosaic index txt file - should be "fn, index" for each record
    index_txt = np.atleast_2d(np.genfromtxt(index_txt_fn, dtype=str))
    #Extract Python datetime object from filename (should pull out YYYYMMDD_HHMM)
    index_ts = [timelib.fn_getdatetime(fn) for fn in index_txt[:, 0]]
    #Convert to desired output timestamp format
    #Python ordinal
    #index_ts = timelib.dt2o(index_ts)
    #YYYYMMDD integer
    #index_ts = [ts.strftime('%Y%m%d') for ts in index_ts]
    #Decimal year
    index_ts = [timelib.dt2decyear(ts) for ts in index_ts]
    for n, dt in enumerate(index_ts):
        index_ts_tif[index_tif == n] = dt
    iolib.writeGTiff(index_ts_tif, out_fn, index_tif_ds, ndv=0)
    return True
def main():
    parser = getparser()
    args = parser.parse_args()

    chm_fn = args.chm_fn
    outdir = args.outdir
    ndv = args.ndv
    min_height = args.min_height
    max_height = args.max_height

    if not iolib.fn_check(chm_fn):
        sys.exit("Unable to find chm_fn: %s" % chm_fn)

    out_base = os.path.splitext(chm_fn)[0]
    if outdir is not None:
        inputdir, chmname = os.path.split(chm_fn)
        out_base = os.path.join(outdir, os.path.splitext(chmname)[0])

    # Get chm ds and ma
    chm_ds = iolib.fn_getds(chm_fn)
    chm_ma = iolib.ds_getma(chm_ds)

    # Get new masks using min/max height
    heightmasklo = (chm_ma < min_height)
    heightmaskhi = (chm_ma > max_height)
    # Get whichever chm mask was there in the first place
    chmmask = (chm_ma == ndv)
    # https://stackoverflow.com/questions/20528328/numpy-logical-or-for-more-than-two-arguments
    newmask = np.logical_or.reduce((chmmask, heightmasklo, heightmaskhi))
    #np.logical_or(heightmasklo, heightmaskhi)

    # Apply mask
    newchm = np.ma.array(chm_ma, mask=newmask)

    # Save the new masked CHM
    dst_fn = out_base + '_htmasked.tif'
    iolib.writeGTiff(newchm, dst_fn, chm_ds, ndv=ndv)
    print(dst_fn)
    return dst_fn
def dz_fltr_dir(dem_fn_list, refdem_fn, abs_dz_lim, out_dir):
    #names = dem_fn_list[0]
    for names in dem_fn_list:
        #print("Loading output DEM into masked array")
        dem_ds = iolib.fn_getds(names)
        dem_fltr = iolib.ds_getma(dem_ds, 1)
        #Difference filter, need to specify refdem_fn
        dem_fltr = filtlib.dz_fltr(names, refdem_fn, abs_dz_lim=abs_dz_lim)
        #Create output directory and file name
        parts = names.split('/')
        file_name = parts[len(parts) - 1]
        dst_fn = out_dir + file_name.split('.')[0] + '_filt%ipx.tif' % abs_dz_lim[1]
        print("Writing out filtered DEM: %s" % dst_fn)
        #Note: writeGTiff writes dem_fltr.filled()
        iolib.writeGTiff(dem_fltr, dst_fn, dem_ds)
def get_cam2rpc_opts(t='pinhole', dem=None, gsd=None, num_samples=50):
    """
    Generate cmd for ASP cam2rpc

    This generates rpc camera models from the optimized frame camera models
    See documentation here: https://stereopipeline.readthedocs.io/en/latest/tools/cam2rpc.html

    Parameters
    ----------
    t: str
        session, or for here, type of input camera, default: pinhole
    dem: str
        path to DEM which will be used for calculating RPC polynomials
    gsd: float
        Expected ground-sampling distance
    num_samples: int
        Sampling for RPC approximation calculation (default=50)

    Returns
    ----------
    cam2rpc_opts: list
        A list of arguments for cam2rpc call.
    """
    cam2rpc_opts = []
    cam2rpc_opts.extend(['--dem-file', dem])
    dem_ds = iolib.fn_getds(dem)
    dem_proj = dem_ds.GetProjection()
    dem = iolib.ds_getma(dem_ds)
    min_height, max_height = np.percentile(dem.compressed(), (0.01, 0.99))
    tsrs = epsg2geolib(4326)
    xmin, ymin, xmax, ymax = geolib.ds_extent(dem_ds, tsrs)
    cam2rpc_opts.extend(['--height-range', str(min_height), str(max_height)])
    cam2rpc_opts.extend(['--lon-lat-range', str(xmin), str(ymin), str(xmax), str(ymax)])
    if gsd:
        cam2rpc_opts.extend(['--gsd', str(gsd)])
    cam2rpc_opts.extend(['--session', t])
    cam2rpc_opts.extend(['--num-samples', str(num_samples)])
    return cam2rpc_opts
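#Hedged usage sketch (hypothetical filenames): assemble a cam2rpc command from the options above.
#The positional image/camera/output arguments here are placeholders; see the ASP cam2rpc
#documentation linked in the docstring for the exact calling convention.
opts = get_cam2rpc_opts(dem='ref_dem.tif', gsd=2.0)
cmd = ['cam2rpc'] + opts + ['img.tif', 'cam.tsai', 'img_rpc.xml']
print(' '.join(cmd))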
def gdaldem(ds, producttype='slope', returnma=True):
    """
    Perform gdaldem operations such as slope, hillshade etc via the Python API

    Parameters
    -----------
    ds: gdal dataset
        DEM dataset for which derived products are to be computed
    producttype: str
        operation to perform (e.g., slope, aspect, hillshade)
    returnma: bool
        return the product as masked array if True, or a dataset if False

    Returns
    -----------
    out: masked array or dataset
        output product in form of masked array or dataset (see params)
    """
    dem_p_ds = gdal.DEMProcessing('', ds, producttype, format='MEM')
    ma = iolib.ds_getma(dem_p_ds)
    if returnma:
        out = ma
    else:
        out = dem_p_ds
    return out
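#Hedged usage sketch (hypothetical filename): compute slope and hillshade masked arrays for a DEM
#using the in-memory gdaldem wrapper above.
from pygeotools.lib import iolib
dem_ds = iolib.fn_getds('dem.tif')
slope = gdaldem(dem_ds, producttype='slope', returnma=True)
hs = gdaldem(dem_ds, producttype='hillshade', returnma=True)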
else: print "Unrecognized extension, continuing without filtering or scaling" #Parse GCPs gcp_fn = os.path.splitext(in_fn)[0]+'.gcp' gcp = parse_gcp(gcp_fn) #Load DEM #Should be in projected, cartesian coords dem_fn = sys.argv[3] #Extract DEM to ma dem_ds = iolib.fn_getds(dem_fn) dem_srs = geolib.get_ds_srs(dem_ds) dem_gt = dem_ds.GetGeoTransform() dem = iolib.ds_getma(dem_ds) #Compute azimuth pixel size in meters (function of range) az_pixel_spacing = az_angle_step * np.arange(near_range_slc, far_range_slc, range_pixel_spacing) #Downsample DEM to match radar GSD, or 2x radar GSD? #min(range, az) #Want to allow for input more precise DGPS coordinates for GPRI origin #Trimble GeoXH shp output has XY, need to process raw data for XYZ #46.78364631 #-121.7502352 #ref_coord = [-121.7502352, 46.78364631, ref_coord[2]] #Need to correct boresight for "principal point" of radar relative to GPS point at top of tower - this will depend on antenna used for interferogram #Convert GPRI origin to projected coords
#! /usr/bin/env python

import numpy as np

from osgeo import gdal
from pygeotools.lib import iolib
from pygeotools.lib import geolib
from pygeotools.lib import malib

def dist(pos1, pos2):
    return np.sqrt((pos1[0] - pos2[0])**2 + (pos1[1] - pos2[1])**2)

pos1 = [595396.48277, 5181880.22677]
pos2 = [596168.611, 5182875.521]

fn = 'rainierlidar_wgs84_shpclip.tif'
ds = iolib.fn_getds(fn)
dem = iolib.ds_getma(ds)
x, y = geolib.get_xy_grids(ds)

#Baseline distance between the two instrument positions
d = dist(pos1, pos2)
grid = np.array([x, y])
#Distances from each position to every grid cell
b = dist(pos1, grid)
c = dist(pos2, grid)
#Convergence angle at each grid cell via the law of cosines: cos(A) = (b^2 + c^2 - d^2) / (2bc)
conv = np.rad2deg(np.arccos((b**2 + c**2 - d**2)/(2*b*c)))
conv_m = np.ma.array(conv, mask=dem.mask)
malib.print_stats(conv_m)