def gen_d_sub(d_sub_fn, dx, dy, pad_perc=0.1, ndv=-9999):
    nl = dx.shape[0]
    ns = dx.shape[1]
    #Use GDT_Byte or GDT_Int16 to save space?
    dtype = gdal.GDT_Int32
    opt = iolib.gdal_opt
    d_sub_ds = iolib.gtif_drv.Create(d_sub_fn, ns, nl, 3, dtype, opt)
    d_sub_ds.GetRasterBand(1).WriteArray(np.rint(dx.filled(ndv)).astype(np.int32))
    d_sub_ds.GetRasterBand(2).WriteArray(np.rint(dy.filled(ndv)).astype(np.int32))
    d_sub_ds.GetRasterBand(3).WriteArray((~dx.mask).astype(np.int32))
    for n in range(1, d_sub_ds.RasterCount+1):
        band = d_sub_ds.GetRasterBand(n)
        band.SetNoDataValue(float(ndv))
    d_sub_ds = None

    #Now write D_sub_spread.tif - defines spread around D_sub values
    d_sub_ds = iolib.fn_getds(d_sub_fn)
    d_sub_spread_fn = os.path.splitext(d_sub_fn)[0]+'_spread.tif'
    d_sub_spread_ds = iolib.gtif_drv.CreateCopy(d_sub_spread_fn, d_sub_ds, 0)
    dx_spread = np.ma.abs(dx * pad_perc)
    dy_spread = np.ma.abs(dy * pad_perc)
    d_sub_spread_ds.GetRasterBand(1).WriteArray(np.rint(dx_spread.filled(ndv)).astype(np.int32))
    d_sub_spread_ds.GetRasterBand(2).WriteArray(np.rint(dy_spread.filled(ndv)).astype(np.int32))
    d_sub_spread_ds.GetRasterBand(3).WriteArray((~dx_spread.mask).astype(np.int32))
    for n in range(1, d_sub_spread_ds.RasterCount+1):
        band = d_sub_spread_ds.GetRasterBand(n)
        band.SetNoDataValue(float(ndv))
    d_sub_spread_ds = None
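# A minimal usage sketch for gen_d_sub() above (not from the source): dx/dy are
# hypothetical masked-array disparity grids in D_sub pixel units, with an explicit
# boolean mask so the valid-pixel band can be written. Assumes the same
# gdal/numpy/os/iolib imports used by the function.
shape = (100, 200)
dx = np.ma.array(np.full(shape, 5.0), mask=np.zeros(shape, dtype=bool))   # +5 px offset in x
dy = np.ma.array(np.full(shape, -2.0), mask=np.zeros(shape, dtype=bool))  # -2 px offset in y
gen_d_sub('run-D_sub.tif', dx, dy, pad_perc=0.1)
# Writes run-D_sub.tif (bands: dx, dy, valid mask) and run-D_sub_spread.tif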
def main():
    parser = getparser()
    args = parser.parse_args()
    ras_fn = args.ras_fn
    min = args.min
    max = args.max

    print("Loading dz raster into masked array")
    ras_ds = iolib.fn_getds(ras_fn)
    ras = iolib.ds_getma(ras_ds, 1)
    #Cast input ma as float32 so np.nan filling works
    ras = ras.astype(np.float32)
    ras_fltr = ras

    #Absolute range filter
    ras_fltr = filtlib.range_fltr(ras_fltr, (min, max))

    if args.stats:
        print("Input dz raster stats:")
        malib.print_stats(ras)
        print("Filtered dz raster stats:")
        malib.print_stats(ras_fltr)

    #Output filename will have 'filt' appended
    dst_fn = os.path.splitext(ras_fn)[0] + '_filt.tif'
    print("Writing out filtered dz raster: %s" % dst_fn)
    #Note: writeGTiff writes ras_fltr.filled()
    iolib.writeGTiff(ras_fltr, dst_fn, ras_ds)
def main():
    parser = argparse.ArgumentParser(description="Utility to compute hypsometry for input DEM")
    parser.add_argument('-mask_fn', type=str, default=None, help='Glacier Polygon filename (mask.shp)')
    parser.add_argument('-bin_width', type=float, default=100.0, help='Elevation bin width (default: %(default)s)')
    parser.add_argument('dem_fn', type=str, help='Input DEM filename')
    args = parser.parse_args()

    #Input DEM
    dem_fn = args.dem_fn

    #Extract GDAL dataset from input dem_fn
    dem_ds = iolib.fn_getds(dem_fn)

    #Extract NumPy masked array from dem_ds
    print("Loading input DEM: %s" % args.dem_fn)
    dem = iolib.ds_getma(dem_ds)
    #Fill dem?

    #Extract DEM resolution (m)
    dem_res = geolib.get_res(dem_ds, square=True)[0]

    #Generate glacier mask from shp
    if args.mask_fn is not None:
        print("Masking input DEM using: %s" % args.mask_fn)
        #This calls gdal_rasterize with parameters of dem_ds
        mask = geolib.shp2array(args.mask_fn, r_ds=dem_ds)
        #Apply mask to DEM
        dem = np.ma.array(dem, mask=mask)

    #Generate aed
    print("Generating AED")
    bin_centers, bin_areas = aed(dem, dem_res, args.bin_width)

    #Write out to csv
    csv_fn = os.path.splitext(dem_fn)[0]+'_aed.csv'
    write_aed(bin_centers, bin_areas, csv_fn)

    #Generate plot
    plot_dem_aed(dem, bin_centers, bin_areas)
def raster_shpclip(r_fn, shp_fn, extent='raster'):
    r_ds = iolib.fn_getds(r_fn)
    r_srs = geolib.get_ds_srs(r_ds)
    r_extent = geolib.ds_extent(r_ds)

    shp_ds = ogr.Open(shp_fn)
    lyr = shp_ds.GetLayer()
    shp_srs = lyr.GetSpatialRef()
    shp_extent = lyr.GetExtent()

    #Define the output - can set to either raster or shp
    #Accept as cl arg
    out_srs = r_srs
    if extent == 'raster':
        out_extent = r_extent
    elif extent == 'shp':
        out_extent = shp_extent

    #r = iolib.ds_getma(r_ds)
    r_ds = warplib.memwarp(r_ds, extent=out_extent, t_srs=out_srs, r='cubic')
    r = iolib.ds_getma(r_ds)
    mask = geolib.shp2array(shp_fn, r_ds)
    r = np.ma.array(r, mask=mask)
    return r
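# Usage sketch for raster_shpclip() (hypothetical filenames): clip a raster to a
# polygon shapefile and report stats on the returned masked array. Assumes
# pygeotools malib is also imported.
clipped = raster_shpclip('dem_32m.tif', 'glacier_outline.shp', extent='shp')
malib.print_stats(clipped)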
def main():
    parser = getparser()
    args = parser.parse_args()

    r_fn = args.r_fn
    if not os.path.exists(r_fn):
        sys.exit("Unable to find r_fn: %s" % r_fn)

    # r_fn to r_ds
    r_ds = iolib.fn_getds(r_fn)
    # ...to array
    r_arr = r_ds.GetRasterBand(1).ReadAsArray()
    r_arr = r_arr * args.scale_factor

    # this works like r_arr.astype(np.uint16)
    arr_out = r_arr.astype(getattr(np, args.out_type))
    print("\tOutput array type: %s" % (arr_out.dtype))
    print("\tOutput nodata value: %s" % (args.nodata_val))

    # Set No Data Value: Deal with negatives and nan
    arr_out = np.where(arr_out < 0, args.nodata_val, arr_out)
    arr_out = np.where(np.isnan(arr_out), args.nodata_val, arr_out)

    #Write out
    out_fn = os.path.splitext(r_fn)[0] + '_' + args.out_type + '.tif'
    print("\tOutput file: %s" % (out_fn))
    #Note: passing r_fn here as the src_ds
    iolib.writeGTiff(arr_out, out_fn, r_fn)
def main():
    parser = getparser()
    args = parser.parse_args()
    dem_fn = args.dem_fn
    ndv = args.ndv
    max_slope = args.max_slope
    reduce_pct = args.reduce_pct
    #slope_res = args.slope_res
    slopelim = (0.1, max_slope)
    out_base = os.path.splitext(dem_fn)[0]

    print("\n\tGetting a slopemasked dem...\n")

    if reduce_pct != 100:
        print("\tCoarsening by %s.." % (reduce_pct))
        out_fn = out_base + '_pct' + str(reduce_pct) + '.tif'
        if not os.path.exists(out_fn):
            red_pct_str = str(reduce_pct) + "% " + str(reduce_pct) + "%"
            cmdStr = " ".join(["gdal_translate", "-r", "cubic", "-outsize", red_pct_str, dem_fn, out_fn])
            print(cmdStr)
            #cmdStr = "gdal_translate -r cubic -outsize " + str(reduce_pct) + "% " + str(reduce_pct) + "%"
            #cmdStr += dem_fn + out_fn
            cmd = subprocess.Popen(cmdStr, stdout=subprocess.PIPE, shell=True)
            stdOut, err = cmd.communicate()
        dem_fn = out_fn

    # Get slope as a masked array
    dem_slope_ma = geolib.gdaldem_wrapper(dem_fn, product='slope', returnma=True)

    # Get reduced dem ds
    dem_ds = iolib.fn_getds(dem_fn)
    dem_ma = iolib.ds_getma(dem_ds)

    # Get a new slope mask using max slope
    slopemask = (dem_slope_ma > max_slope)
    # Get whichever dem mask was there in the first place
    demmask = (dem_ma == ndv)
    newmask = np.logical_or(demmask, slopemask)

    # Apply the combined mask to the DEM
    newdem = np.ma.array(dem_ma, mask=newmask)

    # Save the new masked DEM
    dst_fn = out_base + '_slopemasked.tif'
    iolib.writeGTiff(newdem, dst_fn, dem_ds, ndv=ndv)
    return dst_fn
def skysat_footprint(img_fn, incrs=None):
    """
    Define ground corner footprint from RPC model

    Parameters
    ----------
    img_fn: str
        path to image with embedded RPC info in tiff tag
    incrs: dict
        crs to convert the final footprint into, by default the footprint is
        returned in geographic coordinates (EPSG:4326)

    Returns
    ----------
    footprint_shp: geopandas geodataframe
        geodataframe containing ground footprints in specified incrs
    """
    if os.path.islink(img_fn):
        img_ds = iolib.fn_getds(os.readlink(img_fn))
    else:
        img_ds = iolib.fn_getds(img_fn)
    nx = img_ds.RasterXSize
    ny = img_ds.RasterYSize
    #img_coord (0,0), (nx,0), (nx,ny), (0,ny) correspond to ul,ur,lr,ll
    z = float(img_ds.GetMetadata('RPC')['HEIGHT_OFF'])
    #z = float(ht.split(' ',1)[1].splitlines()[0])
    img_x = [0, nx, nx, 0]
    img_y = [0, 0, ny, ny]
    img_z = [z, z, z, z]
    #should ideally accept a common height above datum, read from rpc
    #done
    mx, my = asp_utils.rpc2map(img_fn, img_x, img_y, img_z)
    coord_list = list(zip(mx, my))
    footprint_poly = Polygon(coord_list)
    geo_crs = {'init': 'epsg:4326'}
    footprint_shp = gpd.GeoDataFrame(index=[0], geometry=[footprint_poly], crs=geo_crs)
    if incrs:
        footprint_shp = footprint_shp.to_crs(incrs)
    return footprint_shp
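# Usage sketch for skysat_footprint() (hypothetical filename): get the footprint
# in geographic coordinates, then reproject it to an example projected CRS.
footprint = skysat_footprint('skysat_scene_rpc.tif')
footprint_utm = skysat_footprint('skysat_scene_rpc.tif', incrs='EPSG:32610')
print(footprint.total_bounds)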
def domask(tile_fn):
    prefix = '-'.join(tile_fn.split('-')[:-1])
    print("\nLoading: %s" % tile_fn)
    #dem_fn = prefix+'-median.tif'
    dem_fn = tile_fn
    dem_ds = iolib.fn_getds(dem_fn)
    dem = iolib.ds_getma(dem_ds)

    #Get original mask, True where masked
    mask = np.ma.getmaskarray(dem)
    valid_px_count = (~mask).sum()
    if valid_px_count < 1:
        print("No valid pixels remain")
    else:
        print("Valid pixel count: %i" % valid_px_count)

        min_count = 2
        count_fn = prefix + '-count.tif'
        print("Loading: %s" % count_fn)
        count = iolib.fn_getma(count_fn)
        print("min: %i max: %i" % (count.min(), count.max()))
        print("Masking: (count < %i)" % min_count)
        mask = np.logical_or(mask, (count < min_count))
        valid_px_count = (~mask).sum()
        if valid_px_count < 1:
            print("No valid pixels remain")
        else:
            print("Valid pixel count: %i" % valid_px_count)

            max_std = 3.0
            #std_fn = prefix+'-std.tif'
            std_fn = prefix + '-nmad.tif'
            print("Loading: %s" % std_fn)
            std = iolib.fn_getma(std_fn)
            print("min: %i max: %i" % (std.min(), std.max()))
            print("Masking: (std/nmad >= %i)" % max_std)
            mask = np.logical_or(mask, (std >= max_std))
            valid_px_count = (~mask).sum()
            if valid_px_count < 1:
                print("No valid pixels remain")
            else:
                print("Valid pixel count: %i" % valid_px_count)

    #Modified so we always write out, even if empty tif
    #Easier for tracking progress
    print("Applying mask")
    dem_masked = np.ma.array(dem, mask=mask)
    out_fn = os.path.splitext(dem_fn)[0] + '_masked.tif'
    print("Writing: %s" % out_fn)
    iolib.writeGTiff(dem_masked, out_fn, dem_ds)
def main():
    parser = getparser()
    args = parser.parse_args()
    dem_fn = args.dem_fn
    ndv = args.ndv
    max_slope = args.max_slope
    reduce_pct = args.reduce_pct
    #slope_res = args.slope_res
    slopelim = (0.1, max_slope)

    print("\n\tGetting masked slope of input dem...")

    #Get a coarsened version of DEM on which to calc slope
    dem_fn_reduced = os.path.splitext(dem_fn)[0] + '_' + str(reduce_pct) + 'pct.vrt'
    #dem_fn_reduced = os.path.splitext(dem_fn)[0]+'_'+ str(slope_res) +'m.vrt'
    #print("\tReducing percent by %s..." % (str(reduce_pct)))
    run_os("gdal_translate -of VRT -r cubic -outsize " + str(reduce_pct) + "% " + str(reduce_pct) + "% " + dem_fn + " " + dem_fn_reduced)
    #run_os("gdal_translate -of VRT -r cubic -tr " + str(slope_res) + "% " + str(slope_res) + "% " + dem_fn + " " + dem_fn_reduced)

    # Run slope
    dem_slope_fn = geolib.gdaldem_wrapper(dem_fn_reduced, product='slope', returnma=False)

    # Get original ma and ds
    dem_slope = iolib.fn_getma(dem_slope_fn)
    dem_slope_ds = iolib.fn_getds(dem_slope_fn)

    # Mask slope values outside slopelim
    dem_slope = np.ma.array(dem_slope,
                            mask=np.ma.masked_outside(dem_slope, *slopelim).mask,
                            keep_mask=True,
                            fill_value=dem_slope.fill_value)

    # Save the filtered slope dataset
    dst_fn = os.path.splitext(dem_slope_fn)[0] + '_mask.tif'
    iolib.writeGTiff(dem_slope, dst_fn, dem_slope_ds, ndv=ndv)
    run_os("rm -fv " + dem_slope_fn)
    return dst_fn
def main():
    parser = getparser()
    args = parser.parse_args()
    dem_fn = args.dem_fn
    weight = args.weight
    poly_order = args.order

    # Get original DEM
    array_dem = iolib.fn_getma(dem_fn)
    dem_ds = iolib.fn_getds(dem_fn)

    # Create empty meshgrid
    x = range(0, dem_ds.RasterXSize, 1)
    y = range(0, dem_ds.RasterYSize, 1)
    X, Y = np.meshgrid(x, y)

    print("\nGet the gradient of the dem...")
    dzdy, dzdx = np.gradient(array_dem)

    print("\nCreate copies for modification, and set the masked areas to 0 in the gradients...")
    dzdx_mod = dzdx.copy()
    dzdy_mod = dzdy.copy()

    print("\nOsmanoglu Algorithm...")
    #routine to get the scale corrected surface back from gradients.
    #Note that this is with the original slopes, I want the result to be as close to the original DEM as possible
    fco_dem = frankotchellappaosmanoglu(dzdx, dzdy)  #FrankotChellappa removes long wavelength trends

    print("\nSubtract the recovered DEM from the original and estimate a surface...")
    planefit, fitfunc = fitSurface(X.ravel(), Y.ravel(), (array_dem - fco_dem).ravel())

    print("\nAdd the surface to the masked gradient derived DEM...")
    dem_interp = frankotchellappaosmanoglu(dzdx_mod, dzdy_mod) + fitfunc(planefit, X, Y, weight, poly_order)

    print("\nWriting DEM with interpolated surfaces:")
    dst_fn = os.path.splitext(dem_fn)[0]+'_interp.tif'
    print(dst_fn)
    iolib.writeGTiff(dem_interp, dst_fn, dem_ds)
    return dst_fn

    # Return a numpy masked array
    return dem_interp
def res_sort(img_list):
    """
    sort images based on resolution, finest resolution on top

    Parameters
    ----------
    img_list: list
        list of images to be sorted

    Returns
    ----------
    sorted_img_list: list
        list of sorted images with finest resolution on top
    """
    ds_list = [iolib.fn_getds(img) for img in img_list]
    res_list = [geolib.get_res(ds, square=True)[0] for ds in ds_list]
    #https://www.geeksforgeeks.org/python-sort-values-first-list-using-second-list
    zipped_pairs = zip(res_list, img_list)
    sorted_img_list = [x for _, x in sorted(zipped_pairs)]
    return sorted_img_list
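# Usage sketch for res_sort() (hypothetical filenames): the finest-GSD image
# should come back first.
img_list = ['ortho_2m.tif', 'ortho_0.5m.tif', 'ortho_1m.tif']
print(res_sort(img_list))
# expected order: ['ortho_0.5m.tif', 'ortho_1m.tif', 'ortho_2m.tif']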
def main():
    parser = getparser()
    args = parser.parse_args()

    src_fn = args.src_fn
    if not iolib.fn_check(src_fn):
        sys.exit("Unable to find src_fn: %s" % src_fn)

    #This is a wrapper around gdal.Open()
    src_ds = iolib.fn_getds(src_fn)
    src_gt = src_ds.GetGeoTransform()

    print("Loading input raster into masked array")
    bma = iolib.ds_getma(src_ds)

    print("Computing min/max indices for mask")
    edge_env = malib.edgefind2(bma, intround=True)

    print("Updating output geotransform")
    out_gt = list(src_gt)
    #This should be OK, as edge_env values are integer multiples, and the initial gt values are upper left pixel corner
    #Update UL_X
    out_gt[0] = src_gt[0] + src_gt[1] * edge_env[2]
    #Update UL_Y, note src_gt[5] is negative
    out_gt[3] = src_gt[3] + src_gt[5] * edge_env[0]
    out_gt = tuple(out_gt)

    #debug
    #print([0, bma.shape[0], 0, bma.shape[1]])
    #print(edge_env)
    #print(src_gt)
    #print(out_gt)

    out_fn = os.path.splitext(src_fn)[0] + '_trim.tif'
    print("Writing out: %s" % out_fn)
    #Extract valid subsection from input array
    #indices+1 are necessary to include valid row/col on right and bottom edges
    iolib.writeGTiff(bma[edge_env[0]:edge_env[1] + 1, edge_env[2]:edge_env[3] + 1], out_fn, src_ds, gt=out_gt)
    bma = None
def make_dem_mosaic_index_ts(index_tif_fn):
    index_txt_fn = index_tif_fn + '-index-map.txt'
    out_fn = os.path.splitext(index_tif_fn)[0] + '_ts.tif'
    if not os.path.exists(index_tif_fn):
        print("Unable to find input file: %s" % index_tif_fn)
        return False
    if not os.path.exists(index_txt_fn):
        print("Unable to find input file: %s" % index_txt_fn)
        return False
    if os.path.exists(out_fn):
        print("Existing ts output found: %s" % out_fn)
        return True

    #Load dem_mosaic index tif
    index_tif_ds = iolib.fn_getds(index_tif_fn)
    index_tif = iolib.ds_getma(index_tif_ds)

    #Create output array
    #index_ts_tif = np.zeros(index_tif.shape, dtype=np.float32)
    #index_ts_tif = np.ma.masked_all_like(index_tif)
    index_ts_tif = np.ma.masked_all(index_tif.shape, dtype=np.float32)

    #Read in dem_mosaic index txt file - should be "fn, index" for each record
    index_txt = np.atleast_2d(np.genfromtxt(index_txt_fn, dtype=str))

    #Extract Python datetime object from filename (should pull out YYYYMMDD_HHMM)
    index_ts = [timelib.fn_getdatetime(fn) for fn in index_txt[:, 0]]

    #Convert to desired output timestamp format
    #Python ordinal
    #index_ts = timelib.dt2o(index_ts)
    #YYYYMMDD integer
    #index_ts = [ts.strftime('%Y%m%d') for ts in index_ts]
    #Decimal year
    index_ts = [timelib.dt2decyear(ts) for ts in index_ts]

    for n, dt in enumerate(index_ts):
        index_ts_tif[index_tif == n] = dt

    iolib.writeGTiff(index_ts_tif, out_fn, index_tif_ds, ndv=0)
    return True
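# Usage sketch for make_dem_mosaic_index_ts() (hypothetical filename): given a
# dem_mosaic index tile and its companion '-index-map.txt' file, write a
# per-pixel decimal-year timestamp raster alongside it.
make_dem_mosaic_index_ts('mos_8m/mos_8m_tile-0-index.tif')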
def main():
    parser = getparser()
    args = parser.parse_args()
    chm_fn = args.chm_fn
    outdir = args.outdir
    ndv = args.ndv
    min_height = args.min_height
    max_height = args.max_height

    if not iolib.fn_check(chm_fn):
        sys.exit("Unable to find chm_fn: %s" % chm_fn)

    out_base = os.path.splitext(chm_fn)[0]
    if outdir is not None:
        inputdir, chmname = os.path.split(chm_fn)
        out_base = os.path.join(outdir, os.path.splitext(chmname)[0])

    # Get chm ds and ma
    chm_ds = iolib.fn_getds(chm_fn)
    chm_ma = iolib.ds_getma(chm_ds)

    # Get a new chm ma using max height
    heightmasklo = (chm_ma < min_height)
    heightmaskhi = (chm_ma > max_height)
    # Get whichever dem ma was there in the first place
    chmmask = (chm_ma == ndv)

    # https://stackoverflow.com/questions/20528328/numpy-logical-or-for-more-than-two-arguments
    newmask = np.logical_or.reduce((chmmask, heightmasklo, heightmaskhi))
    #np.logical_or(heightmasklo, heightmaskhi)

    # Apply mask
    newchm = np.ma.array(chm_ma, mask=newmask)

    # Save the new masked CHM
    dst_fn = out_base + '_htmasked.tif'
    iolib.writeGTiff(newchm, dst_fn, chm_ds, ndv=ndv)
    print(dst_fn)
    return dst_fn
def dz_fltr_dir(dem_fn_list, refdem_fn, abs_dz_lim, out_dir):
    #names = dem_fn_list[0]
    for names in dem_fn_list:
        #print("Loading ouput DEM into masked array")
        dem_ds = iolib.fn_getds(names)
        dem_fltr = iolib.ds_getma(dem_ds, 1)

        #Difference filter, need to specify refdem_fn
        dem_fltr = filtlib.dz_fltr(names, refdem_fn, abs_dz_lim)

        # create output directory and file name
        parts = names.split('/')
        file_name = parts[len(parts) - 1]
        dst_fn = out_dir + file_name.split('.')[0] + '_filt%ipx.tif' % abs_dz_lim[1]

        print("Writing out filtered DEM: %s" % dst_fn)
        #Note: writeGTiff writes dem_fltr.filled()
        iolib.writeGTiff(dem_fltr, dst_fn, dem_ds)
def get_cam2rpc_opts(t='pinhole', dem=None, gsd=None, num_samples=50):
    """
    generates cmd for ASP cam2rpc
    This generates rpc camera models from the optimized frame camera models
    See documentation here: https://stereopipeline.readthedocs.io/en/latest/tools/cam2rpc.html

    Parameters
    ----------
    t: str
        session, or for here, type of input camera, default: pinhole
    dem: str
        path to DEM which will be used for calculating RPC polynomials
    gsd: float
        Expected ground-sampling distance
    num_samples: int
        Sampling for RPC approximation calculation (default=50)

    Returns
    ----------
    cam2rpc_opts: list
        A list of arguments for cam2rpc call.
    """
    cam2rpc_opts = []
    cam2rpc_opts.extend(['--dem-file', dem])
    dem_ds = iolib.fn_getds(dem)
    dem_proj = dem_ds.GetProjection()
    dem = iolib.ds_getma(dem_ds)
    min_height, max_height = np.percentile(dem.compressed(), (0.01, 0.99))
    tsrs = epsg2geolib(4326)
    #Note: use the DEM dataset opened above for the extent query
    xmin, ymin, xmax, ymax = geolib.ds_extent(dem_ds, tsrs)
    cam2rpc_opts.extend(['--height-range', str(min_height), str(max_height)])
    cam2rpc_opts.extend(['--lon-lat-range', str(xmin), str(ymin), str(xmax), str(ymax)])
    if gsd:
        cam2rpc_opts.extend(['--gsd', str(gsd)])

    cam2rpc_opts.extend(['--session', t])
    cam2rpc_opts.extend(['--num-samples', str(num_samples)])
    return cam2rpc_opts
def main():
    parser = getparser()
    args = parser.parse_args()

    r_fn = args.r_fn
    if not os.path.exists(r_fn):
        sys.exit("Unable to find r_fn: %s" % r_fn)

    windowsize = args.windowsize

    # r_fn to r_ds
    r_ds = iolib.fn_getds(r_fn)
    r_arr = r_ds.GetRasterBand(1).ReadAsArray()

    # Creating data range
    #r_arr = np.ma.masked_outside(r_arr,0,100)   # mask all values outside this interval
    #r_arr = np.ma.masked_invalid(r_arr)         # mask all nan and inf values
    # Forget about masked arrays...just use np.where to put 0 for all invalid vals
    r_arr = np.where((r_arr > args.min) & (r_arr <= args.max), r_arr, 0)
    #myType=float
    #r_arr = r_arr.astype(myType)

    print("\tDoing moving window on array...")
    arr_out = moving_window(r_arr.astype(np.uint16), func=np.sum, window_size=windowsize)
    #r = filtlib.rolling_fltr(r_arr, f=np.sum, size=windowsize, circular=True)
    #r = gauss_fltr(r_arr, sigma=1)
    print("\tCheck array type OUTPUT from filter: %s" % (arr_out.dtype))

    # Deal with negatives and nan
    arr_out = np.where(arr_out < 0, -99, arr_out)
    arr_out = np.where(np.isnan(arr_out), -99, arr_out)

    #Write out
    win_str = "%02d" % (int(windowsize))
    out_fn = os.path.splitext(r_fn)[0] + '_win' + win_str + 'sum.tif'
    #Note: passing r_fn here as the src_ds
    iolib.writeGTiff(arr_out, out_fn, r_fn)
def iv_fn(fn, full=False, return_ma=False, **kwargs):
    ds = iolib.fn_getds(fn)
    return iv_ds(ds, full=full, return_ma=return_ma, **kwargs)
#SCG
#dem_clim = (760, 2270)
#Baker
#dem_clim = (550, 2650)
#Ngozumpa
#dem_clim = (4500, 7400)
#GM
#dem_clim = (1766, 3247)
#SBB
#dem_clim = (2934, 3983)

hs_clim = (1, 255)

for i, dem_fn in enumerate(dem_fn_list):
    ax = grid[i]
    print(dem_fn)
    dem_ds = iolib.fn_getds(dem_fn)
    dem = iolib.ds_getma_sub(dem_ds)
    dem_hs_fn = os.path.splitext(dem_fn)[0] + '_hs_az315.tif'
    if os.path.exists(dem_hs_fn):
        dem_hs = iolib.fn_getma_sub(dem_hs_fn)
    else:
        dem_hs = geolib.gdaldem_mem_ds(dem_ds, 'hillshade', returnma=True)
    dt = timelib.fn_getdatetime(dem_fn)
    if dt is not None:
        title = dt.strftime('%Y-%m-%d')
        t = ax.set_title(title, fontdict={'fontsize': 6})
        t.set_position([0.5, 0.95])
    hs_im = ax.imshow(dem_hs, vmin=hs_clim[0], vmax=hs_clim[1], cmap='gray')
    dem_im = ax.imshow(dem, vmin=dem_clim[0], vmax=dem_clim[1],
def main():
    parser = getparser()
    args = parser.parse_args()

    if args.seedmode == 'existing_velocity':
        if args.vx_fn is None or args.vy_fn is None:
            parser.error('"-seedmode existing_velocity" requires "-vx_fn" and "-vy_fn"')

    print('\n%s' % datetime.now())
    print('%s UTC\n' % datetime.utcnow())

    align = args.align
    seedmode = args.seedmode
    spr = args.refinement
    erode = args.erode
    #Correlator tile timeout
    #With proper seeding, correlation should be very fast
    #timeout = 360
    timeout = 1200
    threads = args.threads
    kernel = (args.kernel, args.kernel)
    #SGM correlator
    if spr > 3:
        #kernel = (7,7)
        kernel = (11,11)
        erode = 0

    #Smooth the output F.tif
    smoothF = args.filter

    res = args.tr
    #Resample input to something easier to work with
    #res = 4.0

    #Open input files
    fn1 = args.fn1
    fn2 = args.fn2
    if not iolib.fn_check(fn1) or not iolib.fn_check(fn2):
        sys.exit("Unable to locate input files")

    if args.outdir is not None:
        outdir = args.outdir
    else:
        outdir = '%s__%s_vmap_%sm_%ipx_spm%i' % (os.path.splitext(os.path.split(fn1)[1])[0], \
                os.path.splitext(os.path.split(fn2)[1])[0], res, kernel[0], spr)

    #Note, can encounter filename length issues in boost, just use vmap prefix
    outprefix = '%s/vmap' % (outdir)
    if not os.path.exists(outdir):
        os.makedirs(outdir)

    #Check to see if inputs have geolocation and projection information
    ds1 = iolib.fn_getds(fn1)
    ds2 = iolib.fn_getds(fn2)

    if geolib.srs_check(ds1) and geolib.srs_check(ds2):
        ds1_clip_fn = os.path.join(outdir, os.path.splitext(os.path.basename(fn1))[0]+'_warp.tif')
        ds2_clip_fn = os.path.join(outdir, os.path.splitext(os.path.basename(fn2))[0]+'_warp.tif')

        if not os.path.exists(ds1_clip_fn) or not os.path.exists(ds2_clip_fn):
            #This should write out files to new subdir
            ds1_clip, ds2_clip = warplib.diskwarp_multi_fn([fn1, fn2], extent='intersection', res=res, r='average', outdir=outdir)
            ds1_clip = None
            ds2_clip = None
            #However, if inputs have identical extent/res/proj, then link to original files
            if not os.path.exists(ds1_clip_fn):
                os.symlink(os.path.abspath(fn1), ds1_clip_fn)
            if not os.path.exists(ds2_clip_fn):
                os.symlink(os.path.abspath(fn2), ds2_clip_fn)
        align = 'None'

        #Mask support - limit correlation only to rock/ice surfaces, no water/veg
        #This masks input images - guarantee we won't waste time correlating over vegetation
        #TODO: Add support to load arbitrary raster or shp mask
        if args.mask_input:
            ds1_masked_fn = os.path.splitext(ds1_clip_fn)[0]+'_masked.tif'
            ds2_masked_fn = os.path.splitext(ds2_clip_fn)[0]+'_masked.tif'
            if not os.path.exists(ds1_masked_fn) or not os.path.exists(ds2_masked_fn):
                #Load NLCD or bareground mask
                from demcoreg.dem_mask import get_lulc_mask
                ds1_clip = iolib.fn_getds(ds1_clip_fn)
                lulc_mask_fn = os.path.join(outdir, 'lulc_mask.tif')
                #if not os.path.exists(nlcd_mask_fn):
                lulc_mask = get_lulc_mask(ds1_clip, mask_glaciers=False, filter='not_forest')
                iolib.writeGTiff(lulc_mask, lulc_mask_fn, ds1_clip)
                ds1_clip = None
                #Now apply to original images
                #This could be problematic for huge inputs, see apply_mask.py
                #lulc_mask = lulc_mask.astype(int)
                for fn in (ds1_clip_fn, ds2_clip_fn):
                    ds = iolib.fn_getds(fn)
                    a = iolib.ds_getma(ds)
                    a = np.ma.array(a, mask=~(lulc_mask))
                    if a.count() > 0:
                        out_fn = os.path.splitext(fn)[0]+'_masked.tif'
                        iolib.writeGTiff(a, out_fn, ds)
                        a = None
                    else:
                        sys.exit("No unmasked pixels over bare earth")
            ds1_clip_fn = ds1_masked_fn
            ds2_clip_fn = ds2_masked_fn
    else:
        ds1_clip_fn = fn1
        ds2_clip_fn = fn2
        #Now let user specify alignment methods as option - don't hardcode
        #align = 'Homography'
        #align = 'AffineEpipolar'

    ds1 = None
    ds2 = None
    #Should have extra kwargs option here
    stereo_opt = get_stereo_opt(threads=threads, kernel=kernel, timeout=timeout, \
            erode=erode, spr=spr, align=align)

    #Stereo arguments
    #Latest version of ASP should accept tif without camera models
    #stereo_args = [ds1_clip_fn, ds2_clip_fn, outprefix]
    #Nope - still need to provide dummy camera models, and they must be unique files
    #Use the dummy.tsai file bundled in the vmap repo
    dummy_tsai = os.path.join(os.path.split(os.path.realpath(__file__))[0], 'dummy.tsai')
    dummy_tsai2 = os.path.splitext(dummy_tsai)[0]+'2.tsai'
    if not os.path.exists(dummy_tsai2):
        dummy_tsai2 = os.symlink(dummy_tsai, os.path.splitext(dummy_tsai)[0]+'2.tsai')
    stereo_args = [ds1_clip_fn, ds2_clip_fn, dummy_tsai, dummy_tsai2, outprefix]

    #Run stereo_pprc
    if not os.path.exists(outprefix+'-R_sub.tif'):
        run_cmd('stereo_pprc', stereo_opt+stereo_args, msg='0: Preprocessing')
        #Copy proj info to outputs, this should happen automatically now?
        for ext in ('L', 'R', 'L_sub', 'R_sub', 'lMask', 'rMask', 'lMask_sub', 'rMask_sub'):
            geolib.copyproj(ds1_clip_fn, '%s-%s.tif' % (outprefix, ext))

    #Prepare seeding for stereo_corr
    #TODO: these are untested after refactoring
    if not os.path.exists(outprefix+'_D_sub.tif'):
        #Don't need to do anything for default seed-mode 1
        if seedmode == 'sparse_disp':
            #Sparse correlation of full-res images
            stereo_opt.extend(['--corr-seed-mode', '3'])
            sparse_disp_opt = []
            sparse_disp_opt.extend(['--Debug', '--coarse', '512', '--fine', '256', '--no_epipolar_fltr'])
            sparse_disp_opt.extend(['-P', str(threads)])
            sparse_disp_args = [outprefix+'-L.tif', outprefix+'-R.tif', outprefix]
            run_cmd('sparse_disp', sparse_disp_opt+sparse_disp_args, msg='0.5: D_sub generation')
        elif seedmode == 'existing_velocity':
            #User-input low-res velocity maps for seeding
            #TODO: Add functions that fetch best available velocities for Ant/GrIS or user-defined low-res velocities
            #Automatically query GoLive velocities here
            vx_fn = args.vx_fn
            vy_fn = args.vy_fn
            #Check for existence

            #HMA seeding
            vdir = '/nobackup/deshean/rpcdem/hma/velocity_jpl_amaury_2013-2015'
            vx_fn = os.path.join(vdir, 'PKH_WRS2_B8_2013_2015_snr5_n1_r170_res12.x_vel.TIF')
            vy_fn = os.path.join(vdir, 'PKH_WRS2_B8_2013_2015_snr5_n1_r170_res12.y_vel.TIF')

            if os.path.exists(vx_fn) and os.path.exists(vy_fn):
                ds1_clip = iolib.fn_getds(ds1_clip_fn)
                ds1_res = geolib.get_res(ds1_clip, square=True)[0]

                #Compute L_sub res - use this for output dimensions
                L_sub_fn = outprefix+'-L_sub.tif'
                L_sub_ds = gdal.Open(L_sub_fn)
                L_sub_x_scale = float(ds1_clip.RasterXSize) / L_sub_ds.RasterXSize
                L_sub_y_scale = float(ds1_clip.RasterYSize) / L_sub_ds.RasterYSize
                L_sub_scale = np.max([L_sub_x_scale, L_sub_y_scale])
                L_sub_res = ds1_res * L_sub_scale

                #Since we are likely upsampling here, use cubicspline
                vx_ds_clip, vy_ds_clip = warplib.memwarp_multi_fn([vx_fn, vy_fn], extent=ds1_clip, \
                        t_srs=ds1_clip, res=L_sub_res, r='cubicspline')
                ds1_clip = None

                #Get vx and vy arrays
                vx = iolib.ds_getma(vx_ds_clip)
                vy = iolib.ds_getma(vy_ds_clip)

                #Determine time interval between inputs
                #Use to scaling of known low-res velocities
                t_factor = get_t_factor_fn(ds1_clip_fn, ds2_clip_fn, ds=vx_ds_clip)

                if t_factor is not None:
                    #Compute expected offset in scaled pixels
                    dx = (vx*t_factor)/L_sub_res
                    dy = (vy*t_factor)/L_sub_res
                    #Note: Joughin and Rignot's values are positive y up!
                    #ASP is positive y down, so need to multiply these values by -1
                    #dy = -(vy*t_factor)/L_sub_res

                    #Should smooth/fill dx and dy

                    #If absolute search window is only 30x30
                    #Don't seed, just use fixed search window
                    #search_window_area_thresh = 900
                    search_window_area_thresh = 0
                    search_window = np.array([dx.min(), dy.min(), dx.max(), dy.max()])
                    dx_p = calcperc(dx, perc=(0.5, 99.5))
                    dy_p = calcperc(dy, perc=(0.5, 99.5))
                    search_window = np.array([dx_p[0], dy_p[0], dx_p[1], dy_p[1]])
                    search_window_area = (search_window[2]-search_window[0]) * (search_window[3]-search_window[1])
                    if search_window_area < search_window_area_thresh:
                        stereo_opt.extend(['--corr-seed-mode', '0'])
                        stereo_opt.append('--corr-search')
                        stereo_opt.extend([str(x) for x in search_window])
                        #pad_perc=0.1
                        #stereo_opt.extend(['--corr-sub-seed-percent', str(pad_perc)]
                    #Otherwise, generate a D_sub map from low-res velocity
                    else:
                        stereo_opt.extend(['--corr-seed-mode', '3'])
                        #This is relative to the D_sub scaled disparities
                        d_sub_fn = L_sub_fn.split('-L_sub')[0]+'-D_sub.tif'
                        gen_d_sub(d_sub_fn, dx, dy)

    #If the above didn't generate a D_sub.tif for seeding, run stereo_corr to generate Low-res D_sub.tif
    if not os.path.exists(outprefix+'-D_sub.tif'):
        newopt = ['--compute-low-res-disparity-only',]
        run_cmd('stereo_corr', newopt+stereo_opt+stereo_args, msg='1.1: Low-res Correlation')
    #Copy projection info to D_sub
    geolib.copyproj(outprefix+'-L_sub.tif', outprefix+'-D_sub.tif')

    #Mask D_sub to limit correlation over bare earth surfaces
    #This _should_ be a better approach than masking input images, but stereo_corr doesn't honor D_sub
    #Still need to mask input images before stereo_pprc
    #Left this in here for reference, or if this changes in ASP
    if False:
        D_sub_ds = gdal.Open(outprefix+'-D_sub.tif', gdal.GA_Update)

        #Mask support - limit correlation only to rock/ice surfaces, no water/veg
        from demcoreg.dem_mask import get_nlcd, mask_nlcd
        nlcd_fn = get_nlcd()
        nlcd_ds = warplib.diskwarp_multi_fn([nlcd_fn,], extent=D_sub_ds, res=D_sub_ds, t_srs=D_sub_ds, r='near', outdir=outdir)[0]
        #validmask = mask_nlcd(nlcd_ds, valid='rock+ice')
        validmask = mask_nlcd(nlcd_ds, valid='not_forest', mask_glaciers=False)
        nlcd_mask_fn = os.path.join(outdir, 'nlcd_validmask.tif')
        iolib.writeGTiff(validmask, nlcd_mask_fn, nlcd_ds)

        #Now apply to D_sub (band 3 is valid mask)
        #validmask = validmask.astype(int)
        for b in (1,2,3):
            dsub = iolib.ds_getma(D_sub_ds, b)
            dsub = np.ma.array(dsub, mask=~(validmask))
            D_sub_ds.GetRasterBand(b).WriteArray(dsub.filled())
        D_sub_ds = None

    #OK, finally run stereo_corr full-res integer correlation with appropriate seeding
    if not os.path.exists(outprefix+'-D.tif'):
        run_cmd('stereo_corr', stereo_opt+stereo_args, msg='1: Correlation')
        geolib.copyproj(ds1_clip_fn, outprefix+'-D.tif')

    #Run stereo_rfne
    if spr > 0:
        if not os.path.exists(outprefix+'-RD.tif'):
            run_cmd('stereo_rfne', stereo_opt+stereo_args, msg='2: Refinement')
            geolib.copyproj(ds1_clip_fn, outprefix+'-RD.tif')
        d_fn = make_ln(outdir, outprefix, '-RD.tif')
    else:
        ln_fn = outprefix+'-RD.tif'
        if os.path.lexists(ln_fn):
            os.remove(ln_fn)
        os.symlink(os.path.split(outprefix)[1]+'-D.tif', ln_fn)

    #Run stereo_fltr
    if not os.path.exists(outprefix+'-F.tif'):
        run_cmd('stereo_fltr', stereo_opt+stereo_args, msg='3: Filtering')
        geolib.copyproj(ds1_clip_fn, outprefix+'-F.tif')

    d_fn = make_ln(outdir, outprefix, '-F.tif')

    if smoothF and not os.path.exists(outprefix+'-F_smooth.tif'):
        print('Smoothing F.tif')
        from pygeotools.lib import filtlib
        #Fill holes and smooth F
        F_fill_fn = outprefix+'-F_smooth.tif'
        F_ds = gdal.Open(outprefix+'-F.tif', gdal.GA_ReadOnly)
        #import dem_downsample_fill
        #F_fill_ds = dem_downsample_fill.gdalfill_ds(F_fill_ds)
        print('Creating F_smooth.tif')
        F_fill_ds = iolib.gtif_drv.CreateCopy(F_fill_fn, F_ds, 0, options=iolib.gdal_opt)
        F_ds = None
        for n in (1, 2):
            print('Smoothing band %i' % n)
            b = F_fill_ds.GetRasterBand(n)
            b_fill_bma = iolib.b_getma(b)
            #b_fill_bma = iolib.b_getma(dem_downsample_fill.gdalfill(b))
            #Filter extreme values (careful, could lose areas of valid data with fastest v)
            #b_fill_bma = filtlib.perc_fltr(b_fill_bma, perc=(0.01, 99.99))
            #These filters remove extreme values and fill data gaps
            #b_fill_bma = filtlib.median_fltr_skimage(b_fill_bma, radius=7, erode=0)
            #b_fill_bma = filtlib.median_fltr(b_fill_bma, fsize=7, origmask=True)
            #Gaussian filter
            b_fill_bma = filtlib.gauss_fltr_astropy(b_fill_bma, size=9)
            b.WriteArray(b_fill_bma)
        F_fill_ds = None
        d_fn = make_ln(outdir, outprefix, '-F_smooth.tif')

    print('\n%s' % datetime.now())
    print('%s UTC\n' % datetime.utcnow())

    #If time interval is specified, convert pixel displacements to rates
    if args.dt != 'none':
        #Check if vm.tif already exists
        #Should probably just overwrite by default
        #if os.path.exists(os.path.splitext(d_fn)[0]+'_vm.tif'):
        #    print("\nFound existing velocity magnitude map!\n")
        #else:
        #Generate output velocity products and figure
        #Requires that vmap repo is in PATH
        cmd = ['disp2v.py', d_fn]
        #Note: this will attempt to automatically determine control surfaces
        #disp2v.py will accept arbitrary mask, could pass through here
        if args.remove_offsets:
            cmd.append('-remove_offsets')
        cmd.extend(['-dt', args.dt])
        print("Converting disparities to velocities")
        print(cmd)
        subprocess.call(cmd)
def dem_align(ref_dem, source_dem, max_displacement, outprefix, align, trans_only=False, threads=n_cpu):
    """
    This function implements the full DEM alignment workflow using ASP's pc_align and point2dem programs
    See relevant documentation here: https://stereopipeline.readthedocs.io/en/latest/tools/pc_align.html

    Parameters
    ----------
    ref_dem: str
        path to reference DEM for alignment
    source_dem: str
        path to source DEM to be aligned
    max_displacement: float
        Maximum expected displacement between input DEMs, useful for culling outliers before solving for shifts, default: 100 m
    outprefix: str
        prefix with which pc_align results will be saved (can be a path, general convention for repo is some path with run prefix, e.g., aligned_to/run)
    align: str
        ICP's alignment algorithm to use. default: point-to-plane
    trans_only: bool
        if True, this instructs the program to compute translation only during point cloud optimization. Default: False
    threads: int
        number of threads to use for each stereo job
    """
    # this block checks whether the reference DEM or the source DEM is finer resolution
    # if the reference DEM is finer resolution, then source is aligned to reference
    # if the source DEM is finer, then reference is aligned to source and source is corrected via the inverse transformation matrix of the source-to-reference alignment.
    source_ds = iolib.fn_getds(source_dem)
    ref_ds = iolib.fn_getds(ref_dem)
    source_res = geolib.get_res(source_ds, square=True)[0]
    ref_res = geolib.get_res(ref_ds, square=True)[0]
    tr = source_res
    tsrs = source_ds.GetProjection()
    print(type(tsrs))
    if ref_res <= source_res:
        source = True
        pc_align_args = [ref_dem, source_dem]
        pc_id = 'trans_source.tif'
        pc_align_vec = '-transform.txt'
    else:
        source = False
        pc_align_args = [source_dem, ref_dem]
        pc_id = 'trans_reference.tif'
        pc_align_vec = '-inverse-transform.txt'

    print("Aligning clouds via the {} method".format(align))
    pc_align_opts = get_pc_align_opts(outprefix, max_displacement, align=align, source=source, trans_only=trans_only, threads=threads)
    pc_align_log = run_cmd('pc_align', pc_align_opts + pc_align_args)
    print(pc_align_log)

    # this try/except block checks for 2 things:
    # - Did the transformed point cloud get produced?
    # - Was the maximum displacement greater than twice the max_displacement specified by the user?
    # The 2nd condition is implemented for tricky alignment of individual triplet DEMs to reference, as some small DEMs might be awkwardly displaced to > 1000 m.
    # If the above conditions are not met, then gridding of the transformed point cloud into the final DEM will not occur.
    try:
        pc = glob.glob(outprefix + '*' + pc_id)[0]
        # this will hopefully pull out the latest transformation log
        pc_log = sorted(glob.glob(outprefix + '*' + 'log-pc_align*.txt'))[-1]
    except:
        print("Failed to find aligned point cloud file")
        sys.exit()

    max_disp = get_total_shift(pc_log)
    print("Maximum displacement is {}".format(max_disp))
    if max_disp <= 2 * max_displacement:
        grid = True
    else:
        grid = False

    if grid == True:
        point2dem_opts = get_point2dem_opts(tr, tsrs, threads=threads)
        point2dem_args = [pc]
        print("Saving aligned reference DEM at {}-DEM.tif".format(os.path.splitext(pc)[0]))
        p2dem_log = run_cmd('point2dem', point2dem_opts + point2dem_args)

        # create alignment vector with consistent name of alignment vector for camera alignment
        final_align_vector = os.path.join(os.path.dirname(outprefix), 'alignment_vector.txt')
        pc_align_vec = glob.glob(os.path.join(outprefix + pc_align_vec))[0]
        print(f"Creating DEM alignment vector at {final_align_vector}")
        shutil.copy2(pc_align_vec, final_align_vector)
        print(p2dem_log)
    elif grid == False:
        print("aligned cloud not produced or the total shift applied to cloud is greater than 2 times the max_displacement specified, gridding abandoned")
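# Usage sketch for dem_align() (hypothetical paths): align a source DEM to a
# reference DEM with the defaults described in the docstring (100 m culling
# threshold, point-to-plane ICP), writing results under 'aligned_to/run'.
dem_align(ref_dem='refdem_2m.tif', source_dem='source_dem_2m.tif',
          max_displacement=100, outprefix='aligned_to/run',
          align='point-to-plane', trans_only=False, threads=4)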
def main():
    parser = getparser()
    args = parser.parse_args()

    fn = args.fn
    if not iolib.fn_check(fn):
        sys.exit("Unable to locate input file: %s" % fn)

    #Need some checks on these
    param = args.param

    print("Loading input raster into masked array")
    ds = iolib.fn_getds(fn)
    #Currently supports only single band operations
    r = iolib.ds_getma(ds, 1)

    #May need to cast input ma as float32 so np.nan filling works
    #r = r.astype(np.float32)
    #Want function that checks and returns float32 if necessary
    #Should filter, then return original dtype
    r_fltr = r

    #Loop through all specified input filters
    #for filt in args.filt:
    filt = args.filt[0]
    if len(param) == 1:
        param = param[0]

    param_str = ''
    if filt == 'range':
        #Range filter
        param = [float(i) for i in param[1:]]
        r_fltr = filtlib.range_fltr(r_fltr, param)
        param_str = '_{0:0.2f}-{1:0.2f}'.format(*param)
    elif filt == 'absrange':
        #Range filter of absolute values
        param = [float(i) for i in param[1:]]
        r_fltr = filtlib.absrange_fltr(r_fltr, param)
        param_str = '_{0:0.2f}-{1:0.2f}'.format(*param)
    elif filt == 'perc':
        #Percentile filter
        param = [float(i) for i in param[1:]]
        r_fltr = filtlib.perc_fltr(r, perc=param)
        param_str = '_{0:0.2f}-{1:0.2f}'.format(*param)
    elif filt == 'med':
        #Median filter
        param = int(param)
        r_fltr = filtlib.rolling_fltr(r_fltr, f=np.nanmedian, size=param)
        #r_fltr = filtlib.median_fltr(r_fltr, fsize=param, origmask=True)
        #r_fltr = filtlib.median_fltr_skimage(r_fltr, radius=4, origmask=True)
        param_str = '_%ipx' % param
    elif filt == 'gauss':
        #Gaussian filter (default)
        param = int(param)
        r_fltr = filtlib.gauss_fltr_astropy(r_fltr, size=param, origmask=False, fill_interior=False)
        param_str = '_%ipx' % param
    elif filt == 'highpass':
        #High pass filter
        param = int(param)
        r_fltr = filtlib.highpass(r_fltr, size=param)
        param_str = '_%ipx' % param
    elif filt == 'sigma':
        #n*sigma filter, remove outliers
        param = int(param)
        r_fltr = filtlib.sigma_fltr(r_fltr, n=param)
        param_str = '_n%i' % param
    elif filt == 'mad':
        #n*mad filter, remove outliers
        #Maybe better to use a percentile filter
        param = int(param)
        r_fltr = filtlib.mad_fltr(r_fltr, n=param)
        param_str = '_n%i' % param
    elif filt == 'dz':
        #Difference filter, need to specify ref_fn and range
        #Could let the user compute their own dz, then just run a standard range or absrange filter
        ref_fn = param[0]
        ref_ds = warplib.memwarp_multi_fn([ref_fn,], res=ds, extent=ds, t_srs=ds)[0]
        ref = iolib.ds_getma(ref_ds)
        param = [float(i) for i in param[1:]]
        r_fltr = filtlib.dz_fltr_ma(r, ref, rangelim=param)
        #param_str = '_{0:0.2f}-{1:0.2f}'.format(*param)
        param_str = '_{0:0.0f}_{1:0.0f}'.format(*param)
    else:
        sys.exit("No filter type specified")

    #Compute and print stats before/after
    if args.stats:
        print("Input stats:")
        malib.print_stats(r)
        print("Filtered stats:")
        malib.print_stats(r_fltr)

    #Write out
    dst_fn = os.path.splitext(fn)[0] + '_%sfilt%s.tif' % (filt, param_str)
    if args.outdir is not None:
        outdir = args.outdir
        if not os.path.exists(outdir):
            os.makedirs(outdir)
        dst_fn = os.path.join(outdir, os.path.split(dst_fn)[-1])
    print("Writing out filtered raster: %s" % dst_fn)
    iolib.writeGTiff(r_fltr, dst_fn, ds)
def slope_fltr_fn(dem_fn, slopelim=(0, 40)):
    dem_ds = iolib.fn_getds(dem_fn)
    return slope_fltr_ds(dem_ds, slopelim)
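# Usage sketch for slope_fltr_fn() (hypothetical filename): mask DEM pixels on
# slopes steeper than 40 degrees and return the filtered masked array.
dem_lowslope = slope_fltr_fn('dem_2m.tif', slopelim=(0, 40))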
import os
import sys
import numpy as np
from datetime import datetime

from pygeotools.lib import timelib, iolib

#SRTM, then systematic timestamps
dt_list = [datetime(2000,2,11), datetime(2000,5,31), datetime(2009,5,31), datetime(2018,5,31)]

stack_fn = sys.argv[1]

#Use tif on disk if available
trend_fn = os.path.splitext(stack_fn)[0]+'_trend.tif'
intercept_fn = os.path.splitext(stack_fn)[0]+'_intercept.tif'
#Otherwise load stack and compute trend/intercept if necessary

trend_ds = iolib.fn_getds(trend_fn)
trend = iolib.ds_getma(trend_ds)/365.25
intercept = iolib.fn_getma(intercept_fn)

#Can vectorize
#dt_list_o = timelib.dt2o(dt_list)
#z_list = trend*dt_list_o[:,None,None]+intercept

for dt in dt_list:
    dt_o = timelib.dt2o(dt)
    z = trend*dt_o+intercept
    out_fn = os.path.splitext(stack_fn)[0]+'_%s.tif' % dt.strftime('%Y%m%d')
    print("Writing out: %s" % out_fn)
    iolib.writeGTiff(z, out_fn, trend_ds)
def main():
    if len(sys.argv) != 2:
        sys.exit("Usage: %s dz.tif" % os.path.basename(sys.argv[0]))

    #This is mean density for N Cascades snow
    #rho = 0.5
    #Density of pure ice
    rho = 0.917

    #Clip negative values to 0
    filt = False

    src_fn = sys.argv[1]
    src_ds = iolib.fn_getds(src_fn)
    res = geolib.get_res(src_ds, square=True)[0]
    bma = iolib.ds_getma(src_ds)

    #Attempt to extract t1 and t2 from input filename
    ts = timelib.fn_getdatetime_list(src_fn)
    #Hardcode timestamps
    #ts = [datetime.datetime(2013,9,10), datetime.datetime(2014,5,14)]

    dt_yr = None
    if len(ts) == 2:
        dt = ts[1] - ts[0]
        year = datetime.timedelta(days=365.25)
        dt_yr = dt.total_seconds() / year.total_seconds()

    #Can add filter here to remove outliers, perc_fltr(0.01, 99.9)
    if filt:
        mask = np.ma.getmaskarray(bma)
        bma[bma < 0] = 0
        bma = np.ma.array(bma, mask=mask)

    #Print out stats
    print('\n')
    stats = malib.print_stats(bma)
    count = stats[0]
    area = res**2 * count
    mean = stats[3]
    med = stats[5]

    s_m3 = np.ma.sum(bma) * res**2
    s_km3 = s_m3 / 1E9
    s_mwe = mean * rho
    s_gt = s_km3 * rho
    s_mm = s_gt / 374

    if dt_yr is not None:
        print("%s to %s: %0.2f yr" % (ts[0], ts[1], dt_yr))
        print("%0.0f m^3 (%0.0f m^3/yr)" % (s_m3, s_m3 / dt_yr))
        print("%0.3f km^3 (%0.3f km^3/yr)" % (s_km3, s_km3 / dt_yr))
        print("Density: %0.3f g/cc" % rho)
        print("%0.3f GT (%0.3f GT/yr)" % (s_gt, s_gt / dt_yr))
        print("%0.6f mm SLR (%0.6f mm/yr)" % (s_mm, s_mm / dt_yr))
        print("%0.3f m.w.e. (%0.3f m.w.e./yr)" % (s_mwe, s_mwe / dt_yr))
    else:
        print("Area: %0.2f km2" % (area / 1E6))
        print("%0.0f m^3" % s_m3)
        print("%0.3f km^3" % s_km3)
        print("Density: %0.3f g/cc" % rho)
        print("%0.3f GT" % s_gt)
        print("%0.6f mm SLR" % s_mm)
        print("%0.3f m.w.e." % s_mwe)
#! /usr/bin/env python

from osgeo import gdal
import numpy as np

from pygeotools.lib import iolib
from pygeotools.lib import geolib
from pygeotools.lib import malib

def dist(pos1, pos2):
    return np.sqrt((pos1[0] - pos2[0])**2 + (pos1[1] - pos2[1])**2)

pos1 = [595396.48277, 5181880.22677]
pos2 = [596168.611, 5182875.521]

fn = ('rainierlidar_wgs84_shpclip.tif')
ds = iolib.fn_getds(fn)
dem = iolib.ds_getma(ds)
x, y = geolib.get_xy_grids(ds)

d = dist(pos1, pos2)
grid = np.array([x, y])
b = dist(pos1, grid)
c = dist(pos2, grid)

#Convergence angle at each grid cell via the law of cosines
conv = np.rad2deg(np.arccos((b**2 + c**2 - d**2)/(2*b*c)))
conv_m = np.ma.array(conv, mask=dem.mask)
malib.print_stats(conv_m)
def main(): """ # chm_refine.py # Main goals: # (1)fix non-forest "heights" # (2)fix dense interior forest height estimates # (3)remove water # Basic logic #Divide HRSI CHM into forest and non-forest; #to estimate max canopy height, within the forest mask run a 'max'filter (filtlib) #to remove spurious 'heights' in the non-forest using a 'min' filter (filtlib) # #(1) invert roughmask to get 'forest' # -run a 'max' filter, #(2) use roughmask (get_lo_rough_mask) to get 'non-forest' pixels # -run a 'min' filter, # -maybe a small (3 pix) window filter #(3) then mask the result with the toamask (this removes water and other dark (shadow) areas # -remove dark and smooth (water) # -smooth is non-veg land # -dark and rough is woody veg land # #(4) for later: then mask with the slopemask, toatrimask # step 1 # get_dark_mask from ortho_toa --> remove the areas that are TOA dark (water,shadow) from the chm # step 2 # get_hi_slope_mask from DEM --> remove areas of high slopes from chm # step 3 # get_lo_rough_mask from DEM --> remove areas that are NOT rough (aka remove non-forest) # run a max filter on remaining pixels * mask outputs should all consistently show the 'masked' area as valid """ parser = getparser() args = parser.parse_args() outdir = args.outdir pairname = args.pairname if not os.path.exists(outdir): os.mkdir(outdir) outfolder = os.path.join(outdir, pairname) if not os.path.exists(outfolder): os.mkdir(outfolder) auto_min_toa = args.auto_min_toa #Symlink files to working directory. #symlinks=['out-DEM_1m.tif','{}_ortho.tif'.format(pairname)] print("\nSymlinking Files to Working Directory\n") cmd = "ln -sf /att/pubrepo/DEM/hrsi_dsm/v2/{}/*ortho*tif {}".format( pairname, outfolder) subprocess.call(cmd, shell=True) cmd = "ln -sf /att/pubrepo/DEM/hrsi_dsm/v2/{}/out-DEM*m.tif {}".format( pairname, outfolder) subprocess.call(cmd, shell=True) cmd = "xml_fn_list=$(ls /att/pubrepo/DEM/hrsi_dsm/v2/{}/*.xml);ln -sf $xml_fn_list {}".format( pairname, outfolder) subprocess.call(cmd, shell=True) #dsm_maindir='/att/pubrepo/DEM/hrsi_dsm/v2/' #dsm_dir=os.path.join(dsm_maindir,pairname) chm_dir = '/att/gpfsfs/briskfs01/ppl/pmontesa/chm_work/hrsi_chm_sgm_filt/chm' chm_name = '{}_sr05_4m-sr05-min_1m-sr05-max_dz_eul.tif'.format(pairname) chm_fn = os.path.join(chm_dir, chm_name) print("[1]\nLoading Input CHM into masked array\n") chm_ds = iolib.fn_getds(chm_fn) print("[2]\nGetting Dark Mask from Ortho TOA\n") #May need to include get_toa_fn from DEM_control.py print("\n\t-Compute TOA from Ortho\n") dem_fn = os.path.join(outfolder, 'out-DEM_1m.tif') toa_fn = get_toa_fn(dem_fn) print("\nt-Warp TOA to CHM...\n") toa_ds = warplib.memwarp_multi_fn([ toa_fn, ], res=chm_ds, extent=chm_ds, t_srs=chm_ds)[0] #Determine from inputs or calculate lowest acceptable TOA valuesfor masking if auto_min_toa: # Compute a good min TOA value m, s = get_min_gaus(toa_fn, 50, 4) min_toa = m + s min_toa = m else: min_toa = args.min_toa #Write TOA Mins for reference with open( os.path.join( os.path.split(toa_fn)[0], "min_toa_" + pairname + ".txt"), "w") as text_file: text_file.write(os.path.basename(__file__)) text_file.write("\nMinimum TOA used for mask:\n{0}".format(min_toa)) # Should mask dark areas and dilate dark_mask = get_dark_mask(toa_ds, min_toa) print("\n\t-Completed Calculating Dark Mask\n") print("[3]\nGetting High Slope Mask from DEM\n") max_slope = args.max_slope dem_ds = iolib.fn_getds(dem_fn) slope_mask = get_hi_slope_mask(dem_ds, max_slope) print("\n\t-Completed Sploe Masking\n") print("[4]\nGetting Roughness for 
    print("[4]\nGetting Roughness for Forest/Non-Forest Classification\n")
    #NOTE: Not sure which mask we want to use. Will write up both
    min_rough = args.min_rough
    #Areas less rough than min_rough are masked
    lo_rough_mask = get_lo_rough_mask(dem_ds, min_rough)

    min_tri = args.min_tri
    lo_tri_mask = get_lo_tri_mask(dem_ds, min_tri)

    #Valid areas are forest
    forest_mask = np.logical_or(lo_rough_mask, lo_tri_mask)
    ground_mask = ~forest_mask
def main():
    parser = getparser()
    args = parser.parse_args()
    fn = args.fn

    #This is mean density for N Cascades snow
    #rho = 0.5
    #Density of pure ice
    rho = args.rho
    #If number is in kg/m^3 rather than g/cc
    if rho > 10.:
        rho /= 1000.

    #Clip negative values to 0
    filt = False

    src_ds = iolib.fn_getds(fn)
    res = geolib.get_res(src_ds, square=True)[0]
    bma = iolib.ds_getma(src_ds)

    #Attempt to extract t1 and t2 from input filename
    ts = timelib.fn_getdatetime_list(fn)
    #Hardcode timestamps
    #ts = [datetime.datetime(2013,9,10), datetime.datetime(2014,5,14)]

    dt_yr = None
    if len(ts) == 2:
        dt = ts[1] - ts[0]
        year = datetime.timedelta(days=365.25)
        dt_yr = dt.total_seconds() / year.total_seconds()

    #Can add filter here to remove outliers, perc_fltr(0.01, 99.9)
    if filt:
        mask = np.ma.getmaskarray(bma)
        bma[bma < 0] = 0
        bma = np.ma.array(bma, mask=mask)

    #Print out stats
    print('\n')
    stats = malib.print_stats(bma)
    print('\n')
    count = stats[0]
    area = res**2 * count
    mean = stats[3]
    med = stats[5]

    s_m3 = np.ma.sum(bma) * res**2
    s_km3 = s_m3 / 1E9
    s_mwe = mean * rho
    s_gt = s_km3 * rho
    #s_mm = s_gt/374
    #https://climatesanity.wordpress.com/conversion-factors-for-ice-and-water-mass-and-volume/
    s_mm = s_gt / 360

    if dt_yr is not None:
        print("%s to %s: %0.2f yr" % (ts[0], ts[1], dt_yr))
        print("%0.0f m^3 (%0.0f m^3/yr)" % (s_m3, s_m3 / dt_yr))
        print("%0.3f km^3 (%0.3f km^3/yr)" % (s_km3, s_km3 / dt_yr))
        print("Density: %0.3f g/cc" % rho)
        print("%0.3f GT (%0.3f GT/yr)" % (s_gt, s_gt / dt_yr))
        print("%0.6f mm SLR (%0.6f mm/yr)" % (s_mm, s_mm / dt_yr))
        print("%0.3f m.w.e. (%0.3f m.w.e./yr)" % (s_mwe, s_mwe / dt_yr))
    else:
        print("Area: %0.2f km2" % (area / 1E6))
        print("%0.0f m^3" % s_m3)
        print("%0.3f km^3" % s_km3)
        print("Density: %0.3f g/cc" % rho)
        print("%0.3f GT" % s_gt)
        print("%0.6f mm SLR" % s_mm)
        print("%0.3f m.w.e." % s_mwe)
    print('\n')
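# A short worked example of the unit conversions above (illustrative numbers,
# not from the source): mean dz = 2.0 m over 1e6 valid 30 m pixels, rho = 0.917 g/cc
#   s_m3  = 2.0 * 1e6 * 30**2  = 1.8e9 m^3
#   s_km3 = 1.8e9 / 1e9        = 1.8 km^3
#   s_gt  = 1.8 * 0.917        = 1.65 Gt        (km^3 * g/cc = Gt)
#   s_mm  = 1.65 / 360         = 0.0046 mm SLR  (~360 Gt per mm sea-level rise)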
#! /usr/bin/env python

#Extract sample values at training point locations

import numpy as np
from pygeotools.lib import iolib, geolib

pt_fn = 'trainingpts_utm_xycode.csv'
r_fn = 'NOAA2017_DEM-0000014848-0000000000.tif'

r_ds = iolib.fn_getds(r_fn)
#xy_srs = geolib.wgs_srs
xcol = 0
ycol = 1

print("Loading points: %s" % pt_fn)
#This needs to return a header
pts = iolib.readcsv(pt_fn)

out_samp = []
for b in range(1, 5):
    samp = geolib.sample(r_ds, pts[:, xcol], pts[:, ycol], bn=b, pad=1, count=True)
    out_samp.append(samp)

#Not finished...
#! /usr/bin/env python

"""
Script to mask SRTM elevation values above a predefined error threshold
"""

import sys
import os

import numpy as np

from pygeotools.lib import iolib

#Max allowable error value in meters
max_err = 5

hgt_fn = sys.argv[1]
err_fn = sys.argv[2]
print(hgt_fn)

hgt_ds = iolib.fn_getds(hgt_fn)
hgt = iolib.ds_getma(hgt_ds)
err = iolib.fn_getma(err_fn)

#Note: units of err are mm, so convert max_err from m to mm
err[(err > float(max_err*1000))] = np.ma.masked
hgt_masked = np.ma.array(hgt, mask=np.ma.getmaskarray(err))

out_fn = os.path.splitext(hgt_fn)[0]+'_lt%sm_err.tif' % max_err
iolib.writeGTiff(hgt_masked, out_fn, hgt_ds)
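# Example invocation of the script above (hypothetical filenames):
#   python mask_srtm_err.py srtm_hgt.tif srtm_err.tif
# With max_err = 5, this writes srtm_hgt_lt5m_err.tif, masking pixels where the
# reported error exceeds 5 m.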
def main():
    start_time = timer()
    parser = getparser()
    args = parser.parse_args()

    r_fn = args.r_fn
    metric_list = args.metric_list.split(" ")
    win_size_list = args.win_size_list.split(" ")
    distance_list = args.distance_list.split(" ")

    print("\tGLCM metrics to be processed: %s" % metric_list)

    if not os.path.exists(r_fn):
        sys.exit("Unable to find r_fn: %s" % r_fn)

    # r_fn to r_ds
    r_ds = iolib.fn_getds(r_fn)
    # ...to array
    r_arr = r_ds.GetRasterBand(1).ReadAsArray()

    # Forget about masked arrays...just use np.where to put 0 for all invalid vals
    r_arr = np.where((r_arr > 0.0) & (r_arr <= 1.0), r_arr, np.nan)

    # A way of checking the histograms of the orig image, and the scaled image
    # scale and set to byte
    ##fit_GMM(r_arr, os.path.split(r_fn)[0], os.path.split(r_fn)[1], 5, 'float')
    ##r_arr = img_as_ubyte(r_arr)
    ##r_arr = img_as_uint(r_arr)
    ##fit_GMM(r_arr, os.path.split(r_fn)[0], os.path.split(r_fn)[1], 5, 'byte')

    end_readdata = timer()
    print("\tData: %s" % (os.path.basename(r_fn)))
    print("\n\tTime to read in data: {} minutes\n".format(round(find_elapsed_time(start_time, end_readdata), 3)))

    print("\tWindow sizes to be processed: %s" % win_size_list)
    print("\tDistances to be processed: %s" % distance_list)

    for win_size in win_size_list:
        print("\n\tWindow size: %s" % win_size)
        win_size = int(win_size)

        for distance in distance_list:
            print("\tDistance: %s" % distance)
            start_win_dist = timer()
            distance = int(distance)

            # Set up arrays to hold GLCM output
            # these need to be float
            con = np.copy(r_arr).astype(np.float32)
            con[:] = 0
            dis = np.copy(r_arr).astype(np.float32)
            dis[:] = 0
            cor = np.copy(r_arr).astype(np.float32)
            cor[:] = 0
            asm = np.copy(r_arr).astype(np.float32)
            asm[:] = 0

            start_glcm = timer()
            print("\tCalculating GLCM and its properties...")

            # Loop over pixel windows (row and col), by win_size
            # Note: use integer division (//) so window indices remain ints under Python 3
            for i in range(con.shape[0]):
                print(i, end=' ')
                for j in range(con.shape[1]):
                    # window needs to fit completely in image
                    if i < ((win_size - 1) // 2) or j < ((win_size - 1) // 2):
                        continue
                    # Note: original checked both bounds against shape[0]; use shape[1] for the column check
                    if i > (con.shape[0] - ((win_size + 1) // 2)) or j > (con.shape[1] - ((win_size + 1) // 2)):
                        continue

                    # Calculate GLCM on a window
                    # converting to byte array right before GLCM processing
                    # output arrays still with input precision
                    in_arr = img_as_ubyte(r_arr)
                    in_glcm_window_arr = in_arr[(i - ((win_size - 1) // 2)):(i + ((win_size + 1) // 2)),
                                                (j - ((win_size - 1) // 2)):(j + ((win_size + 1) // 2))]
                    del in_arr

                    out_glcm_window_arr = greycomatrix(in_glcm_window_arr,
                                            distances=[distance],
                                            #angles=[0, np.pi/4, np.pi/2, 3*np.pi/4],
                                            angles=[0],
                                            levels=256, symmetric=True, normed=True)

                    con[i, j], dis[i, j], cor[i, j], asm[i, j] = [greycoprops(out_glcm_window_arr, metric) for metric in metric_list]
                    del out_glcm_window_arr

            end_glcm = timer()
            print("\n\tTime to compute this GLCM and its properties: {} minutes\n".format(round(find_elapsed_time(start_glcm, end_glcm), 3)))

            out_glcm_list = [con, dis, cor, asm]
            for num, metric in enumerate(metric_list):
                out_fn = os.path.splitext(r_fn)[0] + '_TEXTij_win' + str(win_size) + '_' + 'dist' + str(distance) + '_' + metric + '.tif'
                print('\tWriting: %s' % (out_fn))
                iolib.writeGTiff(out_glcm_list[num], out_fn, r_fn)

            end_win_dist = timer()
            print("\tTotal compute time for GLCM on this window & distance: {} minutes\n".format(round(find_elapsed_time(start_win_dist, end_win_dist), 3)))
    #mask_mat = scipy.io.loadmat(mask_fn)
    #mask_key = key+'mask'
    #mask = mask_mat[mask_key].T
else:
    print("Unrecognized extension, continuing without filtering or scaling")

#Parse GCPs
gcp_fn = os.path.splitext(in_fn)[0]+'.gcp'
gcp = parse_gcp(gcp_fn)

#Load DEM
#Should be in projected, cartesian coords
dem_fn = sys.argv[3]

#Extract DEM to ma
dem_ds = iolib.fn_getds(dem_fn)
dem_srs = geolib.get_ds_srs(dem_ds)
dem_gt = dem_ds.GetGeoTransform()
dem = iolib.ds_getma(dem_ds)

#Compute azimuth pixel size in meters (function of range)
az_pixel_spacing = az_angle_step * np.arange(near_range_slc, far_range_slc, range_pixel_spacing)

#Downsample DEM to match radar GSD, or 2x radar GSD?
#min(range, az)

#Want to allow for input more precise DGPS coordinates for GPRI origin
#Trimble GeoXH shp output has XY, need to process raw data for XYZ
#46.78364631
#-121.7502352
#ref_coord = [-121.7502352, 46.78364631, ref_coord[2]]