def domask(tile_fn):
    prefix = '-'.join(tile_fn.split('-')[:-1])
    print("\nLoading: %s" % tile_fn)
    #dem_fn = prefix+'-median.tif'
    dem_fn = tile_fn
    dem_ds = iolib.fn_getds(dem_fn)
    dem = iolib.ds_getma(dem_ds)
    #Get original mask, True where masked
    mask = np.ma.getmaskarray(dem)
    valid_px_count = (~mask).sum()
    if valid_px_count < 1:
        print("No valid pixels remain")
    else:
        print("Valid pixel count: %i" % valid_px_count)
        min_count = 2
        count_fn = prefix + '-count.tif'
        print("Loading: %s" % count_fn)
        count = iolib.fn_getma(count_fn)
        print("min: %i max: %i" % (count.min(), count.max()))
        print("Masking: (count < %i)" % min_count)
        mask = np.logical_or(mask, (count < min_count))
        valid_px_count = (~mask).sum()
        if valid_px_count < 1:
            print("No valid pixels remain")
        else:
            print("Valid pixel count: %i" % valid_px_count)
            max_std = 3.0
            #std_fn = prefix+'-std.tif'
            std_fn = prefix + '-nmad.tif'
            print("Loading: %s" % std_fn)
            std = iolib.fn_getma(std_fn)
            print("min: %0.2f max: %0.2f" % (std.min(), std.max()))
            print("Masking: (std/nmad >= %0.1f)" % max_std)
            mask = np.logical_or(mask, (std >= max_std))
            valid_px_count = (~mask).sum()
            if valid_px_count < 1:
                print("No valid pixels remain")
            else:
                print("Valid pixel count: %i" % valid_px_count)
    #Modified so we always write out, even if empty tif
    #Easier for tracking progress
    print("Applying mask")
    dem_masked = np.ma.array(dem, mask=mask)
    out_fn = os.path.splitext(dem_fn)[0] + '_masked.tif'
    print("Writing: %s" % out_fn)
    iolib.writeGTiff(dem_masked, out_fn, dem_ds)
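#domask() takes a single tile filename, so it parallelizes naturally over tiles.
#A minimal driver sketch (hypothetical; the '*-median.tif' glob pattern and the
#worker count are assumptions, not part of the original script):
import glob
from multiprocessing import Pool

if __name__ == '__main__':
    tile_list = glob.glob('*-median.tif')
    with Pool(processes=4) as pool:
        pool.map(domask, tile_list)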
def slope_fltr(dem_fn, slopelim=(0.1, 40)):
    #Note: Noh and Howat set a minimum slope of 20 deg for coregistration purposes
    #perc = (0.01, 99.99)
    #slopelim = malib.calcperc(dem_slope, perc)
    #dem_slope = np.gradient(dem)
    dem_slope = geolib.gdaldem_slope(dem_fn)
    dem = iolib.fn_getma(dem_fn)
    out = np.ma.array(dem, mask=np.ma.masked_outside(dem_slope, *slopelim).mask,
                      keep_mask=True, fill_value=dem.fill_value)
    #shutil has no rm function; remove the temporary slope product directly
    os.remove(os.path.splitext(dem_fn)[0] + '_slope.tif')
    return out
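#If geolib.gdaldem_slope is unavailable, an equivalent slope grid can be produced
#directly with the GDAL Python bindings. A sketch, with 'dem.tif' as a
#placeholder input filename:
from osgeo import gdal
import numpy as np

#Compute slope in degrees from a DEM; returns an open gdal.Dataset
slope_ds = gdal.DEMProcessing('dem_slope.tif', 'dem.tif', 'slope')
band = slope_ds.GetRasterBand(1)
slope = band.ReadAsArray()
ndv = band.GetNoDataValue()
#Mask nodata so the slope grid behaves like the masked arrays used above
slope = np.ma.masked_equal(slope, ndv) if ndv is not None else np.ma.array(slope)
slope_ds = None  #close/flush the dataset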
def get_min_gaus(ras_fn, sample_step=50, ncomp=3):
    # Get ma
    masked_array = iolib.fn_getma(ras_fn)
    # Sample ma
    masked_array = sample_ma(masked_array, sample_step)
    if masked_array is None:
        mean_min = 0
        stdev = 0
        print("No shift will be done. Masked array is None. Setting mean and stdev to 0.")
    else:
        # Do gaussian fitting
        means, vars, weights = fit_gaus(masked_array, ncomp)
        sample_step_str = "%03d" % sample_step
        histo = matplotlib.pyplot.hist(masked_array.compressed(), 300, density=True,
                                       color='gray', alpha=0.5)
        #Write histogram
        fig_name = os.path.splitext(os.path.basename(ras_fn))[0] + "_" + str(ncomp) + "_" + sample_step_str + '.png'
        out_means = []
        out_stdevs = []
        for i, (w, m, c) in enumerate(zip(weights, means, vars), start=1):
            matplotlib.pyplot.plot(histo[1], w * scipy.stats.norm.pdf(histo[1], m, np.sqrt(c)),
                                   linewidth=3)
            #matplotlib.pyplot.axis([min(masked_array.compressed()), max(masked_array.compressed()), 0, 1])
            print('Gaussian peak #%s (mean, stdev): %s, %s' % (i, round(m, 3), round(np.sqrt(c), 3)))
            out_means.append(m)
            out_stdevs.append(np.sqrt(c))
        matplotlib.pyplot.savefig(os.path.join(os.path.dirname(ras_fn), fig_name))
        matplotlib.pyplot.clf()
        print("Saved histogram fig:")
        print(os.path.join(os.path.dirname(ras_fn), fig_name))
        # Find min
        mean_min = min(out_means)
        stdev = np.sqrt(vars[out_means.index(mean_min)])
    return mean_min, stdev
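#fit_gaus is not defined in this snippet. A plausible stand-in using
#scikit-learn's GaussianMixture (an assumption about the original helper;
#it returns (means, variances, weights) in the order get_min_gaus unpacks):
import numpy as np
from sklearn.mixture import GaussianMixture

def fit_gaus(masked_array, ncomp):
    """Fit an ncomp-component 1-D Gaussian mixture to the unmasked values."""
    samples = masked_array.compressed().reshape(-1, 1)
    gmm = GaussianMixture(n_components=ncomp).fit(samples)
    #For the default 'full' covariance, covariances_ has shape (ncomp, 1, 1)
    return gmm.means_.ravel(), gmm.covariances_.ravel(), gmm.weights_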
def slope_fltr_chm(chm_array, hi_sun_dem_fn, slopelim=(0.1, 30)):
    """Apply a filter to a CHM array based on a slope mask calc'd from the
    associated hi-sun-elev (ground) DSM
    """
    #dem_slope = np.gradient(dem)
    dem_slope = geolib.gdaldem_slope(hi_sun_dem_fn)
    #dem = iolib.fn_getma(hi_sun_dem_fn)  #unused in this version
    #https://stackoverflow.com/questions/35435015/extending-numpy-mask
    #Broadcast the slope mask to the CHM shape (assumes the grids are aligned)
    slope_mask = np.broadcast_to(np.ma.masked_outside(dem_slope, *slopelim).mask,
                                 chm_array.shape)
    out = np.ma.array(chm_array, mask=slope_mask, keep_mask=True, fill_value=-9999)
    #shutil has no rm function; remove the temporary slope product directly
    os.remove(os.path.splitext(hi_sun_dem_fn)[0] + '_slope.tif')
    return out
def main():
    parser = getparser()
    args = parser.parse_args()
    ras_fn = args.ras_fn
    min_val = args.min_val
    max_val = args.max_val
    sample_step = args.sample_step
    # Get ma
    ma = iolib.fn_getma(ras_fn)
    # Sample ma
    if min_val is not None:
        ma = np.ma.masked_less(ma, min_val)
    if max_val is not None:
        ma = np.ma.masked_greater(ma, max_val)
    ma = sample_ma(ma, sample_step)
    if ma is None:
        print("No histogram. Array is None.")
        fig_name = ""
    else:
        matplotlib.pyplot.hist(ma.compressed(), 300, density=True, color='gray', alpha=0.5)
        matplotlib.pyplot.xticks(np.arange(min_val, max_val, 1.0))
        matplotlib.pyplot.xlabel(args.axis_lab_x, fontsize=12)
        #Write histogram
        fig_name = os.path.splitext(os.path.basename(ras_fn))[0] + '_hist.png'
        matplotlib.pyplot.savefig(os.path.join(os.path.dirname(ras_fn), fig_name))
        matplotlib.pyplot.clf()
        print("Saved histogram fig:")
        print(os.path.join(os.path.dirname(ras_fn), fig_name))
    return fig_name
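#sample_ma is referenced throughout but not defined here. A minimal sketch of
#such a decimation helper (an assumption about its behavior, including the
#convention of returning None when no unmasked values survive):
import numpy as np

def sample_ma(ma, sample_step=50):
    """Subsample a 2-D masked array, keeping every sample_step-th pixel."""
    if ma is None:
        return None
    sub = ma[::sample_step, ::sample_step]
    return sub if sub.count() > 0 else None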
def get_min_gaus(ras_fn, sample_step=50, ncomp=3):
    # Get ma
    masked_array = iolib.fn_getma(ras_fn)
    # Sample ma
    masked_array = sample_ma(masked_array, sample_step)
    # Do gaussian fitting
    means, vars, weights = fit_gaus(masked_array, ncomp)
    sample_step_str = "%03d" % sample_step
    histo = matplotlib.pyplot.hist(masked_array.compressed(), 300, density=True,
                                   color='gray', alpha=0.5)
    #Write histogram
    fig_name = os.path.splitext(os.path.basename(ras_fn))[0] + "_" + str(ncomp) + "_" + sample_step_str + '.png'
    out_means = []
    out_stdevs = []
    for i, (w, m, c) in enumerate(zip(weights, means, vars), start=1):
        matplotlib.pyplot.plot(histo[1], w * scipy.stats.norm.pdf(histo[1], m, np.sqrt(c)),
                               linewidth=3)
        #matplotlib.pyplot.axis([min(masked_array.compressed()), max(masked_array.compressed()), 0, 1])
        gauss_num = 'Gaussian peak #%s' % i
        print('\t' + gauss_num + ' mean: ', m, ' std dev:', np.sqrt(c))
        #print('\tGaussian peak #%s : mean: %04d , std dev: %04d' % (i, m, np.sqrt(c)))
        out_means.append(m)
        out_stdevs.append(np.sqrt(c))
    matplotlib.pyplot.savefig(os.path.join(os.path.dirname(ras_fn), fig_name))
    matplotlib.pyplot.clf()
    print("Saved histogram fig:")
    print(os.path.join(os.path.dirname(ras_fn), fig_name))
    # Find min
    #print(out_means)
    #print(out_stdevs)
    mean_min = min(out_means)
    stdev = np.sqrt(vars[out_means.index(mean_min)])
    #print(mean_min, stdev)
    return mean_min, stdev
def main():
    parser = getparser()
    args = parser.parse_args()
    dem_fn = args.dem_fn
    ndv = args.ndv
    max_slope = args.max_slope
    reduce_pct = args.reduce_pct
    #slope_res = args.slope_res
    slopelim = (0.1, max_slope)

    print("\n\tGetting masked slope of input dem...")
    #Get a coarsened version of DEM on which to calc slope
    dem_fn_reduced = os.path.splitext(dem_fn)[0] + '_' + str(reduce_pct) + 'pct.vrt'
    #dem_fn_reduced = os.path.splitext(dem_fn)[0] + '_' + str(slope_res) + 'm.vrt'
    #print("\tReducing size to %s percent..." % str(reduce_pct))
    run_os("gdal_translate -of VRT -r cubic -outsize " + str(reduce_pct) + "% " +
           str(reduce_pct) + "% " + dem_fn + " " + dem_fn_reduced)
    #run_os("gdal_translate -of VRT -r cubic -tr " + str(slope_res) + " " + str(slope_res) + " " + dem_fn + " " + dem_fn_reduced)
    # Run slope
    dem_slope_fn = geolib.gdaldem_wrapper(dem_fn_reduced, product='slope', returnma=False)
    # Get original ma and ds
    dem_slope = iolib.fn_getma(dem_slope_fn)
    dem_slope_ds = iolib.fn_getds(dem_slope_fn)
    # Apply mask from slope to slope
    dem_slope = np.ma.array(dem_slope, mask=np.ma.masked_outside(dem_slope, *slopelim).mask,
                            keep_mask=True, fill_value=dem_slope.fill_value)
    # Save the filtered slope dataset
    dst_fn = os.path.splitext(dem_slope_fn)[0] + '_mask.tif'
    iolib.writeGTiff(dem_slope, dst_fn, dem_slope_ds, ndv=ndv)
    run_os("rm -fv " + dem_slope_fn)
    return dst_fn
def main():
    parser = getparser()
    args = parser.parse_args()
    dem_fn = args.dem_fn
    weight = args.weight
    poly_order = args.order

    # Get original DEM
    array_dem = iolib.fn_getma(dem_fn)
    dem_ds = iolib.fn_getds(dem_fn)

    # Create empty meshgrid
    x = range(0, dem_ds.RasterXSize, 1)
    y = range(0, dem_ds.RasterYSize, 1)
    X, Y = np.meshgrid(x, y)

    print("\nGet the gradient of the dem...")
    dzdy, dzdx = np.gradient(array_dem)

    print("\nCreate copies for modification, and set the masked areas to 0 in the gradients...")
    dzdx_mod = dzdx.copy()
    dzdy_mod = dzdy.copy()

    print("\nOsmanoglu Algorithm...")
    #Routine to get the scale-corrected surface back from gradients.
    #Note that this uses the original slopes; we want the result to be as close to the original DEM as possible
    fco_dem = frankotchellappaosmanoglu(dzdx, dzdy)
    #FrankotChellappa removes long-wavelength trends
    print("\nSubtract the recovered DEM from the original and estimate a surface...")
    planefit, fitfunc = fitSurface(X.ravel(), Y.ravel(), (array_dem - fco_dem).ravel())
    print("\nAdd the surface to the masked gradient-derived DEM...")
    dem_interp = frankotchellappaosmanoglu(dzdx_mod, dzdy_mod) + fitfunc(planefit, X, Y, weight, poly_order)

    print("\nWriting DEM with interpolated surfaces:")
    dst_fn = os.path.splitext(dem_fn)[0] + '_interp.tif'
    print(dst_fn)
    iolib.writeGTiff(dem_interp, dst_fn, dem_ds)
    return dst_fn
    #Alternatively, return the numpy masked array itself:
    #return dem_interp
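#frankotchellappaosmanoglu is not shown here. For reference, a minimal
#implementation of the classic Frankot-Chellappa integrator (FFT-based
#least-squares integration of a gradient field); this is a sketch of the
#standard algorithm, not the authors' scale-corrected Osmanoglu variant:
import numpy as np

def frankot_chellappa(dzdx, dzdy):
    rows, cols = dzdx.shape
    #Angular frequency grids (radians per pixel)
    wx, wy = np.meshgrid(np.fft.fftfreq(cols) * 2 * np.pi,
                         np.fft.fftfreq(rows) * 2 * np.pi)
    Gx = np.fft.fft2(dzdx)
    Gy = np.fft.fft2(dzdy)
    denom = wx**2 + wy**2
    denom[0, 0] = 1.0  #avoid division by zero at the DC term
    Z = (-1j * wx * Gx - 1j * wy * Gy) / denom
    Z[0, 0] = 0.0  #the mean surface height is unrecoverable from gradients
    return np.real(np.fft.ifft2(Z))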
def main():
    parser = getparser()
    args = parser.parse_args()
    mode = args.mode
    session = args.t
    img_folder = os.path.abspath(args.img)
    outdir = os.path.abspath(args.outdir)
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    if mode == 'video':
        sampling = args.video_sampling_mode
        frame_index = skysat.parse_frame_index(args.frame_index, True)
        product_level = 'l1a'
        num_samples = len(frame_index)
        frames = frame_index.name.values
        sampler = args.sampler
        outdf = os.path.join(outdir, os.path.basename(args.frame_index))
        if sampling == 'sampling_interval':
            print("A hardcoded sampling interval can exclude frames at the end of the video "
                  "sequence depending on step size; the num_images mode distributes samples evenly instead")
            idx = np.arange(0, num_samples, sampler)
            outdf = '{}_sampling_interval_{}.csv'.format(os.path.splitext(outdf)[0], sampler)
        else:
            print("Sampling {} from {} of the input video sequence".format(sampler, num_samples))
            idx = np.linspace(0, num_samples - 1, sampler, dtype=int)
            outdf = '{}_sampling_interval_approx{}.csv'.format(os.path.splitext(outdf)[0], idx[1] - idx[0])
        sub_sampled_frames = frames[idx]
        sub_df = frame_index[frame_index['name'].isin(list(sub_sampled_frames))]
        sub_df.to_csv(outdf, sep=',', index=False)
        #this is camera/gcp initialisation
        n = len(sub_sampled_frames)
        img_list = [glob.glob(os.path.join(img_folder, '{}*.tiff'.format(frame)))[0]
                    for frame in sub_sampled_frames]
        pitch = [1] * n
        out_fn = [os.path.join(outdir, '{}_frame_idx.tsai'.format(frame))
                  for frame in sub_sampled_frames]
        out_gcp = [os.path.join(outdir, '{}_frame_idx.gcp'.format(frame))
                   for frame in sub_sampled_frames]
        frame_index = [args.frame_index] * n
        camera = [None] * n
        gcp_factor = 4
    elif mode == 'triplet':
        df = pd.read_pickle(args.overlap_pkl)
        img_list = list(np.unique(np.array(list(df.img1.values) + list(df.img2.values))))
        img_list = [os.path.splitext(os.path.basename(img))[0] for img in img_list]
        cam_list = [glob.glob(os.path.join(img_folder, '{}*.tif'.format(img)))[0]
                    for img in img_list]
        n = len(img_list)
        if args.product_level == 'l1b':
            pitch = [0.8] * n
        else:
            pitch = [1.0] * n
        out_fn = [os.path.join(outdir, '{}_rpc.tsai'.format(frame)) for frame in img_list]
        out_gcp = [os.path.join(outdir, '{}_rpc.gcp'.format(frame)) for frame in img_list]
        camera = cam_list
        frame_index = [None] * n
        img_list = cam_list
        gcp_factor = 8
    fl = [553846.153846] * n
    cx = [1280] * n
    cy = [540] * n
    dem = args.dem
    # use this value for height where DEM has no-data
    ht_datum = [malib.get_stats_dict(iolib.fn_getma(dem))['median']] * n
    gcp_std = [1] * n
    datum = ['WGS84'] * n
    refdem = [dem] * n
    n_proc = 30
    #n_proc = cpu_count()
    cam_gen_log = p_map(asp.cam_gen, img_list, fl, cx, cy, pitch, ht_datum, gcp_std, out_fn,
                        out_gcp, datum, refdem, camera, frame_index, num_cpus=n_proc)
    print("writing gcp with basename removed")
    # count expected gcp
    print(f"Total expected GCP {gcp_factor * n}")
    asp.clean_gcp(out_gcp, outdir)
    # saving consolidated log file from the subprocesses
    from datetime import datetime
    now = datetime.now()
    log_fn = os.path.join(outdir, 'camgen_{}.log'.format(now))
    print("saving subprocess camgen log at {}".format(log_fn))
    with open(log_fn, 'w') as f:
        for log in cam_gen_log:
            f.write(log)
    print("Script is complete!")
else:
    tiledir = sys.argv[1]

print(tiledir)
dem_fn = glob.glob(os.path.join(tiledir, 'DEM/*DEM.tif'))[0]
print(dem_fn)
dem_ds = iolib.fn_getds(dem_fn)
dem = iolib.ds_getma(dem_ds)
print(dem.count())
#Get original mask, True where masked
mask = np.ma.getmaskarray(dem)

#Theoretical height error
err_fn = glob.glob(os.path.join(tiledir, 'AUXFILES/*HEM.tif'))[0]
err = iolib.fn_getma(err_fn)
max_err_multi = 1.5
mask = np.logical_or(mask, (err.data > max_err_multi))

#Water mask
wam_fn = glob.glob(os.path.join(tiledir, 'AUXFILES/*WAM.tif'))[0]
wam = iolib.fn_getma(wam_fn)
wam_clim = (33, 127)
#wam_clim = (3, 127)
mask = np.logical_or(mask, (wam >= wam_clim[0]) & (wam <= wam_clim[1]))

#Consistency mask
com_fn = glob.glob(os.path.join(tiledir, 'AUXFILES/*COM.tif'))[0]
com = iolib.fn_getma(com_fn)
com_valid = (8, 9, 10)
#4 is only one obs
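#The snippet ends before the consistency mask is applied. A sketch of how it
#might finish, assuming COM values outside com_valid should be masked; the
#np.isin call and the '_masked.tif' output filename are assumptions:
mask = np.logical_or(mask, ~np.isin(com.data, com_valid))

dem_masked = np.ma.array(dem, mask=mask)
out_fn = os.path.splitext(dem_fn)[0] + '_masked.tif'
print("Writing: %s" % out_fn)
iolib.writeGTiff(dem_masked, out_fn, dem_ds)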
def bma_fig(fig, bma, cmap='cpt_rainbow', clim=None, clim_perc=(2, 98), bg=None, bg_perc=(2, 98),
            n_subplt=1, subplt=1, label=None, title=None, contour_int=None, contour_fn=None,
            alpha=0.5, ticks=False, scalebar=None, ds=None, shp=None,
            imshow_kwargs={'interpolation': 'nearest'},
            cbar_kwargs={'orientation': 'vertical'}, **kwargs):
    #We don't use the kwargs, just there to save parsing in main
    if clim is None:
        clim = pltlib.get_clim(bma, clim_perc=clim_perc)
    print("Colorbar limits: %0.3f %0.3f" % (clim[0], clim[1]))

    #Link all subplots for zoom/pan
    sharex = sharey = None
    if len(fig.get_axes()) > 0:
        sharex = sharey = fig.get_axes()[0]

    #Hack to catch situations with only 1 subplot, but a subplot number > 1
    if n_subplt == 1:
        subplt = 1

    #One row, multiple columns
    ax = fig.add_subplot(1, n_subplt, subplt, sharex=sharex, sharey=sharey)
    #This occupies the full figure
    #ax = fig.add_axes([0., 0., 1., 1.])
    #ax.patch.set_facecolor('black')
    ax.patch.set_facecolor('white')

    #Set appropriate nodata value color
    cmap_name = cmap
    cmap = pltlib.cmap_setndv(cmap_name)

    #ax.set_title("Band %i" % subplt, fontsize=10)
    if title is not None:
        ax.set_title(title)

    #If a background image is provided, plot it first
    if bg is not None:
        #Note, alpha=1 is opaque, 0 completely transparent
        #alpha = 0.6
        bg_perc = (4, 96)
        bg_alpha = 1.0
        #bg_clim = malib.calcperc(bg, bg_perc)
        bg_clim = (1, 255)
        bg_cmap_name = 'gray'
        bg_cmap = pltlib.cmap_setndv(bg_cmap_name, cmap_name)
        #bg_cmap = plt.get_cmap(bg_cmap_name)
        #if 'inferno' in cmap_name:
        #    bg_cmap.set_bad('0.5', alpha=1)
        #else:
        #    bg_cmap.set_bad('k', alpha=1)
        #Set the overlay bad values to completely transparent, otherwise they darken the bg
        cmap.set_bad(alpha=0)
        bgplot = ax.imshow(bg, cmap=bg_cmap, clim=bg_clim, alpha=bg_alpha)
        imgplot = ax.imshow(bma, alpha=alpha, cmap=cmap, clim=clim, **imshow_kwargs)
    else:
        imgplot = ax.imshow(bma, cmap=cmap, clim=clim, **imshow_kwargs)

    gt = None
    if ds is not None:
        gt = np.array(ds.GetGeoTransform())
        gt_scale_factor = min(np.array([ds.RasterYSize, ds.RasterXSize]) /
                              np.array(bma.shape, dtype=float))
        gt[1] *= gt_scale_factor
        gt[5] *= gt_scale_factor
        ds_srs = geolib.get_ds_srs(ds)
        if ticks:
            scale_ticks(ax, ds)
        else:
            pltlib.hide_ticks(ax)
        xres = geolib.get_res(ds)[0]
    else:
        pltlib.hide_ticks(ax)
    #This forces the black line outlining the image subplot to snap to the actual image dimensions
    #deprecated in matplotlib 2.2
    #ax.set_adjustable('box-forced')

    if cbar_kwargs:
        #Should set the format based on dtype of input data
        #cbar_kwargs['format'] = '%i'
        #cbar_kwargs['format'] = '%0.1f'
        #cbar_kwargs['orientation'] = 'horizontal'
        #Determine whether we need to add extend triangles to colorbar
        cbar_kwargs['extend'] = pltlib.get_cbar_extend(bma, clim)
        #Add the colorbar to the axes
        cbar = pltlib.add_cbar(ax, imgplot, label=label, cbar_kwargs=cbar_kwargs)

    #Plot contours at contour_int intervals and update colorbar appropriately
    if contour_int is not None:
        if contour_fn is not None:
            contour_bma = iolib.fn_getma(contour_fn)
            contour_bma_clim = malib.calcperc(contour_bma)
        else:
            contour_bma = bma
            contour_bma_clim = clim
        #PIG bed ridge contours
        #bma_clim = (-1300, -300)
        #Jak front shear margin contours
        #bma_clim = (2000, 4000)
        contour_bma_clim = (100, 250)
        cstart = int(np.floor(contour_bma_clim[0] / contour_int)) * contour_int
        cend = int(np.ceil(contour_bma_clim[1] / contour_int)) * contour_int
        #Turn off dashed negative (beds are below sea level)
        #matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
        clvl = np.arange(cstart, cend + 1, contour_int)
        #Note: matplotlib contour expects 'linestyles', not 'linestyle'
        contour_prop = {'levels': clvl, 'linestyles': '-', 'linewidths': 0.5, 'alpha': 1.0}
        #contours = ax.contour(contour_bma, colors='k', **contour_prop)
        #contour_cmap = 'gray'
        #Reversed gray prevents white contours
        contour_cmap = 'gray_r'
        contour_cmap_clim = (0, contour_bma_clim[-1])
        contours = ax.contour(contour_bma, cmap=contour_cmap, vmin=contour_cmap_clim[0],
                              vmax=contour_cmap_clim[-1], **contour_prop)
        #Add labels
        ax.clabel(contours, inline=True, inline_spacing=0, fontsize=4, fmt='%i')
        #Update the cbar with contour locations
        #cbar.add_lines(contours)
        #cbar.set_ticks(contours.levels)

    #Plot shape overlay, moved code to pltlib
    if shp is not None:
        pltlib.shp_overlay(ax, ds, shp, gt=gt, color='k')

    if scalebar:
        scale_ticks(ax, ds)
        sb_loc = pltlib.best_scalebar_location(bma)
        #Force scalebar position
        #sb_loc = 'lower right'
        pltlib.add_scalebar(ax, xres, location=sb_loc)
        if not ticks:
            pltlib.hide_ticks(ax)

    #Set up interactive display
    global gbma
    gbma = bma
    global ggt
    ggt = gt

    #Clicking on a subplot will make it active for z-coordinate display
    fig.canvas.mpl_connect('button_press_event', onclick)
    fig.canvas.mpl_connect('axes_enter_event', enter_axis)

    #Add support for interactive z-value display
    ax.format_coord = format_coord
import os
import sys
from datetime import datetime

import numpy as np

from pygeotools.lib import timelib, iolib

#SRTM, then systematic timestamps
dt_list = [datetime(2000, 2, 11), datetime(2000, 5, 31),
           datetime(2009, 5, 31), datetime(2018, 5, 31)]

stack_fn = sys.argv[1]

#Use tif on disk if available
trend_fn = os.path.splitext(stack_fn)[0] + '_trend.tif'
intercept_fn = os.path.splitext(stack_fn)[0] + '_intercept.tif'
#Otherwise load stack and compute trend/intercept if necessary

trend_ds = iolib.fn_getds(trend_fn)
#Convert trend from m/yr to m/day to match ordinal date units
trend = iolib.ds_getma(trend_ds) / 365.25
intercept = iolib.fn_getma(intercept_fn)

#Can vectorize
#dt_list_o = timelib.dt2o(dt_list)
#z_list = trend*dt_list_o[:,None,None] + intercept

for dt in dt_list:
    dt_o = timelib.dt2o(dt)
    z = trend * dt_o + intercept
    out_fn = os.path.splitext(stack_fn)[0] + '_%s.tif' % dt.strftime('%Y%m%d')
    print("Writing out: %s" % out_fn)
    iolib.writeGTiff(z, out_fn, trend_ds)
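#Following the "#Can vectorize" note above, the per-date loop can be collapsed
#into a single broadcast. A sketch reusing the variables defined above; the
#list comprehension around timelib.dt2o is an assumption about its scalar interface:
dt_list_o = np.array([timelib.dt2o(dt) for dt in dt_list])
#(n_dates, 1, 1) * (rows, cols) -> (n_dates, rows, cols)
z_stack = trend * dt_list_o[:, None, None] + intercept
for dt, z in zip(dt_list, z_stack):
    out_fn = os.path.splitext(stack_fn)[0] + '_%s.tif' % dt.strftime('%Y%m%d')
    iolib.writeGTiff(z, out_fn, trend_ds)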
def main():
    parser = getparser()
    args = parser.parse_args()
    fn = args.fn
    ext = os.path.splitext(fn)[1]
    col = args.col
    #This expects the sample.csv, not the errors.csv
    if 'csv' in ext:
        #a = np.loadtxt(fn, delimiter=',', skiprows=1)
        #This will more gracefully handle the header
        a = iolib.readcsv(fn)
        #Signed difference values are in column 5
        dz_m = a[:, col]
        dz_m = dz_m[~np.isnan(dz_m)]
    #If pc_align was run with reference grid, then load the dz raster
    elif 'tif' in ext:
        a = iolib.fn_getma(fn)
        dz_m = a.compressed()
    else:
        sys.exit('Unsupported input type')

    dz_m_abs = np.abs(dz_m)

    #Extract fn date
    #d = f[0:13]
    #print("Date: %s" % d)
    #print("Filename: %s" % f)
    print("Count: %i" % (dz_m.shape[0] - 1))
    rmse = np.sqrt(np.sum(dz_m**2) / dz_m.size)
    print("RMSE: %0.3f" % rmse)
    mean = np.mean(dz_m)
    print("Mean Error: %0.3f" % mean)
    std = np.std(dz_m)
    print("Standard Deviation: %0.3f" % std)
    #thresh = 3 * std
    med = np.median(dz_m)
    print("Median Error: %0.3f" % med)
    p16, p84 = np.percentile(dz_m, (15.9, 84.2))
    spread = p84 - p16
    print("16th Percentile: %0.3f" % p16)
    print("84th Percentile: %0.3f" % p84)
    print("Spread: %0.3f" % spread)
    absmed = np.median(dz_m_abs)
    print("Absolute Median Error: %0.3f" % absmed)
    mad = np.median(np.abs(dz_m - med))
    #print("MAD: %0.3f" % mad)
    nmad = 1.4826 * mad
    print("NMAD: %0.3f" % nmad)
    p68, p95 = np.percentile(dz_m_abs, (68.3, 95.0))
    print("68.3rd Percentile: %0.3f" % p68)
    print("95th Percentile: %0.3f" % p95)
#! /usr/bin/env python

"""
Script to mask SRTM elevation values above a predefined error threshold
"""

import sys
import os

import numpy as np

from pygeotools.lib import iolib

#Max allowable error value in meters
max_err = 5

hgt_fn = sys.argv[1]
err_fn = sys.argv[2]

print(hgt_fn)
hgt_ds = iolib.fn_getds(hgt_fn)
hgt = iolib.ds_getma(hgt_ds)
err = iolib.fn_getma(err_fn)
#Note: err is in mm, so convert max_err from m to mm before comparing
err[(err > float(max_err * 1000))] = np.ma.masked
hgt_masked = np.ma.array(hgt, mask=np.ma.getmaskarray(err))
out_fn = os.path.splitext(hgt_fn)[0] + '_lt%sm_err.tif' % max_err
iolib.writeGTiff(hgt_masked, out_fn, hgt_ds)
def main():
    parser = getparser()
    args = parser.parse_args()
    img = args.img
    # populate image list
    img_list = sorted(glob.glob(os.path.join(img, '*.tif')))
    if len(img_list) < 2:
        img_list = sorted(glob.glob(os.path.join(img, '*.tiff')))
        #img_list = [os.path.basename(x) for x in img_list]
        if os.path.islink(img_list[0]):
            img_list = [os.readlink(x) for x in img_list]
    # populate camera model list
    if args.cam:
        cam = os.path.abspath(args.cam)
        if 'run' in os.path.basename(cam):
            cam_list = sorted(glob.glob(cam + '-*.tsai'))
        else:
            cam_list = sorted(glob.glob(os.path.join(cam, '*.tsai')))
        cam_list = cam_list[:len(img_list)]
    session = args.t
    # output ba_prefix
    if args.ba_prefix:
        ba_prefix = os.path.abspath(args.ba_prefix)
    if args.initial_transform:
        initial_transform = os.path.abspath(args.initial_transform)
    if args.input_adjustments:
        input_adjustments = os.path.abspath(args.input_adjustments)
    # triplet stereo overlap list
    if args.overlap_list:
        overlap_list = os.path.abspath(args.overlap_list)
    # populate GCP list
    if args.gcp:
        gcp_list = sorted(glob.glob(os.path.join(args.gcp, '*.gcp')))
    mode = args.mode
    if args.bound:
        bound = gpd.read_file(args.bound)
        geo_crs = {'init': 'epsg:4326'}
        if bound.crs != geo_crs:
            bound = bound.to_crs(geo_crs)
        lon_min, lat_min, lon_max, lat_max = bound.total_bounds
    # Select whether to float both translation/rotation, or only rotation
    if args.camera_param2float == 'trans+rot':
        cam_wt = 0
    else:
        # this will invoke adjustment with rotation weight of 0 and translation weight of 0.4
        cam_wt = None
    print(f"Camera weight is {cam_wt}")
    # not commonly used
    if args.dem:
        dem = iolib.fn_getma(args.dem)
        dem_stats = malib.get_stats_dict(dem)
        min_elev, max_elev = [dem_stats['min'] - 500, dem_stats['max'] + 500]

    if mode == 'full_video':
        # read subsampled frame index, populate gcp, image and camera models appropriately
        frame_index = args.frame_index
        df = pd.read_csv(frame_index)
        gcp = os.path.abspath(args.gcp)
        # block to automatically determine an overlap limit covering 40 seconds for computing match points
        df['dt'] = [datetime.strptime(date.split('+00:00')[0], '%Y-%m-%dT%H:%M:%S.%f')
                    for date in df.datetime.values]
        delta = (df.dt.values[1] - df.dt.values[0]) / np.timedelta64(1, 's')
        # hardcode overlap limit to cover 40 seconds
        overlap_limit = int(np.ceil(40 / delta))
        print("Calculated overlap limit as {}".format(overlap_limit))
        img_list = [glob.glob(os.path.join(img, '*{}*.tiff'.format(x)))[0] for x in df.name.values]
        cam_list = [glob.glob(os.path.join(cam, '*{}*.tsai'.format(x)))[0] for x in df.name.values]
        gcp_list = [glob.glob(os.path.join(gcp, '*{}*.gcp'.format(x)))[0] for x in df.name.values]
        #also append the clean gcp here
        print(os.path.join(gcp, '*clean*_gcp.gcp'))
        gcp_list.append(glob.glob(os.path.join(gcp, '*clean*_gcp.gcp'))[0])

        # this attempt did not work here, but given the video's small footprint,
        # the median (scale)+trans+rotation is good enough for all terrain, so reverting back
        #stereo_baseline = 10
        #fix_cam_idx = np.array([0]+[0+stereo_baseline])
        #ip_per_tile is switched to default: due to high scene-to-scene overlap and limited
        #perspective difference, this produces abundant matches
        round1_opts = get_ba_opts(ba_prefix, overlap_limit=overlap_limit, flavor='2round_gcp_1',
                                  session=session, ip_per_tile=4000, num_iterations=args.num_iter,
                                  num_pass=args.num_pass, camera_weight=cam_wt,
                                  fixed_cam_idx=None, robust_threshold=None)
        print("Running round 1 bundle adjustment for input video sequence")
        if session == 'nadirpinhole':
            ba_args = img_list + cam_list
        else:
            ba_args = img_list
        # Check that this command runs to completion
        print('Running bundle adjustment round1')
        run_cmd('bundle_adjust', round1_opts + ba_args)
        # Make files used to evaluate solution quality
        init_residual_fn_def = sorted(glob.glob(ba_prefix + '*initial*no_loss_*pointmap*.csv'))[0]
        init_per_cam_reproj_err = sorted(glob.glob(ba_prefix + '-*initial_residuals_no_loss_function_raw_pixels.txt'))[0]
        init_per_cam_reproj_err_disk = os.path.splitext(init_per_cam_reproj_err)[0] + '_initial_per_cam_reproj_error.txt'
        init_residual_fn = os.path.splitext(init_residual_fn_def)[0] + '_initial_reproj_error.csv'
        shutil.copy2(init_residual_fn_def, init_residual_fn)
        shutil.copy2(init_per_cam_reproj_err, init_per_cam_reproj_err_disk)
        # Copy final reprojection error files before transforming cameras
        final_residual_fn_def = sorted(glob.glob(ba_prefix + '*final*no_loss_*pointmap*.csv'))[0]
        final_residual_fn = os.path.splitext(final_residual_fn_def)[0] + '_final_reproj_error.csv'
        final_per_cam_reproj_err = sorted(glob.glob(ba_prefix + '-*final_residuals_no_loss_function_raw_pixels.txt'))[0]
        final_per_cam_reproj_err_disk = os.path.splitext(final_per_cam_reproj_err)[0] + '_final_per_cam_reproj_error.txt'
        shutil.copy2(final_residual_fn_def, final_residual_fn)
        shutil.copy2(final_per_cam_reproj_err, final_per_cam_reproj_err_disk)

        if session == 'nadirpinhole':
            # prepare for second run to apply a constant transform to the self-consistent
            # models using initial ground footprints
            identifier = os.path.basename(cam_list[0]).split(df.name.values[0])[0]
            print(ba_prefix + identifier + '-{}*.tsai'.format(df.name.values[0]))
            cam_list = [glob.glob(ba_prefix + identifier + '-{}*.tsai'.format(img))[0]
                        for img in df.name.values]
            print(len(cam_list))
            ba_args = img_list + cam_list + gcp_list
            #fixed_cam_idx2 = np.delete(np.arange(len(img_list),dtype=int),fix_cam_idx)
            round2_opts = get_ba_opts(ba_prefix, overlap_limit=overlap_limit, flavor='2round_gcp_2',
                                      session=session, gcp_transform=True, camera_weight=0,
                                      num_iterations=0, num_pass=1)
        else:
            # round 1 produced an adjust file
            input_adjustments = ba_prefix
            round2_opts = get_ba_opts(ba_prefix, overlap_limit=overlap_limit,
                                      input_adjustments=ba_prefix, flavor='2round_gcp_2',
                                      session=session)
            ba_args = img_list + gcp_list
        print("running round 2 bundle adjustment for input video sequence")
        run_cmd('bundle_adjust', round2_opts + ba_args)

    elif mode == 'full_triplet':
        if args.overlap_list is None:
            print("Attempted bundle adjust will be expensive; will try to find matches in each and every pair")
        # the concept is simple: the first 3 cameras, and the corresponding first three cameras
        # from the next collection, are fixed in the first go. These serve as a kind of GCP,
        # preventing a large drift in the triangulated points/camera extrinsics during optimization
        img_time_identifier_list = np.array([os.path.basename(img).split('_')[1] for img in img_list])
        img_time_unique_list = np.unique(img_time_identifier_list)
        second_collection_list = np.where(img_time_identifier_list == img_time_unique_list[1])[0][[0, 1, 2]]
        fix_cam_idx = np.array([0, 1, 2] + list(second_collection_list))
        print(type(fix_cam_idx))
        round1_opts = get_ba_opts(ba_prefix, session=session, num_iterations=args.num_iter,
                                  num_pass=args.num_pass, fixed_cam_idx=fix_cam_idx,
                                  overlap_list=args.overlap_list, camera_weight=cam_wt)
        # enter round2_opts here only?
        if session == 'nadirpinhole':
            ba_args = img_list + cam_list
        else:
            ba_args = img_list
        print("Running round 1 bundle adjustment for given triplet stereo combination")
        run_cmd('bundle_adjust', round1_opts + ba_args)
        # Save the first bundle adjustment reprojection error file
        init_residual_fn_def = sorted(glob.glob(ba_prefix + '*initial*no_loss_*pointmap*.csv'))[0]
        init_residual_fn = os.path.splitext(init_residual_fn_def)[0] + '_initial_reproj_error.csv'
        init_per_cam_reproj_err = sorted(glob.glob(ba_prefix + '-*initial_residuals_no_loss_function_raw_pixels.txt'))[0]
        init_per_cam_reproj_err_disk = os.path.splitext(init_per_cam_reproj_err)[0] + '_initial_per_cam_reproj_error.txt'
        shutil.copy2(init_residual_fn_def, init_residual_fn)
        shutil.copy2(init_per_cam_reproj_err, init_per_cam_reproj_err_disk)
        if session == 'nadirpinhole':
            identifier = os.path.basename(cam_list[0]).split('_', 14)[0][:2]
            print(ba_prefix + '-{}*.tsai'.format(identifier))
            cam_list = sorted(glob.glob(os.path.join(ba_prefix + '-{}*.tsai'.format(identifier))))
            ba_args = img_list + cam_list
            fixed_cam_idx2 = np.delete(np.arange(len(img_list), dtype=int), fix_cam_idx)
            round2_opts = get_ba_opts(ba_prefix, overlap_list=overlap_list, session=session,
                                      fixed_cam_idx=fixed_cam_idx2, camera_weight=cam_wt)
        else:
            # round 1 produced an adjust file
            # Only camera model parameters for the first three stereo pairs float in this round
            input_adjustments = ba_prefix
            round2_opts = get_ba_opts(ba_prefix, overlap_limit, input_adjustments=ba_prefix,
                                      flavor='2round_gcp_2', session=session,
                                      elevation_limit=[min_elev, max_elev],
                                      lon_lat_limit=[lon_min, lat_min, lon_max, lat_max])
            ba_args = img_list + gcp_list
        print("running round 2 bundle adjustment for given triplet stereo combination")
        run_cmd('bundle_adjust', round2_opts + ba_args)
        # Save final-condition reprojection errors for the sparse triangulated points
        final_residual_fn_def = sorted(glob.glob(ba_prefix + '*final*no_loss_*pointmap*.csv'))[0]
        final_residual_fn = os.path.splitext(final_residual_fn_def)[0] + '_final_reproj_error.csv'
        shutil.copy2(final_residual_fn_def, final_residual_fn)
        final_per_cam_reproj_err = sorted(glob.glob(ba_prefix + '-*final_residuals_no_loss_function_raw_pixels.txt'))[0]
        final_per_cam_reproj_err_disk = os.path.splitext(final_per_cam_reproj_err)[0] + '_final_per_cam_reproj_error.txt'
        shutil.copy2(final_per_cam_reproj_err, final_per_cam_reproj_err_disk)

    # input is just a transform from pc_align or something similar with no optimization
    if mode == 'transform_pc_align':
        if session == 'nadirpinhole':
            ba_args = img_list + cam_list + gcp_list
            ba_opt = get_ba_opts(ba_prefix, overlap_list, flavor='2round_gcp_2',
                                 session=session, gcp_transform=True)
        else:
            ba_args = img_list + gcp_list
            ba_opt = get_ba_opts(ba_prefix, overlap_list, initial_transform=initial_transform,
                                 flavor='2round_gcp_2', session=session, gcp_transform=True)
        print("Simply transforming the cameras without optimization")
        run_cmd('bundle_adjust', ba_opt + ba_args, 'Running bundle adjust')

    # general usecase bundle adjust
    if mode == 'general_ba':
        round1_opts = get_ba_opts(ba_prefix, overlap_limit=args.overlap_limit,
                                  flavor='2round_gcp_1', session=session)
        print("Running general purpose bundle adjustment")
        if session == 'nadirpinhole':
            ba_args = img_list + cam_list
        else:
            ba_args = img_list
        # Check that this command runs to completion
        run_cmd('bundle_adjust', round1_opts + ba_args, 'Running bundle adjust')
    print("Script is complete!")
def main():
    parser = getparser()
    args = parser.parse_args()
    dem_fn = args.dem_fn

    # Write out because they will be used to mask CHM
    writeall = True

    # Auto compute min TOA with gaussian mixture model
    compute_min_toa = True

    #Basename for output files
    out_fn_base = os.path.splitext(dem_fn)[0]

    #Need some checks on these
    param = args.filt_param
    if param is not None and len(param) == 1:
        param = param[0]

    # Get original DEM
    dem = iolib.fn_getma(dem_fn)
    print("\nLoading input DEM into masked array")
    dem_ds = iolib.fn_getds(dem_fn)

    toa_mask = None
    toa_tri_mask = None  # probably not used by itself; done as part of toa_mask
    rough_mask = None
    slope_mask = None
    mask_list = [toa_tri_mask, toa_mask, rough_mask, slope_mask]

    if args.filtdz:
        print("\nFilter with dz from ref DEM to remove cloud returns and blunders (shadows)...")
        print("Reference DEM: %s" % os.path.split(param[0])[1])
        print("Absolute dz (+/-): %s \n" % param[2])
        #May need to cast input ma as float32 so np.nan filling works
        dem = dem.astype(np.float32)
        #Difference filter, need to specify ref_fn and range
        #Could let the user compute their own dz, then just run a standard range or absrange filter
        ref_fn = param[0]
        ref_ds = warplib.memwarp_multi_fn([ref_fn, ], res=dem_ds, extent=dem_ds, t_srs=dem_ds)[0]
        ref = iolib.ds_getma(ref_ds)
        param = list(map(float, param[1:]))
        # A dem that has been masked based on the dz filter
        dem = filtlib.dz_fltr_ma(dem, ref, rangelim=param)
        if writeall:
            out_fn = out_fn_base + '_dzfilt.tif'
            print("Writing out %s\n" % out_fn)
            iolib.writeGTiff(dem, out_fn, src_ds=dem_ds, ndv=args.ndv)

    #Initialize a control mask that we'll update
    #True (1) represents a "valid" unmasked pixel, False (0) an "invalid" pixel to be masked
    controlmask = ~(np.ma.getmaskarray(dem))

    # DEM masking: Each block returns a masked output (not a mask)
    # TOA: mask dark and/or smooth areas (shadows and/or water)
    # Roughness
    # Slope
    if args.toamask or args.toatrimask:
        print("\nCompute TOA from ortho...\n")
        toa_fn = get_toa_fn(dem_fn)
        print("\nWarp TOA to DEM...\n")
        toa_ds = warplib.memwarp_multi_fn([toa_fn, ], res=dem_ds, extent=dem_ds, t_srs=dem_ds)[0]
        if args.toamask:
            if compute_min_toa:
                # Compute a good min TOA value
                m, s = get_min_gaus(toa_fn, 50, 4)
                #min_toa = m + s
                min_toa = m
            else:
                min_toa = args.min_toa
            with open(os.path.join(os.path.split(toa_fn)[0], "min_toa.txt"), "w") as text_file:
                text_file.write(os.path.basename(__file__))
                text_file.write("\nMinimum TOA used for mask:\n{0}".format(min_toa))
            # Should mask dark areas and dilate
            toa_mask = get_toa_mask(toa_ds, min_toa)
            #Dilate the mask
            if args.dilate_toa is not None:
                niter = args.dilate_toa
                print("Dilating TOA mask with %i iterations" % niter)
                from scipy import ndimage
                toa_mask = ~(ndimage.morphology.binary_dilation(~toa_mask, iterations=niter))
            controlmask = np.logical_and(toa_mask, controlmask)
            # Mask islands here
            controlmask = malib.mask_islands(controlmask, 5)
            if writeall:
                out_fn = out_fn_base + '_toamask.tif'
                print("Writing out %s\n" % out_fn)
                iolib.writeGTiff(toa_mask, out_fn, src_ds=dem_ds)
        if args.toatrimask:
            # Should mask smooth areas (measures local variance)
            toa_tri_mask = get_tri_mask(toa_ds, args.min_toatri)
            controlmask = np.logical_and(toa_tri_mask, controlmask)
            if writeall:
                out_fn = out_fn_base + '_toatrimask.tif'
                print("Writing out %s\n" % out_fn)
                iolib.writeGTiff(toa_tri_mask, out_fn, src_ds=dem_ds)

    if args.slopemask:
        slope_mask = get_slope_mask(dem_ds, args.max_slope)
        controlmask = np.logical_and(slope_mask, controlmask)
        if writeall:
            out_fn = out_fn_base + '_slopemask.tif'
            print("Writing out %s\n" % out_fn)
            iolib.writeGTiff(slope_mask, out_fn, src_ds=dem_ds)

    # CHM mask will be a subset of the control mask: slope_mask, toa_mask, toa_tri_mask
    chmmask = controlmask
    print("Generating final CHM mask to apply later")
    out_fn = out_fn_base + '_chmmask.tif'
    print("Writing out %s\n" % out_fn)
    iolib.writeGTiff(chmmask, out_fn, src_ds=dem_ds)

    if args.roughmask:
        rough_mask = get_rough_mask(dem_ds, args.max_rough)
        controlmask = np.logical_and(rough_mask, controlmask)
        if writeall:
            out_fn = out_fn_base + '_roughmask.tif'
            print("Writing out %s\n" % out_fn)
            iolib.writeGTiff(rough_mask, out_fn, src_ds=dem_ds)

    print("Generating final mask to use for reference surfaces, and applying to input DEM")
    #Now invert to use to create final masked array
    controlmask = ~controlmask

    #Dilate the mask
    if args.dilate_con is not None:
        niter = args.dilate_con
        print("Dilating control mask with %i iterations" % niter)
        from scipy import ndimage
        controlmask = ~(ndimage.morphology.binary_dilation(~controlmask, iterations=niter))

    #Apply mask to original DEM - use these surfaces for co-registration
    newdem = np.ma.array(dem, mask=controlmask)

    if True:
        print("\nStats of valid DEM with masks applied:")
        valid_stats = malib.print_stats(newdem)
        valid_stats_med = valid_stats[5]

    print("\nWriting DEM control surfaces:")
    dst_fn = os.path.splitext(dem_fn)[0] + '_control.tif'
    print(dst_fn)
    iolib.writeGTiff(newdem, dst_fn, dem_ds)
    return dst_fn
def main():
    parser = getparser()
    args = parser.parse_args()
    hs_overlay = args.hs_overlay
    kmz = args.kmz
    opacity = args.alpha
    cmap = args.cmap

    fn = args.fn
    print(fn)
    ds = gdal.Open(fn)
    b = ds.GetRasterBand(1)
    ndv = iolib.get_ndv_b(b)

    print("Loading input raster")
    a = iolib.b_getma(b)

    clim = args.clim
    if clim is None:
        clim = malib.calcperc(a, (2, 98))

    print("Generating color ramp")
    cramp_fn = os.path.splitext(fn)[0] + '_ramp.txt'
    ncolors = 21
    csteps = np.linspace(0, 1, ncolors)
    cm = plt.get_cmap(cmap)
    #Compute raster values between specified min/max
    vals = np.linspace(clim[0], clim[1], ncolors)
    #Compute rgba for these values on the given color ramp
    cvals = cm(csteps, bytes=True)
    #Combine into single array
    cramp = np.vstack((vals, cvals.T)).T
    #Set alpha to desired transparency
    cramp[:, -1] = opacity * 255
    header = '#val r g b a'
    footer = 'nv %s %s %s 0' % (ndv, ndv, ndv)
    np.savetxt(cramp_fn, cramp, fmt='%f %i %i %i %i', header=header, footer=footer, comments='')

    print("Generating gdaldem color-relief tif")
    color_fn = os.path.splitext(fn)[0] + '_color.tif'
    if not os.path.exists(color_fn):
        #cmd = 'gdaldem color-relief -nearest_color_entry -alpha %s %s %s' % (fn, cramp_fn, color_fn)
        cmd = ['gdaldem', 'color-relief', '-alpha']
        cmd.extend(iolib.gdal_opt_co)
        cmd.extend([fn, cramp_fn, color_fn])
        print(' '.join(cmd))
        subprocess.call(cmd, shell=False)
    if kmz:
        make_kmz(color_fn)

    if hs_overlay:
        print("Generating shaded relief")
        hs_fn = os.path.splitext(fn)[0] + '_hs_az315.tif'
        #Check to see if file exists, or if provided as input
        if not os.path.exists(hs_fn):
            cmd = ['gdaldem', 'hillshade']
            #cmd.extend(['-compute_edges'])
            cmd.extend(iolib.gdal_opt_co)
            cmd.extend([fn, hs_fn])
            print(' '.join(cmd))
            subprocess.call(cmd, shell=False)

        print("Loading shaded relief and calculating percentile stretch")
        hs = iolib.fn_getma(hs_fn)
        hs_clim = malib.calcperc(hs, (1, 99))
        #Since imagemagick was compiled with quantum depth 16, need to scale levels
        hs_clim = (hs_clim[0] * 65535 / 255., hs_clim[1] * 65535 / 255.)

        print("Generating color composite shaded relief")
        overlay_fn = os.path.splitext(color_fn)[0] + '_hs.tif'
        if not os.path.exists(overlay_fn):
            #Can also try hsvmerge.py
            #cmd = 'composite %s %s -dissolve "%i" %s' % (color_fn, hs_fn, opacity*100, overlay_fn)
            #This uses the imagemagick composite function
            #For some reason, this level adjustment is not working
            #cmd = ['convert', hs_fn, color_fn, '-compose', 'dissolve', ...]
            cmd = ['convert', hs_fn, '-level', '%i,%i' % hs_clim, color_fn, '-compose', 'dissolve',
                   '-define', 'compose:args=%i' % int(opacity * 100), '-composite',
                   '-compress', 'LZW', overlay_fn]
            #cmd = ['composite', color_fn, hs_fn, '-dissolve', str(int(opacity*100)), '-compress', 'LZW', overlay_fn]
            print(' '.join(cmd))
            subprocess.call(cmd, shell=False)

            print("Updating georeferencing metadata")
            out_ndv = 0
            overlay_ds = gdal.Open(overlay_fn, gdal.GA_Update)
            overlay_ds.SetProjection(ds.GetProjection())
            overlay_ds.SetGeoTransform(ds.GetGeoTransform())
            for n in range(overlay_ds.RasterCount):
                overlay_ds.GetRasterBand(n + 1).SetNoDataValue(out_ndv)
            overlay_ds = None

            #Rewrite with tiled blocks and LZW compression
            print("Creating tiled and compressed version")
            tmp_fn = '/tmp/temp_%s.tif' % os.getpid()
            cmd = ['gdal_translate', ]
            cmd.extend(iolib.gdal_opt_co)
            cmd.extend((overlay_fn, tmp_fn))
            print(' '.join(cmd))
            subprocess.call(cmd, shell=False)
            shutil.move(tmp_fn, overlay_fn)

        if not os.path.exists(overlay_fn + '.ovr'):
            print("Generating overviews")
            cmd = ['gdaladdo', '-ro', '-r', 'average', '--config',
                   'COMPRESS_OVERVIEW', 'LZW', '--config', 'BIGTIFF_OVERVIEW', 'YES',
                   overlay_fn, '2', '4', '8', '16', '32', '64']
            print(' '.join(cmd))
            subprocess.call(cmd, shell=False)
        if kmz:
            make_kmz(overlay_fn)
import sys

from osgeo import gdal
import numpy as np
import matplotlib.pyplot as plt

from pygeotools.lib import iolib, malib
from imview.lib import pltlib

udm_b_txt = ['Blackfill', 'Cloud', 'Blue missing', 'Green missing', 'Red missing',
             'RedEdge missing', 'NIR missing', 'Unused']
img_b_txt = ['B', 'G', 'R', 'NIR']

fn = sys.argv[1]

if 'udm' in fn:
    #fn = '20170411_181913_0e0f_1B_AnalyticMS_DN_udm.tif'
    udm = iolib.fn_getma(fn)
    #Unpack each uint8 UDM value into its 8 component bits
    #udm_b = np.unpackbits(udm.ravel(), axis=0).reshape(udm.shape+(8,))
    udm_b = np.unpackbits(udm.ravel()[:, np.newaxis], axis=1).reshape(udm.shape + (8, ))
    f, axa = plt.subplots(8, sharex=True, sharey=True, figsize=(4, 8))
    for i in range(udm_b.shape[2]):
        axa[i].imshow(udm_b[:, :, i], clim=(0, 1), cmap='gray')
        axa[i].set_title(udm_b_txt[i])
        #pltlib.hide_ticks(axa[i])
        axa[i].axis('off')
else:
    #fn = '20170411_181913_0e0f_3B_AnalyticMS.tif'
    img_ds = gdal.Open(fn)
    img = np.ma.array([iolib.ds_getma(img_ds, i + 1) for i in range(img_ds.RasterCount)])
    f, axa = plt.subplots(4, sharex=True, sharey=True, figsize=(4, 4))
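#Note that np.unpackbits is MSB-first by default, which determines how the
#panel index lines up with the UDM bit flags. A standalone check, assuming
#uint8 UDM values:
import numpy as np

#2 = 0b00000010: with MSB-first ordering the set bit lands at column index 6
bits = np.unpackbits(np.array([[2]], dtype=np.uint8), axis=1)
print(bits)  #[[0 0 0 0 0 0 1 0]]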
def main():
    parser = getparser()
    args = parser.parse_args()
    dem_fn = args.dem_fn

    # Write out because they will be used to mask CHM
    writeall = True

    # Auto compute min TOA with gaussian mixture model
    auto_min_toa = args.auto_min_toa

    dirname, demname = os.path.split(dem_fn)
    # The subdir in which the DEM.tif sits will be the pairname
    pairname = os.path.split(dirname)[1]
    print("Pairname:", pairname)

    if args.out_dir is not None:
        # Create symlink in out_dir to: (1) original out-DEM_4m (2) *_ortho_4m.tif (3) all *.xml files
        # This should look like <out_dir>/<pairname>_out-DEM_4m
        dem_fn_lnk = os.path.join(args.out_dir, pairname + '_' + demname)
        force_symlink(dem_fn, dem_fn_lnk)
        force_symlink(os.path.join(dirname, pairname + '_ortho_4m.tif'),
                      os.path.join(args.out_dir, pairname + '_ortho_4m.tif'))
        xml_list = [f for f in os.listdir(dirname) if f.endswith('r100.xml')]
        print("\nSymlinks made for:")
        for x in xml_list:
            print(x)
            shutil.copy2(os.path.join(dirname, x), args.out_dir)
        out_fn_base = os.path.splitext(dem_fn_lnk)[0]
        dem_fn = dem_fn_lnk
    else:
        out_fn_base = os.path.splitext(dem_fn)[0]
    print("\nBasename for output files:")
    print(out_fn_base)

    #Max threshold value for LiDAR dataset; pixels under this value are valid
    lidar_fn = args.lidar_fn
    max_thresh = args.max_thresh

    #Need some checks on these
    param = args.filt_param
    if param is not None and len(param) == 1:
        param = param[0]

    # Get original DEM
    dem = iolib.fn_getma(dem_fn)
    print("\nLoading input DEM into masked array")
    dem_ds = iolib.fn_getds(dem_fn)

    toa_mask = None
    toa_tri_mask = None  # probably not used by itself; done as part of toa_mask
    rough_mask = None
    slope_mask = None
    mask_list = [toa_tri_mask, toa_mask, rough_mask, slope_mask]

    if args.filtdz:
        print("\nFilter with dz from ref DEM to remove cloud returns and blunders (shadows)...")
        print("Reference DEM: %s" % os.path.split(param[0])[1])
        print("Absolute dz (+/-): %s \n" % param[2])
        #May need to cast input ma as float32 so np.nan filling works
        dem = dem.astype(np.float32)
        #Difference filter, need to specify ref_fn and range
        #Could let the user compute their own dz, then just run a standard range or absrange filter
        ref_fn = param[0]
        ref_ds = warplib.memwarp_multi_fn([ref_fn, ], res=dem_ds, extent=dem_ds, t_srs=dem_ds)[0]
        ref = iolib.ds_getma(ref_ds)
        param = list(map(float, param[1:]))
        # A dem that has been masked based on the dz filter
        dem = filtlib.dz_fltr_ma(dem, ref, rangelim=param)
        if writeall:
            out_fn = out_fn_base + '_dzfilt.tif'
            print("Writing out %s\n" % out_fn)
            iolib.writeGTiff(dem, out_fn, src_ds=dem_ds, ndv=args.ndv)

    #Initialize a control mask that we'll update
    #True (1) represents a "valid" unmasked pixel, False (0) an "invalid" pixel to be masked
    controlmask = ~(np.ma.getmaskarray(dem))

    # DEM masking: Each block returns a masked output (not a mask)
    # TOA: mask dark and/or smooth areas (shadows and/or water)
    # Roughness
    # Slope
    if args.toamask or args.toatrimask:
        #try:
        print("\nCompute TOA from ortho...\n")
        toa_fn = get_toa_fn(out_fn_base + '.tif')  #--->dem_fn
        print(toa_fn)
        print("\nWarp TOA to DEM...\n")
        toa_ds = warplib.memwarp_multi_fn([toa_fn, ], res=dem_ds, extent=dem_ds, t_srs=dem_ds)[0]
        if args.toamask:
            if auto_min_toa:
                # Compute a good min TOA value
                m, s = get_min_gaus(toa_fn, 50, 4)
                #min_toa = m + s
                min_toa = m
            else:
                min_toa = args.min_toa
            with open(os.path.join(os.path.split(toa_fn)[0], "min_toa_" + pairname + ".txt"), "w") as text_file:
                text_file.write(os.path.basename(__file__))
                text_file.write("\nMinimum TOA used for mask:\n{0}".format(min_toa))
            # Should mask dark areas and dilate
            toa_mask = get_toa_mask(toa_ds, min_toa)
            #Dilate the mask
            if args.dilate_toa is not None:
                niter = args.dilate_toa
                print("Dilating TOA mask with %i iterations" % niter)
                from scipy import ndimage
                toa_mask = ~(ndimage.morphology.binary_dilation(~toa_mask, iterations=niter))
            controlmask = np.logical_and(toa_mask, controlmask)
            # Mask islands here
            controlmask = malib.mask_islands(controlmask, 5)
            if writeall:
                out_fn = out_fn_base + '_toamask.tif'
                print("Writing out %s\n" % out_fn)
                iolib.writeGTiff(toa_mask, out_fn, src_ds=dem_ds)
        if args.toatrimask:
            # Should mask smooth areas (measures local variance)
            toa_tri_mask = get_tri_mask(toa_ds, args.min_toatri)
            controlmask = np.logical_and(toa_tri_mask, controlmask)
            if writeall:
                out_fn = out_fn_base + '_toatrimask.tif'
                print("Writing out %s\n" % out_fn)
                iolib.writeGTiff(toa_tri_mask, out_fn, src_ds=dem_ds)
        #except Exception as e:
        #    print("\tFailed to apply TOA masking.\n")

    if args.slopemask:
        slope_mask = get_slope_mask(dem_ds, args.max_slope)
        controlmask = np.logical_and(slope_mask, controlmask)
        #if args.slopemaskcoarse:
        #    dem_fn2 = args.dem_coarscomp_fn
        #    print("\nLoading input coarse DEM into masked array")
        #    dem2_ds = iolib.fn_getds(dem_fn2)
        #    slope_mask = get_slope_mask(dem2_ds, args.max_slope)
        #    controlmask = np.logical_and(slope_mask, controlmask)
        if writeall:
            out_fn = out_fn_base + '_slopemask.tif'
            print("Writing out %s\n" % out_fn)
            iolib.writeGTiff(slope_mask, out_fn, src_ds=dem_ds)

    if args.lidar_fn:
        try:
            print("Masking DEM file based on Lidar Dataset\n")
            print("\nWarp Lidar Raster to DEM...\n")
            lidar_ds = warplib.memwarp_multi_fn([lidar_fn, ], r='near', res=dem_ds,
                                                extent=dem_ds, t_srs=dem_ds)[0]
            lidarmask = get_lidar_mask(dem_ds, lidar_ds, max_thresh)
            controlmask = np.logical_and(lidarmask, controlmask)
            if writeall:
                out_fn = out_fn_base + '_lidarmask.tif'
                print("Writing out %s\n" % out_fn)
                iolib.writeGTiff(lidarmask, out_fn, src_ds=dem_ds)
        except Exception as e:
            print("\tFailed to apply Lidar mask")

    # CHM mask will be a subset of the control mask: slope_mask, toa_mask, toa_tri_mask
    chmmask = controlmask
    print("Generating final CHM mask to apply later")
    out_fn = out_fn_base + '_chmmask.tif'
    print("Writing out %s\n" % out_fn)
    iolib.writeGTiff(chmmask, out_fn, src_ds=dem_ds)

    if args.roughmask:
        rough_mask = get_rough_mask(dem_ds, args.max_rough)
        controlmask = np.logical_and(rough_mask, controlmask)
        if writeall:
            out_fn = out_fn_base + '_roughmask.tif'
            print("Writing out %s\n" % out_fn)
            iolib.writeGTiff(rough_mask, out_fn, src_ds=dem_ds)

    print("Generating final mask to use for reference surfaces, and applying to input DEM")
    #Now invert to use to create final masked array
    #This step results in the areas to be removed being set to a valid value
    controlmask = ~controlmask

    #Dilate the mask
    if args.dilate_con is not None:
        niter = args.dilate_con
        print("Dilating control mask with %i iterations" % niter)
        from scipy import ndimage
        #This should work too:
        #controlmask = ndimage.morphology.binary_dilation(controlmask, iterations=niter)
        controlmask = ~(ndimage.morphology.binary_dilation(~controlmask, iterations=niter))
        #This step results in the areas to be removed being set to a valid value, again

    print("\nApply mask to original DEM - use these control surfaces for co-registration...")
    #Setting the valid values of controlmask as the DEM mask turns them into nodata
    newdem = np.ma.array(dem, mask=controlmask)

    if True:
        print("\nStats of valid DEM with masks applied:")
        valid_stats = malib.print_stats(newdem)
        valid_stats_med = valid_stats[5]

    print("\nWriting DEM control surfaces:")
    #if args.out_dir is not None:
    #    dst_fn = os.path.join(args.out_dir, os.path.split(dirname)[1] + os.path.splitext(demname)[0] + '_control.tif')
    #else:
    #    dst_fn = os.path.splitext(dem_fn)[0] + '_control.tif'
    dst_fn = out_fn_base + '_control.tif'
    print(dst_fn)
    iolib.writeGTiff(newdem, dst_fn, dem_ds)
    return dst_fn
    perc = (np.min([perc1[0], perc2[0]]), np.max([perc1[1], perc2[1]]))
    abs_max = np.max(np.abs(perc))
    perc = (-abs_max, abs_max)
    return perc


dir_list = [os.path.abspath(x) for x in sys.argv[1:]]
for dir in dir_list:
    #this is a multichannel file
    disparity_file = glob.glob(os.path.join(dir, '*-F.tif'))[0]
    left_image_warped = glob.glob(os.path.join(dir, '*-L.tif'))[0]  #1 channel
    right_image_warped = glob.glob(os.path.join(dir, '*-R.tif'))[0]  #1 channel
    disp_ds = iolib.fn_getds(disparity_file)
    error_fn = glob.glob(os.path.join(dir, '*In*.tif'))[0]
    dem_fn = glob.glob(os.path.join(dir, '*-DEM.tif'))[0]
    dx = iolib.fn_getma(disparity_file, bnum=1)
    dy = iolib.fn_getma(disparity_file, bnum=2)
    img1 = iolib.fn_getma(left_image_warped)
    img2 = iolib.fn_getma(right_image_warped)
    error = iolib.fn_getma(error_fn)
    dem = iolib.fn_getma(dem_fn)
    dem_ds = iolib.fn_getds(dem_fn)
    base_dir = os.path.basename(dir)
    #title_str = dt_string+sat_string+img_string
    fig, ax = plt.subplots(3, 2, figsize=(9, 6))
    #print(disparity_file)
    #add code to create a fig; do not plot it, just save it as a png
    #at some point, try to add a figure showing changes of disparity with elevation;
    #might be good to add DEM as an input, but that can be done only for map images
def main():
    parser = getparser()
    args = parser.parse_args()
    ras_fn = args.ras_fn
    out_name = args.out_name
    pre_min = args.pre_min
    pre_max = args.pre_max
    n_gaus = args.n_gaus
    sample_step = args.sample_step
    ##ht_thresh = args.ht_thresh
    stddev_shift = args.shift
    driverTiff = gdal.GetDriverByName('GTiff')

    print('\n\tCHM Correction')
    print('\tRaster name: %s' % ras_fn)

    # Read in raster as a masked array
    array = iolib.fn_getma(ras_fn, bnum=1)
    array = array.astype(np.float32)

    # TODO: fix slope filter
    # Get hi-sun elev warp-trans-ref-DEM
    #tail_str = "-DEM_warp-trans_reference-DEM"
    #chm_dir, chm_pairname = os.path.split(ras_fn)
    # e.g. chm_pairname WV02_20130804_1030010024808A00_1030010025118000-DEM_warp-trans_reference-DEM_WV01_20150726_1020010043A37200_1020010040698700-DEM_warp-trans_reference-DEM_dz_eul.tif
    #main_dir = os.path.split(chm_dir)[0]
    #diff_pairs = chm_pairname.replace(tail_str, "").replace("_dz_eul.tif", "")
    #hi_sun_dem_fn = os.path.join(main_dir, diff_pairs, chm_pairname.split(tail_str)[0] + "-DEM_warp_align", chm_pairname.split(tail_str)[0] + tail_str + ".tif")
    #array = slope_fltr_chm(array, hi_sun_dem_fn)

    # TODO: incidence angle correction of heights

    #Absolute range filter; returns a masked array
    array = filtlib.range_fltr(array, (pre_min, pre_max))

    # Get gaussian peaks
    out_gaus_csv = get_hist_n(array, ras_fn, n_gaus, sample_step)

    with open(out_gaus_csv, 'r') as peaksCSV:
        """Create a canopy height model
        Shift the values of the dz raster based on the ground peak identified in the histogram
        Read in CSV of gaussian peaks computed from the dz raster
        Apply a shift based on the minimum peak and the stddev_shift
        Returns a tif of canopy heights.
        """
        hdr = peaksCSV.readline()
        line = peaksCSV.readline()

    # Get raster diff dsm name
    #ras_fn = line.split(',')[0]
    # Get the min of the means: the offset that will be subtracted from each pixel of the corresponding diff_dsm
    gmeans = list(map(float, line.split(',')[1::2]))
    # Find the min of the gaussian peak means
    gmin = min(gmeans)
    # Get corresponding sd
    idx = line.split(',').index(str(gmin)) + 1
    gsd = float(line.split(',')[idx])
    ##array = np.where(array <= -99, np.nan, array)
    gsd_str = "%04d" % (round(gsd, 2) * 100)

    print('\n\tApply CHM gaussian correction:')
    print('\tHeight of the ground peak (m) (gaussian min): %s' % gmin)
    print('\tEstimated height uncertainty (m) (gaussian std dev): %s' % gsd)
    print('\tNumber of std devs used in calculating shift: %s' % stddev_shift)
    shift_val = float(np.subtract(gmin, (stddev_shift * gsd)))
    print("\tFinal CHM correction value (shift) (m): %s" % shift_val)
    array = np.subtract(array, shift_val)

    print('\n\tApply masking')
    print('\t\tConvert values below 0 like this:')
    print('\t\tnp.ma.where(array < (0 - 6 * gsd), 0, abs(array))')
    # Better handling of negative values?
    # 1. take abs value of all negative values?
    # 2. take abs value of all negative values within 1 stddev of ground peak; convert the rest to 0
    array = np.ma.where(array < (0 - 6 * gsd), 0, abs(array))

    #fn_tail = '_chm_' + gsd_str + '.tif'
    fn_tail = '_chm.tif'
    if out_name is not None:
        chm_fn = os.path.join(os.path.split(ras_fn)[0], out_name + fn_tail)
    else:
        chm_fn = os.path.splitext(ras_fn)[0] + fn_tail

    # Write array to dataset
    print("\n\t----------------------")
    print("\n\tMaking CHM GeoTiff: ", chm_fn)
    iolib.writeGTiff(array, chm_fn, iolib.fn_getds(ras_fn), ndv=-99)
    cmdStr = "gdaladdo -ro -r nearest " + chm_fn + " 2 4 8 16 32 64"
    run_wait_os(cmdStr)

    # Append to a dir-level CSV file that holds the uncertainty info for each CHM (gmin, gsd, stddev_shift)
    out_dir = os.path.split(ras_fn)[0]
    out_stats_csv = out_dir + '_stats.csv'
    print("\tAppending stats to %s" % out_stats_csv)
    # write a new file with header if not yet existing, otherwise append a line
    new_file = not os.path.exists(out_stats_csv)
    with open(out_stats_csv, 'w' if new_file else 'a', newline='') as out_stats:
        wr = csv.writer(out_stats, delimiter=",")
        if new_file:
            wr.writerow(["chm_name", "ground_peak_mean_m", "ground_peak_stdev_m",
                         "num_stdevs_shift", "final_chm_peak_shift_m"])
        wr.writerow([os.path.split(chm_fn)[1], str(round(gmin, 2)), str(round(gsd, 2)),
                     str(round(stddev_shift, 2)), str(round(shift_val, 2))])
    print("\tFinished chm_correct.py")
    dt_list = [timelib.fn_getdatetime_list(dt_str)[0] for dt_str in dt_list_str]
else:
    #SRTM, then systematic ASTER timestamps
    dt_list = [datetime(2000, 2, 11), datetime(2000, 5, 31),
               datetime(2009, 5, 31), datetime(2018, 5, 31)]

#Use tif on disk if available
out_fn = os.path.splitext(stack_fn)[0]
#Otherwise load stack and compute trend/intercept if necessary
trend_fn = out_fn + '_trend.tif'
trend_ds = iolib.fn_getds(trend_fn)
#Convert trend from m/yr to m/day to match ordinal date units
trend = iolib.ds_getma(trend_ds) / 365.25
intercept_fn = out_fn + '_intercept.tif'
#Hmm, no 365.25 factor here. Clean up here and in stack generation
intercept = iolib.fn_getma(intercept_fn)

#Determine local elevation min/max - use to throw out bad trends
med_fn = out_fn + '_med.tif'
if not os.path.exists(med_fn):
    med_fn = out_fn + '_mean.tif'
med = iolib.fn_getma(med_fn)
zlim = malib.calcperc(med, (0.01, 99.99))
zlim = (zlim[0] - zlim_pad, zlim[1] + zlim_pad)
med = None

#Can vectorize
#dt_list_o = timelib.dt2o(dt_list)
#z_list = trend*dt_list_o[:,None,None] + intercept
filter = True
    if shp_fn is not None:
        pltlib.shp_overlay(axa[1], ds, shp_fn, color='darkgreen')
    plt.tight_layout()
    for ax in axa:
        pltlib.hide_ticks(ax)
        ax.set_facecolor('k')
        if title is not None:
            ax.set_title(title)
    return f


#dem_fn_list = glob.glob('*8m_trans_warp.tif')
#dem_ref_fn = 'rainier_allgood_mos-tile-0_warp.tif'
shp_fn = '/Volumes/SHEAN_1TB_SSD/usgs_dems/rainier/final_clip/rainier_24k_1970-2015_mb_lines.shp'

dem_fn_list = glob.glob('*0_warp_ref.tif')
dem_ref_fn = 'scg_all_2008-2016_mos-tile-0.tif'
dem_ref = iolib.fn_getma(dem_ref_fn)
shp_fn = None

outdir = 'movie_2panel'
if not os.path.exists(outdir):
    os.makedirs(outdir)

for dem_fn in [dem_ref_fn] + dem_fn_list:
    print(dem_fn)
    dem_ds = iolib.fn_getds(dem_fn)
    dem = iolib.ds_getma(dem_ds)
    dem_hs_fn = os.path.splitext(dem_fn)[0] + '_hs_az315.tif'
    dem_hs = iolib.fn_getma(dem_hs_fn)
    anomaly = dem - dem_ref
    dt = timelib.fn_getdatetime(dem_fn)
    if dt is not None:
def main():
    parser = getparser()
    args = parser.parse_args()
    dem_dir = os.path.abspath(args.DEM_folder)
    if args.out_folder:
        out_folder = os.path.abspath(args.out_folder)
    else:
        out_folder = os.path.join(dem_dir, 'composite_dems')
    if not os.path.exists(out_folder):
        os.makedirs(out_folder)
    if args.identifier:
        # for individually aligned DEMs
        identifier = args.identifier
    else:
        identifier = ''
    if args.mode == 'triplet':
        dir_list = sorted(glob.glob(os.path.join(dem_dir, '20*/')))
        valid_for_nadir_dir = []
        valid_for_aft_dir = []
        valid_nadir_aft_dir = []
        for for_nadir_dir in sorted(glob.glob(os.path.join(dir_list[0], '*/'))):
            # Loading D_sub and computing stats fails for degenerate runs,
            # so the try block doubles as a validity check
            try:
                D_sub = iolib.fn_getma(os.path.join(for_nadir_dir, 'run-D_sub.tif'), 3)
                stats = [np.percentile(D_sub.compressed(), (2, 98)),
                         np.mean(D_sub.compressed())]
                DEM = glob.glob(os.path.join(for_nadir_dir,
                                             'run*{}*-DEM.tif'.format(identifier)))[0]
                valid_for_nadir_dir.append(for_nadir_dir)
            except Exception:
                continue
        for for_aft_dir in sorted(glob.glob(os.path.join(dir_list[1], '*/'))):
            try:
                # see ASP issue for this dirty hack:
                # https://github.com/NeoGeographyToolkit/StereoPipeline/issues/308
                D_sub = iolib.fn_getma(os.path.join(for_aft_dir, 'run-D_sub.tif'), 3)
                stats = [np.percentile(D_sub.compressed(), (2, 98)),
                         np.mean(D_sub.compressed())]
                DEM = glob.glob(os.path.join(for_aft_dir,
                                             'run*{}*-DEM.tif'.format(identifier)))[0]
                valid_for_aft_dir.append(for_aft_dir)
            except Exception:
                continue
        for nadir_aft_dir in sorted(glob.glob(os.path.join(dir_list[2], '*/'))):
            try:
                D_sub = iolib.fn_getma(os.path.join(nadir_aft_dir, 'run-D_sub.tif'), 3)
                stats = [np.percentile(D_sub.compressed(), (2, 98)),
                         np.mean(D_sub.compressed())]
                DEM = glob.glob(os.path.join(nadir_aft_dir,
                                             'run*{}*-DEM.tif'.format(identifier)))[0]
                valid_nadir_aft_dir.append(nadir_aft_dir)
            except Exception:
                continue
        for_nadir_list = [glob.glob(os.path.join(d, 'run*{}*-DEM.tif'.format(identifier)))[0]
                          for d in valid_for_nadir_dir]
        nadir_aft_list = [glob.glob(os.path.join(d, 'run*{}*-DEM.tif'.format(identifier)))[0]
                          for d in valid_nadir_aft_dir]
        for_aft_list = [glob.glob(os.path.join(d, 'run*{}*-DEM.tif'.format(identifier)))[0]
                        for d in valid_for_aft_dir]
        total_dem_list = for_nadir_list + for_aft_list + nadir_aft_list
        stats_list = ['nmad', 'count', 'median']
        print('total dems are {}'.format(len(total_dem_list)))
        out_fn_list = [os.path.join(out_folder, 'triplet_{}_mos.tif'.format(stat))
                       for stat in stats_list]
        print("Mosaicking per-pixel nmad, count and median composites, plus "
              "median DEMs for the 3 stereo combinations, in parallel")
        dem_mos_log = p_map(asp.dem_mosaic,
                            [total_dem_list] * 3 + [for_aft_list, nadir_aft_list, for_nadir_list],
                            out_fn_list + [os.path.join(out_folder, x) for x in
                                           ['for_aft_dem_median_mos.tif',
                                            'nadir_aft_dem_median_mos.tif',
                                            'for_nadir_dem_median_mos.tif']],
                            ['None'] * 6,
                            [None] * 6,
                            stats_list + ['median'] * 3,
                            [None] * 6,
                            num_cpus=4)
        out_log_fn = os.path.join(out_folder, 'skysat_triplet_dem_mos.log')
        print("Saving triplet DEM mosaic log at {}".format(out_log_fn))
        with open(out_log_fn, 'w') as f:
            for log in dem_mos_log:
                f.write(log)
    elif args.mode == 'video':
        dir_list = sorted(glob.glob(os.path.join(dem_dir, '1*/')))
        valid_video_dir = []
        for video_dir in dir_list:
            try:
                D_sub = iolib.fn_getma(os.path.join(video_dir, 'run-D_sub.tif'), 3)
                stats = [np.percentile(D_sub.compressed(), (2, 98)),
                         np.mean(D_sub.compressed())]
                DEM = glob.glob(os.path.join(video_dir,
                                             'run*{}*-DEM.tif'.format(identifier)))[0]
                valid_video_dir.append(video_dir)
            except Exception:
                continue
        video_dem_list = [glob.glob(os.path.join(d, f'run*{identifier}*-DEM.tif'))[0]
                          for d in valid_video_dir]
        stats_list = ['median', 'count', 'nmad']
        print('total dems are {}'.format(len(video_dem_list)))
        out_fn_list = [os.path.join(out_folder, 'video_{}_mos.tif'.format(stat))
                       for stat in stats_list]
        dem_mos_log = p_map(asp.dem_mosaic, [video_dem_list] * 3, out_fn_list,
                            ['None'] * 3, [None] * 3, stats_list, [None] * 3)
        out_log_fn = os.path.join(out_folder, 'skysat_video_dem_mos.log')
        with open(out_log_fn, 'w') as f:
            for log in dem_mos_log:
                f.write(log)
        if args.filter_dem == 1:
            print("Filtering DEM using NMAD and count metrics")
            min_count = args.min_video_count
            max_nmad = args.max_video_nmad
            print(f"Filter will use min count of {min_count} and max NMAD of {max_nmad}")
            mos_ds_list = warplib.memwarp_multi_fn(out_fn_list)
            # Filtered array list contains dem_filtered, nmad_filtered,
            # count_filtered in that order
            filtered_array_list = skysat.filter_video_dem_by_nmad(mos_ds_list,
                                                                  min_count, max_nmad)
            trailing_str = f'_filt_max_nmad{max_nmad}_min_count{min_count}.tif'
            out_filter_fn_list = [os.path.splitext(fn)[0] + trailing_str
                                  for fn in out_fn_list]
            # out_fn_list is ordered (median, count, nmad) while the filtered
            # arrays come back as (dem, nmad, count) per the note above, so
            # map each array to its matching filename and reference dataset
            fn_order = [0, 2, 1]
            for arr, pos in zip(filtered_array_list, fn_order):
                iolib.writeGTiff(arr, out_filter_fn_list[pos], mos_ds_list[pos])
    print("Script complete")
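# ---------------------------------------------------------------------------
# Hedged illustration (added, not part of the source): the video filter above
# rejects pixels with low redundancy (count < min_count) or high spread
# (NMAD > max_nmad). NMAD is the normalized median absolute deviation,
# 1.4826 * median(|x - median(x)|), a robust stand-in for the standard
# deviation. A minimal numpy sketch of the same per-pixel test:
import numpy as np

def nmad(stack, axis=0):
    """Per-pixel normalized median absolute deviation of a DEM stack."""
    med = np.nanmedian(stack, axis=axis)
    return 1.4826 * np.nanmedian(np.abs(stack - med), axis=axis)

def filter_composite(median_arr, count_arr, nmad_arr, min_count=2, max_nmad=5.0):
    """Mask composite pixels with low count or high NMAD (sketch only)."""
    bad = (count_arr < min_count) | (nmad_arr > max_nmad)
    return np.ma.masked_array(median_arr, mask=bad)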
def main():
    parser = getparser()
    args = parser.parse_args()
    img = args.img
    img_list = sorted(glob.glob(os.path.join(img, '*.tif')))
    if len(img_list) < 2:
        img_list = sorted(glob.glob(os.path.join(img, '*.tiff')))
    #img_list = [os.path.basename(x) for x in img_list]
    if os.path.islink(img_list[0]):
        img_list = [os.readlink(x) for x in img_list]
    if args.cam:
        cam = os.path.abspath(args.cam)
        if 'run' in os.path.basename(cam):
            cam_list = sorted(glob.glob(cam + '-*.tsai'))
        else:
            cam_list = sorted(glob.glob(os.path.join(cam, '*.tsai')))
        cam_list = cam_list[:len(img_list)]
    session = args.t
    if args.ba_prefix:
        ba_prefix = args.ba_prefix
    if args.initial_transform:
        # read from args; the original referenced the name before assignment
        initial_transform = os.path.abspath(args.initial_transform)
    if args.input_adjustments:
        input_adjustments = os.path.abspath(args.input_adjustments)
    if args.overlap_list:
        overlap_list = os.path.abspath(args.overlap_list)
    if args.gcp:
        gcp_list = sorted(glob.glob(os.path.join(args.gcp, '*.gcp')))
    ba_prefix = os.path.abspath(args.ba_prefix)
    mode = args.mode
    if args.bound:
        bound = gpd.read_file(args.bound)
        # compare CRS by value, not identity, and reproject to WGS84 if needed
        if bound.crs != 'EPSG:4326':
            bound = bound.to_crs('EPSG:4326')
        lon_min, lat_min, lon_max, lat_max = bound.total_bounds
    if args.dem:
        dem = iolib.fn_getma(args.dem)
        dem_stats = malib.get_stats_dict(dem)
        min_elev, max_elev = [dem_stats['min'] - 500, dem_stats['max'] + 500]
    if mode == 'full_video':
        frame_index = args.frame_index
        df = pd.read_csv(frame_index)
        gcp = os.path.abspath(args.gcp)
        df['dt'] = [datetime.strptime(date.split('+00:00')[0], '%Y-%m-%dT%H:%M:%S.%f')
                    for date in df.datetime.values]
        delta = (df.dt.values[1] - df.dt.values[0]) / np.timedelta64(1, 's')
        # hardcode the overlap limit to cover 40 seconds of video
        overlap_limit = int(np.ceil(40 / delta))  # np.int is removed in modern numpy
        print(f"Calculated overlap limit as {overlap_limit}")
        img_list = [glob.glob(os.path.join(img, f'*{x}*.tiff'))[0] for x in df.name.values]
        cam_list = [glob.glob(os.path.join(cam, f'*{x}*.tsai'))[0] for x in df.name.values]
        gcp_list = [glob.glob(os.path.join(gcp, f'*{x}*.gcp'))[0] for x in df.name.values]
        # also append the clean gcp here
        print(os.path.join(gcp, '*clean*_gcp.gcp'))
        gcp_list.append(glob.glob(os.path.join(gcp, '*clean*_gcp.gcp'))[0])
        round1_opts = get_ba_opts(ba_prefix,
                                  overlap_limit=overlap_limit,
                                  flavor='2round_gcp_1',
                                  session=session,
                                  num_iterations=args.num_iter)
        print("Running round 1 bundle adjustment for input video sequence")
        if session == 'nadirpinhole':
            ba_args = img_list + cam_list
        else:
            ba_args = img_list
        # Check if this command executed till last
        print('Running bundle adjustment round1')
        #run_cmd('bundle_adjust', round1_opts + ba_args)
        if session == 'nadirpinhole':
            identifier = os.path.basename(cam_list[0]).split(df.name.values[0])[0]
            print(ba_prefix + identifier + f'-{df.name.values[0]}*.tsai')
            cam_list = [glob.glob(ba_prefix + identifier + f'-{x}*.tsai')[0]
                        for x in df.name.values]
            print(len(cam_list))
            ba_args = img_list + cam_list + gcp_list
            round2_opts = get_ba_opts(ba_prefix,
                                      overlap_limit=overlap_limit,
                                      flavor='2round_gcp_2',
                                      session=session,
                                      gcp_transform=True)
        else:
            # round 1 produced an adjust file
            input_adjustments = ba_prefix
            round2_opts = get_ba_opts(ba_prefix,
                                      overlap_limit=overlap_limit,
                                      input_adjustments=ba_prefix,
                                      flavor='2round_gcp_2',
                                      session=session)
            ba_args = img_list + gcp_list
        print("Running round 2 bundle adjustment for input video sequence")
        run_cmd('bundle_adjust', round2_opts + ba_args)
    elif mode == 'full_triplet':
        if args.overlap_list is None:
            print("Bundle adjustment will be expensive: without an overlap list, "
                  "matches are attempted for each and every image pair")
            round1_opts = get_ba_opts(ba_prefix,
                                      flavor='2round_gcp_1',
                                      session=session,
                                      num_iterations=args.num_iter)
            # enter round2_opts here only ?
        else:
            round1_opts = get_ba_opts(ba_prefix,
                                      overlap_list=overlap_list,
                                      flavor='2round_gcp_1',
                                      session=session,
                                      num_iterations=args.num_iter)
        if session == 'nadirpinhole':
            ba_args = img_list + cam_list
        else:
            ba_args = img_list
        print("Running round 1 bundle adjustment for given triplet stereo combination")
        run_cmd('bundle_adjust', round1_opts + ba_args)
        if session == 'nadirpinhole':
            identifier = os.path.basename(cam_list[0]).split(
                os.path.splitext(os.path.basename(img_list[0]))[0], 2)[0]
            print(ba_prefix + f'-{identifier}*.tsai')
            # glob order is arbitrary; sort so cameras stay aligned with images
            cam_list = sorted(glob.glob(os.path.join(ba_prefix + f'-{identifier}*.tsai')))
            ba_args = img_list + cam_list + gcp_list
            round2_opts = get_ba_opts(ba_prefix,
                                      overlap_list=overlap_list,
                                      flavor='2round_gcp_2',
                                      session=session,
                                      gcp_transform=True)
        else:
            # round 1 produced an adjust file
            input_adjustments = ba_prefix
            # the source passed an undefined overlap_limit here;
            # overlap_list appears to be what was intended
            round2_opts = get_ba_opts(ba_prefix,
                                      overlap_list=overlap_list,
                                      input_adjustments=ba_prefix,
                                      flavor='2round_gcp_2',
                                      session=session,
                                      elevation_limit=[min_elev, max_elev],
                                      lon_lat_limit=[lon_min, lat_min, lon_max, lat_max])
            ba_args = img_list + gcp_list
        print("Running round 2 bundle adjustment for given triplet stereo combination")
        run_cmd('bundle_adjust', round2_opts + ba_args)
    # input is just a transform from pc_align or something similar, no optimization
    if mode == 'transform_pc_align':
        # the if/else on args.gcp produced identical arguments in the source,
        # so the branches are collapsed here
        if session == 'nadirpinhole':
            ba_args = img_list + cam_list + gcp_list
            ba_opt = get_ba_opts(ba_prefix,
                                 overlap_list,
                                 flavor='2round_gcp_2',
                                 session=session,
                                 gcp_transform=True)
        else:
            ba_args = img_list + gcp_list
            ba_opt = get_ba_opts(ba_prefix,
                                 overlap_list,
                                 initial_transform=initial_transform,
                                 flavor='2round_gcp_2',
                                 session=session,
                                 gcp_transform=True)
        print("Simply transforming the cameras without optimization")
        run_cmd('bundle_adjust', ba_opt + ba_args, 'Running bundle adjust')
    # general-usecase bundle adjust
    if mode == 'general_ba':
        round1_opts = get_ba_opts(ba_prefix,
                                  overlap_limit=args.overlap_limit,
                                  flavor='2round_gcp_1',
                                  session=session)
        print("Running general purpose bundle adjustment")
        if session == 'nadirpinhole':
            ba_args = img_list + cam_list
        else:
            ba_args = img_list
        # Check if this command executed till last
        run_cmd('bundle_adjust', round1_opts + ba_args, 'Running bundle adjust')
    print("Script is complete!")
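# ---------------------------------------------------------------------------
# Hedged sketch (added, not part of the source): run_cmd above is assumed to
# be a thin subprocess wrapper that prepends the tool name to the option
# list, runs it, and returns the captured log text. A minimal stand-in:
import subprocess

def run_cmd(bin_name, call_args, msg=''):
    """Run an external tool (e.g. ASP bundle_adjust) and return its stdout."""
    if msg:
        print(msg)
    call = [bin_name] + list(call_args)
    print(' '.join(call))
    out = subprocess.run(call, check=True, capture_output=True, text=True)
    return out.stdout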
def main():
    parser = getparser()
    args = parser.parse_args()
    dem_dir = os.path.abspath(args.DEM_folder)
    if args.out_folder:
        out_folder = os.path.abspath(args.out_folder)
    else:
        out_folder = os.path.join(dem_dir, 'composite_dems')
    if not os.path.exists(out_folder):
        os.makedirs(out_folder)
    if args.identifier:
        # for individually aligned DEMs
        identifier = args.identifier
    else:
        identifier = ''
    if args.mode == 'triplet':
        dir_list = sorted(glob.glob(os.path.join(dem_dir, '20*/')))
        valid_for_nadir_dir = []
        valid_for_aft_dir = []
        valid_nadir_aft_dir = []
        for for_nadir_dir in sorted(glob.glob(os.path.join(dir_list[0], '*/'))):
            try:
                D_sub = iolib.fn_getma(os.path.join(for_nadir_dir, 'run-D_sub.tif'), 3)
                stats = [np.percentile(D_sub.compressed(), (2, 98)),
                         np.mean(D_sub.compressed())]
                DEM = glob.glob(os.path.join(for_nadir_dir,
                                             f'run*{identifier}*-DEM.tif'))[0]
                valid_for_nadir_dir.append(for_nadir_dir)
            except Exception:
                continue
        for for_aft_dir in sorted(glob.glob(os.path.join(dir_list[2], '*/'))):
            try:
                # see ASP issue for this dirty hack:
                # https://github.com/NeoGeographyToolkit/StereoPipeline/issues/308
                D_sub = iolib.fn_getma(os.path.join(for_aft_dir, 'run-D_sub.tif'), 3)
                stats = [np.percentile(D_sub.compressed(), (2, 98)),
                         np.mean(D_sub.compressed())]
                DEM = glob.glob(os.path.join(for_aft_dir,
                                             f'run*{identifier}*-DEM.tif'))[0]
                valid_for_aft_dir.append(for_aft_dir)
            except Exception:
                continue
        for nadir_aft_dir in sorted(glob.glob(os.path.join(dir_list[1], '*/'))):
            try:
                D_sub = iolib.fn_getma(os.path.join(nadir_aft_dir, 'run-D_sub.tif'), 3)
                stats = [np.percentile(D_sub.compressed(), (2, 98)),
                         np.mean(D_sub.compressed())]
                DEM = glob.glob(os.path.join(nadir_aft_dir,
                                             f'run*{identifier}*-DEM.tif'))[0]
                valid_nadir_aft_dir.append(nadir_aft_dir)
            except Exception:
                continue
        for_nadir_list = [glob.glob(os.path.join(d, f'run*{identifier}*-DEM.tif'))[0]
                          for d in valid_for_nadir_dir]
        nadir_aft_list = [glob.glob(os.path.join(d, f'run*{identifier}*-DEM.tif'))[0]
                          for d in valid_nadir_aft_dir]
        for_aft_list = [glob.glob(os.path.join(d, f'run*{identifier}*-DEM.tif'))[0]
                        for d in valid_for_aft_dir]
        total_dem_list = for_nadir_list + for_aft_list + nadir_aft_list
        stats_list = ['nmad', 'count', 'median']
        print(f'total dems are {len(total_dem_list)}')
        out_fn_list = [os.path.join(out_folder, f'triplet_{stat}_mos.tif')
                       for stat in stats_list]
        print("Mosaicking per-pixel nmad, count and median composites, plus "
              "median DEMs for the 3 stereo combinations, in parallel")
        dem_mos_log = p_map(
            asp.dem_mosaic,
            [total_dem_list] * 3 + [for_aft_list, nadir_aft_list, for_nadir_list],
            out_fn_list + [os.path.join(out_folder, x) for x in
                           ['for_aft_dem_median_mos.tif',
                            'nadir_aft_dem_median_mos.tif',
                            'for_nadir_dem_median_mos.tif']],
            ['None'] * 6,
            [None] * 6,
            stats_list + ['median'] * 3)
        out_log_fn = os.path.join(out_folder, 'skysat_triplet_dem_mos.log')
        print(f"Saving triplet DEM mosaic log at {out_log_fn}")
        with open(out_log_fn, 'w') as f:
            for log in dem_mos_log:
                f.write(log)
    elif args.mode == 'video':
        dir_list = sorted(glob.glob(os.path.join(dem_dir, '1*/')))
        valid_video_dir = []
        for video_dir in dir_list:
            try:
                D_sub = iolib.fn_getma(os.path.join(video_dir, 'run-D_sub.tif'), 3)
                stats = [np.percentile(D_sub.compressed(), (2, 98)),
                         np.mean(D_sub.compressed())]
                DEM = glob.glob(os.path.join(video_dir,
                                             f'run*{identifier}*-DEM.tif'))[0]
                valid_video_dir.append(video_dir)
            except Exception:
                continue
        video_dem_list = [glob.glob(os.path.join(d, '*-DEM.tif'))[0]
                          for d in valid_video_dir]
        stats_list = ['nmad', 'count', 'median']
        print(f'total dems are {len(video_dem_list)}')
        out_fn_list = [os.path.join(out_folder, f'video_{stat}_mos.tif')
                       for stat in stats_list]
        dem_mos_log = p_map(asp.dem_mosaic, [video_dem_list] * 3, out_fn_list,
                            ['None'] * 3, [None] * 3, stats_list)
        out_log_fn = os.path.join(out_folder, 'skysat_video_dem_mos.log')
        with open(out_log_fn, 'w') as f:
            for log in dem_mos_log:
                f.write(log)
    print("Script complete")
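# ---------------------------------------------------------------------------
# Hedged usage note (added, not part of the source): asp.dem_mosaic above is
# assumed to wrap ASP's dem_mosaic tool. In recent ASP releases the same
# composites can be produced directly from a shell, e.g.:
#
#   dem_mosaic --median -o video_median_mos *run*-DEM.tif
#   dem_mosaic --count  -o video_count_mos  *run*-DEM.tif
#   dem_mosaic --nmad   -o video_nmad_mos   *run*-DEM.tif
#
# Flag availability depends on the ASP version; check `dem_mosaic --help`.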