Example #1
def proc_modscag(fn_list, extent=None, t_srs=None):
    """Process the MODSCAG products for full date range, create composites and reproject
    """
    #Use cubic spline here for improved upsampling
    ds_list = warplib.memwarp_multi_fn(fn_list, res='min', extent=extent, t_srs=t_srs, r='cubicspline')
    stack_fn = os.path.splitext(fn_list[0])[0] + '_' + os.path.splitext(os.path.split(fn_list[-1])[1])[0] + '_stack_%i' % len(fn_list) 
    #Create stack here - no need for most of the malib stack machinery, just make a 3D array
    #Mask values greater than 100% (clouds, bad pixels, etc)
    ma_stack = np.ma.array([np.ma.masked_greater(iolib.ds_getma(ds), 100) for ds in ds_list], dtype=np.uint8)

    stack_count = np.ma.masked_equal(ma_stack.count(axis=0), 0).astype(np.uint8)
    stack_count.set_fill_value(0)
    stack_min = ma_stack.min(axis=0).astype(np.uint8)
    stack_min.set_fill_value(0)
    stack_max = ma_stack.max(axis=0).astype(np.uint8)
    stack_max.set_fill_value(0)
    stack_med = np.ma.median(ma_stack, axis=0).astype(np.uint8)
    stack_med.set_fill_value(0)

    out_fn = stack_fn + '_count.tif'
    iolib.writeGTiff(stack_count, out_fn, ds_list[0])
    out_fn = stack_fn + '_max.tif'
    iolib.writeGTiff(stack_max, out_fn, ds_list[0])
    out_fn = stack_fn + '_min.tif'
    iolib.writeGTiff(stack_min, out_fn, ds_list[0])
    out_fn = stack_fn + '_med.tif'
    iolib.writeGTiff(stack_med, out_fn, ds_list[0])

    ds = gdal.Open(out_fn)
    return ds
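
A minimal usage sketch for proc_modscag (hypothetical filenames; assumes the same pygeotools-style environment as the function above):

import glob

#Hypothetical daily MODSCAG fractional snow cover tiles for one season
fn_list = sorted(glob.glob('modscag/*_snow_fraction.tif'))
#Returns a GDAL dataset for the last composite written (the median stack)
med_ds = proc_modscag(fn_list)
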
Example #2
def dz_fltr(dem_fn, refdem_fn, perc=None, abs_dz_lim=(0, 30), smooth=True):
    """Absolute elevation difference range filter using values from a source raster file and a reference raster file 
    """
    try:
        open(refdem_fn)
    except IOError:
        sys.exit('Unable to open reference DEM: %s' % refdem_fn)

    dem_ds, refdem_ds = warplib.memwarp_multi_fn([dem_fn, refdem_fn], res='first', extent='first', t_srs='first')
    dem = iolib.ds_getma(dem_ds)
    refdem = iolib.ds_getma(refdem_ds)
    out = dz_fltr_ma(dem, refdem, perc, abs_dz_lim, smooth)
    return out
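
A short usage sketch (hypothetical filenames; iolib.fn_getds and iolib.writeGTiff as used elsewhere in these examples):

from pygeotools.lib import iolib

#Mask DEM pixels that differ from the reference by more than 100 m
dem_filt = dz_fltr('dem_2m.tif', 'srtm_ref.tif', abs_dz_lim=(0, 100))
iolib.writeGTiff(dem_filt, 'dem_2m_dzfilt.tif', iolib.fn_getds('dem_2m.tif'))
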
Example #3
def plot_composite_fig(ortho, dem, count, nmad, outfn, product='triplet'):
    """
    Plot the gallery figure for final DEM products
    Parameters
    ------------
    ortho: str
        path to orthoimage
    dem: str
        path to dem
    count: str
        path to count map
    nmad: str
        path to NMAD
    outfn: str
        path to save output figure
    product: str
        product to plot (triplet/video)
    """
    if product == 'triplet':
        figsize = (10, 8)
    else:
        figsize = (10, 3)
    f, ax = plt.subplots(1, 4, figsize=figsize)
    ds_list = warplib.memwarp_multi_fn([ortho, dem, count, nmad], res='max')
    ortho, dem, count, nmad = [iolib.ds_getma(x) for x in ds_list]
    pltlib.iv(ortho,
              ax=ax[0],
              cmap='gray',
              scalebar=True,
              cbar=False,
              ds=ds_list[0],
              skinny=False)
    pltlib.iv(dem,
              ax=ax[1],
              hillshade=True,
              scalebar=False,
              ds=ds_list[1],
              label='Elevation (m WGS84)',
              skinny=False)
    pltlib.iv(count, ax=ax[2], cmap='YlOrRd', label='DEM count', skinny=False)
    pltlib.iv(nmad,
              ax=ax[3],
              cmap='inferno',
              clim=(0, 10),
              label='Elevation NMAD (m)',
              skinny=False)
    plt.tight_layout()
    f.savefig(outfn, dpi=300, bbox_inches='tight', pad_inches=0.1)
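
A hypothetical call for a triplet stereo product (all filenames are placeholders):

plot_composite_fig('ortho.tif', 'dem.tif', 'count.tif', 'nmad.tif',
                   outfn='dem_gallery.png', product='triplet')
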
Example #4
def abs_range_fltr_lowresDEM(dem_fn, refdem_fn, pad=30):
    try:
        open(refdem_fn)
    except IOError:
        sys.exit('Unable to open reference DEM: %s' % refdem_fn)

    dem_ds, refdem_ds = warplib.memwarp_multi_fn([dem_fn, refdem_fn], res='first', extent='first', t_srs='first')
    dem = iolib.ds_getma(dem_ds)
    refdem = iolib.ds_getma(refdem_ds)

    rangelim = (refdem.min(), refdem.max())
    rangelim = (rangelim[0] - pad, rangelim[1] + pad)

    print('Excluding values outside of padded ({0:0.1f} m) lowres DEM range: {1:0.1f} to {2:0.1f} m'.format(pad, *rangelim))
    out = range_fltr(dem, rangelim)
    return out
Example #5
def get_t_factor_fn(fn1, fn2, ds=None):
    t_factor = None
    #Extract timestamps from input filenames
    t1 = fn_getdatetime(fn1)
    t2 = fn_getdatetime(fn2)
    t_factor = get_t_factor(t1,t2)
    #Attempt to load timestamp arrays (for mosaics with variable timestamps)
    t1_fn = os.path.splitext(fn1)[0]+'_ts.tif'
    t2_fn = os.path.splitext(fn2)[0]+'_ts.tif'
    if os.path.exists(t1_fn) and os.path.exists(t2_fn) and ds is not None:
        print("Preparing timestamp arrays")
        from pygeotools.lib import warplib
        t1_ds, t2_ds = warplib.memwarp_multi_fn([t1_fn, t2_fn], extent=ds, res=ds)
        print("Loading timestamps into masked arrays")
        from pygeotools.lib import iolib
        t1 = iolib.ds_getma(t1_ds)
        t2 = iolib.ds_getma(t2_ds)
        #This is a new masked array
        t_factor = (t2 - t1) / 365.25
    return t_factor
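
A sketch of the common dh/dt use case (assumes fn_getdatetime comes from pygeotools.lib.timelib and that the filenames contain parseable dates):

#Time factor in decimal years between the two acquisition dates
t_factor = get_t_factor_fn('20150612_dem.tif', '20180907_dem.tif')
#For two coregistered DEM masked arrays dem1 and dem2:
#dhdt = (dem2 - dem1) / t_factor
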
Example #6
def get_mask(ds, dem_fn):
    #Mask glaciers, vegetated slopes
    static_mask = dem_mask.get_lulc_mask(ds,
                                         mask_glaciers=True,
                                         filter='not_forest',
                                         bareground_thresh=60)
    #Mask glaciers only
    #static_mask = dem_mask.get_icemask(ds)
    #Top-of-atmosphere reflectance threshold (requires orthoimage and output from toa.sh)
    toa_fn = dem_mask.get_toa_fn(dem_fn)
    if toa_fn is not None:
        toa_ds = warplib.memwarp_multi_fn([toa_fn,], res=ds, extent=ds, t_srs=ds, r='cubicspline')[0]
        toa_mask = dem_mask.get_toa_mask(toa_ds)
        static_mask = np.logical_and(static_mask, toa_mask)
    #Return final mask, ready to be applied
    return ~(static_mask)
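
A sketch of applying the returned mask, where True marks pixels to exclude (hypothetical filename; follows the np.ma convention used in these examples):

import numpy as np
from pygeotools.lib import iolib

ds = iolib.fn_getds('dem.tif')
dem = iolib.ds_getma(ds)
mask = get_mask(ds, 'dem.tif')
#Retain only static (unmasked) surfaces
dem_static = np.ma.array(dem, mask=mask)
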
Example #7
    else:
        feat_fn = str(glacnum)

    print("\n%i of %i: %s\n" % (n + 1, feat_count, feat_fn))
    glac_geom = feat.GetGeometryRef()
    glac_geom.AssignSpatialReference(glac_shp_srs)
    glac_geom_extent = geolib.geom_extent(glac_geom)
    glac_area = glac_geom.GetArea()
    if glac_area / 1E6 < min_glac_area:
        print("Glacier area below %0.1f km2 threshold" % min_glac_area)
        continue

    cx, cy = glac_geom.Centroid().GetPoint_2D()

    #Warp everything to common res/extent/proj
    ds_list = warplib.memwarp_multi_fn([z1_fn, z2_fn], res='min', \
            extent=glac_geom_extent, t_srs=aea_srs, verbose=False)

    if site == 'conus':
        #Add prism datasets
        prism_fn_list = [prism_ppt_annual_fn, prism_tmean_annual_fn]
        prism_fn_list.extend([
            prism_ppt_summer_fn, prism_ppt_winter_fn, prism_tmean_summer_fn,
            prism_tmean_winter_fn
        ])
        ds_list.extend(
            warplib.memwarp_multi_fn(prism_fn_list,
                                     res=ds_list[0],
                                     extent=glac_geom_extent,
                                     t_srs=aea_srs,
                                     verbose=False))
Example #8
def main(argv=None):
    parser = getparser()
    args = parser.parse_args()

    #Should check that files exist
    ref_dem_fn = args.ref_fn
    src_dem_fn = args.src_fn

    mode = args.mode
    mask_list = args.mask_list
    max_offset = args.max_offset
    max_dz = args.max_dz
    slope_lim = tuple(args.slope_lim)
    tiltcorr = args.tiltcorr
    polyorder = args.polyorder
    res = args.res

    #Maximum number of iterations
    max_iter = args.max_iter

    #These are tolerances (in meters) to stop iteration
    tol = args.tol
    min_dx = tol
    min_dy = tol
    min_dz = tol

    outdir = args.outdir
    if outdir is None:
        outdir = os.path.splitext(src_dem_fn)[0] + '_dem_align'

    if tiltcorr:
        outdir += '_tiltcorr'
        tiltcorr_done = False
        #Relax tolerance for initial round of co-registration
        #tiltcorr_tol = 0.1
        #if tol < tiltcorr_tol:
        #    tol = tiltcorr_tol

    if not os.path.exists(outdir):
        os.makedirs(outdir)

    outprefix = '%s_%s' % (os.path.splitext(os.path.split(src_dem_fn)[-1])[0], \
            os.path.splitext(os.path.split(ref_dem_fn)[-1])[0])
    outprefix = os.path.join(outdir, outprefix)

    print("\nReference: %s" % ref_dem_fn)
    print("Source: %s" % src_dem_fn)
    print("Mode: %s" % mode)
    print("Output: %s\n" % outprefix)

    src_dem_ds = gdal.Open(src_dem_fn)
    ref_dem_ds = gdal.Open(ref_dem_fn)

    #Get local cartesian coordinate system
    #local_srs = geolib.localtmerc_ds(src_dem_ds)
    #Use original source dataset coordinate system
    #Potentially issues with distortion and xyz/tiltcorr offsets for DEM with large extent
    local_srs = geolib.get_ds_srs(src_dem_ds)
    #local_srs = geolib.get_ds_srs(ref_dem_ds)

    #Resample to common grid
    ref_dem_res = float(geolib.get_res(ref_dem_ds, t_srs=local_srs, square=True)[0])
    #Create a copy to be updated in place
    src_dem_ds_align = iolib.mem_drv.CreateCopy('', src_dem_ds, 0)
    src_dem_res = float(geolib.get_res(src_dem_ds, t_srs=local_srs, square=True)[0])
    src_dem_ds = None
    #Resample to user-specified resolution
    ref_dem_ds, src_dem_ds_align = warplib.memwarp_multi([ref_dem_ds, src_dem_ds_align], \
            extent='intersection', res=args.res, t_srs=local_srs, r='cubic')

    res = float(geolib.get_res(src_dem_ds_align, square=True)[0])
    print("\nReference DEM res: %0.2f" % ref_dem_res)
    print("Source DEM res: %0.2f" % src_dem_res)
    print("Resolution for coreg: %s (%0.2f m)\n" % (args.res, res))

    #Iteration number
    n = 1
    #Cumulative offsets
    dx_total = 0
    dy_total = 0
    dz_total = 0

    #Now iteratively update geotransform and vertical shift
    while True:
        print("*** Iteration %i ***" % n)
        dx, dy, dz, static_mask, fig = compute_offset(ref_dem_ds, src_dem_ds_align, src_dem_fn, mode, max_offset, \
                mask_list=mask_list, max_dz=max_dz, slope_lim=slope_lim, plot=True)
        xyz_shift_str_iter = "dx=%+0.2fm, dy=%+0.2fm, dz=%+0.2fm" % (dx, dy, dz)
        print("Incremental offset: %s" % xyz_shift_str_iter)

        dx_total += dx
        dy_total += dy
        dz_total += dz

        xyz_shift_str_cum = "dx=%+0.2fm, dy=%+0.2fm, dz=%+0.2fm" % (dx_total, dy_total, dz_total)
        print("Cumulative offset: %s" % xyz_shift_str_cum)
        #String to append to output filenames
        xyz_shift_str_cum_fn = '_%s_x%+0.2f_y%+0.2f_z%+0.2f' % (mode, dx_total, dy_total, dz_total)

        #Should make an animation of this converging
        if n == 1: 
            #static_mask_orig = static_mask
            if fig is not None:
                dst_fn = outprefix + '_%s_iter%02i_plot.png' % (mode, n)
                print("Writing offset plot: %s" % dst_fn)
                fig.gca().set_title("Incremental: %s\nCumulative: %s" % (xyz_shift_str_iter, xyz_shift_str_cum))
                fig.savefig(dst_fn, dpi=300)

        #Apply the horizontal shift to the original dataset
        src_dem_ds_align = coreglib.apply_xy_shift(src_dem_ds_align, dx, dy, createcopy=False)
        #Apply the vertical shift
        src_dem_ds_align = coreglib.apply_z_shift(src_dem_ds_align, dz, createcopy=False)

        n += 1
        print("\n")
        #If magnitude of shift in all directions is less than tol
        #if n > max_iter or (abs(dx) <= min_dx and abs(dy) <= min_dy and abs(dz) <= min_dz):
        #If magnitude of shift is less than tol
        dm = np.sqrt(dx**2 + dy**2 + dz**2)
        dm_total = np.sqrt(dx_total**2 + dy_total**2 + dz_total**2)

        if dm_total > max_offset:
            sys.exit("Total offset exceeded specified max_offset (%0.2f m). Consider increasing -max_offset argument" % max_offset)

        #Stop iteration
        if n > max_iter or dm < tol:

            if fig is not None:
                dst_fn = outprefix + '_%s_iter%02i_plot.png' % (mode, n)
                print("Writing offset plot: %s" % dst_fn)
                fig.gca().set_title("Incremental:%s\nCumulative:%s" % (xyz_shift_str_iter, xyz_shift_str_cum))
                fig.savefig(dst_fn, dpi=300)

            #Compute final elevation difference
            if True:
                ref_dem_clip_ds_align, src_dem_clip_ds_align = warplib.memwarp_multi([ref_dem_ds, src_dem_ds_align], \
                        res=res, extent='intersection', t_srs=local_srs, r='cubic')
                ref_dem_align = iolib.ds_getma(ref_dem_clip_ds_align, 1)
                src_dem_align = iolib.ds_getma(src_dem_clip_ds_align, 1)
                ref_dem_clip_ds_align = None

                diff_align = src_dem_align - ref_dem_align
                src_dem_align = None
                ref_dem_align = None

                #Get updated, final mask
                static_mask_final = get_mask(src_dem_clip_ds_align, mask_list, src_dem_fn)
                static_mask_final = np.logical_or(np.ma.getmaskarray(diff_align), static_mask_final)
                
                #Final stats, before outlier removal
                diff_align_compressed = diff_align[~static_mask_final]
                diff_align_stats = malib.get_stats_dict(diff_align_compressed, full=True)

                #Prepare filtered version for tiltcorr fit
                diff_align_filt = np.ma.array(diff_align, mask=static_mask_final)
                diff_align_filt = outlier_filter(diff_align_filt, f=3, max_dz=max_dz)
                #diff_align_filt = outlier_filter(diff_align_filt, perc=(12.5, 87.5), max_dz=max_dz)
                slope = get_filtered_slope(src_dem_clip_ds_align)
                diff_align_filt = np.ma.array(diff_align_filt, mask=np.ma.getmaskarray(slope))
                diff_align_filt_stats = malib.get_stats_dict(diff_align_filt, full=True)

            #Fit 2D polynomial to residuals and remove
            #To do: add support for along-track and cross-track artifacts
            if tiltcorr and not tiltcorr_done:
                print("\n************")
                print("Calculating 'tiltcorr' 2D polynomial fit to residuals with order %i" % polyorder)
                print("************\n")
                gt = src_dem_clip_ds_align.GetGeoTransform()

                #Need to apply the mask here, so we're only fitting over static surfaces
                #Note that the origmask=False will compute vals for all x and y indices, which is what we want
                vals, resid, coeff = geolib.ma_fitpoly(diff_align_filt, order=polyorder, gt=gt, perc=(0,100), origmask=False)
                #vals, resid, coeff = geolib.ma_fitplane(diff_align_filt, gt, perc=(12.5, 87.5), origmask=False)

                #Should write out coeff or grid with correction 

                vals_stats = malib.get_stats_dict(vals)

                #Want to have max_tilt check here
                #max_tilt = 4.0 #m
                #Should do percentage
                #vals.ptp() > max_tilt

                #Note: dimensions of ds and vals will be different as vals are computed for clipped intersection
                #Need to recompute planar offset for full src_dem_ds_align extent and apply
                xgrid, ygrid = geolib.get_xy_grids(src_dem_ds_align)
                valgrid = geolib.polyval2d(xgrid, ygrid, coeff) 
                #For results of ma_fitplane
                #valgrid = coeff[0]*xgrid + coeff[1]*ygrid + coeff[2]
                src_dem_ds_align = coreglib.apply_z_shift(src_dem_ds_align, -valgrid, createcopy=False)

                if True:
                    print("Creating plot of polynomial fit to residuals")
                    fig, axa = plt.subplots(1,2, figsize=(8, 4))
                    dz_clim = malib.calcperc_sym(vals, (2, 98))
                    ax = pltlib.iv(diff_align_filt, ax=axa[0], cmap='RdBu', clim=dz_clim, \
                            label='Residual dz (m)', scalebar=False)
                    ax = pltlib.iv(valgrid, ax=axa[1], cmap='RdBu', clim=dz_clim, \
                            label='Polyfit dz (m)', ds=src_dem_ds_align)
                    #if tiltcorr:
                        #xyz_shift_str_cum_fn += "_tiltcorr"
                    tiltcorr_fig_fn = outprefix + '%s_polyfit.png' % xyz_shift_str_cum_fn
                    print("Writing out figure: %s\n" % tiltcorr_fig_fn)
                    fig.savefig(tiltcorr_fig_fn, dpi=300)

                print("Applying tilt correction to difference map")
                diff_align -= vals

                #Should iterate until tilts are below some threshold
                #For now, only do one tiltcorr
                tiltcorr_done = True
                #Now use original tolerance, and number of iterations 
                tol = args.tol
                max_iter = n + args.max_iter
            else:
                break

    if True:
        #Write out aligned difference map for clipped extent with vertical offset removed
        align_diff_fn = outprefix + '%s_align_diff.tif' % xyz_shift_str_cum_fn
        print("Writing out aligned difference map with median vertical offset removed")
        iolib.writeGTiff(diff_align, align_diff_fn, src_dem_clip_ds_align)

    if True:
        #Write out filtered aligned difference map
        align_diff_filt_fn = outprefix + '%s_align_diff_filt.tif' % xyz_shift_str_cum_fn
        print("Writing out filtered aligned difference map with median vertical offset removed")
        iolib.writeGTiff(diff_align_filt, align_diff_filt_fn, src_dem_clip_ds_align)

    #Extract final center coordinates for intersection
    center_coord_ll = geolib.get_center(src_dem_clip_ds_align, t_srs=geolib.wgs_srs)
    center_coord_xy = geolib.get_center(src_dem_clip_ds_align)
    src_dem_clip_ds_align = None

    #Write out final aligned src_dem 
    align_fn = outprefix + '%s_align.tif' % xyz_shift_str_cum_fn
    print("Writing out shifted src_dem with median vertical offset removed: %s" % align_fn)
    #Open original uncorrected dataset at native resolution
    src_dem_ds = gdal.Open(src_dem_fn)
    src_dem_ds_align = iolib.mem_drv.CreateCopy('', src_dem_ds, 0)
    #Apply final horizontal and vertical shift to the original dataset
    #Note: potentially issues if we used a different projection during coregistration!
    src_dem_ds_align = coreglib.apply_xy_shift(src_dem_ds_align, dx_total, dy_total, createcopy=False)
    src_dem_ds_align = coreglib.apply_z_shift(src_dem_ds_align, dz_total, createcopy=False)
    if tiltcorr:
        xgrid, ygrid = geolib.get_xy_grids(src_dem_ds_align)
        valgrid = geolib.polyval2d(xgrid, ygrid, coeff) 
        #For results of ma_fitplane
        #valgrid = coeff[0]*xgrid + coeff[1]*ygrid + coeff[2]
        src_dem_ds_align = coreglib.apply_z_shift(src_dem_ds_align, -valgrid, createcopy=False)
    #Might be cleaner way to write out MEM ds directly to disk
    src_dem_full_align = iolib.ds_getma(src_dem_ds_align)
    iolib.writeGTiff(src_dem_full_align, align_fn, src_dem_ds_align)

    if True:
        #Output final aligned src_dem, masked so only best pixels are preserved
        #Useful if creating a new reference product
        #Can also use apply_mask.py 
        print("Applying filter to shiftec src_dem")
        align_diff_filt_full_ds = warplib.memwarp_multi_fn([align_diff_filt_fn,], res=src_dem_ds_align, extent=src_dem_ds_align, \
                t_srs=src_dem_ds_align)[0]
        align_diff_filt_full = iolib.ds_getma(align_diff_filt_full_ds)
        align_diff_filt_full_ds = None
        align_fn_masked = outprefix + '%s_align_filt.tif' % xyz_shift_str_cum_fn
        iolib.writeGTiff(np.ma.array(src_dem_full_align, mask=np.ma.getmaskarray(align_diff_filt_full)), \
                align_fn_masked, src_dem_ds_align)

    src_dem_full_align = None
    src_dem_ds_align = None

    #Compute original elevation difference
    if True:
        ref_dem_clip_ds, src_dem_clip_ds = warplib.memwarp_multi([ref_dem_ds, src_dem_ds], \
                res=res, extent='intersection', t_srs=local_srs, r='cubic')
        src_dem_ds = None
        ref_dem_ds = None
        ref_dem_orig = iolib.ds_getma(ref_dem_clip_ds)
        src_dem_orig = iolib.ds_getma(src_dem_clip_ds)
        #Needed for plotting
        ref_dem_hs = geolib.gdaldem_mem_ds(ref_dem_clip_ds, processing='hillshade', returnma=True, computeEdges=True)
        src_dem_hs = geolib.gdaldem_mem_ds(src_dem_clip_ds, processing='hillshade', returnma=True, computeEdges=True)
        diff_orig = src_dem_orig - ref_dem_orig
        #Only compute stats over valid surfaces
        static_mask_orig = get_mask(src_dem_clip_ds, mask_list, src_dem_fn)
        #Note: this doesn't include outlier removal or slope mask!
        static_mask_orig = np.logical_or(np.ma.getmaskarray(diff_orig), static_mask_orig)
        #For some reason, ASTER DEM diffs have a spike near the 0 bin, could be an issue with masking?
        diff_orig_compressed = diff_orig[~static_mask_orig]
        diff_orig_stats = malib.get_stats_dict(diff_orig_compressed, full=True)

        #Prepare filtered version for comparison 
        diff_orig_filt = np.ma.array(diff_orig, mask=static_mask_orig)
        diff_orig_filt = outlier_filter(diff_orig_filt, f=3, max_dz=max_dz)
        #diff_orig_filt = outlier_filter(diff_orig_filt, perc=(12.5, 87.5), max_dz=max_dz)
        slope = get_filtered_slope(src_dem_clip_ds)
        diff_orig_filt = np.ma.array(diff_orig_filt, mask=np.ma.getmaskarray(slope))
        diff_orig_filt_stats = malib.get_stats_dict(diff_orig_filt, full=True)

        #Write out original difference map
        print("Writing out original difference map for common intersection before alignment")
        orig_diff_fn = outprefix + '_orig_diff.tif'
        iolib.writeGTiff(diff_orig, orig_diff_fn, ref_dem_clip_ds)
        src_dem_clip_ds = None
        ref_dem_clip_ds = None

    if True:
        align_stats_fn = outprefix + '%s_align_stats.json' % xyz_shift_str_cum_fn
        align_stats = {}
        align_stats['src_fn'] = src_dem_fn 
        align_stats['ref_fn'] = ref_dem_fn 
        align_stats['align_fn'] = align_fn 
        align_stats['res'] = {} 
        align_stats['res']['src'] = src_dem_res
        align_stats['res']['ref'] = ref_dem_res
        align_stats['res']['coreg'] = res
        align_stats['center_coord'] = {'lon':center_coord_ll[0], 'lat':center_coord_ll[1], \
                'x':center_coord_xy[0], 'y':center_coord_xy[1]}
        align_stats['shift'] = {'dx':dx_total, 'dy':dy_total, 'dz':dz_total, 'dm':dm_total}
        #This tiltcorr flag gets set to false, need better flag
        if tiltcorr:
            align_stats['tiltcorr'] = {}
            align_stats['tiltcorr']['coeff'] = coeff.tolist()
            align_stats['tiltcorr']['val_stats'] = vals_stats
        align_stats['before'] = diff_orig_stats
        align_stats['before_filt'] = diff_orig_filt_stats
        align_stats['after'] = diff_align_stats
        align_stats['after_filt'] = diff_align_filt_stats
        
        import json
        with open(align_stats_fn, 'w') as f:
            json.dump(align_stats, f)

    #Create output plot
    if True:
        print("Creating final plot")
        kwargs = {'interpolation':'none'}
        #f, axa = plt.subplots(2, 4, figsize=(11, 8.5))
        f, axa = plt.subplots(2, 4, figsize=(16, 8))
        for ax in axa.ravel()[:-1]:
            ax.set_facecolor('k')
            pltlib.hide_ticks(ax)
        dem_clim = malib.calcperc(ref_dem_orig, (2,98))
        axa[0,0].imshow(ref_dem_hs, cmap='gray', **kwargs)
        im = axa[0,0].imshow(ref_dem_orig, cmap='cpt_rainbow', clim=dem_clim, alpha=0.6, **kwargs)
        pltlib.add_cbar(axa[0,0], im, arr=ref_dem_orig, clim=dem_clim, label=None)
        pltlib.add_scalebar(axa[0,0], res=res)
        axa[0,0].set_title('Reference DEM')
        axa[0,1].imshow(src_dem_hs, cmap='gray', **kwargs)
        im = axa[0,1].imshow(src_dem_orig, cmap='cpt_rainbow', clim=dem_clim, alpha=0.6, **kwargs)
        pltlib.add_cbar(axa[0,1], im, arr=src_dem_orig, clim=dem_clim, label=None)
        axa[0,1].set_title('Source DEM')
        #axa[0,2].imshow(~static_mask_orig, clim=(0,1), cmap='gray')
        axa[0,2].imshow(~static_mask, clim=(0,1), cmap='gray', **kwargs)
        axa[0,2].set_title('Surfaces for co-registration')
        dz_clim = malib.calcperc_sym(diff_orig_compressed, (5, 95))
        im = axa[1,0].imshow(diff_orig, cmap='RdBu', clim=dz_clim)
        pltlib.add_cbar(axa[1,0], im, arr=diff_orig, clim=dz_clim, label=None)
        axa[1,0].set_title('Elev. Diff. Before (m)')
        im = axa[1,1].imshow(diff_align, cmap='RdBu', clim=dz_clim)
        pltlib.add_cbar(axa[1,1], im, arr=diff_align, clim=dz_clim, label=None)
        axa[1,1].set_title('Elev. Diff. After (m)')

        #tight_dz_clim = (-1.0, 1.0)
        tight_dz_clim = (-2.0, 2.0)
        #tight_dz_clim = (-10.0, 10.0)
        #tight_dz_clim = malib.calcperc_sym(diff_align_filt, (5, 95))
        im = axa[1,2].imshow(diff_align_filt, cmap='RdBu', clim=tight_dz_clim)
        pltlib.add_cbar(axa[1,2], im, arr=diff_align_filt, clim=tight_dz_clim, label=None)
        axa[1,2].set_title('Elev. Diff. After (m)')

        #Tried to insert Nuth fig here
        #ax_nuth.change_geometry(1,2,1)
        #f.axes.append(ax_nuth)

        bins = np.linspace(dz_clim[0], dz_clim[1], 128)
        axa[1,3].hist(diff_orig_compressed, bins, color='g', label='Before', alpha=0.5)
        axa[1,3].hist(diff_align_compressed, bins, color='b', label='After', alpha=0.5)
        axa[1,3].set_xlim(*dz_clim)
        axa[1,3].axvline(0, color='k', linewidth=0.5, linestyle=':')
        axa[1,3].set_xlabel('Elev. Diff. (m)')
        axa[1,3].set_ylabel('Count (px)')
        axa[1,3].set_title("Source - Reference")
        before_str = 'Before\nmed: %0.2f\nnmad: %0.2f' % (diff_orig_stats['med'], diff_orig_stats['nmad'])
        axa[1,3].text(0.05, 0.95, before_str, va='top', color='g', transform=axa[1,3].transAxes, fontsize=8)
        after_str = 'After\nmed: %0.2f\nnmad: %0.2f' % (diff_align_stats['med'], diff_align_stats['nmad'])
        axa[1,3].text(0.65, 0.95, after_str, va='top', color='b', transform=axa[1,3].transAxes, fontsize=8)

        #This is empty
        axa[0,3].axis('off')

        suptitle = '%s\nx: %+0.2fm, y: %+0.2fm, z: %+0.2fm' % (os.path.split(outprefix)[-1], dx_total, dy_total, dz_total)
        f.suptitle(suptitle)
        f.tight_layout()
        plt.subplots_adjust(top=0.90)

        fig_fn = outprefix + '%s_align.png' % xyz_shift_str_cum_fn
        print("Writing out figure: %s" % fig_fn)
        f.savefig(fig_fn, dpi=300)
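
main() above is a command-line entry point (parse_args() reads sys.argv), so one way to exercise it from Python is the sketch below; the positional ref/src filenames are inferred from the args.ref_fn/args.src_fn attributes used above, and all optional flags are left at their parser defaults:

import sys

#Hypothetical invocation; the actual flag names are defined by getparser(), which is not shown here
sys.argv = ['dem_align.py', 'ref_dem.tif', 'src_dem.tif']
main()
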
Example #9
#Note: velocity is expected to be a 2-band file
#for i in *mos_vx.tif ; do
#base=$(echo $i | awk -F'_v' '{print $1}')
#gdal_merge.py -separate -a_nodata -2000000000 -o ${base}_vxy.tif ${base}_vx.tif ${base}_vy.tif
#done

print "Warping DEMs to same res/extent/proj"
#Might want to limit intersection to dem1 and dem2, as vmap could be significantly smaller
disp_ds = None
if len(sys.argv) == 4:
    disp_fn = sys.argv[3]
    if 'track' in disp_fn or 'tsx' in disp_fn:
        vel_input = True
    fn_list.append(disp_fn)
    dem1_ds, dem2_ds, disp_ds = warplib.memwarp_multi_fn(fn_list,
                                                         extent='intersection',
                                                         res='max')
else:
    dem1_ds, dem2_ds = warplib.memwarp_multi_fn(fn_list,
                                                extent='intersection',
                                                res='max')

outdir = os.path.split(dem1_fn)[0]
outprefix = os.path.splitext(
    os.path.split(dem1_fn)[1])[0] + '_' + os.path.splitext(
        os.path.split(dem2_fn)[1])[0]

#Load input DEMs into masked arrays
print "Loading input DEMs into masked arrays"
dem1 = iolib.ds_getma(dem1_ds, 1)
dem2 = iolib.ds_getma(dem2_ds, 1)
Example #10
def main():
    parser = getparser()
    args = parser.parse_args()

    fn = args.fn
    if not iolib.fn_check(fn):
        sys.exit("Unable to locate input file: %s" % fn)

    #Need some checks on these
    param = args.param

    print("Loading input raster into masked array")
    ds = iolib.fn_getds(fn)
    #Currently supports only single band operations
    r = iolib.ds_getma(ds, 1)

    #May need to cast input ma as float32 so np.nan filling works
    #r = r.astype(np.float32)
    #Want function that checks and returns float32 if necessary
    #Should filter, then return original dtype

    r_fltr = r

    #Loop through all specified input filters
    #for filt in args.filt:
    filt = args.filt[0]

    if len(param) == 1:
        param = param[0]
    param_str = ''

    if filt == 'range':
        #Range filter
        param = [float(i) for i in param[1:]]
        r_fltr = filtlib.range_fltr(r_fltr, param)
        param_str = '_{0:0.2f}-{1:0.2f}'.format(*param)
    elif filt == 'absrange':
        #Range filter of absolute values
        param = [float(i) for i in param[1:]]
        r_fltr = filtlib.absrange_fltr(r_fltr, param)
        param_str = '_{0:0.2f}-{1:0.2f}'.format(*param)
    elif filt == 'perc':
        #Percentile filter
        param = [float(i) for i in param[1:]]
        r_fltr = filtlib.perc_fltr(r, perc=param)
        param_str = '_{0:0.2f}-{1:0.2f}'.format(*param)
    elif filt == 'med':
        #Median filter
        param = int(param)
        r_fltr = filtlib.rolling_fltr(r_fltr, f=np.nanmedian, size=param)
        #r_fltr = filtlib.median_fltr(r_fltr, fsize=param, origmask=True)
        #r_fltr = filtlib.median_fltr_skimage(r_fltr, radius=4, origmask=True)
        param_str = '_%ipx' % param
    elif filt == 'gauss':
        #Gaussian filter (default)
        param = int(param)
        r_fltr = filtlib.gauss_fltr_astropy(r_fltr,
                                            size=param,
                                            origmask=False,
                                            fill_interior=False)
        param_str = '_%ipx' % param
    elif filt == 'highpass':
        #High pass filter
        param = int(param)
        r_fltr = filtlib.highpass(r_fltr, size=param)
        param_str = '_%ipx' % param
    elif filt == 'sigma':
        #n*sigma filter, remove outliers
        param = int(param)
        r_fltr = filtlib.sigma_fltr(r_fltr, n=param)
        param_str = '_n%i' % param
    elif filt == 'mad':
        #n*mad filter, remove outliers
        #Maybe better to use a percentile filter
        param = int(param)
        r_fltr = filtlib.mad_fltr(r_fltr, n=param)
        param_str = '_n%i' % param
    elif filt == 'dz':
        #Difference filter, need to specify ref_fn and range
        #Could let the user compute their own dz, then just run a standard range or absrange filter
        ref_fn = param[0]
        ref_ds = warplib.memwarp_multi_fn([ref_fn,], res=ds, extent=ds, t_srs=ds)[0]
        ref = iolib.ds_getma(ref_ds)
        param = [float(i) for i in param[1:]]
        r_fltr = filtlib.dz_fltr_ma(r, ref, rangelim=param)
        #param_str = '_{0:0.2f}-{1:0.2f}'.format(*param)
        param_str = '_{0:0.0f}_{1:0.0f}'.format(*param)
    else:
        sys.exit("No filter type specified")

    #Compute and print stats before/after
    if args.stats:
        print("Input stats:")
        malib.print_stats(r)
        print("Filtered stats:")
        malib.print_stats(r_fltr)

    #Write out
    dst_fn = os.path.splitext(fn)[0] + '_%sfilt%s.tif' % (filt, param_str)
    if args.outdir is not None:
        outdir = args.outdir
        if not os.path.exists(outdir):
            os.makedirs(outdir)
        dst_fn = os.path.join(outdir, os.path.split(dst_fn)[-1])
    print("Writing out filtered raster: %s" % dst_fn)
    iolib.writeGTiff(r_fltr, dst_fn, ds)
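
The branches above all delegate to pygeotools filtlib; the same filters can be called directly on a masked array, using the same signatures that appear in the code above (hypothetical input filename):

from pygeotools.lib import iolib, filtlib

dem = iolib.fn_getma('dem.tif')
#Gaussian smoothing, as in the 'gauss' branch
dem_gauss = filtlib.gauss_fltr_astropy(dem, size=5, origmask=False, fill_interior=False)
#Remove outliers beyond 3*sigma, as in the 'sigma' branch
dem_sigma = filtlib.sigma_fltr(dem, n=3)
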
Example #11
def warpMultipleFiles(filenames, srs_template):
    from pygeotools.lib import warplib
    return warplib.memwarp_multi_fn(filenames, extent='intersection', res='min', t_srs=srs_template)
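
Usage sketch (hypothetical filenames; warplib accepts an open dataset for t_srs, as in the other examples here):

from osgeo import gdal

dem1_ds, dem2_ds = warpMultipleFiles(['dem1.tif', 'dem2.tif'], gdal.Open('dem1.tif'))
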
Example #12
def main():
    parser = getparser()
    args = parser.parse_args()

    dem_fn = args.dem_fn

    # Write out because they will be used to mask CHM
    writeall = True

    # Auto compute min TOA with gaussian mixture model
    auto_min_toa = args.auto_min_toa

    dirname, demname = os.path.split(dem_fn)

    # The subdir in which the DEM.tif sits will be the pairname
    pairname = os.path.split(dirname)[1]
    print("Pairname:", pairname)

    if args.out_dir is not None:

        # Create symlink in out_dir to: (1) original out-DEM_4m (2) *_ortho_4m.tif (3) All *.xml files
        # This should look like <out_dir>/<pairname>_out-DEM_4m
        dem_fn_lnk = os.path.join(args.out_dir, pairname + '_' +  demname)
        force_symlink(dem_fn, dem_fn_lnk)
        force_symlink(os.path.join(dirname, pairname + '_ortho_4m.tif'), os.path.join(args.out_dir, pairname + '_ortho_4m.tif') )
        xml_list = [f for f in os.listdir(dirname) if f.endswith('r100.xml')]

        print("\nSymlinks made for:")
        for x in xml_list:
            print(x)
            shutil.copy2(os.path.join(dirname,x), args.out_dir)

        out_fn_base = os.path.splitext(dem_fn_lnk)[0]

        dem_fn = dem_fn_lnk
    else:
        out_fn_base = os.path.splitext(dem_fn)[0]

    print("\nBasename for output files:")
    print(out_fn_base)

    #Max threshold value for LiDAR dataset; valid pixels fall under this value
    lidar_fn=args.lidar_fn
    max_thresh=args.max_thresh

    #Need some checks on these
    param = args.filt_param
    if param is not None and len(param) == 1:
        param = param[0]
    # Get original DEM
    dem = iolib.fn_getma(dem_fn)

    print("\nLoading input DEM into masked array")
    dem_ds = iolib.fn_getds(dem_fn)

    toa_mask = None
    toa_tri_mask = None # probably not used by itself; done as part of toa_mask
    rough_mask = None
    slope_mask = None
    mask_list = [toa_tri_mask, toa_mask, rough_mask, slope_mask]

    if args.filtdz:
        print("\nFilter with dz from ref DEM to remove cloud returns and blunders (shadows)...")
        print("Reference DEM: %s" % os.path.split(param[0])[1] )
        print("Absolute dz (+/-): %s \n" % param[2] )
        #May need to cast input ma as float32 so np.nan filling works
        dem = dem.astype(np.float32)

        #Difference filter, need to specify ref_fn and range
        #Could let the user compute their own dz, then just run a standard range or absrange filter
        ref_fn = param[0]
        ref_ds = warplib.memwarp_multi_fn([ref_fn,], res=dem_ds, extent=dem_ds, t_srs=dem_ds)[0]
        ref = iolib.ds_getma(ref_ds)
        param = [float(i) for i in param[1:]]

        # A dem that has been masked based on the dz filter
        dem = filtlib.dz_fltr_ma(dem, ref, rangelim=param)

        if writeall:
            #out_fn = os.path.splitext(dem_fn)[0]+'_dzfilt.tif'
            out_fn = os.path.join(out_fn_base +'_dzfilt.tif')
            print("Writing out %s\n" % out_fn)
            iolib.writeGTiff(dem, out_fn, src_ds=dem_ds, ndv=args.ndv)

    #Initialize a control mask that we'll update
    #True (1) represents "valid" unmasked pixel, False (0) represents "invalid" pixel to be masked
    controlmask = ~(np.ma.getmaskarray(dem))

    # DEM masking: Each block returns a masked output (not a mask)
    #    TOA: mask dark and/or smooth areas (shadows and/or water)
    #    Roughness
    #    Slope

    if args.toamask or args.toatrimask:
        #try:
            print("\nCompute TOA from ortho...\n")
            toa_fn = get_toa_fn(out_fn_base + '.tif') ##--->dem_fn
            print(toa_fn)
            print("\nWarp TOA to DEM...\n")

            toa_ds = warplib.memwarp_multi_fn([toa_fn,], res=dem_ds, extent=dem_ds, t_srs=dem_ds)[0]

            if args.toamask:

                if auto_min_toa:

                    # Compute a good min TOA value
                    m,s = get_min_gaus(toa_fn, 50, 4)
                    min_toa = m + s
                    min_toa = m
                else:
                    min_toa = args.min_toa

                with open(os.path.join(os.path.split(toa_fn)[0], "min_toa_" + pairname + ".txt"), "w") as text_file:
                    text_file.write(os.path.basename(__file__))
                    text_file.write("\nMinimum TOA used for mask:\n{0}".format(min_toa))

                # Should mask dark areas and dilate
                toa_mask = get_toa_mask(toa_ds, min_toa)

                #Dilate the mask
                if args.dilate_toa is not None:
                    niter = args.dilate_toa
                    print("Dilating TOA mask with %i iterations" % niter)
                    from scipy import ndimage
                    toa_mask = ~(ndimage.morphology.binary_dilation(~toa_mask, iterations=niter))

                controlmask = np.logical_and(toa_mask, controlmask)

                # Mask islands here
                controlmask = malib.mask_islands(controlmask, 5)

                if writeall:
                    #out_fn = out_fn_base+'_toamask.tif'
                    out_fn = os.path.join(out_fn_base +'_toamask.tif')
                    print("Writing out %s\n" % out_fn)
                    iolib.writeGTiff(toa_mask, out_fn, src_ds=dem_ds)

            if args.toatrimask:
                # Should mask smooth areas (measures local variance)
                toa_tri_mask = get_tri_mask(toa_ds, args.min_toatri)
                controlmask = np.logical_and(toa_tri_mask, controlmask)

                if writeall:
                    #out_fn = out_fn_base+'_toatrimask.tif'
                    out_fn = os.path.join(out_fn_base +'_toatrimask.tif')
                    print("Writing out %s\n" % out_fn)
                    iolib.writeGTiff(toa_tri_mask, out_fn, src_ds=dem_ds)

        #except Exception as e:
            #print("\tFailed to apply TOA masking.\n")

    if args.slopemask:
        slope_mask = get_slope_mask(dem_ds, args.max_slope)
        controlmask = np.logical_and(slope_mask, controlmask)

        #if args.slopemaskcoarse:

            #dem_fn2 = args.dem_coarscomp_fn

            #print("\nLoading input coarse DEM into masked array")
            #dem2_ds = iolib.fn_getds(dem_fn2)
            #slope_mask = get_slope_mask(dem2_ds, args.max_slope)
            #controlmask = np.logical_and(slope_mask, controlmask)

        if writeall:
            #out_fn = out_fn_base+'_slopemask.tif'
            out_fn = os.path.join(out_fn_base +'_slopemask.tif')
            print("Writing out %s\n" % out_fn)
            iolib.writeGTiff(slope_mask, out_fn, src_ds=dem_ds)

    if args.lidar_fn:
        try:
            print("Masking DEM file based on Lidar Dataset\n")
            print("\nWarp Lidar Raster to DEM...\n")
            lidar_ds=warplib.memwarp_multi_fn([lidar_fn,],r='near', res=dem_ds, extent=dem_ds, t_srs=dem_ds)[0]

            lidarmask = get_lidar_mask(dem_ds, lidar_ds, max_thresh)

            controlmask = np.logical_and(lidarmask, controlmask)

            if writeall:
                out_fn=out_fn_base+'_lidarmask.tif'
                print("Writing out %s\n" % out_fn)
                iolib.writeGTiff(lidarmask, out_fn, src_ds=dem_ds)
        except Exception as e:
            print("\tFailed to Apply Lidar Mask")

    # CHM mask will be a subset of the Control mask; slope_mask, toa_mask, toa_tri_mask
    chmmask = controlmask
    print("Generating final CHM mask to apply later")
    #out_fn = out_fn_base+'_chmmask.tif'
    out_fn = os.path.join(out_fn_base +'_chmmask.tif')
    print("Writing out %s\n" % out_fn)
    iolib.writeGTiff(chmmask, out_fn, src_ds=dem_ds)

    if args.roughmask:

        rough_mask = get_rough_mask(dem_ds, args.max_rough)
        controlmask = np.logical_and(rough_mask, controlmask)

        if writeall:
            out_fn = os.path.join(out_fn_base +'_roughmask.tif')
            print("Writing out %s\n" % out_fn)
            iolib.writeGTiff(rough_mask, out_fn, src_ds=dem_ds)

    print("Generating final mask to use for reference surfaces, and applying to input DEM")

    #Now invert for use in creating the final masked array
    #This step results in the areas to be removed being set to a valid value
    controlmask = ~controlmask

    #Dilate the mask
    if args.dilate_con is not None:
        niter = args.dilate_con
        print("Dilating control mask with %i iterations" % niter)
        from scipy import ndimage
        #This should work too: controlmask = ndimage.morphology.binary_dilation(controlmask, iterations=niter)
        controlmask = ~(ndimage.morphology.binary_dilation(~controlmask, iterations=niter)) #This step results in the areas to be removed being set to a valid value, again

    print("\nApply mask to original DEM - use these control surfaces for co-registration...")
    newdem = np.ma.array(dem, mask=controlmask) #Pixels where controlmask is True are masked out of the DEM

    if True:
        print("\nStats of valid DEM with masks applied:")
        valid_stats = malib.print_stats(newdem)
        valid_stats_med = valid_stats[5]

    print("\nWriting DEM control surfaces:")
    #if args.out_dir is not None:
    #    dst_fn = os.path.join(args.out_dir, os.path.split(dirname)[1] + os.path.splitext(demname)[0]+'_control.tif')
    #else:
    #    dst_fn = os.path.splitext(dem_fn)[0]+'_control.tif'
    dst_fn = os.path.join(out_fn_base +'_control.tif')
    print(dst_fn)
    iolib.writeGTiff(newdem, dst_fn, dem_ds)

    return dst_fn
Example #13
def main():
    parser = getparser()
    #Create dictionary of arguments
    args = vars(parser.parse_args())

    #Want to enable -full when -of is specified, probably a fancy way to do this with argparse
    if args['of']:
        args['full'] = True

    args['imshow_kwargs'] = pltlib.imshow_kwargs

    #Need to implement better extent handling for link and overlay
    #Can use warplib extent parsing
    extent = 'first'
    #extent = 'union'

    #Should accept 'ts' or 'fn' or string here, default is 'ts'
    #Can also accept list for subplots
    title = args['title']

    if args['link']:
        fig = plt.figure(0)
        n_ax = len(args['filelist'])
        src_ds_list = [gdal.Open(fn) for fn in args['filelist']]
        t_srs = geolib.get_ds_srs(src_ds_list[0])
        res_stats = geolib.get_res_stats(src_ds_list, t_srs=t_srs)
        #Use min res
        res = res_stats[0]
        extent = 'intersection'
        extent = geolib.ds_geom_union_extent(src_ds_list, t_srs=t_srs)
        #extent = geolib.ds_geom_intersection_extent(src_ds_list, t_srs=t_srs)
        #print(res, extent)

    for n, fn in enumerate(args['filelist']):
        if not iolib.fn_check(fn):
            print('Unable to open input file: %s' % fn)
            continue

        if title == 'ts':
            ts = timelib.fn_getdatetime_list(fn)

            if ts:
                print("Timestamp list: ", ts)
                if len(ts) == 1:
                    args['title'] = ts[0].date()
                elif len(ts) > 1:
                    args['title'] = "%s to %s" % (ts[0].date(), ts[1].date())
            else:
                print("Unable to extract timestamp")
                args['title'] = None
        elif title == 'fn':
            args['title'] = fn

        #if title is not None:
        #    plt.title(title, fontdict={'fontsize':12})

        #Note: this won't work if img1 has 1 band and img2 has 3 bands
        #Hack for now
        if not args['link']:
            fig = plt.figure(n)
            n_ax = 1

        #fig.set_facecolor('black')
        fig.set_facecolor('white')
        fig.canvas.set_window_title(os.path.split(fn)[1])
        #fig.suptitle(os.path.split(fn)[1], fontsize=10)

        if args['overlay']:
            #Should automatically search for shaded relief with same base fn
            #bg_fn = os.path.splitext(fn)[0]+'_hs_az315.tif'
            #Clip/warp background dataset to match overlay dataset
            src_ds, bg_ds = warplib.memwarp_multi_fn([fn, args['overlay']],
                                                     extent=extent,
                                                     res='max')
            #Want to load up the unique bg array for each input
            args['bg'] = get_bma(bg_ds, 1, args['full'])
        else:
            src_ds = gdal.Open(fn, gdal.GA_ReadOnly)
            if args['link']:
                src_ds = warplib.memwarp_multi([src_ds,], res=res, extent=extent, t_srs=t_srs)[0]

        args['cbar_kwargs'] = pltlib.cbar_kwargs
        if args['no_cbar']:
            args['cbar_kwargs'] = None

        nbands = src_ds.RasterCount
        b = src_ds.GetRasterBand(1)
        dt = gdal.GetDataTypeName(b.DataType)
        #Eventually, check dt of each band
        print("%s (%i bands)" % (fn, nbands))
        #Singleband raster
        if (nbands == 1):
            if args['cmap'] is None:
                #Special case to handle ASP float32 grayscale data
                if '-L_sub' in fn or '-R_sub' in fn:
                    args['cmap'] = 'gray'
                else:
                    if (dt == 'Float64') or (dt == 'Float32') or (dt == 'Int32'):
                        args['cmap'] = 'cpt_rainbow'
                    #This is for WV images
                    elif (dt == 'UInt16'):
                        args['cmap'] = 'gray'
                    elif (dt == 'Byte'):
                        args['cmap'] = 'gray'
                    else:
                        args['cmap'] = 'cpt_rainbow'
                """
                if 'count' in fn:
                    args['clim_perc'] = (0,100)
                    cbar_kwargs['extend'] = 'neither'
                    args['cmap'] = 'cpt_rainbow'
                if 'mask' in fn:
                    args['clim'] = (0, 1)
                    #Could be (0, 255)
                    #args['clim_perc'] = (0,100)
                    #Want absolute clim of 0, then perc of 100
                    cbar_kwargs['extend'] = 'neither'
                    args['cmap'] = 'gray'
                """
            bma = get_bma(src_ds, 1, args['full'])
            if args['invert']:
                bma *= -1
            #Note n+1 here ensures we're assigning subplot correctly here (n is 0-relative, subplot is 1)
            bma_fig(fig, bma, n_subplt=n_ax, subplt=n + 1, ds=src_ds, **args)
        #3-band raster, likely disparity map
        #This doesn't work when alpha band is present
        elif (nbands == 3) and (dt == 'Byte'):
            #For some reason, tifs are vertically flipped
            if (os.path.splitext(fn)[1] == '.tif'):
                args['imshow_kwargs']['origin'] = 'lower'
            #Use gdal dataset here instead of imread(fn)?
            imgplot = plt.imshow(plt.imread(fn), **args['imshow_kwargs'])
            pltlib.hide_ticks(imgplot.axes)
        #Handle the 3-band disparity map case here
        #elif ((dt == 'Float32') or (dt == 'Int32')):
        else:
            if args['cmap'] is None:
                args['cmap'] = 'cpt_rainbow'
            bn = 1
            while bn <= nbands:
                bma = get_bma(src_ds, bn, args['full'])
                bma_fig(fig,
                        bma,
                        n_subplt=nbands,
                        subplt=bn,
                        ds=src_ds,
                        **args)
                bn += 1
        #Want to be better about this else case - lazy for now
        #else:
        #    bma = get_bma(src_ds, 1, args['full'])
        #    bma_fig(fig, bma, **args)

        plt.tight_layout()

        #Write out the file
        #Note: make sure display is local for savefig
        if args['of']:
            outf = str(os.path.splitext(fn)[0]) + '_fig.' + args['of']
            #outf = str(os.path.splitext(fn)[0])+'_'+str(os.path.splitext(args['overlay'])[0])+'_fig.'+args['of']

            #Note: need to account for colorbar (12%) and title - some percentage of axes beyond bma dimensions
            #Should specify minimum text size for output

            max_size = np.array((10.0, 10.0))
            max_dpi = 300.0
            #If both outsize and dpi are specified, don't try to change, just make the figure
            if (args['outsize'] is None) and (args['dpi'] is None):
                args['dpi'] = 150.0

            #Unspecified out figure size for a given dpi
            if (args['outsize'] is None) and (args['dpi'] is not None):
                args['outsize'] = np.array(bma.shape[::-1]) / args['dpi']
                if np.any(np.array(args['outsize']) > max_size):
                    args['outsize'] = max_size
            #Specified output figure size, no specified dpi
            elif (args['outsize'] is not None) and (args['dpi'] is None):
                args['dpi'] = np.min([
                    np.max(
                        np.array(bma.shape[::-1]) / np.array(args['outsize'])),
                    max_dpi
                ])

            print()
            print("Saving output figure:")
            print("Filename: ", outf)
            print("Size (in): ", args['outsize'])
            print("DPI (px/in): ", args['dpi'])
            print("Input dimensions (px): ", bma.shape[::-1])
            print("Output dimensions (px): ",
                  tuple(np.array(args['outsize']) * args['dpi']))
            print()

            fig.set_size_inches(args['outsize'])
            #fig.set_size_inches(54.427, 71.87)
            #fig.set_size_inches(40, 87)
            fig.savefig(outf,
                        dpi=args['dpi'],
                        bbox_inches='tight',
                        pad_inches=0,
                        facecolor=fig.get_facecolor(),
                        edgecolor='none')
            #fig.savefig(outf, dpi=args['dpi'], facecolor=fig.get_facecolor(), edgecolor='none')
    #Show the plot - want to show all at once
    if not args['of']:
        plt.show()
Example #14
def main():
    parser = getparser()
    #Create dictionary of arguments
    args = vars(parser.parse_args())
    
    #Want to enable -full when -of is specified, probably a fancy way to do this with argparse
    if args['of']:
        args['full'] = True

    #Note, imshow has many interpolation types:
    #'none', 'nearest', 'bilinear', 'bicubic', 'spline16', 'spline36', 'hanning', 'hamming', 
    #'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos'
    #{'interpolation':'bicubic', 'aspect':'auto'}
    #args['imshow_kwargs']={'interpolation':'bicubic'}
    args['imshow_kwargs']={'interpolation':'none'}

    if args['clipped'] and args['overlay'] is None:
        sys.exit("Must specify an overlay filename with option 'clipped'")

    #Set this as the background numpy array
    args['bg'] = None

    if args['shp'] is not None:
        print(args['shp'])

    if args['link']:
        fig = plt.figure(0)
        n_ax = len(args['filelist'])
        src_ds_list = [gdal.Open(fn) for fn in args['filelist']]
        t_srs = geolib.get_ds_srs(src_ds_list[0])
        res_stats = geolib.get_res_stats(src_ds_list, t_srs=t_srs)
        #Use min res
        res = res_stats[0]
        extent = geolib.ds_geom_union_extent(src_ds_list, t_srs=t_srs)
        #print res, extent

    for n,fn in enumerate(args['filelist']):

        if not iolib.fn_check(fn):
            print('Unable to open input file: %s' % fn)
            continue

        #Note: this won't work if img1 has 1 band and img2 has 3 bands
        #Hack for now
        if not args['link']:
            fig = plt.figure(n)
            n_ax = 1
        
        #fig.set_facecolor('black')
        fig.set_facecolor('white')
        fig.canvas.set_window_title(os.path.split(fn)[1])
        #fig.suptitle(os.path.split(fn)[1], fontsize=10)

        #Note: warplib SHOULD internally check to see if extent/resolution/projection are identical
        #This eliminates the need for a clipped flag
        #If user has already warped the background and source data 
        if args['overlay']:
            if args['clipped']: 
                src_ds = gdal.Open(fn, gdal.GA_ReadOnly)
                #Only load up the bg array once
                if args['bg'] is None:
                    #Need to check that background fn exists
                    print "%s background" % args['overlay']
                    bg_ds = gdal.Open(args['overlay'], gdal.GA_ReadOnly)
                    #Check image dimensions
                    args['bg'] = get_bma(bg_ds, 1, args['full'])
            else:
                #Clip/warp background dataset to match overlay dataset 
                #src_ds, bg_ds = warplib.memwarp_multi_fn([fn, args['overlay']], extent='union')
                src_ds, bg_ds = warplib.memwarp_multi_fn([fn, args['overlay']], extent='first')
                #src_ds, bg_ds = warplib.memwarp_multi_fn([fn, args['overlay']], res='min', extent='first')
                #Want to load up the unique bg array for each input
                args['bg'] = get_bma(bg_ds, 1, args['full'])
        else:
            src_ds = gdal.Open(fn, gdal.GA_ReadOnly)
            if args['link']:
                #Not sure why, but this still warps all linked ds, even when identical res/extent/srs
                #src_ds = warplib.warp(src_ds, res=res, extent=extent, t_srs=t_srs)
                src_ds = warplib.memwarp_multi([src_ds,], res=res, extent=extent, t_srs=t_srs)[0]

        cbar_kwargs={'extend':'both', 'orientation':'vertical', 'shrink':0.7, 'fraction':0.12, 'pad':0.02}

        nbands = src_ds.RasterCount
        b = src_ds.GetRasterBand(1)
        dt = gdal.GetDataTypeName(b.DataType)
        #Eventually, check dt of each band
        print()
        print("%s (%i bands)" % (fn, nbands))
        #Singleband raster
        if (nbands == 1):
            if args['cmap'] is None:
                #Special case to handle ASP float32 grayscale data
                if '-L_sub' in fn or '-R_sub' in fn:
                    args['cmap'] = 'gray'
                else:
                    if (dt == 'Float64') or (dt == 'Float32') or (dt == 'Int32'):
                        args['cmap'] = 'cpt_rainbow'
                    #This is for WV images
                    elif (dt == 'UInt16'):
                        args['cmap'] = 'gray'
                    elif (dt == 'Byte'):
                        args['cmap'] = 'gray'
                    else:
                        args['cmap'] = 'cpt_rainbow'
                """
                if 'count' in fn:
                    args['clim_perc'] = (0,100)
                    cbar_kwargs['extend'] = 'neither'
                    args['cmap'] = 'cpt_rainbow'
                if 'mask' in fn:
                    args['clim'] = (0, 1)
                    #Could be (0, 255)
                    #args['clim_perc'] = (0,100)
                    #Want absolute clim of 0, then perc of 100
                    cbar_kwargs['extend'] = 'neither'
                    args['cmap'] = 'gray'
                """
            args['cbar_kwargs'] = cbar_kwargs
            bma = get_bma(src_ds, 1, args['full'])   
            #Note n+1 here ensures we're assigning subplot correctly here (n is 0-relative, subplot is 1)
            bma_fig(fig, bma, n_subplt=n_ax, subplt=n+1, ds=src_ds, **args)
        #3-band raster, likely disparity map
        #This doesn't work when alpha band is present
        elif (nbands == 3) and (dt == 'Byte'):
            #For some reason, tifs are vertically flipped
            if (os.path.splitext(fn)[1] == '.tif'):
                args['imshow_kwargs']['origin'] = 'lower'
            #Use gdal dataset here instead of imread(fn)?
            imgplot = plt.imshow(plt.imread(fn), **args['imshow_kwargs'])
            pltlib.hide_ticks(imgplot.axes)
        #Handle the 3-band disparity map case here
        #elif ((dt == 'Float32') or (dt == 'Int32')):
        else: 
            if args['cmap'] is None:
                args['cmap'] = 'cpt_rainbow'
            bn = 1
            while bn <= nbands:
                bma = get_bma(src_ds, bn, args['full'])
                bma_fig(fig, bma, n_subplt=nbands, subplt=bn, ds=src_ds, **args)
                bn += 1
        #Want to be better about this else case - lazy for now
        #else:
        #    bma = get_bma(src_ds, 1, args['full'])
        #    bma_fig(fig, bma, **args)

        ts = timelib.fn_getdatetime_list(fn) 

        if ts:
            print "Timestamp list: ", ts

        """
        if len(ts) == 1:
            plt.title(ts[0].date())
        elif len(ts) == 2:
            plt.title("%s to %s" % (ts[0].date(), ts[1].date()))
        """
            
        plt.tight_layout()
        
        #Write out the file 
        #Note: make sure display is local for savefig
        if args['of']:
            outf = str(os.path.splitext(fn)[0])+'_fig.'+args['of'] 
            #outf = str(os.path.splitext(fn)[0])+'_'+str(os.path.splitext(args['overlay'])[0])+'_fig.'+args['of'] 

            #Note: need to account for colorbar (12%) and title - some percentage of axes beyond bma dimensions
            #Should specify minimum text size for output

            max_size = np.array((10.0,10.0))
            max_dpi = 300.0
            #If both outsize and dpi are specified, don't try to change, just make the figure
            if (args['outsize'] is None) and (args['dpi'] is None):
                args['dpi'] = 150.0

            #Unspecified out figure size for a given dpi
            if (args['outsize'] is None) and (args['dpi'] is not None):
                args['outsize'] = np.array(bma.shape[::-1])/args['dpi']
                if np.any(np.array(args['outsize']) > max_size):
                    args['outsize'] = max_size
            #Specified output figure size, no specified dpi 
            elif (args['outsize'] is not None) and (args['dpi'] is None):
                args['dpi'] = np.min([np.max(np.array(bma.shape[::-1])/np.array(args['outsize'])), max_dpi])
                
            print()
            print("Saving output figure:")
            print("Filename: ", outf)
            print("Size (in): ", args['outsize'])
            print("DPI (px/in): ", args['dpi'])
            print("Input dimensions (px): ", bma.shape[::-1])
            print("Output dimensions (px): ", tuple(np.array(args['outsize'])*args['dpi']))
            print()

            fig.set_size_inches(args['outsize'])
            #fig.set_size_inches(54.427, 71.87)
            #fig.set_size_inches(40, 87)
            fig.savefig(outf, dpi=args['dpi'], bbox_inches='tight', pad_inches=0, facecolor=fig.get_facecolor(), edgecolor='none')
    #Show the plot - want to show all at once
    if not args['of']: 
        plt.show()
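A condensed sketch of the outsize/dpi bookkeeping above, under the same defaults (150 dpi, 10x10 in and 300 dpi caps); the helper name is hypothetical and not part of the original script:

import numpy as np

def fig_size_dpi(shape, outsize=None, dpi=None, max_size=(10.0, 10.0), max_dpi=300.0):
    """Derive output figure size (in) and dpi from array shape, capping both"""
    dims = np.array(shape[::-1], dtype=float)  # (width, height) in pixels
    if outsize is None and dpi is None:
        dpi = 150.0
    if outsize is None:
        #Unspecified figure size for a given dpi
        outsize = np.minimum(dims / dpi, np.array(max_size))
    elif dpi is None:
        #Specified figure size, no specified dpi
        dpi = min(float(np.max(dims / np.array(outsize))), max_dpi)
    return tuple(outsize), dpi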
Example #15
0
def main():
    parser = getparser()
    args = parser.parse_args()
    refdem = args.refdem
    srcdem = args.srcdem
    outfolder = '{}__{}_comparison_stats'.format(
        os.path.splitext(os.path.basename(refdem))[0],
        os.path.splitext(os.path.basename(srcdem))[0])
    header_str = '{}__{}'.format(
        os.path.splitext(os.path.basename(refdem))[0],
        os.path.splitext(os.path.basename(srcdem))[0])
    if not os.path.exists(outfolder):
        os.makedirs(outfolder)
    if args.local_ortho == 1:
        temp_ds = warplib.memwarp_multi_fn([refdem, srcdem])[0]
        bbox = geolib.ds_extent(temp_ds)
        geo_crs = temp_ds.GetProjection()
        print('Bounding box lon_lat is {}'.format(bbox))
        bound_poly = Polygon([[bbox[0], bbox[3]], [bbox[2], bbox[3]],
                              [bbox[2], bbox[1]], [bbox[0], bbox[1]]])
        bound_shp = gpd.GeoDataFrame(index=[0],
                                     geometry=[bound_poly],
                                     crs=geo_crs)
        bound_centroid = bound_shp.centroid
        cx = bound_centroid.x.values[0]
        cy = bound_centroid.y.values[0]
        pad = np.ptp([bbox[3], bbox[1]]) / 6.0
        lat_1 = bbox[1] + pad
        lat_2 = bbox[3] - pad
        local_ortho = "+proj=ortho +lat_1={} +lat_2={} +lat_0={} +lon_0={} +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs".format(
            lat_1, lat_2, cy, cx)
        logging.info('Local Ortho projection is {}'.format(local_ortho))
        t_srs = local_ortho
    else:
        t_srs = 'first'
    # this step performs the desired warping operation
    ds_list = warplib.memwarp_multi_fn([refdem, srcdem],
                                       res=args.comparison_res,
                                       t_srs=t_srs)
    refma = iolib.ds_getma(ds_list[0])
    srcma = iolib.ds_getma(ds_list[1])
    init_diff = refma - srcma
    init_stats = malib.get_stats_dict(init_diff)
    print("Original descriptive statistics {}".format(init_stats))
    init_diff_json_fn = os.path.join(
        outfolder, '{}_precoreg_descriptive_stats.json'.format(header_str))
    init_diff_json = json.dumps(init_stats)

    with open(init_diff_json_fn, 'w') as f:
        f.write(init_diff_json)
    logging.info("Saved initial stats at {}".format(init_diff_json))
    refslope = gdaldem(ds_list[0])
    # stats for elevation difference vs reference DEM elevation
    elev_bin, diff_mean, diff_median, diff_std, diff_perc = cummulative_profile(
        refma, init_diff, args.elev_bin_width)
    # stats for elevation difference vs reference DEM slope
    slope_bin, diff_mean_s, diff_median_s, diff_std_s, diff_perc_s = cummulative_profile(
        refslope, init_diff, args.slope_bin_width)
    f, ax = plt.subplots(1, 2, figsize=(10, 4))
    im = ax[0].scatter(elev_bin, diff_mean, c=diff_perc, cmap='inferno')
    ax[0].set_xlabel('Elevation (m)')
    divider = make_axes_locatable(ax[0])
    cax = divider.append_axes('right', size='2.5%', pad=0.05)
    plt.colorbar(im,
                 cax=cax,
                 orientation='vertical',
                 label='pixel count percentage')
    im2 = ax[1].scatter(slope_bin, diff_mean_s, c=diff_perc_s, cmap='inferno')
    ax[1].set_xlabel('Slope (degrees)')
    divider = make_axes_locatable(ax[1])
    cax = divider.append_axes('right', size='2.5%', pad=0.05)
    plt.colorbar(im2,
                 cax=cax,
                 orientation='vertical',
                 label='pixel count percentage')

    for axa in ax.ravel():
        axa.axhline(y=0, c='k')
        axa.set_ylabel('Elevation Difference (m)')
    plt.tight_layout()
    precoreg_plot = os.path.join(outfolder,
                                 header_str + '_precoreg_binned_plot.png')
    f.savefig(precoreg_plot, dpi=300, bbox_inches='tight', pad_inches=0.1)
    logging.info("Saved binned plot at {}".format(precoreg_plot))
    if args.coreg == 1:
        logging.info("will attempt coregisteration")
        if args.local_ortho == 1:
            ref_local_ortho = os.path.splitext(refdem)[0] + '_local_ortho.tif'
            src_local_ortho = os.path.splitext(srcdem)[0] + '_local_ortho.tif'
            # coregistration works best at mean resolution
            # we will rewarp if the initial args.res was not mean
            if args.comparison_res != 'mean':
                ds_list = warplib.memwarp_multi_fn([refdem, srcdem],
                                                   res='mean',
                                                   t_srs=t_srs)
                refma = iolib.ds_getma(ds_list[0])
                srcma = iolib.ds_getma(ds_list[1])
            iolib.writeGTiff(refma, ref_local_ortho, ds_list[0])
            iolib.writeGTiff(srcma, src_local_ortho, ds_list[1])
            coreg_ref = ref_local_ortho
            src_ref = src_local_ortho
        else:
            coreg_ref = refdem
            src_ref = srcdem
        demcoreg_dir = os.path.join(outfolder, 'coreg_results')
        align_opts = [
            '-mode', 'nuth', '-max_iter', '12', '-max_offset', '400',
            '-outdir', demcoreg_dir
        ]
        align_args = [coreg_ref, src_ref]
        align_cmd = ['dem_align.py'] + align_opts + align_args
        subprocess.call(align_cmd)
        #Final round of warping and stats calculation
        try:
            srcdem_align = glob.glob(os.path.join(demcoreg_dir,
                                                  '*align.tif'))[0]
            logging.info(
                "Attempting stats calculation for aligned DEM {}".format(
                    srcdem_align))
            ds_list = warplib.memwarp_multi_fn([args.refdem, srcdem_align],
                                               res=args.comparison_res,
                                               t_srs=t_srs)
            refma = iolib.ds_getma(ds_list[0])
            srcma = iolib.ds_getma(ds_list[1])
            # reuse the same variable names here to limit peak memory
            init_diff = refma - srcma
            init_stats = malib.get_stats_dict(init_diff)
            print("Final descriptive statistics {}".format(init_stats))
            init_diff_json_fn = os.path.join(
                outfolder,
                '{}_postcoreg_descriptive_stats.json'.format(header_str))
            init_diff_json = json.dumps(init_stats)

            with open(init_diff_json_fn, 'w') as f:
                f.write(init_diff_json)
            logging.info("Saved final stats at {}".format(init_diff_json))
            refslope = gdaldem(ds_list[0])
            # stats for elevation difference vs reference DEM elevation
            elev_bin, diff_mean, diff_median, diff_std, diff_perc = cummulative_profile(
                refma, init_diff, args.elev_bin_width)
            # stats for elevation difference vs reference DEM slope
            slope_bin, diff_mean_s, diff_median_s, diff_std_s, diff_perc_s = cummulative_profile(
                refslope, init_diff, args.slope_bin_width)
            f, ax = plt.subplots(1, 2, figsize=(10, 4))
            im = ax[0].scatter(elev_bin,
                               diff_mean,
                               c=diff_perc,
                               cmap='inferno')
            ax[0].set_xlabel('Elevation (m)')
            divider = make_axes_locatable(ax[0])
            cax = divider.append_axes('right', size='2.5%', pad=0.05)
            plt.colorbar(im,
                         cax=cax,
                         orientation='vertical',
                         label='pixel count percentage')
            im2 = ax[1].scatter(slope_bin,
                                diff_mean_s,
                                c=diff_perc_s,
                                cmap='inferno')
            ax[1].set_xlabel('Slope (degrees)')
            divider = make_axes_locatable(ax[1])
            cax = divider.append_axes('right', size='2.5%', pad=0.05)
            plt.colorbar(im2,
                         cax=cax,
                         orientation='vertical',
                         label='pixel count percentage')

            for axa in ax.ravel():
                axa.axhline(y=0, c='k')
                axa.set_ylabel('Elevation Difference (m)')
            plt.tight_layout()
            precoreg_plot = os.path.join(
                outfolder, header_str + '_postcoreg_binned_plot.png')
            f.savefig(precoreg_plot,
                      dpi=300,
                      bbox_inches='tight',
                      pad_inches=0.1)
        except Exception:
            logging.info(
                "Failed to compute post-coreg stats, see corresponding job log"
            )
        logging.info("Script is complete !")
Example #16
0
File: vmap.py Project: whigg/vmap
def main():
    parser = getparser()
    args = parser.parse_args()
    if args.seedmode == 'existing_velocity':
        if args.vx_fn is None or args.vy_fn is None:
            parser.error('"-seedmode existing_velocity" requires "-vx_fn" and "-vy_fn"')

    print('\n%s' % datetime.now())
    print('%s UTC\n' % datetime.utcnow())

    align = args.align
    seedmode = args.seedmode
    spr = args.refinement
    erode = args.erode
    #Correlator tile timeout
    #With proper seeding, correlation should be very fast
    #timeout = 360 
    timeout = 1200 
    threads = args.threads

    kernel = (args.kernel, args.kernel)
    #SGM correlator
    if spr > 3:
        #kernel = (7,7)
        kernel = (11,11)
        erode = 0

    #Smooth the output F.tif 
    smoothF = args.filter 

    res = args.tr
    #Resample input to something easier to work with
    #res = 4.0

    #Open input files
    fn1 = args.fn1
    fn2 = args.fn2 

    if not iolib.fn_check(fn1) or not iolib.fn_check(fn2):
        sys.exit("Unable to locate input files")

    if args.outdir is not None:
        outdir = args.outdir
    else:
        outdir = '%s__%s_vmap_%sm_%ipx_spm%i' % (os.path.splitext(os.path.split(fn1)[1])[0], \
                os.path.splitext(os.path.split(fn2)[1])[0], res, kernel[0], spr)

    #Note, can encounter filename length issues in boost, just use vmap prefix
    outprefix = '%s/vmap' % (outdir)
    if not os.path.exists(outdir):
        os.makedirs(outdir)

    #Check to see if inputs have geolocation and projection information
    ds1 = iolib.fn_getds(fn1)
    ds2 = iolib.fn_getds(fn2)

    if geolib.srs_check(ds1) and geolib.srs_check(ds2):
        ds1_clip_fn = os.path.join(outdir, os.path.splitext(os.path.basename(fn1))[0]+'_warp.tif')
        ds2_clip_fn = os.path.join(outdir, os.path.splitext(os.path.basename(fn2))[0]+'_warp.tif')

        if not os.path.exists(ds1_clip_fn) or not os.path.exists(ds2_clip_fn):
            #This should write out files to new subdir
            ds1_clip, ds2_clip = warplib.diskwarp_multi_fn([fn1, fn2], extent='intersection', res=res, r='average', outdir=outdir)
            ds1_clip = None
            ds2_clip = None
            #However, if inputs have identical extent/res/proj, then link to original files
            if not os.path.exists(ds1_clip_fn):
                os.symlink(os.path.abspath(fn1), ds1_clip_fn)
            if not os.path.exists(ds2_clip_fn):
                os.symlink(os.path.abspath(fn2), ds2_clip_fn)
            align = 'None'

        #Mask support - limit correlation only to rock/ice surfaces, no water/veg
        #This masks input images - guarantee we won't waste time correlating over vegetation
        #TODO: Add support to load arbitrary raster or shp mask
        if args.mask_input:
            ds1_masked_fn = os.path.splitext(ds1_clip_fn)[0]+'_masked.tif'
            ds2_masked_fn = os.path.splitext(ds2_clip_fn)[0]+'_masked.tif'

            if not os.path.exists(ds1_masked_fn) or not os.path.exists(ds2_masked_fn):
                #Load NLCD or bareground mask
                from demcoreg.dem_mask import get_lulc_mask

                ds1_clip = iolib.fn_getds(ds1_clip_fn)
                lulc_mask_fn = os.path.join(outdir, 'lulc_mask.tif')
                #if not os.path.exists(nlcd_mask_fn):
                lulc_mask = get_lulc_mask(ds1_clip, mask_glaciers=False, filter='not_forest')
                iolib.writeGTiff(lulc_mask, lulc_mask_fn, ds1_clip) 
                ds1_clip = None

                #Now apply to original images 
                #This could be problematic for huge inputs, see apply_mask.py
                #lulc_mask = lulc_mask.astype(int)
                for fn in (ds1_clip_fn, ds2_clip_fn):
                    ds = iolib.fn_getds(fn)
                    a = iolib.ds_getma(ds)
                    a = np.ma.array(a, mask=~(lulc_mask))
                    if a.count() > 0:
                        out_fn = os.path.splitext(fn)[0]+'_masked.tif'
                        iolib.writeGTiff(a,out_fn,ds)
                        a = None
                    else:
                        sys.exit("No unmasked pixels over bare earth")
            ds1_clip_fn = ds1_masked_fn
            ds2_clip_fn = ds2_masked_fn
    else:
        ds1_clip_fn = fn1
        ds2_clip_fn = fn2
        #Now let user specify alignment methods as option - don't hardcode
        #align = 'Homography'
        #align = 'AffineEpipolar'
    ds1 = None
    ds2 = None

    #Should have extra kwargs option here
    stereo_opt = get_stereo_opt(threads=threads, kernel=kernel, timeout=timeout, \
            erode=erode, spr=spr, align=align)
    
    #Stereo arguments
    #Latest version of ASP should accept tif without camera models
    #stereo_args = [ds1_clip_fn, ds2_clip_fn, outprefix]
    #Nope - still need to provide dummy camera models, and they must be unique files
    #Use the dummy.tsai file bundled in the vmap repo
    dummy_tsai = os.path.join(os.path.split(os.path.realpath(__file__))[0], 'dummy.tsai')
    dummy_tsai2 = os.path.splitext(dummy_tsai)[0]+'2.tsai'
    if not os.path.exists(dummy_tsai2):
        #os.symlink returns None, so create the link without reassigning dummy_tsai2
        os.symlink(dummy_tsai, dummy_tsai2)
    stereo_args = [ds1_clip_fn, ds2_clip_fn, dummy_tsai, dummy_tsai2, outprefix]

    #Run stereo_pprc
    if not os.path.exists(outprefix+'-R_sub.tif'):
        run_cmd('stereo_pprc', stereo_opt+stereo_args, msg='0: Preprocessing')
        #Copy proj info to outputs, this should happen automatically now?
        for ext in ('L', 'R', 'L_sub', 'R_sub', 'lMask', 'rMask', 'lMask_sub', 'rMask_sub'):
            geolib.copyproj(ds1_clip_fn, '%s-%s.tif' % (outprefix,ext))

    #Prepare seeding for stereo_corr
    #TODO: these are untested after refactoring
    if not os.path.exists(outprefix+'-D_sub.tif'):
        #Don't need to do anything for default seed-mode 1
        if seedmode == 'sparse_disp':
            #Sparse correlation of full-res images
            stereo_opt.extend(['--corr-seed-mode', '3'])
            sparse_disp_opt = []
            sparse_disp_opt.extend(['--Debug', '--coarse', '512', '--fine', '256', '--no_epipolar_fltr']) 
            sparse_disp_opt.extend(['-P', str(threads)])
            sparse_disp_args = [outprefix+'-L.tif', outprefix+'-R.tif', outprefix]
            run_cmd('sparse_disp', sparse_disp_opt+sparse_disp_args, msg='0.5: D_sub generation')
        elif seedmode == 'existing_velocity':
            #User-input low-res velocity maps for seeding
            #TODO: Add functions that fetch best available velocities for Ant/GrIS or user-defined low-res velocities
            #Automatically query GoLive velocities here
            vx_fn = args.vx_fn 
            vy_fn = args.vy_fn 
            #Check for existence

            #HMA seeding - note these hardcoded paths override the user-specified vx_fn/vy_fn above
            vdir = '/nobackup/deshean/rpcdem/hma/velocity_jpl_amaury_2013-2015'
            vx_fn = os.path.join(vdir, 'PKH_WRS2_B8_2013_2015_snr5_n1_r170_res12.x_vel.TIF')
            vy_fn = os.path.join(vdir, 'PKH_WRS2_B8_2013_2015_snr5_n1_r170_res12.y_vel.TIF')

            if os.path.exists(vx_fn) and os.path.exists(vy_fn):
                ds1_clip = iolib.fn_getds(ds1_clip_fn)
                ds1_res = geolib.get_res(ds1_clip, square=True)[0]

                #Compute L_sub res - use this for output dimensions
                L_sub_fn = outprefix+'-L_sub.tif' 
                L_sub_ds = gdal.Open(L_sub_fn)
                L_sub_x_scale = float(ds1_clip.RasterXSize) / L_sub_ds.RasterXSize
                L_sub_y_scale = float(ds1_clip.RasterYSize) / L_sub_ds.RasterYSize
                L_sub_scale = np.max([L_sub_x_scale, L_sub_y_scale])
                L_sub_res = ds1_res * L_sub_scale

                #Since we are likely upsampling here, use cubicspline
                vx_ds_clip, vy_ds_clip = warplib.memwarp_multi_fn([vx_fn, vy_fn], extent=ds1_clip, \
                        t_srs=ds1_clip, res=L_sub_res, r='cubicspline')

                ds1_clip = None

                #Get vx and vy arrays
                vx = iolib.ds_getma(vx_ds_clip)
                vy = iolib.ds_getma(vy_ds_clip)

                #Determine time interval between inputs
                #Used to scale the known low-res velocities
                t_factor = get_t_factor_fn(ds1_clip_fn, ds2_clip_fn, ds=vx_ds_clip)

                if t_factor is not None:
                    #Compute expected offset in scaled pixels 
                    dx = (vx*t_factor)/L_sub_res
                    dy = (vy*t_factor)/L_sub_res
                    #Note: Joughin and Rignot's values are positive y up!
                    #ASP is positive y down, so need to multiply these values by -1
                    #dy = -(vy*t_factor)/L_sub_res

                    #Should smooth/fill dx and dy

                    #If absolute search window is only 30x30
                    #Don't seed, just use fixed search window 
                    #search_window_area_thresh = 900
                    search_window_area_thresh = 0 
                    #Full min/max window is sensitive to outliers, use percentiles instead
                    #search_window = np.array([dx.min(), dy.min(), dx.max(), dy.max()])
                    dx_p = calcperc(dx, perc=(0.5, 99.5))
                    dy_p = calcperc(dy, perc=(0.5, 99.5))
                    search_window = np.array([dx_p[0], dy_p[0], dx_p[1], dy_p[1]])
                    search_window_area = (search_window[2]-search_window[0]) * (search_window[3]-search_window[1])
                    if search_window_area < search_window_area_thresh:
                        stereo_opt.extend(['--corr-seed-mode', '0'])
                        stereo_opt.append('--corr-search')
                        stereo_opt.extend([str(x) for x in search_window])
                        #pad_perc=0.1
                        #stereo_opt.extend(['--corr-sub-seed-percent', str(pad_perc)])
                    #Otherwise, generate a D_sub map from low-res velocity
                    else:
                        stereo_opt.extend(['--corr-seed-mode', '3'])
                        #This is relative to the D_sub scaled disparities
                        d_sub_fn = L_sub_fn.split('-L_sub')[0]+'-D_sub.tif' 
                        gen_d_sub(d_sub_fn, dx, dy)

    #If the above didn't generate a D_sub.tif for seeding, run stereo_corr to generate Low-res D_sub.tif
    if not os.path.exists(outprefix+'-D_sub.tif'):
        newopt = ['--compute-low-res-disparity-only',]
        run_cmd('stereo_corr', newopt+stereo_opt+stereo_args, msg='1.1: Low-res Correlation')
    #Copy projection info to D_sub
    geolib.copyproj(outprefix+'-L_sub.tif', outprefix+'-D_sub.tif')
      
    #Mask D_sub to limit correlation over bare earth surfaces
    #This _should_ be a better approach than masking input images, but stereo_corr doesn't honor D_sub
    #Still need to mask input images before stereo_pprc
    #Left this in here for reference, or if this changes in ASP
    if False:
        D_sub_ds = gdal.Open(outprefix+'-D_sub.tif', gdal.GA_Update)

        #Mask support - limit correlation only to rock/ice surfaces, no water/veg
        from demcoreg.dem_mask import get_nlcd, mask_nlcd
        nlcd_fn = get_nlcd()
        nlcd_ds = warplib.diskwarp_multi_fn([nlcd_fn,], extent=D_sub_ds, res=D_sub_ds, t_srs=D_sub_ds, r='near', outdir=outdir)[0]
        #validmask = mask_nlcd(nlcd_ds, valid='rock+ice')
        validmask = mask_nlcd(nlcd_ds, valid='not_forest', mask_glaciers=False)
        nlcd_mask_fn = os.path.join(outdir, 'nlcd_validmask.tif')
        iolib.writeGTiff(validmask, nlcd_mask_fn, nlcd_ds) 

        #Now apply to D_sub (band 3 is valid mask)
        #validmask = validmask.astype(int)
        for b in (1,2,3):
            dsub = iolib.ds_getma(D_sub_ds, b)
            dsub = np.ma.array(dsub, mask=~(validmask))
            D_sub_ds.GetRasterBand(b).WriteArray(dsub.filled())
        D_sub_ds = None

    #OK, finally run stereo_corr full-res integer correlation with appropriate seeding
    if not os.path.exists(outprefix+'-D.tif'):
        run_cmd('stereo_corr', stereo_opt+stereo_args, msg='1: Correlation')
        geolib.copyproj(ds1_clip_fn, outprefix+'-D.tif')

    #Run stereo_rfne
    if spr > 0:
        if not os.path.exists(outprefix+'-RD.tif'):
            run_cmd('stereo_rfne', stereo_opt+stereo_args, msg='2: Refinement')
            geolib.copyproj(ds1_clip_fn, outprefix+'-RD.tif')
        d_fn = make_ln(outdir, outprefix, '-RD.tif')
    else:
        ln_fn = outprefix+'-RD.tif'
        if os.path.lexists(ln_fn):
            os.remove(ln_fn)
        os.symlink(os.path.split(outprefix)[1]+'-D.tif', ln_fn)

    #Run stereo_fltr
    if not os.path.exists(outprefix+'-F.tif'):
        run_cmd('stereo_fltr', stereo_opt+stereo_args, msg='3: Filtering')
        geolib.copyproj(ds1_clip_fn, outprefix+'-F.tif')

    d_fn = make_ln(outdir, outprefix, '-F.tif')

    if smoothF and not os.path.exists(outprefix+'-F_smooth.tif'):
        print('Smoothing F.tif')
        from pygeotools.lib import filtlib 
        #Fill holes and smooth F
        F_fill_fn = outprefix+'-F_smooth.tif'
        F_ds = gdal.Open(outprefix+'-F.tif', gdal.GA_ReadOnly)
        #import dem_downsample_fill
        #F_fill_ds = dem_downsample_fill.gdalfill_ds(F_fill_ds)
        print('Creating F_smooth.tif')
        F_fill_ds = iolib.gtif_drv.CreateCopy(F_fill_fn, F_ds, 0, options=iolib.gdal_opt)
        F_ds = None
        for n in (1, 2):
            print('Smoothing band %i' % n)
            b = F_fill_ds.GetRasterBand(n)
            b_fill_bma = iolib.b_getma(b)
            #b_fill_bma = iolib.b_getma(dem_downsample_fill.gdalfill(b))
            #Filter extreme values (careful, could lose areas of valid data with fastest v)
            #b_fill_bma = filtlib.perc_fltr(b_fill_bma, perc=(0.01, 99.99))
            #These filters remove extreme values and fill data gaps
            #b_fill_bma = filtlib.median_fltr_skimage(b_fill_bma, radius=7, erode=0)
            #b_fill_bma = filtlib.median_fltr(b_fill_bma, fsize=7, origmask=True)
            #Gaussian filter
            b_fill_bma = filtlib.gauss_fltr_astropy(b_fill_bma, size=9)
            b.WriteArray(b_fill_bma)
        F_fill_ds = None
        d_fn = make_ln(outdir, outprefix, '-F_smooth.tif')

    print('\n%s' % datetime.now())
    print('%s UTC\n' % datetime.utcnow())

    #If time interval is specified, convert pixel displacements to rates
    if args.dt != 'none':
        #Check if vm.tif already exists
        #Should probably just overwrite by default
        #if os.path.exists(os.path.splitext(d_fn)[0]+'_vm.tif'):
        #    print("\nFound existing velocity magnitude map!\n"
        #else:
        #Generate output velocity products and figure
        #Requires that vmap repo is in PATH
        cmd = ['disp2v.py', d_fn]
        #Note: this will attempt to automatically determine control surfaces
        #disp2v.py will accept arbitrary mask, could pass through here
        if args.remove_offsets:
            cmd.append('-remove_offsets')
        cmd.extend(['-dt', args.dt])
        print("Converting disparities to velocities")
        print(cmd)
        subprocess.call(cmd)
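A short sketch of the velocity-seeding math above: velocities (m/yr) times the image time interval, divided by the L_sub ground sample distance, give expected disparities in L_sub pixels, and a percentile-clipped bounding box of those offsets can serve as the fixed --corr-search window (helper name hypothetical):

import numpy as np

def corr_search_window(vx, vy, t_factor, l_sub_res, perc=(0.5, 99.5)):
    """Percentile-clipped ASP search window [xmin, ymin, xmax, ymax] in L_sub px"""
    dx = (vx * t_factor) / l_sub_res
    dy = (vy * t_factor) / l_sub_res
    xlo, xhi = np.nanpercentile(np.ma.filled(dx, np.nan), perc)
    ylo, yhi = np.nanpercentile(np.ma.filled(dy, np.nan), perc)
    return [xlo, ylo, xhi, yhi]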
Example #17
0
def main():
    parser = getparser()
    args = parser.parse_args()

    src_fn = args.src_fn
    if not iolib.fn_check(src_fn):
        sys.exit("Unable to find src_fn: %s" % src_fn)

    mask_fn = args.mask_fn
    if not iolib.fn_check(mask_fn):
        sys.exit("Unable to find mask_fn: %s" % mask_fn)

    #Determine output extent, default is input raster extent
    extent = args.extent
    if extent == 'raster':
        extent = src_fn
    elif extent == 'mask':
        extent = mask_fn
    else:
        #This is a hack for intersection computation
        src_ds_list = [
            gdal.Open(fn, gdal.GA_ReadOnly) for fn in [src_fn, mask_fn]
        ]
        #t_srs = geolib.get_ds_srs(src_ds_list[0])
        extent = warplib.parse_extent(extent, src_ds_list, src_fn)

    print("Warping mask_fn")
    mask_ds = warplib.memwarp_multi_fn([
        mask_fn,
    ],
                                       res=src_fn,
                                       extent=extent,
                                       t_srs=src_fn)[0]

    print("Loading mask array")
    mask_ma_full = iolib.ds_getma(mask_ds)
    mask_ds = None

    print("Extracting mask")
    mask = np.ma.getmaskarray(mask_ma_full)

    #Free up memory
    mask_ma_full = None

    #Add dilation step for buffer

    #newmask = np.logical_or(np.ma.getmaskarray(src_ma_full), mask)

    if args.invert:
        print("Inverting mask")
        mask = ~(mask)

    print("Loading src array and applying updated mask")
    if extent == src_fn:
        src_ds = gdal.Open(src_fn)
    else:
        src_ds = warplib.memwarp_multi_fn([
            src_fn,
        ],
                                          res=src_fn,
                                          extent=extent,
                                          t_srs=src_fn)[0]

    #Now load source array with new mask
    src_ma_full = np.ma.array(iolib.ds_getma(src_ds), mask=mask)
    mask = None

    if args.out_fn is not None:
        src_fn_masked = args.out_fn
    else:
        src_fn_masked = os.path.splitext(src_fn)[0] + '_masked.tif'
    print("Writing out masked version of input raster: %s" % src_fn_masked)
    iolib.writeGTiff(src_ma_full, src_fn_masked, src_ds, create=True)
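The mask handling above relies on the NumPy masked-array convention that True marks an invalid pixel; a minimal self-contained illustration of combining an existing nodata mask with an external mask:

import numpy as np

data = np.ma.masked_equal(np.array([[1, 0], [3, 4]]), 0)  # 0 is nodata
extra = np.array([[False, True], [True, False]])          # True = hide pixel
combined = np.logical_or(np.ma.getmaskarray(data), extra)
masked = np.ma.array(data, mask=combined)
print(masked.count())  # 2 valid pixels remain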
Example #18
0
def mb_calc(gf, z1_date=z1_date, z2_date=z2_date, verbose=verbose):
    #print("\n%i of %i: %s\n" % (n+1, len(glacfeat_list), gf.feat_fn))
    print(gf.feat_fn)

    #This should already be handled by earlier attribute filter, but RGI area could be wrong
    #24k shp has area in m^2, RGI in km^2
    #if gf.glac_area/1E6 < min_glac_area:
    if gf.glac_area < min_glac_area:
        if verbose:
            print("Glacier area below %0.1f km2 threshold" % min_glac_area)
        return None

    #Warp everything to common res/extent/proj
    ds_list = warplib.memwarp_multi_fn([z1_fn, z2_fn], res='min', \
            extent=gf.glac_geom_extent, t_srs=aea_srs, verbose=verbose)

    if site == 'conus':
        #Add prism datasets
        prism_fn_list = [prism_ppt_annual_fn, prism_tmean_annual_fn]
        prism_fn_list.extend([
            prism_ppt_summer_fn, prism_ppt_winter_fn, prism_tmean_summer_fn,
            prism_tmean_winter_fn
        ])
        ds_list.extend(warplib.memwarp_multi_fn(prism_fn_list, res=ds_list[0], \
                extent=gf.glac_geom_extent, t_srs=aea_srs, verbose=verbose))

    if site == 'hma':
        #Add debris cover datasets
        #Should tar this up, and extract only necessary file
        #Downloaded from: http://mountainhydrology.org/data-nature-2017/
        kra_nature_dir = '/nobackup/deshean/data/Kraaijenbrink_hma/regions/out'
        #This assumes that numbers are identical between RGI50 and RGI60
        debris_class_fn = os.path.join(
            kra_nature_dir, 'RGI50-%s/classification.tif' % gf.glacnum)
        debris_thick_fn = os.path.join(
            kra_nature_dir, 'RGI50-%s/debris-thickness-50cm.tif' % gf.glacnum)
        ice_thick_fn = os.path.join(kra_nature_dir,
                                    'RGI50-%s/ice-thickness.tif' % gf.glacnum)
        hma_fn_list = []
        if os.path.exists(debris_class_fn):
            hma_fn_list.append(debris_class_fn)
        if os.path.exists(debris_thick_fn):
            hma_fn_list.append(debris_thick_fn)
        if os.path.exists(ice_thick_fn):
            hma_fn_list.append(ice_thick_fn)
        if len(hma_fn_list) > 0:
            #Add velocity
            hma_fn_list.extend([vx_fn, vy_fn])
            ds_list.extend(warplib.memwarp_multi_fn(hma_fn_list, res=ds_list[0], \
                    extent=gf.glac_geom_extent, t_srs=aea_srs, verbose=verbose))

    #Check to see if z2 is empty, as z1 should be continuous
    gf.z2 = iolib.ds_getma(ds_list[1])
    if gf.z2.count() == 0:
        if verbose:
            print("No z2 pixels")
        return None

    glac_geom_mask = geolib.geom2mask(gf.glac_geom, ds_list[0])
    gf.z1 = np.ma.array(iolib.ds_getma(ds_list[0]), mask=glac_geom_mask)
    #Apply SRTM penetration correction
    if z1_srtm_penetration_corr:
        gf.z1 = srtm_corr(gf.z1)
    if z2_srtm_penetration_corr:
        gf.z2 = srtm_corr(gf.z2)
    gf.z2 = np.ma.array(gf.z2, mask=glac_geom_mask)
    gf.dz = gf.z2 - gf.z1
    if gf.dz.count() == 0:
        if verbose:
            print("No valid dz pixels")
        return None

    #Should add better filtering here
    #Elevation dependent abs. threshold filter?

    filter_outliers = True
    #Remove clearly bogus pixels
    if filter_outliers:
        bad_perc = (0.1, 99.9)
        #bad_perc = (1, 99)
        rangelim = malib.calcperc(gf.dz, bad_perc)
        gf.dz = np.ma.masked_outside(gf.dz, *rangelim)

    gf.res = geolib.get_res(ds_list[0])
    valid_area = gf.dz.count() * gf.res[0] * gf.res[1]
    valid_area_perc = valid_area / gf.glac_area
    if valid_area_perc < min_valid_area_perc:
        if verbose:
            print("Not enough valid pixels: %0.1f%% of glacier polygon area"
                  % (100 * valid_area_perc))
        return None

    #Filter dz - throw out abs differences >150 m

    #Compute dz, volume change, mass balance and stats
    gf.z1_stats = malib.get_stats(gf.z1)
    gf.z2_stats = malib.get_stats(gf.z2)
    z2_elev_med = gf.z2_stats[5]
    z2_elev_p16 = gf.z2_stats[11]
    z2_elev_p84 = gf.z2_stats[12]

    #Calculate stats for aspect and slope using z2
    #Requires GDAL 2.1+
    gf.z2_aspect = np.ma.array(geolib.gdaldem_mem_ds(ds_list[1],
                                                     processing='aspect',
                                                     returnma=True),
                               mask=glac_geom_mask)
    gf.z2_aspect_stats = malib.get_stats(gf.z2_aspect)
    z2_aspect_med = gf.z2_aspect_stats[5]
    gf.z2_slope = np.ma.array(geolib.gdaldem_mem_ds(ds_list[1],
                                                    processing='slope',
                                                    returnma=True),
                              mask=glac_geom_mask)
    gf.z2_slope_stats = malib.get_stats(gf.z2_slope)
    z2_slope_med = gf.z2_slope_stats[5]

    #Rasterize source dates
    if z1_date is None:
        z1_date = get_date_a(ds_list[0], z1_date_shp_lyr, glac_geom_mask,
                             z1_datefield)
        gf.t1 = z1_date.mean()
    else:
        gf.t1 = z1_date

    if z2_date is None:
        z2_date = get_date_a(ds_list[0], z2_date_shp_lyr, glac_geom_mask,
                             z2_datefield)
        #Attempt to use YYYYMMDD string
        #z2_dta = np.datetime64(z2_date.astype("S8").tolist())
        gf.t2 = z2_date.mean()
    else:
        gf.t2 = z2_date

    if isinstance(gf.t1, datetime):
        gf.t1 = timelib.dt2decyear(gf.t1)

    if isinstance(gf.t2, datetime):
        gf.t2 = timelib.dt2decyear(gf.t2)

    gf.t1 = float(gf.t1)
    gf.t2 = float(gf.t2)

    #Calculate dt grids
    #gf.dt = z2_date - z1_date
    #gf.dt = gf.dt.mean()
    #This should be decimal years
    gf.dt = gf.t2 - gf.t1
    #if isinstance(gf.dt, timedelta):
    #    gf.dt = gf.dt.total_seconds()/timelib.spy
    #Calculate dh/dt, in m/yr
    gf.dhdt = gf.dz / gf.dt
    gf.dhdt_stats = malib.get_stats(gf.dhdt)
    dhdt_mean = gf.dhdt_stats[3]
    dhdt_med = gf.dhdt_stats[5]

    rho_i = 0.91
    rho_s = 0.50
    rho_f = 0.60

    #This is recommendation by Huss et al (2013)
    rho_is = 0.85
    rho_sigma = 0.06

    #Can estimate ELA values computed from hypsometry and typical AAR
    #For now, assume ELA is mean
    gf.z1_ela = gf.z1_stats[3]
    gf.z2_ela = gf.z2_stats[3]
    #Note: in theory, the ELA should get higher with mass loss
    #In practice, using mean and same polygon, ELA gets lower as glacier surface thins
    if verbose:
        print("ELA(t1): %0.1f" % gf.z1_ela)
        print("ELA(t2): %0.1f" % gf.z2_ela)

    if gf.z1_ela > gf.z2_ela:
        min_ela = gf.z2_ela
        max_ela = gf.z1_ela
    else:
        min_ela = gf.z1_ela
        max_ela = gf.z2_ela

    #Calculate mass balance map from dhdt
    gf.mb = gf.dhdt * rho_is
    """
    # This attempted to assign different densities above and below ELA
    if gf.z1_ela is None:
        gf.mb = gf.dhdt * rho_is
    else:
        #Initiate with average density
        gf.mb = gf.dhdt*(rho_is + rho_f)/2.
        #Everything that is above ELA at t2 is elevation change over firn, use firn density
        accum_mask = (gf.z2 > gf.z2_ela).filled(0).astype(bool)
        gf.mb[accum_mask] = (gf.dhdt*rho_f)[accum_mask]
        #Everything that is below ELA at t1 is elevation change over ice, use ice density
        abl_mask = (gf.z1 <= gf.z1_ela).filled(0).astype(bool)
        gf.mb[abl_mask] = (gf.dhdt*rho_is)[abl_mask]
        #Everything in between, use average of ice and firn density
        #mb[(z1 > z1_ela) || (z2 <= z2_ela)] = dhdt*(rhois + rho_f)/2.
        #Linear ramp
        #rho_f + z2*((rho_is - rho_f)/(z2_ela - z1_ela))
        #mb = np.where(dhdt < ela, dhdt*rho_i, dhdt*rho_s)
    """

    #Use this for winter balance
    #mb = dhdt * rho_s

    gf.mb_stats = malib.get_stats(gf.mb)
    gf.mb_mean = gf.mb_stats[3]

    #Calculate uncertainty of total elevation change
    #TODO: Better spatial distribution characterization
    #Add slope-dependent component here
    dz_sigma = np.sqrt(z1_sigma**2 + z2_sigma**2)
    #Uncertainty of dh/dt
    dhdt_sigma = dz_sigma / gf.dt

    #This is mb uncertainty map
    gf.mb_sigma = np.ma.abs(gf.mb) * np.sqrt((rho_sigma / rho_is)**2 +
                                             (dhdt_sigma / gf.dhdt)**2)
    gf.mb_sigma_stats = malib.get_stats(gf.mb_sigma)
    #This is average mb uncertainty
    gf.mb_mean_sigma = gf.mb_sigma_stats[3]

    #Now calculate mb for entire polygon
    area_sigma_perc = 0.09
    gf.mb_mean_totalarea = gf.mb_mean * gf.glac_area
    #Already have area uncertainty as percentage, just use directly
    gf.mb_mean_totalarea_sigma = np.ma.abs(gf.mb_mean_totalarea) * np.sqrt(
        (gf.mb_mean_sigma / gf.mb_mean)**2 + area_sigma_perc**2)

    mb_sum = np.sum(gf.mb) * gf.res[0] * gf.res[1]

    outlist = [gf.glacnum, gf.cx, gf.cy, z2_elev_med, z2_elev_p16, z2_elev_p84, z2_slope_med, z2_aspect_med, \
            gf.mb_mean, gf.mb_mean_sigma, gf.glac_area, gf.mb_mean_totalarea, gf.mb_mean_totalarea_sigma, \
            gf.t1, gf.t2, gf.dt]

    if site == 'conus':
        prism_ppt_annual = np.ma.array(iolib.ds_getma(ds_list[2]),
                                       mask=glac_geom_mask) / 1000.
        prism_ppt_annual_stats = malib.get_stats(prism_ppt_annual)
        prism_ppt_annual_mean = prism_ppt_annual_stats[3]

        prism_tmean_annual = np.ma.array(iolib.ds_getma(ds_list[3]),
                                         mask=glac_geom_mask)
        prism_tmean_annual_stats = malib.get_stats(prism_tmean_annual)
        prism_tmean_annual_mean = prism_tmean_annual_stats[3]

        outlist.extend([prism_ppt_annual_mean, prism_tmean_annual_mean])

        #This is mean monthly summer precip, need to multiply by nmonths to get cumulative
        n_summer = 4
        prism_ppt_summer = n_summer * np.ma.array(iolib.ds_getma(ds_list[4]),
                                                  mask=glac_geom_mask) / 1000.
        prism_ppt_summer_stats = malib.get_stats(prism_ppt_summer)
        prism_ppt_summer_mean = prism_ppt_summer_stats[3]

        n_winter = 8
        prism_ppt_winter = n_winter * np.ma.array(iolib.ds_getma(ds_list[5]),
                                                  mask=glac_geom_mask) / 1000.
        prism_ppt_winter_stats = malib.get_stats(prism_ppt_winter)
        prism_ppt_winter_mean = prism_ppt_winter_stats[3]

        prism_tmean_summer = np.ma.array(iolib.ds_getma(ds_list[6]),
                                         mask=glac_geom_mask)
        prism_tmean_summer_stats = malib.get_stats(prism_tmean_summer)
        prism_tmean_summer_mean = prism_tmean_summer_stats[3]

        prism_tmean_winter = np.ma.array(iolib.ds_getma(ds_list[7]),
                                         mask=glac_geom_mask)
        prism_tmean_winter_stats = malib.get_stats(prism_tmean_winter)
        prism_tmean_winter_mean = prism_tmean_winter_stats[3]

        outlist.extend([
            prism_ppt_summer_mean, prism_ppt_winter_mean,
            prism_tmean_summer_mean, prism_tmean_winter_mean
        ])

    if site == 'hma':
        #Classes are: 1 = clean ice, 2 = debris, 3 = pond
        #Load up debris cover maps, ice thickness
        if len(ds_list) > 2:
            gf.debris_class = np.ma.array(iolib.ds_getma(ds_list[2]),
                                          mask=glac_geom_mask)
            gf.debris_thick = np.ma.array(iolib.ds_getma(ds_list[3]),
                                          mask=glac_geom_mask)
            #Load ice thickness from glabtop2
            gf.H = np.ma.array(iolib.ds_getma(ds_list[4]), mask=glac_geom_mask)
            #Load surface velocity maps from Dehecq
            gf.vx = np.ma.array(iolib.ds_getma(ds_list[5]),
                                mask=glac_geom_mask)
            gf.vy = np.ma.array(iolib.ds_getma(ds_list[6]),
                                mask=glac_geom_mask)
            gf.vm = np.ma.sqrt(gf.vx**2 + gf.vy**2)
            v_col_factor = 0.8
            #Should smooth, better handling of data gaps
            gf.divU = np.gradient(v_col_factor * gf.vx)[1] + np.gradient(
                v_col_factor * gf.vy)[0]
            gf.divQ = gf.H * gf.divU
            #Compute debris/pond/clean percentages for entire polygon
            if gf.debris_class.count() > 0:
                gf.perc_clean = 100. * (gf.debris_class == 1).sum() / gf.debris_class.count()
                gf.perc_debris = 100. * (gf.debris_class == 2).sum() / gf.debris_class.count()
                gf.perc_pond = 100. * (gf.debris_class == 3).sum() / gf.debris_class.count()
            outlist.extend([
                gf.H.mean(),
                gf.debris_thick.mean(), gf.perc_debris, gf.perc_pond,
                gf.perc_clean
            ])

    if verbose:
        print('Mean mb: %0.2f +/- %0.2f mwe/yr' %
              (gf.mb_mean, gf.mb_mean_sigma))
        print('Sum/Area mb: %0.2f mwe/yr' % (mb_sum / gf.glac_area))
        print('Mean mb * Area: %0.2f +/- %0.2f mwe/yr' %
              (gf.mb_mean_totalarea, gf.mb_mean_totalarea_sigma))
        print('Sum mb: %0.2f mwe/yr' % mb_sum)
        #print('-------------------------------')

    #Write to master list
    #out.append(outlist)
    #Write to temporary file
    #writer.writerow(outlist)
    #f.flush()

    if writeout and (gf.glac_area / 1E6 > min_glac_area_writeout):
        out_dz_fn = os.path.join(outdir, gf.feat_fn + '_dz.tif')
        iolib.writeGTiff(gf.dz, out_dz_fn, ds_list[0])

        out_z1_fn = os.path.join(outdir, gf.feat_fn + '_z1.tif')
        iolib.writeGTiff(gf.z1, out_z1_fn, ds_list[0])

        out_z2_fn = os.path.join(outdir, gf.feat_fn + '_z2.tif')
        iolib.writeGTiff(gf.z2, out_z2_fn, ds_list[0])

        temp_fn = os.path.join(outdir, gf.feat_fn + '_z2_aspect.tif')
        iolib.writeGTiff(gf.z2_aspect, temp_fn, ds_list[0])

        temp_fn = os.path.join(outdir, gf.feat_fn + '_z2_slope.tif')
        iolib.writeGTiff(gf.z2_slope, temp_fn, ds_list[0])

        #Need to fix this - write out constant date arrays regardless of source
        #out_z1_date_fn = os.path.join(outdir, gf.feat_fn+'_ned_date.tif')
        #iolib.writeGTiff(z1_date, out_z1_date_fn, ds_list[0])

        if site == 'conus':
            out_prism_ppt_annual_fn = os.path.join(
                outdir, gf.feat_fn + '_precip_annual.tif')
            iolib.writeGTiff(prism_ppt_annual, out_prism_ppt_annual_fn,
                             ds_list[0])
            out_prism_tmean_annual_fn = os.path.join(
                outdir, gf.feat_fn + '_tmean_annual.tif')
            iolib.writeGTiff(prism_tmean_annual, out_prism_tmean_annual_fn,
                             ds_list[0])

            out_prism_ppt_summer_fn = os.path.join(
                outdir, gf.feat_fn + '_precip_summer.tif')
            iolib.writeGTiff(prism_ppt_summer, out_prism_ppt_summer_fn,
                             ds_list[0])
            out_prism_ppt_winter_fn = os.path.join(
                outdir, gf.feat_fn + '_precip_winter.tif')
            iolib.writeGTiff(prism_ppt_winter, out_prism_ppt_winter_fn,
                             ds_list[0])

            out_prism_tmean_summer_fn = os.path.join(
                outdir, gf.feat_fn + '_tmean_summer.tif')
            iolib.writeGTiff(prism_tmean_summer, out_prism_tmean_summer_fn,
                             ds_list[0])
            out_prism_tmean_winter_fn = os.path.join(
                outdir, gf.feat_fn + '_tmean_winter.tif')
            iolib.writeGTiff(prism_tmean_winter, out_prism_tmean_winter_fn,
                             ds_list[0])

        if site == 'hma':
            if gf.H is not None:
                temp_fn = os.path.join(outdir, gf.feat_fn + '_H.tif')
                iolib.writeGTiff(gf.H, temp_fn, ds_list[0])

            if gf.debris_thick is not None:
                temp_fn = os.path.join(outdir,
                                       gf.feat_fn + '_debris_thick.tif')
                iolib.writeGTiff(gf.debris_thick, temp_fn, ds_list[0])

            if gf.debris_class is not None:
                temp_fn = os.path.join(outdir,
                                       gf.feat_fn + '_debris_class.tif')
                iolib.writeGTiff(gf.debris_class, temp_fn, ds_list[0])

            if gf.vm is not None:
                temp_fn = os.path.join(outdir, gf.feat_fn + '_vm.tif')
                iolib.writeGTiff(gf.vm, temp_fn, ds_list[0])

            if gf.divQ is not None:
                temp_fn = os.path.join(outdir, gf.feat_fn + '_divQ.tif')
                iolib.writeGTiff(gf.divQ, temp_fn, ds_list[0])

    #Do AED for all
    #Compute mb using scaled AED vs. polygon
    #Check for valid pixel count vs. feature area, fill if appropriate

    if mb_plot and (gf.glac_area / 1E6 > min_glac_area_writeout):
        z_bin_edges = hist_plot(gf, outdir)
        gf.z1_hs = geolib.gdaldem_mem_ds(ds_list[0],
                                         processing='hillshade',
                                         returnma=True)
        gf.z2_hs = geolib.gdaldem_mem_ds(ds_list[1],
                                         processing='hillshade',
                                         returnma=True)
        map_plot(gf, z_bin_edges, outdir)

    return outlist, gf
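The uncertainty propagation above is the standard relative-error formula for a product; a condensed sketch using the same density assumption (rho_is = 0.85 +/- 0.06, following the Huss (2013) recommendation cited in the code; helper name hypothetical):

import numpy as np

def mb_with_sigma(dhdt, dhdt_sigma, rho=0.85, rho_sigma=0.06):
    """Mass balance (m w.e./yr) and 1-sigma uncertainty from dh/dt (m/yr)"""
    mb = dhdt * rho
    #Relative errors of density and dh/dt add in quadrature
    mb_sigma = np.abs(mb) * np.sqrt((rho_sigma / rho)**2 + (dhdt_sigma / dhdt)**2)
    return mb, mb_sigma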
Example #19
0
def main():
    parser = getparser()
    args = parser.parse_args()

    t_unit = args.dt
    plot = args.plot
    remove_offsets = args.remove_offsets
    mask_fn = args.mask_fn
    if mask_fn is not None:
        remove_offsets = True

    #Input is 3-band disparity map, extract bands directly
    src_fn = args.disp_fn
    if not iolib.fn_check(src_fn):
        sys.exit("Unable to locate input file: %s" % src_fn)

    src_ds = iolib.fn_getds(src_fn)
    if src_ds.RasterCount != 3:
        sys.exit("Input file must be ASP disparity map (3 bands: x, y, mask)")
    #Extract pixel resolution
    h_res, v_res = geolib.get_res(src_ds)

    #Horizontal scale factor
    #If running on disparity_view output (gdal_translate -outsize 5% 5% F.tif F_5.tif)
    #h_res /= 20
    #v_res /= 20

    #Load horizontal and vertical disparities
    h = iolib.ds_getma(src_ds, bnum=1)
    v = iolib.ds_getma(src_ds, bnum=2)

    #ASP output has northward motion as negative values in band 2
    v *= -1

    t1, t2 = timelib.fn_getdatetime_list(src_fn)
    dt = t2 - t1
    #Default t_factor is in 1/years
    t_factor = timelib.get_t_factor(t1, t2)

    #Input timestamp arrays if inputs are mosaics
    if False:
        t1_fn = ''
        t2_fn = ''
        if os.path.exists(t1_fn) and os.path.exists(t2_fn):
            t_factor = timelib.get_t_factor_fn(t1_fn, t2_fn)
        if t_factor is None:
            sys.exit("Unable to determine input timestamps")

    if t_unit == 'day':
        t_factor *= 365.25

    print("Input dates:")
    print(t1)
    print(t2)
    print(dt)
    print(t_factor, t_unit)

    #Scale values for polar stereographic distortion
    srs = geolib.get_ds_srs(src_ds)
    proj_scale_factor = 1.0
    #Want to scale to get correct distances for polar stereographic
    if srs.IsSame(geolib.nps_srs) or srs.IsSame(geolib.sps_srs):
        proj_scale_factor = geolib.scale_ps_ds(src_ds)

    #Convert disparity values in pixels to m/t_unit
    h_myr = h * h_res * proj_scale_factor / t_factor
    h = None
    v_myr = v * v_res * proj_scale_factor / t_factor
    v = None

    #Velocity Magnitude
    m = np.ma.sqrt(h_myr**2 + v_myr**2)
    print("Velocity Magnitude stats")
    malib.print_stats(m)

    #Remove x and y offsets over control surfaces
    offset_str = ''
    if remove_offsets:
        if mask_fn is None:
            from demcoreg.dem_mask import get_mask
            print(
                "\nUsing demcoreg to prepare mask of stable control surfaces\n"
            )
            #TODO: Accept mask_list as in demcoreg
            #mask_list = args.mask_list
            # for now keep it simple, limit to non-glacier surfaces
            mask_list = [
                'glaciers',
            ]
            mask = get_mask(src_ds, mask_list=mask_list, dem_fn=src_fn)
        else:
            print("\nWarping input raster mask")
            #This can be from previous dem_mask.py run (e.g. *rockmask.tif)
            mask_ds = warplib.memwarp_multi_fn([
                mask_fn,
            ],
                                               res=src_ds,
                                               extent=src_ds,
                                               t_srs=src_ds)[0]
            mask = iolib.ds_getma(mask_ds)
            #The default from ds_getma is a masked array, so need to isolate boolean mask
            #Assume input is 0 for masked, 1 for unmasked (valid control surface)
            mask = mask.filled().astype('bool')
            #This should work, as the *rockmask.tif is 1 for unmasked, 0 for masked, with ndv=0
            #mask = np.ma.getmaskarray(mask)
            #Vector mask - untested
            if os.path.splitext(mask_fn)[1] == '.shp':
                mask = geolib.shp2array(mask_fn, src_ds)

        print("\nRemoving median x and y offset over static control surfaces")
        h_myr_count = h_myr.count()
        h_myr_static_count = np.ma.array(h_myr, mask=mask).count()
        h_myr_mad, h_myr_med = malib.mad(np.ma.array(h_myr, mask=mask),
                                         return_med=True)
        v_myr_mad, v_myr_med = malib.mad(np.ma.array(v_myr, mask=mask),
                                         return_med=True)

        print("Static pixel count: %i (%0.1f%%)" %
              (h_myr_static_count,
               100 * float(h_myr_static_count) / h_myr_count))
        print("median (+/-NMAD)")
        print("x velocity offset: %0.2f (+/-%0.2f) m/%s" %
              (h_myr_med, h_myr_mad, t_unit))
        print("y velocity offset: %0.2f (+/-%0.2f) m/%s" %
              (v_myr_med, v_myr_mad, t_unit))
        h_myr -= h_myr_med
        v_myr -= v_myr_med
        offset_str = '_offsetcorr_h%0.2f_v%0.2f' % (h_myr_med, v_myr_med)
        #Velocity Magnitude
        m = np.ma.sqrt(h_myr**2 + v_myr**2)
        print("Velocity Magnitude stats after correction")
        malib.print_stats(m)

    if plot:
        fig_fn = os.path.splitext(src_fn)[0] + '.png'
        label = 'Velocity (m/%s)' % t_unit
        f, ax = make_plot(m, fig_fn, label)
        plotvec(h_myr, v_myr)
        plt.tight_layout()
        plt.savefig(fig_fn,
                    dpi=300,
                    bbox_inches='tight',
                    pad_inches=0,
                    edgecolor='none')

    print("Writing out files")
    gt = src_ds.GetGeoTransform()
    proj = src_ds.GetProjection()
    dst_fn = os.path.splitext(src_fn)[0] + '_vm%s.tif' % offset_str
    iolib.writeGTiff(m, dst_fn, create=True, gt=gt, proj=proj)
    dst_fn = os.path.splitext(src_fn)[0] + '_vx%s.tif' % offset_str
    iolib.writeGTiff(h_myr, dst_fn, create=True, gt=gt, proj=proj)
    dst_fn = os.path.splitext(src_fn)[0] + '_vy%s.tif' % offset_str
    iolib.writeGTiff(v_myr, dst_fn, create=True, gt=gt, proj=proj)
    src_ds = None
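The core conversion above reduces to one expression: disparity in pixels, times ground resolution (m/px) and any projection scale factor, divided by the time separation in the chosen unit. A one-line sketch (function name hypothetical):

def disp_to_velocity(disp_px, res_m, t_factor, proj_scale_factor=1.0):
    """Convert disparity (px) to velocity (m per t_unit)"""
    return disp_px * res_m * proj_scale_factor / t_factor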
Example #20
0
    main_glac_thickness['RGIId'] = main_glac_rgi.RGIId.values
    main_glac_width['RGIId'] = main_glac_rgi.RGIId.values
    main_glac_length['RGIId'] = main_glac_rgi.RGIId.values
    main_glac_slope['RGIId'] = main_glac_rgi.RGIId.values

    # ===== PROCESS EACH GLACIER ======
    for nglac, glacno in enumerate(glacno_list):
        # print(nglac, glacno)
        thickness_fn = thickness_fp + 'RGI60-' + glacno + '_thickness.tif'
        dem_farinotti_fn = dem_farinotti_fp + 'surface_DEM_RGI60-' + glacno + '.tif'

        # Reproject, resample, warp rasters to common extent, grid size, etc.
        #  note: use thickness for the reference to avoid unrealistic extrapolations, e.g., negative thicknesses
        #        also using equal area increases areas significantly compared to RGI
        raster_fn_list = [dem_ref_fn, dem_farinotti_fn, thickness_fn]
        ds_list = warplib.memwarp_multi_fn(raster_fn_list, extent='intersection', res='min', t_srs=thickness_fn)
        # print('\n\nSWITCH BACK TO THICKNESS_FN AFTER OTHERS CORRECTED!\n\n')
        # ds_list = warplib.memwarp_multi_fn(raster_fn_list, extent='intersection', res='min', t_srs=dem_ref_fn)

        # masked arrays using ice thickness estimates
        dem_ref_raw, dem_far_raw, thickness = [iolib.ds_getma(i) for i in ds_list]
        dem_ref = dem_ref_raw.copy()
        dem_ref.mask = thickness.mask
        dem_far = dem_far_raw.copy()
        dem_far.mask = thickness.mask

        # DEM selection for binning computations
        # if exceeds threshold, then use the reference
        if (abs(main_glac_rgi.loc[nglac,'Zmin'] - dem_far.min()) > dem_poorquality_threshold or
            abs(main_glac_rgi.loc[nglac,'Zmax'] - dem_far.max()) > dem_poorquality_threshold):
            print('  Check Glacier ' + glacno + ': use Christian DEM instead of Farinotti')
Example #21
0
def main():
    parser = getparser()
    args = parser.parse_args()

    src_fn = args.src_fn
    if not iolib.fn_check(src_fn):
        sys.exit("Unable to find src_fn: %s" % src_fn)

    mask_fn = args.mask_fn
    if not iolib.fn_check(mask_fn):
        sys.exit("Unable to find mask_fn: %s" % mask_fn)

    #Determine output extent, default is input raster extent
    extent = args.extent
    if extent == 'raster':
        extent = src_fn
    elif extent == 'mask':
        extent = mask_fn
    else:
        #This is a hack for intersection computation
        src_ds_list = [
            gdal.Open(fn, gdal.GA_ReadOnly) for fn in [src_fn, mask_fn]
        ]
        #t_srs = geolib.get_ds_srs(src_ds_list[0])
        extent = warplib.parse_extent(extent, src_ds_list, src_fn)

    #Set resampling algorithm appropriately
    r = 'cubic'
    mask_val = args.mask_val
    if mask_val is not None:
        r = 'near'

    print("Warping mask_fn")
    mask_ds = warplib.memwarp_multi_fn([
        mask_fn,
    ],
                                       res=src_fn,
                                       extent=extent,
                                       t_srs=src_fn,
                                       r=r)[0]

    print("Loading mask array")
    mask_ma_full = iolib.ds_getma(mask_ds)
    mask_ds = None

    print("Extracting mask")
    if mask_val is not None:
        #Use specified value
        mask = ~((mask_ma_full == mask_val).data)
    elif mask_ma_full.std() != 0:
        #Input mask filename is a raster, or other masked array
        #Just need to extract mask
        mask = np.ma.getmaskarray(mask_ma_full)
    else:
        #Input mask filename is a mask, use directly
        #If input mask values are zero, valid values are nonzero
        #Bool True == 1, so need to invert
        if mask_ma_full.fill_value == 0:
            mask = ~((mask_ma_full.data).astype(bool))
        else:
            mask = (mask_ma_full.data).astype(bool)

    #Free up memory
    mask_ma_full = None

    #Add dilation step for buffer

    #newmask = np.logical_or(np.ma.getmaskarray(src_ma_full), mask)

    if args.invert:
        print("Inverting mask")
        mask = ~(mask)

    print("Loading src array and applying updated mask")
    if extent == src_fn:
        src_ds = gdal.Open(src_fn)
    else:
        src_ds = warplib.memwarp_multi_fn([
            src_fn,
        ],
                                          res=src_fn,
                                          extent=extent,
                                          t_srs=src_fn)[0]

    #Now load source array with new mask
    src_ma_full = np.ma.array(iolib.ds_getma(src_ds), mask=mask)
    mask = None

    if args.out_fn is not None:
        src_fn_masked = args.out_fn
    else:
        src_fn_masked = os.path.splitext(src_fn)[0] + '_masked.tif'
    print("Writing out masked version of input raster: %s" % src_fn_masked)
    iolib.writeGTiff(src_ma_full, src_fn_masked, src_ds, create=True)
Example #22
0
def crop_sim_res_extent(img_list, outfol, vrt=False, rpc=False):
    """
    Warp images to common 'finest' resolution and intersecting extent
    This is useful for stereo processing with mapprojected imagery with the skysat pairs

    Parameters
    ----------
    img_list: list
        list containing two images
    outfol: str
        path to folder where warped images will be saved
    vrt: bool
        Produce warped VRT instead of geotiffs if True
    rpc: bool
        Copy RPC information to warped images if True
    Returns
    ----------
    out: list or None
        list containing the two warped images; the first entry (left image) is the image that was initially finer resolution (more nadir).
        If the images do not intersect (or warping fails), None is returned.
    """
    resample_alg = 'lanczos'
    img1 = img_list[0]
    img2 = img_list[1]
    img1_ds = iolib.fn_getds(img1)
    img2_ds = iolib.fn_getds(img2)
    res1 = geolib.get_res(img1_ds, square=True)[0]
    res2 = geolib.get_res(img2_ds, square=True)[0]
    # Set the finer-resolution image as the left image; this is redundant for
    # video pairs, but useful for triplets with no GSD information
    if res1 < res2:
        l_img = img1
        r_img = img2
        res = res1
    else:
        l_img = img2
        r_img = img1
        res = res2
    # ASP stereo command expects the input to be .tif/.tiff and complains about .vrt,
    # so try saving with the VRT driver but a .tif extension
    l_img_warp = os.path.join(
        outfol,
        os.path.splitext(os.path.basename(l_img))[0] + '_warp.tif')
    r_img_warp = os.path.join(
        outfol,
        os.path.splitext(os.path.basename(r_img))[0] + '_warp.tif')
    if not (os.path.exists(l_img_warp)):
        # can turn on verbose during qa/qc
        # Better to turn off during large runs, writing takes time
        verbose = False
        if not os.path.exists(outfol):
            os.makedirs(outfol)
        try:
            #this will simply break and continue if the images do not intersect
            ds_list = warplib.memwarp_multi_fn([l_img, r_img],
                                               r=resample_alg,
                                               verbose=verbose,
                                               res='min',
                                               extent='intersection')
            if vrt:
                extent = geolib.ds_extent(ds_list[0])
                res = geolib.get_res(ds_list[0], square=True)
                vrt_options = gdal.BuildVRTOptions(resampleAlg='average',
                                                   resolution='user',
                                                   xRes=res[0],
                                                   yRes=res[1],
                                                   outputBounds=tuple(extent))
                l_vrt = gdal.BuildVRT(l_img_warp, [
                    l_img,
                ],
                                      options=vrt_options)
                r_vrt = gdal.BuildVRT(r_img_warp, [
                    r_img,
                ],
                                      options=vrt_options)
                # close vrt to save to disk
                l_vrt = None
                r_vrt = None
                out = [l_img_warp, r_img_warp]
            else:
                # Opt out of writing VRTs here to prevent correlation artifacts;
                # write GeoTIFFs instead
                l_img_ma = iolib.ds_getma(ds_list[0])
                r_img_ma = iolib.ds_getma(ds_list[1])
                iolib.writeGTiff(l_img_ma, l_img_warp, ds_list[0])
                iolib.writeGTiff(r_img_ma, r_img_warp, ds_list[1])
                out = [l_img_warp, r_img_warp]
                del (ds_list)
                if rpc:
                    copy_rpc(l_img, l_img_warp)
                    copy_rpc(r_img, r_img_warp)
        except Exception:
            #Images do not intersect (or warping otherwise failed)
            out = None
    else:
        out = [l_img_warp, r_img_warp]
    return out
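
#A hedged usage sketch for the function above (placeholder filenames; assumes two
#overlapping mapprojected SkySat images):
out = crop_sim_res_extent(['img1_map.tif', 'img2_map.tif'], 'warped/', vrt=False, rpc=True)
if out is not None:
    l_img_warp, r_img_warp = out  #finer-resolution (left) image first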
Example #23
0
#def main():
parser = getparser()
args = parser.parse_args()

dem1_fn = args.dem1_fn
dem1_ts = timelib.fn_getdatetime(dem1_fn)
res = 'min'
save = True 
#For testing
#res = 64

if args.dem2_fn is not None:
    dem2_fn = args.dem2_fn
    print("Warping DEMs to same res/extent/proj")
    #This will check input param for validity, could do beforehand
    dem1_ds, dem2_ds = warplib.memwarp_multi_fn([dem1_fn, dem2_fn], extent='intersection', res=res, t_srs='first')
    print("Loading input DEMs into masked arrays")
    dem1 = iolib.ds_getma(dem1_ds, 1)
    dem2 = iolib.ds_getma(dem2_ds, 1)
    dem2_ts = timelib.fn_getdatetime(dem2_fn)
    dz = dem2 - dem1
    outprefix = os.path.splitext(os.path.split(dem1_fn)[1])[0]+'_'+os.path.splitext(os.path.split(dem2_fn)[1])[0]
elif args.dz_fn is not None:
    dz_fn = args.dz_fn
    dem1_ds, dz_ds = warplib.memwarp_multi_fn([dem1_fn, dz_fn], extent='intersection', res=res, t_srs='first')
    print("Loading input DEM and Snow depth into masked arrays")
    dem1 = iolib.ds_getma(dem1_ds, 1)
    dz = iolib.ds_getma(dz_ds, 1)
    #Try to pull out second timestamp from dz_fn
    dem2_ts = timelib.fn_getdatetime_list(dz_fn)[-1]
    outprefix = os.path.splitext(os.path.split(dz_fn)[1])[0]
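
#A hedged continuation sketch (not part of the original snippet): convert the
#timestamp difference to decimal years and write out the dz map and a dz/dt rate map
if dem1_ts is not None and dem2_ts is not None:
    dt_yr = abs((dem2_ts - dem1_ts).total_seconds()) / (365.25 * 86400)
    iolib.writeGTiff(dz, outprefix + '_dz_eul.tif', dem1_ds)
    iolib.writeGTiff(dz / dt_yr, outprefix + '_dz_eul_rate.tif', dem1_ds)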
Example #24
0
#Mask pit coordinates outside the expected easting/northing bounds
xlim = (100000, 300000)
b[:, 1][(b[:, 1] > xlim[1]) | (b[:, 1] < xlim[0])] = np.ma.masked
ylim = (4100000, 4500000)
b[:, 2][(b[:, 2] > ylim[1]) | (b[:, 2] < ylim[0])] = np.ma.masked

#Only pass pits with valid x and y coord
b = b[b[:, 1:3].count(axis=1) == 2]

#Stereo2SWE preliminary products
dem_fn = '/Users/dshean/Documents/UW/SnowEx/preliminary_mos_20170504/gm_8m-tile-0.tif'
hs_fn = '/Users/dshean/Documents/UW/SnowEx/preliminary_mos_20170504/gm_8m-tile-0_hs_az315.tif'
#snowdepth_fn = '/Users/dshean/Documents/UW/SnowEx/preliminary_snowdepth_20170606/snowdepth_20170201-20170317_mos-tile-0.tif'
snowdepth_fn = '/Users/dshean/Documents/UW/SnowEx/preliminary_snowdepth_20170606/snowdepth_tif/snowdepth_20170201-20170317_mos-tile-0_filt5px.tif'

#Load and clip to common extent
dem_ds, hs_ds, snowdepth_ds = warplib.memwarp_multi_fn(
    [dem_fn, hs_fn, snowdepth_fn], extent='union')
dem = iolib.ds_getma(dem_ds)
hs = iolib.ds_getma(hs_ds)
snowdepth = iolib.ds_getma(snowdepth_ds)

#Pixel coordinates of sample sites
x, y = geolib.mapToPixel(b[:, 1], b[:, 2], dem_ds.GetGeoTransform())
depth = b[:, 3]
rho = b[:, 4]

#Sample DEM snow depth
samp = geolib.sample(snowdepth_ds, b[:, 1], b[:, 2], pad=5)

#Filter to throw out samples with significant roughness over sampled area
samp_perc_thresh = 0.3
samp_mask = (samp[:, 1] / samp[:, 0]) > samp_perc_thresh
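
#A hedged continuation sketch (not in the original snippet): apply the roughness
#mask and compare the sampled DEM snow depth against the measured pit depth
samp_depth = np.ma.array(samp[:, 0], mask=samp_mask)
malib.print_stats(samp_depth - depth)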
Example #25
0
def main():
    """
    # chm_refine.py
    # Main goals: 
    # (1)fix non-forest "heights"
    # (2)fix dense interior forest height estimates
    # (3)remove water
    # Basic logic
    #Divide HRSI CHM into forest and non-forest;
    #to estimate max canopy height, within the forest mask run a 'max'filter (filtlib)
    #to remove spurious 'heights' in the non-forest using a 'min' filter (filtlib)
    #
    #(1) invert roughmask to get 'forest'
    #    -run a 'max' filter,
    #(2) use roughmask (get_lo_rough_mask) to get 'non-forest' pixels
    #    -run a 'min' filter,
    #    -maybe a small (3 pix) window filter
    #(3) then mask the result with the toamask (this removes water and other dark (shadow) areas
    #    -remove dark and smooth (water)
    #    -smooth is non-veg land
    #    -dark and rough is woody veg land
    #
    #(4) for later: then mask with the slopemask, toatrimask
    
    # step 1
    #  get_dark_mask from ortho_toa --> remove the areas that are TOA dark (water,shadow) from the chm
    # step 2
    #	get_hi_slope_mask from DEM --> remove areas of high slopes from chm
    # step 3
    #	get_lo_rough_mask from DEM --> remove areas that are NOT rough (aka remove non-forest)
    #	run a max filter on remaining pixels
    
    * mask outputs should all consistently show the 'masked' area as valid
    """
    parser = getparser()
    args = parser.parse_args()

    outdir = args.outdir
    pairname = args.pairname

    if not os.path.exists(outdir):
        os.mkdir(outdir)

    outfolder = os.path.join(outdir, pairname)
    if not os.path.exists(outfolder):
        os.mkdir(outfolder)

    auto_min_toa = args.auto_min_toa

    #Symlink files to working directory.
    #symlinks=['out-DEM_1m.tif','{}_ortho.tif'.format(pairname)]
    print("\nSymlinking Files to Working Directory\n")
    cmd = "ln -sf /att/pubrepo/DEM/hrsi_dsm/v2/{}/*ortho*tif {}".format(
        pairname, outfolder)
    subprocess.call(cmd, shell=True)

    cmd = "ln -sf /att/pubrepo/DEM/hrsi_dsm/v2/{}/out-DEM*m.tif {}".format(
        pairname, outfolder)
    subprocess.call(cmd, shell=True)

    cmd = "xml_fn_list=$(ls /att/pubrepo/DEM/hrsi_dsm/v2/{}/*.xml);ln -sf $xml_fn_list {}".format(
        pairname, outfolder)
    subprocess.call(cmd, shell=True)

    #dsm_maindir='/att/pubrepo/DEM/hrsi_dsm/v2/'
    #dsm_dir=os.path.join(dsm_maindir,pairname)

    chm_dir = '/att/gpfsfs/briskfs01/ppl/pmontesa/chm_work/hrsi_chm_sgm_filt/chm'
    chm_name = '{}_sr05_4m-sr05-min_1m-sr05-max_dz_eul.tif'.format(pairname)
    chm_fn = os.path.join(chm_dir, chm_name)

    print("[1]\nLoading Input CHM into masked array\n")
    chm_ds = iolib.fn_getds(chm_fn)

    print("[2]\nGetting Dark Mask from Ortho TOA\n")
    #May need to include get_toa_fn from DEM_control.py
    print("\n\t-Compute TOA from Ortho\n")
    dem_fn = os.path.join(outfolder, 'out-DEM_1m.tif')
    toa_fn = get_toa_fn(dem_fn)

    print("\nt-Warp TOA to CHM...\n")
    toa_ds = warplib.memwarp_multi_fn([
        toa_fn,
    ],
                                      res=chm_ds,
                                      extent=chm_ds,
                                      t_srs=chm_ds)[0]

    #Determine from inputs, or calculate, the lowest acceptable TOA value for masking
    if auto_min_toa:
        # Compute a good min TOA value
        m, s = get_min_gaus(toa_fn, 50, 4)
        #Note: m + s would be a more conservative threshold; the Gaussian mean is used here
        min_toa = m
    else:
        min_toa = args.min_toa

    #Write TOA Mins for reference
    with open(
            os.path.join(
                os.path.split(toa_fn)[0], "min_toa_" + pairname + ".txt"),
            "w") as text_file:
        text_file.write(os.path.basename(__file__))
        text_file.write("\nMinimum TOA used for mask:\n{0}".format(min_toa))

    # Should mask dark areas and dilate
    dark_mask = get_dark_mask(toa_ds, min_toa)

    print("\n\t-Completed Calculating Dark Mask\n")

    print("[3]\nGetting High Slope Mask from DEM\n")

    max_slope = args.max_slope
    dem_ds = iolib.fn_getds(dem_fn)
    slope_mask = get_hi_slope_mask(dem_ds, max_slope)
    print("\n\t-Completed Sploe Masking\n")

    print("[4]\nGetting Roughness for Forest/Non-Forest Classification\n")

    #NOTE: Not sure which mask we want to use. Will write up both
    min_rough = args.min_rough

    lo_rough_mask = get_lo_rough_mask(dem_ds, min_rough)
    #Areas with roughness less than min_rough are masked
    min_tri = args.min_tri
    lo_tri_mask = get_lo_tri_mask(dem_ds, min_tri)

    #Valid areas are forest
    forest_mask = np.logical_or(lo_rough_mask, lo_tri_mask)

    ground_mask = ~forest_mask
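
    #A hedged continuation sketch (the original function is truncated here).
    #scipy.ndimage stands in for the filtlib calls, and the window size and mask
    #polarity (forest_mask True over forest) are assumptions.
    import scipy.ndimage
    chm = iolib.ds_getma(chm_ds)
    #Fill nodata so it never wins the running max/min
    chm_max = scipy.ndimage.maximum_filter(chm.filled(-9999), size=5)
    chm_min = scipy.ndimage.minimum_filter(chm.filled(9999), size=5)
    #Max-filtered canopy heights over forest, min-filtered 'heights' elsewhere
    chm_refined = np.ma.array(np.where(forest_mask, chm_max, chm_min), mask=np.ma.getmaskarray(chm))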
Example #26
0
def main2(args):
    #Should check that files exist
    dem1_fn = args.ref_fn
    dem2_fn = args.src_fn
    mode = args.mode
    apply_mask = not args.nomask
    max_offset_m = args.max_offset
    tiltcorr = args.tiltcorr

    #These are tolerances (in meters) to stop iteration
    tol = args.tol
    min_dx = tol
    min_dy = tol
    min_dz = tol

    #Maximum number of iterations
    max_n = 10

    outdir = args.outdir
    if outdir is None:
        outdir = os.path.splitext(dem2_fn)[0] + '_dem_align'

    if not os.path.exists(outdir):
        os.makedirs(outdir)

    outprefix = '%s_%s' % (os.path.splitext(os.path.split(dem2_fn)[-1])[0], \
            os.path.splitext(os.path.split(dem1_fn)[-1])[0])
    outprefix = os.path.join(outdir, outprefix)

    print("\nReference: %s" % dem1_fn)
    print("Source: %s" % dem2_fn)
    print("Mode: %s" % mode)
    print("Output: %s\n" % outprefix)

    dem2_ds = gdal.Open(dem2_fn, gdal.GA_ReadOnly)
    #Often the "ref" DEM is high-res lidar or similar
    #This is a shortcut to resample to match "source" DEM
    dem1_ds = warplib.memwarp_multi_fn([
        dem1_fn,
    ],
                                       res=dem2_ds,
                                       extent=dem2_ds,
                                       t_srs=dem2_ds)[0]
    #dem1_ds = gdal.Open(dem1_fn, gdal.GA_ReadOnly)

    #Create a copy to be updated in place
    dem2_ds_align = iolib.mem_drv.CreateCopy('', dem2_ds, 0)
    #dem2_ds_align = dem2_ds

    #Iteration number
    n = 1
    #Cumulative offsets
    dx_total = 0
    dy_total = 0
    dz_total = 0

    #Now iteratively update geotransform and vertical shift
    while True:
        print("*** Iteration %i ***" % n)
        dx, dy, dz, static_mask, fig = compute_offset(dem1_ds,
                                                      dem2_ds_align,
                                                      dem2_fn,
                                                      mode,
                                                      max_offset_m,
                                                      apply_mask=apply_mask)
        if n == 1:
            static_mask_orig = static_mask
        xyz_shift_str_iter = "dx=%+0.2fm, dy=%+0.2fm, dz=%+0.2fm" % (dx, dy,
                                                                     dz)
        print("Incremental offset: %s" % xyz_shift_str_iter)

        #Should make an animation of this converging
        if fig is not None:
            dst_fn = outprefix + '_%s_iter%i_plot.png' % (mode, n)
            print("Writing offset plot: %s" % dst_fn)
            fig.gca().set_title(xyz_shift_str_iter)
            fig.savefig(dst_fn, dpi=300, bbox_inches='tight', pad_inches=0.1)

        #Apply the horizontal shift to the original dataset
        dem2_ds_align = coreglib.apply_xy_shift(dem2_ds_align,
                                                dx,
                                                dy,
                                                createcopy=False)
        dem2_ds_align = coreglib.apply_z_shift(dem2_ds_align,
                                               dz,
                                               createcopy=False)

        dx_total += dx
        dy_total += dy
        dz_total += dz
        print("Cumulative offset: dx=%+0.2fm, dy=%+0.2fm, dz=%+0.2fm" %
              (dx_total, dy_total, dz_total))

        #Fit plane to residuals and remove
        #Might be better to do this after converging
        """
        if tiltcorr:
            print("Applying planar tilt correction")
            gt = dem2_ds_align.GetGeoTransform()
            #Need to compute diff_euler here
            #Copy portions of compute_offset, create new function 
            vals, resid, coeff = geolib.ma_fitplane(diff_euler_align, gt, perc=(4, 96))
            dem2_ds_align = coreglib.apply_z_shift(dem2_ds_align, -vals, createcopy=False)
        """

        n += 1
        print("\n")
        #If magnitude of shift in all directions is less than tol
        #if n > max_n or (abs(dx) <= min_dx and abs(dy) <= min_dy and abs(dz) <= min_dz):
        #If magnitude of shift is less than tol
        dm = np.sqrt(dx**2 + dy**2 + dz**2)
        if n > max_n or dm < tol:
            break

    #String to append to output filenames
    xyz_shift_str_cum = '_%s_x%+0.2f_y%+0.2f_z%+0.2f' % (mode, dx_total,
                                                         dy_total, dz_total)
    if tiltcorr:
        xyz_shift_str_cum += "_tiltcorr"

    #Compute original elevation difference
    if True:
        dem1_clip_ds, dem2_clip_ds = warplib.memwarp_multi([dem1_ds, dem2_ds], \
                res='max', extent='intersection', t_srs=dem2_ds)
        dem1_orig = iolib.ds_getma(dem1_clip_ds, 1)
        dem2_orig = iolib.ds_getma(dem2_clip_ds, 1)
        diff_euler_orig = dem2_orig - dem1_orig
        if not apply_mask:
            static_mask_orig = np.ma.getmaskarray(diff_euler_orig)
        diff_euler_orig_compressed = diff_euler_orig[~static_mask_orig]
        diff_euler_orig_stats = np.array(
            malib.print_stats(diff_euler_orig_compressed))

        #Write out original eulerian difference map
        print(
            "Writing out original euler difference map for common intersection before alignment"
        )
        dst_fn = outprefix + '_orig_dz_eul.tif'
        iolib.writeGTiff(diff_euler_orig, dst_fn, dem1_clip_ds)

    #Compute final elevation difference
    if True:
        dem1_clip_ds_align, dem2_clip_ds_align = warplib.memwarp_multi([dem1_ds, dem2_ds_align], \
                res='max', extent='intersection', t_srs=dem2_ds_align)
        dem1_align = iolib.ds_getma(dem1_clip_ds_align, 1)
        dem2_align = iolib.ds_getma(dem2_clip_ds_align, 1)
        diff_euler_align = dem2_align - dem1_align
        if not apply_mask:
            static_mask = np.ma.getmaskarray(diff_euler_align)
        diff_euler_align_compressed = diff_euler_align[~static_mask]
        diff_euler_align_stats = np.array(
            malib.print_stats(diff_euler_align_compressed))

        #Fit plane to residuals and remove
        if tiltcorr:
            print("Applying planar tilt correction")
            gt = dem1_clip_ds_align.GetGeoTransform()
            #Need to apply the mask here, so we're only fitting over static surfaces
            #Note that the origmask=False will compute vals for all x and y indices, which is what we want
            vals, resid, coeff = geolib.ma_fitplane(np.ma.array(diff_euler_align, mask=static_mask), \
                    gt, perc=(4, 96), origmask=False)
            #Remove planar offset from difference map
            diff_euler_align -= vals
            #Remove planar offset from aligned dem2
            #Note: dimensions of ds and vals will be different as vals are computed for clipped intersection
            #Recompute planar offset for dem2_ds_align extent
            xgrid, ygrid = geolib.get_xy_grids(dem2_ds_align)
            vals = coeff[0] * xgrid + coeff[1] * ygrid + coeff[2]
            dem2_ds_align = coreglib.apply_z_shift(dem2_ds_align,
                                                   -vals,
                                                   createcopy=False)
            if not apply_mask:
                static_mask = np.ma.getmaskarray(diff_euler_align)
            diff_euler_align_compressed = diff_euler_align[~static_mask]
            diff_euler_align_stats = np.array(
                malib.print_stats(diff_euler_align_compressed))
            print("Creating fitplane plot")
            fig, ax = plt.subplots(figsize=(6, 6))
            fitplane_clim = malib.calcperc(vals, (2, 98))
            im = ax.imshow(vals, cmap='cpt_rainbow', clim=fitplane_clim)
            res = float(geolib.get_res(dem2_clip_ds, square=True)[0])
            pltlib.add_scalebar(ax, res=res)
            pltlib.hide_ticks(ax)
            pltlib.add_cbar(ax, im, label='Fit plane residuals (m)')
            fig.tight_layout()
            dst_fn1 = outprefix + '%s_align_dz_eul_fitplane.png' % xyz_shift_str_cum
            print("Writing out figure: %s" % dst_fn1)
            fig.savefig(dst_fn1, dpi=300, bbox_inches='tight', pad_inches=0.1)

        #Compute higher-order fits?
        #Could also attempt to model along-track and cross-track artifacts

        #Write out aligned eulerian difference map for clipped extent with vertical offset removed
        dst_fn = outprefix + '%s_align_dz_eul.tif' % xyz_shift_str_cum
        print(
            "Writing out aligned difference map with median vertical offset removed"
        )
        iolib.writeGTiff(diff_euler_align, dst_fn, dem1_clip_ds)

    #Write out aligned dem2 with vertical offset removed
    if True:
        dst_fn2 = outprefix + '%s_align.tif' % xyz_shift_str_cum
        print(
            "Writing out shifted dem2 with median vertical offset removed: %s"
            % dst_fn2)
        #Might be cleaner way to write out MEM ds directly to disk
        dem2_align = iolib.ds_getma(dem2_ds_align)
        iolib.writeGTiff(dem2_align, dst_fn2, dem2_ds_align)
        dem2_ds_align = None

    #Create output plot
    if True:
        print("Creating final plot")
        dem1_hs = geolib.gdaldem_mem_ma(dem1_orig, dem1_clip_ds, returnma=True)
        dem2_hs = geolib.gdaldem_mem_ma(dem2_orig, dem2_clip_ds, returnma=True)
        f, axa = plt.subplots(2, 3, figsize=(11, 8.5))
        for ax in axa.ravel()[:-1]:
            ax.set_facecolor('k')
            pltlib.hide_ticks(ax)
        dem_clim = malib.calcperc(dem1_orig, (2, 98))
        axa[0, 0].imshow(dem1_hs, cmap='gray')
        axa[0, 0].imshow(dem1_orig,
                         cmap='cpt_rainbow',
                         clim=dem_clim,
                         alpha=0.6)
        res = float(geolib.get_res(dem1_clip_ds, square=True)[0])
        pltlib.add_scalebar(axa[0, 0], res=res)
        axa[0, 0].set_title('Reference DEM')
        axa[0, 1].imshow(dem2_hs, cmap='gray')
        axa[0, 1].imshow(dem2_orig,
                         cmap='cpt_rainbow',
                         clim=dem_clim,
                         alpha=0.6)
        axa[0, 1].set_title('Source DEM')
        axa[0, 2].imshow(~static_mask_orig, clim=(0, 1), cmap='gray')
        axa[0, 2].set_title('Surfaces for co-registration')
        dz_clim = malib.calcperc_sym(diff_euler_orig_compressed, (5, 95))
        im = axa[1, 0].imshow(diff_euler_orig, cmap='RdBu', clim=dz_clim)
        pltlib.add_cbar(axa[1, 0], im, label=None)
        axa[1, 0].set_title('Elev. Diff. Before (m)')
        im = axa[1, 1].imshow(diff_euler_align, cmap='RdBu', clim=dz_clim)
        pltlib.add_cbar(axa[1, 1], im, label=None)
        axa[1, 1].set_title('Elev. Diff. After (m)')

        #Tried to insert Nuth fig here
        #ax_nuth.change_geometry(1,2,1)
        #f.axes.append(ax_nuth)

        bins = np.linspace(dz_clim[0], dz_clim[1], 128)
        axa[1, 2].hist(diff_euler_orig_compressed,
                       bins,
                       color='g',
                       label='Before',
                       alpha=0.5)
        axa[1, 2].hist(diff_euler_align_compressed,
                       bins,
                       color='b',
                       label='After',
                       alpha=0.5)
        axa[1, 2].axvline(0, color='k', linewidth=0.5, linestyle=':')
        axa[1, 2].set_xlabel('Elev. Diff. (m)')
        axa[1, 2].set_ylabel('Count (px)')
        axa[1, 2].set_title("Source - Reference")
        #axa[1,2].legend(loc='upper right')
        #before_str = 'Before\nmean: %0.2f\nstd: %0.2f\nmed: %0.2f\nnmad: %0.2f' % tuple(diff_euler_orig_stats[np.array((3,4,5,6))])
        #after_str = 'After\nmean: %0.2f\nstd: %0.2f\nmed: %0.2f\nnmad: %0.2f' % tuple(diff_euler_align_stats[np.array((3,4,5,6))])
        before_str = 'Before\nmed: %0.2f\nnmad: %0.2f' % tuple(
            diff_euler_orig_stats[np.array((5, 6))])
        axa[1, 2].text(0.05,
                       0.95,
                       before_str,
                       va='top',
                       color='g',
                       transform=axa[1, 2].transAxes)
        after_str = 'After\nmed: %0.2f\nnmad: %0.2f' % tuple(
            diff_euler_align_stats[np.array((5, 6))])
        axa[1, 2].text(0.65,
                       0.95,
                       after_str,
                       va='top',
                       color='b',
                       transform=axa[1, 2].transAxes)

        suptitle = '%s\nx: %+0.2fm, y: %+0.2fm, z: %+0.2fm' % (
            os.path.split(outprefix)[-1], dx_total, dy_total, dz_total)
        f.suptitle(suptitle)
        f.tight_layout()
        plt.subplots_adjust(top=0.90)

        dst_fn = outprefix + '%s_align.png' % xyz_shift_str_cum
        print("Writing out figure: %s" % dst_fn)
        f.savefig(dst_fn, dpi=300, bbox_inches='tight', pad_inches=0.1)

        #Removing residual planar tilt can introduce additional slope/aspect dependent offset
        #Want to run another round of main dem_align after removing planar tilt
        if tiltcorr:
            print("\n Rerunning after applying tilt correction \n")
            #Create copy of original arguments
            import copy
            args2 = copy.copy(args)
            #Use aligned, tilt-corrected DEM as input src_fn for second round
            args2.src_fn = dst_fn2
            #Assume we've already corrected most of the tilt during first round (also prevents endless loop)
            args2.tiltcorr = False
            main2(args2)
Example #27
0
#Fragment: likely the tail of the plot_panels helper called at the end of this example
    fig.tight_layout()
    fig.colorbar(im_list[0], ax=axa.ravel().tolist(), label=label, extend='both', shrink=0.5)
    if fn is not None:
        fig.savefig(fn, bbox_inches='tight', pad_inches=0, dpi=150)


! ./Users/elischwat/Documents/UW/pygeotools/pygeotools/ogr_merge.sh

#Input DEM filenames
dem2007_fn = 'rainier_dem_differencing/07_joined.tif'
dem2009_fn = 'rainier_dem_differencing/09_joined.tif'
dem_fn_list = [dem2007_fn,dem2009_fn]

#This will return warped, in-memory GDAL dataset objects
#Can also resample all inputs to a lower resolution (res=256)
ds_list = warplib.memwarp_multi_fn(dem_fn_list, extent='intersection', res='min')

#Load datasets to NumPy arrays
dem_2007, dem_2009 = [iolib.ds_getma(i) for i in ds_list]
dem_list = [dem_2007, dem_2009]
#dem_list = [iolib.ds_getma(i) for i in ds_list]

import matplotlib.pyplot as plt
plt.imshow(dem_2007)

plt.imshow(dem_2009)

titles = ['2007', '2009']
clim = malib.calcperc(dem_list[0], (2,98))
plot_panels(2, dem_list, clim, titles, 'inferno', 'Elevation (m WGS84)', fn='dem.png')
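
#A hedged continuation sketch (not part of the original notebook): difference the
#warped DEMs and summarize the elevation change
dz = dem_2009 - dem_2007
malib.print_stats(dz)
vmin, vmax = malib.calcperc_sym(dz, (5, 95))
plt.imshow(dz, cmap='RdBu', vmin=vmin, vmax=vmax)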
Example #28
0
def main():
    parser = getparser()
    args = parser.parse_args()

    #This is output ndv, avoid using 0 for differences
    diffndv = -9999

    dem1_fn = args.fn1
    dem2_fn = args.fn2

    if dem1_fn == dem2_fn:
        sys.exit('Input filenames are identical')

    fn_list = [dem1_fn, dem2_fn]

    print("Warping DEMs to same res/extent/proj")
    #This will check input param for validity, could do beforehand
    dem1_ds, dem2_ds = warplib.memwarp_multi_fn(fn_list, extent=args.te, res=args.tr, t_srs=args.t_srs)

    outdir = args.outdir
    if outdir is None:
        outdir = os.path.dirname(os.path.abspath(dem1_fn))

    if not os.path.exists(outdir):
        os.makedirs(outdir)

    outprefix = os.path.splitext(os.path.split(dem1_fn)[1])[0]+'_'+os.path.splitext(os.path.split(dem2_fn)[1])[0]

    print("Loading input DEMs into masked arrays")
    dem1 = iolib.ds_getma(dem1_ds, 1)
    dem2 = iolib.ds_getma(dem2_ds, 1)

    #Compute dz/dt rates if possible, in m/yr
    rates = True 
    if rates:
        #Extract basename
        #This was a hack to work with timestamp array filenames that have geoid offset applied
        adj = ''
        if '-adj' in dem1_fn:
            adj = '-adj' 
        dem1_fn_base = re.sub(adj, '', os.path.splitext(dem1_fn)[0]) 
        dem2_fn_base = re.sub(adj, '', os.path.splitext(dem2_fn)[0]) 

        #Attempt to load timestamp arrays (for mosaics) if present
        t1_fn = dem1_fn_base+'_ts.tif'
        t2_fn = dem2_fn_base+'_ts.tif'
        if os.path.exists(t1_fn) and os.path.exists(t2_fn):
            print("Preparing timestamp arrays")
            t1_ds, t2_ds = warplib.memwarp_multi_fn([t1_fn, t2_fn], extent=dem1_ds, res=dem1_ds)
            print("Loading timestamps into masked arrays")
            t1 = iolib.ds_getma(t1_ds)
            t2 = iolib.ds_getma(t2_ds)
            #Compute dt in decimal years (timestamp arrays are in days)
            t_factor = t2 - t1
            t_factor /= 365.25
        else:
            #Attempt to extract timestamps from input filenames
            t1 = timelib.fn_getdatetime(dem1_fn)
            t2 = timelib.fn_getdatetime(dem2_fn)
            if t1 is not None and t2 is not None and t1 != t2:  
                dt = t2 - t1
                year = timedelta(days=365.25)
                t_factor = abs(dt.total_seconds()/year.total_seconds()) 
                print("Time differences is %s, dh/%0.3f" % (dt, t_factor))
            else:
                print("Unable to extract timestamps for input images")
                rates = False

    #Compute relative elevation difference with Eulerian approach 
    print("Computing eulerian elevation difference")
    diff_euler = dem2 - dem1

    #Check to make sure inputs actually intersect
    #if not np.any(~dem1.mask*~dem2.mask):
    if diff_euler.count() == 0:
        sys.exit("No valid overlap between input DEMs")

    if True:
        print("Eulerian elevation difference stats:")
        diff_euler_stats = malib.print_stats(diff_euler)
        diff_euler_med = diff_euler_stats[5]

    if True:
        print("Writing Eulerian elevation difference map")
        dst_fn = os.path.join(outdir, outprefix+'_dz_eul.tif')
        print(dst_fn)
        iolib.writeGTiff(diff_euler, dst_fn, dem1_ds, ndv=diffndv)
        if rates:
            print("Writing Eulerian rate map")
            dst_fn = os.path.join(outdir, outprefix+'_dz_eul_rate.tif')
            print(dst_fn)
            iolib.writeGTiff(diff_euler/t_factor, dst_fn, dem1_ds, ndv=diffndv)

    if False:
        print("Writing Eulerian relative elevation difference map")
        diff_euler_rel = diff_euler - diff_euler_med
        dst_fn = os.path.join(outdir, outprefix+'_dz_eul_rel.tif')
        print(dst_fn)
        iolib.writeGTiff(diff_euler_rel, dst_fn, dem1_ds, ndv=diffndv)

    if False:
        print("Writing out DEM2 with median elevation difference removed")
        dst_fn = os.path.splitext(dem2_fn)[0]+'_med%0.2f.tif' % diff_euler_med
        print(dst_fn)
        iolib.writeGTiff(dem2 - diff_euler_med, dst_fn, dem1_ds, ndv=diffndv)

    if False:
        print("Writing Eulerian elevation difference percentage map")
        diff_euler_perc = 100.0*diff_euler/dem1
        dst_fn = os.path.join(outdir, outprefix+'_dz_eul_perc.tif')
        print(dst_fn)
        iolib.writeGTiff(diff_euler_perc, dst_fn, dem1_ds, ndv=diffndv)
Example #29
0
import numpy as np
import statsmodels.api as sm
import xarray as xr
from pygeotools.lib import iolib, warplib, geolib, timelib
#RasterSource is assumed to be defined elsewhere in this project


def ols(x, **kwargs):
    #Note: y is expected to be defined in the enclosing scope
    fit = sm.OLS(y, x, missing='drop').fit()
    #OLSResults has no 'intercept' attribute; return the fitted coefficients instead
    return fit.params


if __name__ == '__main__':
    files = RasterSource(
        '/Volumes/warehouse/projects/UofU/geohackweek/galcier_data/khumbu_DEM_32m/'
    ).file_list()

    raster_list = warplib.memwarp_multi_fn(files,
                                           extent='union',
                                           res='min',
                                           t_srs=files[0])

    band_data = [iolib.ds_getma(i).filled(np.NaN) for i in raster_list]

    x, y = geolib.get_xy_grids(raster_list[0])
    time_list = np.array([timelib.fn_getdatetime(fn) for fn in files])

    x_band_data = xr.DataArray(np.stack(band_data),
                               coords={
                                   'lat': y[::, 0],
                                   'lon': x[0, ::],
                                   'time': time_list
                               },
                               dims=('time', 'lat', 'lon'),
                               name='elevation')
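
#A hedged follow-on sketch (an assumption, not in the original): estimate a per-pixel
#linear elevation trend along the time axis with xarray's polyfit
trend = x_band_data.polyfit(dim='time', deg=1)
#With datetime coordinates, the degree-1 coefficient is slope per nanosecond;
#scale to meters per year
slope_m_per_yr = trend.polyfit_coefficients.sel(degree=1) * (365.25 * 86400 * 1e9)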
Example #30
0
from pygeotools.lib import iolib, warplib, malib
fn1 = 'D:\\shapedata\\result\\2016-spectral16d366ae8.tif'
fn2 = 'D:\\shapedata\\result\\2016-spectral16d366ae8.tif'
ds_list = warplib.memwarp_multi_fn([fn1, fn2],
                                   res='max',
                                   extent='intersection',
                                   t_srs='first',
                                   r='cubic')
r1 = iolib.ds_getma(ds_list[0])
r2 = iolib.ds_getma(ds_list[1])
rdiff = r1 - r2
malib.print_stats(rdiff)
out_fn = 'D:\\shapedata\\result\\2016-3456564344343434.tif'
iolib.writeGTiff(rdiff, out_fn, ds_list[0])