Example #1
def get_tri_mask(dem_ds, min_tri):
    print("Applying TRI filter (masking smooth values < %0.4f)" % min_tri)
    #dem = iolib.ds_getma(dem_ds)  #Unused here, commented out
    tri = geolib.gdaldem_mem_ds(dem_ds, 'TRI', returnma=True)
    tri_mask = np.ma.masked_less(tri, min_tri)
    #True for valid surfaces, False for removed surfaces
    tri_mask = ~(np.ma.getmaskarray(tri_mask))
    return tri_mask
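
A minimal usage sketch for the mask above. Hedged: 'dem.tif' and the min_tri threshold are placeholders, and the imports are assumed to live at module level in the original source.

import numpy as np
from pygeotools.lib import iolib

dem_ds = iolib.fn_getds('dem.tif')  #Placeholder filename
dem = iolib.ds_getma(dem_ds)
#The returned mask is True for valid (rough) surfaces, so invert it for np.ma
tri_mask = get_tri_mask(dem_ds, min_tri=0.001)
dem_rough = np.ma.array(dem, mask=~tri_mask)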
Example #2
def get_hi_slope_mask(dem_ds, max_slope):
    print("\nApplying DEM slope filter (masking values > %0.1f)" % max_slope)
    #dem = iolib.ds_getma(dem_ds)
    slope = geolib.gdaldem_mem_ds(dem_ds, 'slope', returnma=True)
    hi_slope_mask = np.ma.masked_greater(slope, max_slope)
    #True for valid surfaces, False for removed surfaces
    hi_slope_mask = ~(np.ma.getmaskarray(hi_slope_mask))
    return hi_slope_mask
Example #3
def get_rough_mask(dem_ds, max_rough):
    print("Applying DEM roughness filter (masking values > %0.4f)" % max_rough)
    #dem = iolib.ds_getma(dem_ds)  #Unused here, commented out
    rough = geolib.gdaldem_mem_ds(dem_ds, 'Roughness', returnma=True)
    rough_mask = np.ma.masked_greater(rough, max_rough)
    #True for valid surfaces, False for removed surfaces
    rough_mask = ~(np.ma.getmaskarray(rough_mask))
    return rough_mask
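
The three masks above share the same convention (True = valid), so they can be combined with a logical AND. A hedged sketch with illustrative threshold values:

import numpy as np
from pygeotools.lib import iolib

#Illustrative threshold values; tune per dataset
valid = np.logical_and.reduce([
    get_tri_mask(dem_ds, min_tri=0.001),
    get_hi_slope_mask(dem_ds, max_slope=40),
    get_rough_mask(dem_ds, max_rough=2.0),
])
dem_filt = np.ma.array(iolib.ds_getma(dem_ds), mask=~valid)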
Example #4
def slope_fltr_ds(dem_ds, slope_lim=(0, 40)):
    print("Slope filter: %0.2f - %0.2f" % slope_lim)
    from pygeotools.lib import geolib
    dem = iolib.ds_getma(dem_ds)
    dem_slope = geolib.gdaldem_mem_ds(dem_ds, processing='slope', returnma=True, computeEdges=True)
    print("Initial count: %i" % dem_slope.count())
    dem_slope = range_fltr(dem_slope, slope_lim)
    print("Final count: %i" % dem_slope.count())
    return np.ma.array(dem, mask=np.ma.getmaskarray(dem_slope))
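
range_fltr is called above but not shown; in pygeotools it lives in filtlib (Example #7 calls filtlib.range_fltr). A minimal sketch consistent with both call sites:

import numpy as np

def range_fltr(dem, rangelim):
    """Mask values outside the (min, max) range"""
    print("Excluding values outside of range: %0.2f to %0.2f" % rangelim)
    out = np.ma.masked_outside(dem, *rangelim)
    out.set_fill_value(dem.fill_value)
    return out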
Example #5
def get_rough_mask(dem_ds, max_rough):
    # Roughness is the largest inter-cell difference of a central pixel and its surrounding cell, as defined in Wilson et al (2007, Marine Geodesy 30:3-35).
    print("\nApplying DEM roughness filter (masking values > %0.4f)" % max_rough)
    #dem = iolib.ds_getma(dem_ds)
    rough = geolib.gdaldem_mem_ds(dem_ds, 'Roughness', returnma=True)
    rough_mask = np.ma.masked_greater(rough, max_rough)
    #True for valid surfaces, False for removed surfaces
    rough_mask = ~(np.ma.getmaskarray(rough_mask))
    return rough_mask
Example #6
def get_lo_tri_mask(dem_ds, min_tri):
    # TRI is the mean difference between a central pixel and its surrounding cells (see Wilson et al 2007, Marine Geodesy 30:3-35).
    print("\nApplying TRI filter (masking low TRI values < %0.4f)" % min_tri)
    #dem = iolib.ds_getma(dem_ds)
    tri = geolib.gdaldem_mem_ds(dem_ds, 'TRI', returnma=True)
    lo_tri_mask = np.ma.masked_less(tri, min_tri)
    #True for valid surfaces, False for removed surfaces
    lo_tri_mask = ~(np.ma.getmaskarray(lo_tri_mask))
    return lo_tri_mask
Example #7
def get_filtered_slope(ds, slope_lim=(0.1, 40)):
    #Generate slope map
    print("Computing slope")
    slope = geolib.gdaldem_mem_ds(ds, processing='slope', returnma=True, computeEdges=False)
    #slope_stats = malib.print_stats(slope)
    print("Slope filter: %0.2f - %0.2f" % slope_lim)
    print("Initial count: %i" % slope.count()) 
    slope = filtlib.range_fltr(slope, slope_lim) 
    print("Final count: %i" % slope.count())
    return slope
Example #8
def iv(a, ax=None, clim=None, clim_perc=(2,98), cmap='cpt_rainbow', label=None, title=None, \
        ds=None, res=None, hillshade=False, scalebar=True):
    """
    Quick image viewer with standardized display settings
    """
    if ax is None:
        #ax = plt.subplot()
        f, ax = plt.subplots()
    ax.set_aspect('equal')
    if clim is None:
        clim = get_clim(a, clim_perc)
    cm = cmap_setndv(cmap, cmap)
    alpha = 1.0
    if hillshade:
        if ds is not None:
            hs = geolib.gdaldem_mem_ds(ds,
                                       processing='hillshade',
                                       computeEdges=True,
                                       returnma=True)
            b_cm = cmap_setndv('gray', cmap)
            #Set the overlay bad values to completely transparent, otherwise darkens the bg
            cm.set_bad(alpha=0)
            bg_clim_perc = (2, 98)
            bg_clim = get_clim(hs, bg_clim_perc)
            #bg_clim = (1, 255)
            bgplot = ax.imshow(hs, cmap=b_cm, clim=bg_clim)
            alpha = 0.5
    if scalebar:
        if ds is not None:
            #Get resolution at center of dataset
            ccoord = geolib.get_center(ds, t_srs=geolib.wgs_srs)
            #Compute resolution in local cartesian coordinates at center
            c_srs = geolib.localortho(*ccoord)
            res = geolib.get_res(ds, c_srs)[0]
        if res is not None:
            sb_loc = best_scalebar_location(a)
            add_scalebar(ax, res, location=sb_loc)
    imgplot = ax.imshow(a, cmap=cm, clim=clim, alpha=alpha, **imshow_kwargs)
    cbar_kwargs['extend'] = get_cbar_extend(a, clim=clim)
    cbar_kwargs['format'] = get_cbar_format(a)
    cbar = add_cbar(ax, imgplot, label=label)
    hide_ticks(ax)
    if title is not None:
        ax.set_title(title)
    plt.tight_layout()
    return ax
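
A hedged usage sketch for iv(). It assumes the function lives in a module (pltlib) that also defines the helpers it references (get_clim, cmap_setndv, add_cbar, imshow_kwargs, cbar_kwargs); 'dem.tif' is a placeholder.

import matplotlib.pyplot as plt
from pygeotools.lib import iolib

dem_ds = iolib.fn_getds('dem.tif')  #Placeholder filename
dem = iolib.ds_getma(dem_ds)
#Overlay the DEM on its own hillshade, with scalebar and colorbar
ax = iv(dem, ds=dem_ds, hillshade=True, label='Elevation (m WGS84)')
plt.show()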
Example #9
def get_raster_idx(x_vect, y_vect, pt_srs, ras_ds, max_slope=20, pad=0):
    """Get raster index corresponding to the set of X,Y locations
    Note: pad added as an explicit parameter (it was undefined in the original snippet)
    """
    #Convert input xy coordinates to raster coordinates
    mX_fltr, mY_fltr, mZ = geolib.cT_helper(x_vect, y_vect, 0, pt_srs, geolib.get_ds_srs(ras_ds))
    pX_fltr, pY_fltr = geolib.mapToPixel(mX_fltr, mY_fltr, ras_ds.GetGeoTransform())
    pX_fltr = np.atleast_1d(pX_fltr)
    pY_fltr = np.atleast_1d(pY_fltr)

    #Sample raster
    #This returns median and mad for ICESat footprint
    samp = geolib.sample(ras_ds, mX_fltr, mY_fltr, pad=pad)
    samp_idx = ~(np.ma.getmaskarray(samp[:,0]))
    npts = samp_idx.nonzero()[0].size

    #Optional slope filter, disabled by default
    if False:
        print("Applying slope filter, masking points with slope > %0.1f" % max_slope)
        slope_ds = geolib.gdaldem_mem_ds(ras_ds, processing='slope', returnma=False)
        slope_samp = geolib.sample(slope_ds, mX_fltr, mY_fltr, pad=pad)
        slope_samp_idx = (slope_samp[:,0] <= max_slope).data
        samp_idx = np.logical_and(slope_samp_idx, samp_idx)

    return samp, samp_idx, npts, pX_fltr, pY_fltr
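
A hedged usage sketch for the sampler above. x and y (coordinate arrays), pt_srs (their spatial reference), and ras_ds (an open GDAL dataset) are assumed inputs; pad was undefined in the original snippet and is treated as a parameter here.

samp, samp_idx, npts, pX, pY = get_raster_idx(x, y, pt_srs, ras_ds)
print("%i points with valid raster samples" % npts)
z_med = samp[samp_idx, 0]  #Median value within each footprint
z_mad = samp[samp_idx, 1]  #MAD within each footprint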
Example #10
def compute_offset(ref_dem_ds, src_dem_ds, src_dem_fn, mode='nuth', remove_outliers=True, max_offset=100, \
        max_dz=100, slope_lim=(0.1, 40), mask_list=['glaciers',], plot=True):
    #Make sure the input datasets have the same resolution/extent
    #Use projection of source DEM
    ref_dem_clip_ds, src_dem_clip_ds = warplib.memwarp_multi([ref_dem_ds, src_dem_ds], \
            res='max', extent='intersection', t_srs=src_dem_ds, r='cubic')

    #Compute size of NCC and SAD search window in pixels
    res = float(geolib.get_res(ref_dem_clip_ds, square=True)[0])
    max_offset_px = (max_offset/res) + 1
    #print(max_offset_px)
    pad = (int(max_offset_px), int(max_offset_px))

    #This will be the updated geotransform for src_dem
    src_dem_gt = np.array(src_dem_clip_ds.GetGeoTransform())

    #Load the arrays
    ref_dem = iolib.ds_getma(ref_dem_clip_ds, 1)
    src_dem = iolib.ds_getma(src_dem_clip_ds, 1)

    print("Elevation difference stats for uncorrected input DEMs (src - ref)")
    diff = src_dem - ref_dem

    static_mask = get_mask(src_dem_clip_ds, mask_list, src_dem_fn)
    diff = np.ma.array(diff, mask=static_mask)

    if diff.count() == 0:
        sys.exit("No overlapping, unmasked pixels shared between input DEMs")

    if remove_outliers:
        diff = outlier_filter(diff, f=3, max_dz=max_dz)

    #Want to use higher quality DEM, should determine automatically from original res/count
    #slope = get_filtered_slope(ref_dem_clip_ds, slope_lim=slope_lim)
    slope = get_filtered_slope(src_dem_clip_ds, slope_lim=slope_lim)

    print("Computing aspect")
    #aspect = geolib.gdaldem_mem_ds(ref_dem_clip_ds, processing='aspect', returnma=True, computeEdges=False)
    aspect = geolib.gdaldem_mem_ds(src_dem_clip_ds, processing='aspect', returnma=True, computeEdges=False)

    ref_dem_clip_ds = None
    src_dem_clip_ds = None

    #Apply slope filter to diff
    #Note that we combine masks from diff and slope in coreglib
    diff = np.ma.array(diff, mask=np.ma.getmaskarray(slope))

    #Get final mask after filtering
    static_mask = np.ma.getmaskarray(diff)

    #Compute stats for new masked difference map
    print("Filtered difference map")
    diff_stats = malib.print_stats(diff)
    dz = diff_stats[5]

    print("Computing sub-pixel offset between DEMs using mode: %s" % mode)

    #By default, don't create output figure
    fig = None

    #Default horizontal shift is (0,0)
    dx = 0
    dy = 0

    #Sum of absolute differences
    if mode == "sad":
        ref_dem = np.ma.array(ref_dem, mask=static_mask)
        src_dem = np.ma.array(src_dem, mask=static_mask)
        m, int_offset, sp_offset = coreglib.compute_offset_sad(ref_dem, src_dem, pad=pad)
        #Geotransform has negative y resolution, so don't need negative sign
        #np array is positive down
        #GDAL coordinates are positive up
        dx = sp_offset[1]*src_dem_gt[1]
        dy = sp_offset[0]*src_dem_gt[5]
    #Normalized cross-correlation of clipped, overlapping areas
    elif mode == "ncc":
        ref_dem = np.ma.array(ref_dem, mask=static_mask)
        src_dem = np.ma.array(src_dem, mask=static_mask)
        m, int_offset, sp_offset, fig = coreglib.compute_offset_ncc(ref_dem, src_dem, \
                pad=pad, prefilter=False, plot=plot)
        dx = sp_offset[1]*src_dem_gt[1]
        dy = sp_offset[0]*src_dem_gt[5]
    #Nuth and Kaab (2011)
    elif mode == "nuth":
        #Compute relationship between elevation difference, slope and aspect
        fit_param, fig = coreglib.compute_offset_nuth(diff, slope, aspect, plot=plot)
        if fit_param is None:
            print("Failed to calculate horizontal shift")
        else:
            #fit_param[0] is magnitude of shift vector
            #fit_param[1] is direction of shift vector
            #fit_param[2] is mean bias divided by tangent of mean slope
            #print(fit_param)
            dx = fit_param[0]*np.sin(np.deg2rad(fit_param[1]))
            dy = fit_param[0]*np.cos(np.deg2rad(fit_param[1]))
            med_slope = malib.fast_median(slope)
            nuth_dz = fit_param[2]*np.tan(np.deg2rad(med_slope))
            print('Median dz: %0.2f\nNuth dz: %0.2f' % (dz, nuth_dz))
            #dz = nuth_dz
    elif mode == "all":
        print("Not yet implemented")
        #Want to compare all methods, average offsets
        #m, int_offset, sp_offset = coreglib.compute_offset_sad(ref_dem, src_dem)
        #m, int_offset, sp_offset = coreglib.compute_offset_ncc(ref_dem, src_dem)
    elif mode == "none":
        print("Skipping alignment, writing out DEM with median bias over static surfaces removed")
        #Note: outprefix and src_dem_orig are assumed to be defined in the enclosing script
        dst_fn = outprefix+'_med%0.1f.tif' % dz
        iolib.writeGTiff(src_dem_orig + dz, dst_fn, src_dem_ds)
        sys.exit()
    #Note: minus signs here since we are computing dz=(src-ref), but adjusting src
    return -dx, -dy, -dz, static_mask, fig
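
A minimal sketch of applying the returned offsets, mirroring the iterative loop in the main() example below; ref_dem_ds, src_dem_ds, and src_dem_fn are assumed to be defined.

#Single pass: compute and apply the shift (main() below iterates until convergence)
dx, dy, dz, static_mask, fig = compute_offset(ref_dem_ds, src_dem_ds, src_dem_fn, mode='nuth')
src_dem_ds = coreglib.apply_xy_shift(src_dem_ds, dx, dy, createcopy=False)
src_dem_ds = coreglib.apply_z_shift(src_dem_ds, dz, createcopy=False)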
Example #11
def main(argv=None):
    parser = getparser()
    args = parser.parse_args()

    #Should check that files exist
    ref_dem_fn = args.ref_fn
    src_dem_fn = args.src_fn

    mode = args.mode
    mask_list = args.mask_list
    max_offset = args.max_offset
    max_dz = args.max_dz
    slope_lim = tuple(args.slope_lim)
    tiltcorr = args.tiltcorr
    polyorder = args.polyorder
    res = args.res

    #Maximum number of iterations
    max_iter = args.max_iter

    #These are tolerances (in meters) to stop iteration
    tol = args.tol
    min_dx = tol
    min_dy = tol
    min_dz = tol

    outdir = args.outdir
    if outdir is None:
        outdir = os.path.splitext(src_dem_fn)[0] + '_dem_align'

    if tiltcorr:
        outdir += '_tiltcorr'
        tiltcorr_done = False
        #Relax tolerance for initial round of co-registration
        #tiltcorr_tol = 0.1
        #if tol < tiltcorr_tol:
        #    tol = tiltcorr_tol

    if not os.path.exists(outdir):
        os.makedirs(outdir)

    outprefix = '%s_%s' % (os.path.splitext(os.path.split(src_dem_fn)[-1])[0], \
            os.path.splitext(os.path.split(ref_dem_fn)[-1])[0])
    outprefix = os.path.join(outdir, outprefix)

    print("\nReference: %s" % ref_dem_fn)
    print("Source: %s" % src_dem_fn)
    print("Mode: %s" % mode)
    print("Output: %s\n" % outprefix)

    src_dem_ds = gdal.Open(src_dem_fn)
    ref_dem_ds = gdal.Open(ref_dem_fn)

    #Get local cartesian coordinate system
    #local_srs = geolib.localtmerc_ds(src_dem_ds)
    #Use original source dataset coordinate system
    #Potential issues with distortion and xyz/tiltcorr offsets for DEM with large extent
    local_srs = geolib.get_ds_srs(src_dem_ds)
    #local_srs = geolib.get_ds_srs(ref_dem_ds)

    #Resample to common grid
    ref_dem_res = float(geolib.get_res(ref_dem_ds, t_srs=local_srs, square=True)[0])
    #Create a copy to be updated in place
    src_dem_ds_align = iolib.mem_drv.CreateCopy('', src_dem_ds, 0)
    src_dem_res = float(geolib.get_res(src_dem_ds, t_srs=local_srs, square=True)[0])
    src_dem_ds = None
    #Resample to user-specified resolution
    ref_dem_ds, src_dem_ds_align = warplib.memwarp_multi([ref_dem_ds, src_dem_ds_align], \
            extent='intersection', res=args.res, t_srs=local_srs, r='cubic')

    res = float(geolib.get_res(src_dem_ds_align, square=True)[0])
    print("\nReference DEM res: %0.2f" % ref_dem_res)
    print("Source DEM res: %0.2f" % src_dem_res)
    print("Resolution for coreg: %s (%0.2f m)\n" % (args.res, res))

    #Iteration number
    n = 1
    #Cumulative offsets
    dx_total = 0
    dy_total = 0
    dz_total = 0

    #Now iteratively update geotransform and vertical shift
    while True:
        print("*** Iteration %i ***" % n)
        #Note: max_offset must be passed by keyword; positionally it would bind to remove_outliers
        dx, dy, dz, static_mask, fig = compute_offset(ref_dem_ds, src_dem_ds_align, src_dem_fn, mode, \
                max_offset=max_offset, mask_list=mask_list, max_dz=max_dz, slope_lim=slope_lim, plot=True)
        xyz_shift_str_iter = "dx=%+0.2fm, dy=%+0.2fm, dz=%+0.2fm" % (dx, dy, dz)
        print("Incremental offset: %s" % xyz_shift_str_iter)

        dx_total += dx
        dy_total += dy
        dz_total += dz

        xyz_shift_str_cum = "dx=%+0.2fm, dy=%+0.2fm, dz=%+0.2fm" % (dx_total, dy_total, dz_total)
        print("Cumulative offset: %s" % xyz_shift_str_cum)
        #String to append to output filenames
        xyz_shift_str_cum_fn = '_%s_x%+0.2f_y%+0.2f_z%+0.2f' % (mode, dx_total, dy_total, dz_total)

        #Should make an animation of this converging
        if n == 1: 
            #static_mask_orig = static_mask
            if fig is not None:
                dst_fn = outprefix + '_%s_iter%02i_plot.png' % (mode, n)
                print("Writing offset plot: %s" % dst_fn)
                fig.gca().set_title("Incremental: %s\nCumulative: %s" % (xyz_shift_str_iter, xyz_shift_str_cum))
                fig.savefig(dst_fn, dpi=300)

        #Apply the horizontal shift to the original dataset
        src_dem_ds_align = coreglib.apply_xy_shift(src_dem_ds_align, dx, dy, createcopy=False)
        src_dem_ds_align = coreglib.apply_z_shift(src_dem_ds_align, dz, createcopy=False)

        n += 1
        print("\n")
        #If magnitude of shift in all directions is less than tol
        #if n > max_iter or (abs(dx) <= min_dx and abs(dy) <= min_dy and abs(dz) <= min_dz):
        #If magnitude of shift is less than tol
        dm = np.sqrt(dx**2 + dy**2 + dz**2)
        dm_total = np.sqrt(dx_total**2 + dy_total**2 + dz_total**2)

        if dm_total > max_offset:
            sys.exit("Total offset exceeded specified max_offset (%0.2f m). Consider increasing -max_offset argument" % max_offset)

        #Stop iteration
        if n > max_iter or dm < tol:

            if fig is not None:
                dst_fn = outprefix + '_%s_iter%02i_plot.png' % (mode, n)
                print("Writing offset plot: %s" % dst_fn)
                fig.gca().set_title("Incremental: %s\nCumulative: %s" % (xyz_shift_str_iter, xyz_shift_str_cum))
                fig.savefig(dst_fn, dpi=300)

            #Compute final elevation difference
            if True:
                ref_dem_clip_ds_align, src_dem_clip_ds_align = warplib.memwarp_multi([ref_dem_ds, src_dem_ds_align], \
                        res=res, extent='intersection', t_srs=local_srs, r='cubic')
                ref_dem_align = iolib.ds_getma(ref_dem_clip_ds_align, 1)
                src_dem_align = iolib.ds_getma(src_dem_clip_ds_align, 1)
                ref_dem_clip_ds_align = None

                diff_align = src_dem_align - ref_dem_align
                src_dem_align = None
                ref_dem_align = None

                #Get updated, final mask
                static_mask_final = get_mask(src_dem_clip_ds_align, mask_list, src_dem_fn)
                static_mask_final = np.logical_or(np.ma.getmaskarray(diff_align), static_mask_final)
                
                #Final stats, before outlier removal
                diff_align_compressed = diff_align[~static_mask_final]
                diff_align_stats = malib.get_stats_dict(diff_align_compressed, full=True)

                #Prepare filtered version for tiltcorr fit
                diff_align_filt = np.ma.array(diff_align, mask=static_mask_final)
                diff_align_filt = outlier_filter(diff_align_filt, f=3, max_dz=max_dz)
                #diff_align_filt = outlier_filter(diff_align_filt, perc=(12.5, 87.5), max_dz=max_dz)
                slope = get_filtered_slope(src_dem_clip_ds_align)
                diff_align_filt = np.ma.array(diff_align_filt, mask=np.ma.getmaskarray(slope))
                diff_align_filt_stats = malib.get_stats_dict(diff_align_filt, full=True)

            #Fit 2D polynomial to residuals and remove
            #To do: add support for along-track and cross-track artifacts
            if tiltcorr and not tiltcorr_done:
                print("\n************")
                print("Calculating 'tiltcorr' 2D polynomial fit to residuals with order %i" % polyorder)
                print("************\n")
                gt = src_dem_clip_ds_align.GetGeoTransform()

                #Need to apply the mask here, so we're only fitting over static surfaces
                #Note that the origmask=False will compute vals for all x and y indices, which is what we want
                vals, resid, coeff = geolib.ma_fitpoly(diff_align_filt, order=polyorder, gt=gt, perc=(0,100), origmask=False)
                #vals, resid, coeff = geolib.ma_fitplane(diff_align_filt, gt, perc=(12.5, 87.5), origmask=False)

                #Should write out coeff or grid with correction 

                vals_stats = malib.get_stats_dict(vals)

                #Want to have max_tilt check here
                #max_tilt = 4.0 #m
                #Should do percentage
                #vals.ptp() > max_tilt

                #Note: dimensions of ds and vals will be different as vals are computed for clipped intersection
                #Need to recompute planar offset for full src_dem_ds_align extent and apply
                xgrid, ygrid = geolib.get_xy_grids(src_dem_ds_align)
                valgrid = geolib.polyval2d(xgrid, ygrid, coeff) 
                #For results of ma_fitplane
                #valgrid = coeff[0]*xgrid + coeff[1]*ygrid + coeff[2]
                src_dem_ds_align = coreglib.apply_z_shift(src_dem_ds_align, -valgrid, createcopy=False)

                if True:
                    print("Creating plot of polynomial fit to residuals")
                    fig, axa = plt.subplots(1,2, figsize=(8, 4))
                    dz_clim = malib.calcperc_sym(vals, (2, 98))
                    ax = pltlib.iv(diff_align_filt, ax=axa[0], cmap='RdBu', clim=dz_clim, \
                            label='Residual dz (m)', scalebar=False)
                    ax = pltlib.iv(valgrid, ax=axa[1], cmap='RdBu', clim=dz_clim, \
                            label='Polyfit dz (m)', ds=src_dem_ds_align)
                    #if tiltcorr:
                        #xyz_shift_str_cum_fn += "_tiltcorr"
                    tiltcorr_fig_fn = outprefix + '%s_polyfit.png' % xyz_shift_str_cum_fn
                    print("Writing out figure: %s\n" % tiltcorr_fig_fn)
                    fig.savefig(tiltcorr_fig_fn, dpi=300)

                print("Applying tilt correction to difference map")
                diff_align -= vals

                #Should iterate until tilts are below some threshold
                #For now, only do one tiltcorr
                tiltcorr_done=True
                #Now use original tolerance, and number of iterations 
                tol = args.tol
                max_iter = n + args.max_iter
            else:
                break

    if True:
        #Write out aligned difference map for clipped extent with vertical offset removed
        align_diff_fn = outprefix + '%s_align_diff.tif' % xyz_shift_str_cum_fn
        print("Writing out aligned difference map with median vertical offset removed")
        iolib.writeGTiff(diff_align, align_diff_fn, src_dem_clip_ds_align)

    if True:
        #Write out filtered aligned difference map
        align_diff_filt_fn = outprefix + '%s_align_diff_filt.tif' % xyz_shift_str_cum_fn
        print("Writing out filtered aligned difference map with median vertical offset removed")
        iolib.writeGTiff(diff_align_filt, align_diff_filt_fn, src_dem_clip_ds_align)

    #Extract final center coordinates for intersection
    center_coord_ll = geolib.get_center(src_dem_clip_ds_align, t_srs=geolib.wgs_srs)
    center_coord_xy = geolib.get_center(src_dem_clip_ds_align)
    src_dem_clip_ds_align = None

    #Write out final aligned src_dem 
    align_fn = outprefix + '%s_align.tif' % xyz_shift_str_cum_fn
    print("Writing out shifted src_dem with median vertical offset removed: %s" % align_fn)
    #Open original uncorrected dataset at native resolution
    src_dem_ds = gdal.Open(src_dem_fn)
    src_dem_ds_align = iolib.mem_drv.CreateCopy('', src_dem_ds, 0)
    #Apply final horizontal and vertical shift to the original dataset
    #Note: potential issues if we used a different projection during coregistration!
    src_dem_ds_align = coreglib.apply_xy_shift(src_dem_ds_align, dx_total, dy_total, createcopy=False)
    src_dem_ds_align = coreglib.apply_z_shift(src_dem_ds_align, dz_total, createcopy=False)
    if tiltcorr:
        xgrid, ygrid = geolib.get_xy_grids(src_dem_ds_align)
        valgrid = geolib.polyval2d(xgrid, ygrid, coeff) 
        #For results of ma_fitplane
        #valgrid = coeff[0]*xgrid + coeff[1]*ygrid + coeff[2]
        src_dem_ds_align = coreglib.apply_z_shift(src_dem_ds_align, -valgrid, createcopy=False)
    #Might be cleaner way to write out MEM ds directly to disk
    src_dem_full_align = iolib.ds_getma(src_dem_ds_align)
    iolib.writeGTiff(src_dem_full_align, align_fn, src_dem_ds_align)

    if True:
        #Output final aligned src_dem, masked so only best pixels are preserved
        #Useful if creating a new reference product
        #Can also use apply_mask.py 
        print("Applying filter to shiftec src_dem")
        align_diff_filt_full_ds = warplib.memwarp_multi_fn([align_diff_filt_fn,], res=src_dem_ds_align, extent=src_dem_ds_align, \
                t_srs=src_dem_ds_align)[0]
        align_diff_filt_full = iolib.ds_getma(align_diff_filt_full_ds)
        align_diff_filt_full_ds = None
        align_fn_masked = outprefix + '%s_align_filt.tif' % xyz_shift_str_cum_fn
        iolib.writeGTiff(np.ma.array(src_dem_full_align, mask=np.ma.getmaskarray(align_diff_filt_full)), \
                align_fn_masked, src_dem_ds_align)

    src_dem_full_align = None
    src_dem_ds_align = None

    #Compute original elevation difference
    if True:
        ref_dem_clip_ds, src_dem_clip_ds = warplib.memwarp_multi([ref_dem_ds, src_dem_ds], \
                res=res, extent='intersection', t_srs=local_srs, r='cubic')
        src_dem_ds = None
        ref_dem_ds = None
        ref_dem_orig = iolib.ds_getma(ref_dem_clip_ds)
        src_dem_orig = iolib.ds_getma(src_dem_clip_ds)
        #Needed for plotting
        ref_dem_hs = geolib.gdaldem_mem_ds(ref_dem_clip_ds, processing='hillshade', returnma=True, computeEdges=True)
        src_dem_hs = geolib.gdaldem_mem_ds(src_dem_clip_ds, processing='hillshade', returnma=True, computeEdges=True)
        diff_orig = src_dem_orig - ref_dem_orig
        #Only compute stats over valid surfaces
        static_mask_orig = get_mask(src_dem_clip_ds, mask_list, src_dem_fn)
        #Note: this doesn't include outlier removal or slope mask!
        static_mask_orig = np.logical_or(np.ma.getmaskarray(diff_orig), static_mask_orig)
        #For some reason, ASTER DEM diffs have a spike near the 0 bin, could be an issue with masking?
        diff_orig_compressed = diff_orig[~static_mask_orig]
        diff_orig_stats = malib.get_stats_dict(diff_orig_compressed, full=True)

        #Prepare filtered version for comparison 
        diff_orig_filt = np.ma.array(diff_orig, mask=static_mask_orig)
        diff_orig_filt = outlier_filter(diff_orig_filt, f=3, max_dz=max_dz)
        #diff_orig_filt = outlier_filter(diff_orig_filt, perc=(12.5, 87.5), max_dz=max_dz)
        slope = get_filtered_slope(src_dem_clip_ds)
        diff_orig_filt = np.ma.array(diff_orig_filt, mask=np.ma.getmaskarray(slope))
        diff_orig_filt_stats = malib.get_stats_dict(diff_orig_filt, full=True)

        #Write out original difference map
        print("Writing out original difference map for common intersection before alignment")
        orig_diff_fn = outprefix + '_orig_diff.tif'
        iolib.writeGTiff(diff_orig, orig_diff_fn, ref_dem_clip_ds)
        src_dem_clip_ds = None
        ref_dem_clip_ds = None

    if True:
        align_stats_fn = outprefix + '%s_align_stats.json' % xyz_shift_str_cum_fn
        align_stats = {}
        align_stats['src_fn'] = src_dem_fn 
        align_stats['ref_fn'] = ref_dem_fn 
        align_stats['align_fn'] = align_fn 
        align_stats['res'] = {} 
        align_stats['res']['src'] = src_dem_res
        align_stats['res']['ref'] = ref_dem_res
        align_stats['res']['coreg'] = res
        align_stats['center_coord'] = {'lon':center_coord_ll[0], 'lat':center_coord_ll[1], \
                'x':center_coord_xy[0], 'y':center_coord_xy[1]}
        align_stats['shift'] = {'dx':dx_total, 'dy':dy_total, 'dz':dz_total, 'dm':dm_total}
        #This tiltcorr flag gets set to false, need better flag
        if tiltcorr:
            align_stats['tiltcorr'] = {}
            align_stats['tiltcorr']['coeff'] = coeff.tolist()
            align_stats['tiltcorr']['val_stats'] = vals_stats
        align_stats['before'] = diff_orig_stats
        align_stats['before_filt'] = diff_orig_filt_stats
        align_stats['after'] = diff_align_stats
        align_stats['after_filt'] = diff_align_filt_stats
        
        import json
        with open(align_stats_fn, 'w') as f:
            json.dump(align_stats, f)

    #Create output plot
    if True:
        print("Creating final plot")
        kwargs = {'interpolation':'none'}
        #f, axa = plt.subplots(2, 4, figsize=(11, 8.5))
        f, axa = plt.subplots(2, 4, figsize=(16, 8))
        for ax in axa.ravel()[:-1]:
            ax.set_facecolor('k')
            pltlib.hide_ticks(ax)
        dem_clim = malib.calcperc(ref_dem_orig, (2,98))
        axa[0,0].imshow(ref_dem_hs, cmap='gray', **kwargs)
        im = axa[0,0].imshow(ref_dem_orig, cmap='cpt_rainbow', clim=dem_clim, alpha=0.6, **kwargs)
        pltlib.add_cbar(axa[0,0], im, arr=ref_dem_orig, clim=dem_clim, label=None)
        pltlib.add_scalebar(axa[0,0], res=res)
        axa[0,0].set_title('Reference DEM')
        axa[0,1].imshow(src_dem_hs, cmap='gray', **kwargs)
        im = axa[0,1].imshow(src_dem_orig, cmap='cpt_rainbow', clim=dem_clim, alpha=0.6, **kwargs)
        pltlib.add_cbar(axa[0,1], im, arr=src_dem_orig, clim=dem_clim, label=None)
        axa[0,1].set_title('Source DEM')
        #axa[0,2].imshow(~static_mask_orig, clim=(0,1), cmap='gray')
        axa[0,2].imshow(~static_mask, clim=(0,1), cmap='gray', **kwargs)
        axa[0,2].set_title('Surfaces for co-registration')
        dz_clim = malib.calcperc_sym(diff_orig_compressed, (5, 95))
        im = axa[1,0].imshow(diff_orig, cmap='RdBu', clim=dz_clim)
        pltlib.add_cbar(axa[1,0], im, arr=diff_orig, clim=dz_clim, label=None)
        axa[1,0].set_title('Elev. Diff. Before (m)')
        im = axa[1,1].imshow(diff_align, cmap='RdBu', clim=dz_clim)
        pltlib.add_cbar(axa[1,1], im, arr=diff_align, clim=dz_clim, label=None)
        axa[1,1].set_title('Elev. Diff. After (m)')

        #tight_dz_clim = (-1.0, 1.0)
        tight_dz_clim = (-2.0, 2.0)
        #tight_dz_clim = (-10.0, 10.0)
        #tight_dz_clim = malib.calcperc_sym(diff_align_filt, (5, 95))
        im = axa[1,2].imshow(diff_align_filt, cmap='RdBu', clim=tight_dz_clim)
        pltlib.add_cbar(axa[1,2], im, arr=diff_align_filt, clim=tight_dz_clim, label=None)
        axa[1,2].set_title('Elev. Diff. After (m)')

        #Tried to insert Nuth fig here
        #ax_nuth.change_geometry(1,2,1)
        #f.axes.append(ax_nuth)

        bins = np.linspace(dz_clim[0], dz_clim[1], 128)
        axa[1,3].hist(diff_orig_compressed, bins, color='g', label='Before', alpha=0.5)
        axa[1,3].hist(diff_align_compressed, bins, color='b', label='After', alpha=0.5)
        axa[1,3].set_xlim(*dz_clim)
        axa[1,3].axvline(0, color='k', linewidth=0.5, linestyle=':')
        axa[1,3].set_xlabel('Elev. Diff. (m)')
        axa[1,3].set_ylabel('Count (px)')
        axa[1,3].set_title("Source - Reference")
        before_str = 'Before\nmed: %0.2f\nnmad: %0.2f' % (diff_orig_stats['med'], diff_orig_stats['nmad'])
        axa[1,3].text(0.05, 0.95, before_str, va='top', color='g', transform=axa[1,3].transAxes, fontsize=8)
        after_str = 'After\nmed: %0.2f\nnmad: %0.2f' % (diff_align_stats['med'], diff_align_stats['nmad'])
        axa[1,3].text(0.65, 0.95, after_str, va='top', color='b', transform=axa[1,3].transAxes, fontsize=8)

        #This is empty
        axa[0,3].axis('off')

        suptitle = '%s\nx: %+0.2fm, y: %+0.2fm, z: %+0.2fm' % (os.path.split(outprefix)[-1], dx_total, dy_total, dz_total)
        f.suptitle(suptitle)
        f.tight_layout()
        plt.subplots_adjust(top=0.90)

        fig_fn = outprefix + '%s_align.png' % xyz_shift_str_cum_fn
        print("Writing out figure: %s" % fig_fn)
        f.savefig(fig_fn, dpi=300)
Example #12
    print("Loading input DEM and Snow depth into masked arrays")
    dem1 = iolib.ds_getma(dem1_ds, 1)
    dz = iolib.ds_getma(dz_ds, 1)
    #Try to pull out second timestamp from dz_fn
    dem2_ts = timelib.fn_getdatetime_list(dz_fn)[-1]
    outprefix = os.path.splitext(os.path.split(dz_fn)[1])[0]

outprefix = os.path.join(args.outdir, outprefix)

#Calculate water year (Oct 1 - Sep 30, labeled by the calendar year in which it ends)
#dem1_ts (timestamp of the snow-free DEM) is assumed to be defined earlier in the script
wy = dem1_ts.year
if dem1_ts.month >= 10:
    wy = dem1_ts.year + 1

#These need to be updated in geolib to use gdaldem API
hs = geolib.gdaldem_mem_ds(dem1_ds, processing='hillshade', returnma=True)
hs_clim = (1,255)

dem_clim = malib.calcperc(dem1, (1,99))
res = geolib.get_res(dem1_ds)[0]

if args.density is None:
    #Attempt to extract from nearby SNOTEL sites for dem_ts
    #Attempt to use model
    #Last resort, use constant value
    rho_s = 0.5
    #rho_s = 0.4
    #rho_s = 0.36
else:
    #Use user-specified snow density
    rho_s = args.density

#Convert snow depth to swe
swe = dz * rho_s
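
A hedged continuation: swe is in meters water equivalent (depth times density), and can be written to disk with the writeGTiff pattern used throughout these examples; the output naming is a placeholder.

swe_fn = outprefix + '_wy%i_swe.tif' % wy  #Placeholder naming convention
print("Writing out: %s" % swe_fn)
iolib.writeGTiff(swe, swe_fn, dz_ds)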
Example #13
def mb_calc(gf, z1_date=z1_date, z2_date=z2_date, verbose=verbose):
    #print("\n%i of %i: %s\n" % (n+1, len(glacfeat_list), gf.feat_fn))
    print(gf.feat_fn)

    #This should already be handled by earlier attribute filter, but RGI area could be wrong
    #24k shp has area in m^2, RGI in km^2
    #if gf.glac_area/1E6 < min_glac_area:
    if gf.glac_area < min_glac_area:
        if verbose:
            print("Glacier area below %0.1f km2 threshold" % min_glac_area)
        return None

    #Warp everything to common res/extent/proj
    ds_list = warplib.memwarp_multi_fn([z1_fn, z2_fn], res='min', \
            extent=gf.glac_geom_extent, t_srs=aea_srs, verbose=verbose)

    if site == 'conus':
        #Add prism datasets
        prism_fn_list = [prism_ppt_annual_fn, prism_tmean_annual_fn]
        prism_fn_list.extend([
            prism_ppt_summer_fn, prism_ppt_winter_fn, prism_tmean_summer_fn,
            prism_tmean_winter_fn
        ])
        ds_list.extend(warplib.memwarp_multi_fn(prism_fn_list, res=ds_list[0], \
                extent=gf.glac_geom_extent, t_srs=aea_srs, verbose=verbose))

    if site == 'hma':
        #Add debris cover datasets
        #Should tar this up, and extract only necessary file
        #Downloaded from: http://mountainhydrology.org/data-nature-2017/
        kra_nature_dir = '/nobackup/deshean/data/Kraaijenbrink_hma/regions/out'
        #This assumes that numbers are identical between RGI50 and RGI60
        debris_class_fn = os.path.join(
            kra_nature_dir, 'RGI50-%s/classification.tif' % gf.glacnum)
        debris_thick_fn = os.path.join(
            kra_nature_dir, 'RGI50-%s/debris-thickness-50cm.tif' % gf.glacnum)
        ice_thick_fn = os.path.join(kra_nature_dir,
                                    'RGI50-%s/ice-thickness.tif' % gf.glacnum)
        hma_fn_list = []
        if os.path.exists(debris_class_fn):
            hma_fn_list.append(debris_class_fn)
        if os.path.exists(debris_thick_fn):
            hma_fn_list.append(debris_thick_fn)
        if os.path.exists(ice_thick_fn):
            hma_fn_list.append(ice_thick_fn)
        if len(hma_fn_list) > 0:
            #Add velocity
            hma_fn_list.extend([vx_fn, vy_fn])
            ds_list.extend(warplib.memwarp_multi_fn(hma_fn_list, res=ds_list[0], \
                    extent=gf.glac_geom_extent, t_srs=aea_srs, verbose=verbose))

    #Check to see if z2 is empty, as z1 should be continuous
    gf.z2 = iolib.ds_getma(ds_list[1])
    if gf.z2.count() == 0:
        if verbose:
            print("No z2 pixels")
        return None

    glac_geom_mask = geolib.geom2mask(gf.glac_geom, ds_list[0])
    gf.z1 = np.ma.array(iolib.ds_getma(ds_list[0]), mask=glac_geom_mask)
    #Apply SRTM penetration correction
    if z1_srtm_penetration_corr:
        gf.z1 = srtm_corr(gf.z1)
    if z2_srtm_penetration_corr:
        gf.z2 = srtm_corr(gf.z2)
    gf.z2 = np.ma.array(gf.z2, mask=glac_geom_mask)
    gf.dz = gf.z2 - gf.z1
    if gf.dz.count() == 0:
        if verbose:
            print("No valid dz pixels")
        return None

    #Should add better filtering here
    #Elevation dependent abs. threshold filter?

    filter_outliers = True
    #Remove clearly bogus pixels
    if filter_outliers:
        bad_perc = (0.1, 99.9)
        #bad_perc = (1, 99)
        rangelim = malib.calcperc(gf.dz, bad_perc)
        gf.dz = np.ma.masked_outside(gf.dz, *rangelim)

    gf.res = geolib.get_res(ds_list[0])
    valid_area = gf.dz.count() * gf.res[0] * gf.res[1]
    valid_area_perc = valid_area / gf.glac_area
    if valid_area_perc < min_valid_area_perc:
        if verbose:
            print(
                "Not enough valid pixels: %0.1f%% of glacier polygon area"
                % (100 * valid_area_perc))
        return None

    #Filter dz - throw out abs differences >150 m

    #Compute dz, volume change, mass balance and stats
    gf.z1_stats = malib.get_stats(gf.z1)
    gf.z2_stats = malib.get_stats(gf.z2)
    z2_elev_med = gf.z2_stats[5]
    z2_elev_p16 = gf.z2_stats[11]
    z2_elev_p84 = gf.z2_stats[12]

    #Calculate stats for aspect and slope using z2
    #Requires GDAL 2.1+
    gf.z2_aspect = np.ma.array(geolib.gdaldem_mem_ds(ds_list[1],
                                                     processing='aspect',
                                                     returnma=True),
                               mask=glac_geom_mask)
    gf.z2_aspect_stats = malib.get_stats(gf.z2_aspect)
    z2_aspect_med = gf.z2_aspect_stats[5]
    gf.z2_slope = np.ma.array(geolib.gdaldem_mem_ds(ds_list[1],
                                                    processing='slope',
                                                    returnma=True),
                              mask=glac_geom_mask)
    gf.z2_slope_stats = malib.get_stats(gf.z2_slope)
    z2_slope_med = gf.z2_slope_stats[5]

    #Rasterize source dates
    if z1_date is None:
        z1_date = get_date_a(ds_list[0], z1_date_shp_lyr, glac_geom_mask,
                             z1_datefield)
        gf.t1 = z1_date.mean()
    else:
        gf.t1 = z1_date

    if z2_date is None:
        z2_date = get_date_a(ds_list[0], z2_date_shp_lyr, glac_geom_mask,
                             z2_datefield)
        #Attempt to use YYYYMMDD string
        #z2_dta = np.datetime64(z2_date.astype("S8").tolist())
        gf.t2 = z2_date.mean()
    else:
        gf.t2 = z2_date

    if isinstance(gf.t1, datetime):
        gf.t1 = timelib.dt2decyear(gf.t1)

    if isinstance(gf.t2, datetime):
        gf.t2 = timelib.dt2decyear(gf.t2)

    gf.t1 = float(gf.t1)
    gf.t2 = float(gf.t2)

    #Calculate dt grids
    #gf.dt = z2_date - z1_date
    #gf.dt = gf.dt.mean()
    #This should be decimal years
    gf.dt = gf.t2 - gf.t1
    #if isinstance(gf.dt, timedelta):
    #    gf.dt = gf.dt.total_seconds()/timelib.spy
    #Calculate dh/dt, in m/yr
    gf.dhdt = gf.dz / gf.dt
    gf.dhdt_stats = malib.get_stats(gf.dhdt)
    dhdt_mean = gf.dhdt_stats[3]
    dhdt_med = gf.dhdt_stats[5]

    rho_i = 0.91
    rho_s = 0.50
    rho_f = 0.60

    #This is recommendation by Huss et al (2013)
    rho_is = 0.85
    rho_sigma = 0.06

    #Can estimate ELA values computed from hypsometry and typical AAR
    #For now, assume ELA is mean
    gf.z1_ela = None
    gf.z1_ela = gf.z1_stats[3]
    gf.z2_ela = gf.z2_stats[3]
    #Note: in theory, the ELA should get higher with mass loss
    #In practice, using mean and same polygon, ELA gets lower as glacier surface thins
    if verbose:
        print("ELA(t1): %0.1f" % gf.z1_ela)
        print("ELA(t2): %0.1f" % gf.z2_ela)

    if gf.z1_ela > gf.z2_ela:
        min_ela = gf.z2_ela
        max_ela = gf.z1_ela
    else:
        min_ela = gf.z1_ela
        max_ela = gf.z2_ela

    #Calculate mass balance map from dhdt
    gf.mb = gf.dhdt * rho_is
    """
    # This attempted to assign different densities above and below ELA
    if gf.z1_ela is None:
        gf.mb = gf.dhdt * rho_is
    else:
        #Initiate with average density
        gf.mb = gf.dhdt*(rho_is + rho_f)/2.
        #Everything that is above ELA at t2 is elevation change over firn, use firn density
        accum_mask = (gf.z2 > gf.z2_ela).filled(0).astype(bool)
        gf.mb[accum_mask] = (gf.dhdt*rho_f)[accum_mask]
        #Everything that is below ELA at t1 is elevation change over ice, use ice density
        abl_mask = (gf.z1 <= gf.z1_ela).filled(0).astype(bool)
        gf.mb[abl_mask] = (gf.dhdt*rho_is)[abl_mask]
        #Everything in between, use average of ice and firn density
        #mb[(z1 > z1_ela) || (z2 <= z2_ela)] = dhdt*(rhois + rho_f)/2.
        #Linear ramp
        #rho_f + z2*((rho_is - rho_f)/(z2_ela - z1_ela))
        #mb = np.where(dhdt < ela, dhdt*rho_i, dhdt*rho_s)
    """

    #Use this for winter balance
    #mb = dhdt * rho_s

    gf.mb_stats = malib.get_stats(gf.mb)
    gf.mb_mean = gf.mb_stats[3]

    #Calculate uncertainty of total elevation change
    #TODO: Better spatial distribution characterization
    #Add slope-dependent component here
    dz_sigma = np.sqrt(z1_sigma**2 + z2_sigma**2)
    #Uncertainty of dh/dt
    dhdt_sigma = dz_sigma / gf.dt

    #This is mb uncertainty map
    gf.mb_sigma = np.ma.abs(gf.mb) * np.sqrt((rho_sigma / rho_is)**2 +
                                             (dhdt_sigma / gf.dhdt)**2)
    gf.mb_sigma_stats = malib.get_stats(gf.mb_sigma)
    #This is average mb uncertainty
    gf.mb_mean_sigma = gf.mb_sigma_stats[3]

    #Now calculate mb for entire polygon
    area_sigma_perc = 0.09
    gf.mb_mean_totalarea = gf.mb_mean * gf.glac_area
    #Already have area uncertainty as percentage, just use directly
    gf.mb_mean_totalarea_sigma = np.ma.abs(gf.mb_mean_totalarea) * np.sqrt(
        (gf.mb_mean_sigma / gf.mb_mean)**2 + area_sigma_perc**2)

    mb_sum = np.sum(gf.mb) * gf.res[0] * gf.res[1]

    outlist = [gf.glacnum, gf.cx, gf.cy, z2_elev_med, z2_elev_p16, z2_elev_p84, z2_slope_med, z2_aspect_med, \
            gf.mb_mean, gf.mb_mean_sigma, gf.glac_area, gf.mb_mean_totalarea, gf.mb_mean_totalarea_sigma, \
            gf.t1, gf.t2, gf.dt]

    if site == 'conus':
        prism_ppt_annual = np.ma.array(iolib.ds_getma(ds_list[2]),
                                       mask=glac_geom_mask) / 1000.
        prism_ppt_annual_stats = malib.get_stats(prism_ppt_annual)
        prism_ppt_annual_mean = prism_ppt_annual_stats[3]

        prism_tmean_annual = np.ma.array(iolib.ds_getma(ds_list[3]),
                                         mask=glac_geom_mask)
        prism_tmean_annual_stats = malib.get_stats(prism_tmean_annual)
        prism_tmean_annual_mean = prism_tmean_annual_stats[3]

        outlist.extend([prism_ppt_annual_mean, prism_tmean_annual_mean])

        #This is mean monthly summer precip, need to multiply by nmonths to get cumulative
        n_summer = 4
        prism_ppt_summer = n_summer * np.ma.array(iolib.ds_getma(ds_list[4]),
                                                  mask=glac_geom_mask) / 1000.
        prism_ppt_summer_stats = malib.get_stats(prism_ppt_summer)
        prism_ppt_summer_mean = prism_ppt_summer_stats[3]

        n_winter = 8
        prism_ppt_winter = n_winter * np.ma.array(iolib.ds_getma(ds_list[5]),
                                                  mask=glac_geom_mask) / 1000.
        prism_ppt_winter_stats = malib.get_stats(prism_ppt_winter)
        prism_ppt_winter_mean = prism_ppt_winter_stats[3]

        prism_tmean_summer = np.ma.array(iolib.ds_getma(ds_list[6]),
                                         mask=glac_geom_mask)
        prism_tmean_summer_stats = malib.get_stats(prism_tmean_summer)
        prism_tmean_summer_mean = prism_tmean_summer_stats[3]

        prism_tmean_winter = np.ma.array(iolib.ds_getma(ds_list[7]),
                                         mask=glac_geom_mask)
        prism_tmean_winter_stats = malib.get_stats(prism_tmean_winter)
        prism_tmean_winter_mean = prism_tmean_winter_stats[3]

        outlist.extend([
            prism_ppt_summer_mean, prism_ppt_winter_mean,
            prism_tmean_summer_mean, prism_tmean_winter_mean
        ])

    if site == 'hma':
        #Classes are: 1 = clean ice, 2 = debris, 3 = pond
        #Load up debris cover maps, ice thickness
        if len(ds_list) > 2:
            gf.debris_class = np.ma.array(iolib.ds_getma(ds_list[2]),
                                          mask=glac_geom_mask)
            gf.debris_thick = np.ma.array(iolib.ds_getma(ds_list[3]),
                                          mask=glac_geom_mask)
            #Load ice thickness from glabtop2
            gf.H = np.ma.array(iolib.ds_getma(ds_list[4]), mask=glac_geom_mask)
            #Load surface velocity maps from Dehecq
            gf.vx = np.ma.array(iolib.ds_getma(ds_list[5]),
                                mask=glac_geom_mask)
            gf.vy = np.ma.array(iolib.ds_getma(ds_list[6]),
                                mask=glac_geom_mask)
            gf.vm = np.ma.sqrt(gf.vx**2 + gf.vy**2)
            v_col_factor = 0.8
            #Should smooth, better handling of data gaps
            gf.divU = np.gradient(v_col_factor * gf.vx)[1] + np.gradient(
                v_col_factor * gf.vy)[0]
            gf.divQ = gf.H * gf.divU
            #Compute debris/pond/clean percentages for entire polygon
            if gf.debris_class.count() > 0:
                gf.perc_clean = 100. * (gf.debris_class
                                        == 1).sum() / gf.debris_class.count()
                gf.perc_debris = 100. * (gf.debris_class
                                         == 2).sum() / gf.debris_class.count()
                gf.perc_pond = 100. * (gf.debris_class
                                       == 3).sum() / gf.debris_class.count()
            outlist.extend([
                gf.H.mean(),
                gf.debris_thick.mean(), gf.perc_debris, gf.perc_pond,
                gf.perc_clean
            ])

    if verbose:
        print('Mean mb: %0.2f +/- %0.2f mwe/yr' %
              (gf.mb_mean, gf.mb_mean_sigma))
        print('Sum/Area mb: %0.2f mwe/yr' % (mb_sum / gf.glac_area))
        print('Mean mb * Area: %0.2f +/- %0.2f mwe/yr' %
              (gf.mb_mean_totalarea, gf.mb_mean_totalarea_sigma))
        print('Sum mb: %0.2f mwe/yr' % mb_sum)
        #print('-------------------------------')

    #Write to master list
    #out.append(outlist)
    #Write to temporary file
    #writer.writerow(outlist)
    #f.flush()

    if writeout and (gf.glac_area / 1E6 > min_glac_area_writeout):
        out_dz_fn = os.path.join(outdir, gf.feat_fn + '_dz.tif')
        iolib.writeGTiff(gf.dz, out_dz_fn, ds_list[0])

        out_z1_fn = os.path.join(outdir, gf.feat_fn + '_z1.tif')
        iolib.writeGTiff(gf.z1, out_z1_fn, ds_list[0])

        out_z2_fn = os.path.join(outdir, gf.feat_fn + '_z2.tif')
        iolib.writeGTiff(gf.z2, out_z2_fn, ds_list[0])

        temp_fn = os.path.join(outdir, gf.feat_fn + '_z2_aspect.tif')
        iolib.writeGTiff(gf.z2_aspect, temp_fn, ds_list[0])

        temp_fn = os.path.join(outdir, gf.feat_fn + '_z2_slope.tif')
        iolib.writeGTiff(gf.z2_slope, temp_fn, ds_list[0])

        #Need to fix this - write out constant date arrays regardless of source
        #out_z1_date_fn = os.path.join(outdir, gf.feat_fn+'_ned_date.tif')
        #iolib.writeGTiff(z1_date, out_z1_date_fn, ds_list[0])

        if site == 'conus':
            out_prism_ppt_annual_fn = os.path.join(
                outdir, gf.feat_fn + '_precip_annual.tif')
            iolib.writeGTiff(prism_ppt_annual, out_prism_ppt_annual_fn,
                             ds_list[0])
            out_prism_tmean_annual_fn = os.path.join(
                outdir, gf.feat_fn + '_tmean_annual.tif')
            iolib.writeGTiff(prism_tmean_annual, out_prism_tmean_annual_fn,
                             ds_list[0])

            out_prism_ppt_summer_fn = os.path.join(
                outdir, gf.feat_fn + '_precip_summer.tif')
            iolib.writeGTiff(prism_ppt_summer, out_prism_ppt_summer_fn,
                             ds_list[0])
            out_prism_ppt_winter_fn = os.path.join(
                outdir, gf.feat_fn + '_precip_winter.tif')
            iolib.writeGTiff(prism_ppt_winter, out_prism_ppt_winter_fn,
                             ds_list[0])

            out_prism_tmean_summer_fn = os.path.join(
                outdir, gf.feat_fn + '_tmean_summer.tif')
            iolib.writeGTiff(prism_tmean_summer, out_prism_tmean_summer_fn,
                             ds_list[0])
            out_prism_tmean_winter_fn = os.path.join(
                outdir, gf.feat_fn + '_tmean_winter.tif')
            iolib.writeGTiff(prism_tmean_winter, out_prism_tmean_winter_fn,
                             ds_list[0])

        if site == 'hma':
            if gf.H is not None:
                temp_fn = os.path.join(outdir, gf.feat_fn + '_H.tif')
                iolib.writeGTiff(gf.H, temp_fn, ds_list[0])

            if gf.debris_thick is not None:
                temp_fn = os.path.join(outdir,
                                       gf.feat_fn + '_debris_thick.tif')
                iolib.writeGTiff(gf.debris_thick, temp_fn, ds_list[0])

            if gf.debris_class is not None:
                temp_fn = os.path.join(outdir,
                                       gf.feat_fn + '_debris_class.tif')
                iolib.writeGTiff(gf.debris_class, temp_fn, ds_list[0])

            if gf.vm is not None:
                temp_fn = os.path.join(outdir, gf.feat_fn + '_vm.tif')
                iolib.writeGTiff(gf.vm, temp_fn, ds_list[0])

            if gf.divQ is not None:
                temp_fn = os.path.join(outdir, gf.feat_fn + '_divQ.tif')
                iolib.writeGTiff(gf.divQ, temp_fn, ds_list[0])

    #Do AED for all
    #Compute mb using scaled AED vs. polygon
    #Check for valid pixel count vs. feature area, fill if appropriate

    if mb_plot and (gf.glac_area / 1E6 > min_glac_area_writeout):
        z_bin_edges = hist_plot(gf, outdir)
        gf.z1_hs = geolib.gdaldem_mem_ds(ds_list[0],
                                         processing='hillshade',
                                         returnma=True)
        gf.z2_hs = geolib.gdaldem_mem_ds(ds_list[1],
                                         processing='hillshade',
                                         returnma=True)
        map_plot(gf, z_bin_edges, outdir)

    return outlist, gf
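
The mb_sigma expression in mb_calc() is standard quadrature error propagation for a product (relative errors of density and dh/dt added in quadrature). A standalone numeric check with illustrative values, using the rho_is and rho_sigma from Huss et al (2013) cited above:

import numpy as np

rho_is, rho_sigma = 0.85, 0.06    #Density conversion factor and uncertainty
dhdt, dhdt_sigma = -0.75, 0.12    #Illustrative dh/dt and uncertainty (m/yr)
mb = dhdt * rho_is
mb_sigma = abs(mb) * np.sqrt((rho_sigma/rho_is)**2 + (dhdt_sigma/dhdt)**2)
print('mb: %0.2f +/- %0.2f m w.e./yr' % (mb, mb_sigma))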
Example #14
    pX_fltr, pY_fltr = geolib.mapToPixel(mX_fltr, mY_fltr, dem_mask_ds.GetGeoTransform())
    pX_fltr = np.atleast_1d(pX_fltr)
    pY_fltr = np.atleast_1d(pY_fltr)

    #Sample raster
    #This returns median and mad for ICESat footprint
    samp = geolib.sample(dem_mask_ds, mX_fltr, mY_fltr, pad=pad)
    samp_idx = ~(np.ma.getmaskarray(samp[:,0]))
    npts = samp_idx.nonzero()[0].size
    if npts < min_pts:
        print("Not enough points after sampling valud pixels, post bareground mask (%i < %i)" % (npts, min_pts))
        continue
       
    if True:
        print("Applying slope filter, masking points with slope > %0.1f" % max_slope)
        slope_ds = geolib.gdaldem_mem_ds(dem_mask_ds, processing='slope', returnma=False)
        slope_samp = geolib.sample(slope_ds, mX_fltr, mY_fltr, pad=pad)
        slope_samp_idx = (slope_samp[:,0] <= max_slope).data
        samp_idx = np.logical_and(slope_samp_idx, samp_idx)

    npts = samp_idx.nonzero()[0].size
    if npts < min_pts:
        print("Not enough points after %0.1f deg slope mask (%i < %i)" % (max_slope, npts, min_pts))
        continue

    glas_pts_fltr_mask = glas_pts_fltr[samp_idx]

    if os.path.exists(dem_mask_fn):
        print("Writing out %i points after mask" % glas_pts_fltr_mask.shape[0]) 
        out_csv_fn_mask = os.path.splitext(out_csv_fn)[0]+'_ref.csv'
        #Could add DEM samp columns here
Example #15
#for dem_fn in [dem_ref_fn]+dem_fn_list:
for n, dem_fn in enumerate(dem_fn_list):
    print('%i of %i: %s' % (n + 1, len(dem_fn_list), dem_fn))
    #print(dem_fn)
    #dem_ds = iolib.fn_getds(dem_fn)
    #dem = iolib.ds_getma(dem_ds)
    dem_fn = stack.fn_list[n]
    #title = dem_fn
    title = None
    dem = stack.ma_stack[n]
    anomaly = anomaly_stack[n]
    #dem_clim = malib.calcperc(stack.ma_stack, (2,98))
    #dem_hs_fn = os.path.splitext(dem_fn)[0]+'_hs_az315.tif'
    #dem_hs = iolib.fn_getma(dem_hs_fn)
    #dem_hs = geolib.gdaldem_mem_ma(dem, dem_ds, returnma=True)
    dem_ds = iolib.fn_getds(dem_fn)
    dem_hs = geolib.gdaldem_mem_ds(dem_ds, processing='hillshade', returnma=True)
    #dt = timelib.fn_getdatetime(dem_fn)
    dt = stack.date_list[n]
    if dt is not None:
        title = dt.strftime('%Y-%m-%d')
    #f = makefig(dem, dem_hs, anomaly, ds=dem_ds, title=title)
    f, ax = plt.subplots()
    im = ax.imshow(anomaly, clim=anomaly_clim, cmap='RdBu')
    pltlib.add_cbar(ax, im, label='Elevation anomaly (m)')
    pltlib.add_scalebar(ax, res=stack.res, location='lower left')
    pltlib.hide_ticks(ax)
    ax.set_facecolor('k')
    if title is not None:
        ax.set_title(title)
    out_fn = os.path.join(
        outdir,
Example #16
#Site-specific elevation color limits (GM values used below; adjust per site)
#GM
dem_clim = (1766, 3247)
#SBB
#dem_clim = (2934, 3983)
hs_clim = (1, 255)

for i, dem_fn in enumerate(dem_fn_list):
    ax = grid[i]
    print(dem_fn)
    dem_ds = iolib.fn_getds(dem_fn)
    dem = iolib.ds_getma_sub(dem_ds)
    dem_hs_fn = os.path.splitext(dem_fn)[0] + '_hs_az315.tif'
    if os.path.exists(dem_hs_fn):
        dem_hs = iolib.fn_getma_sub(dem_hs_fn)
    else:
        dem_hs = geolib.gdaldem_mem_ds(dem_ds, 'hillshade', returnma=True)
    dt = timelib.fn_getdatetime(dem_fn)
    if dt is not None:
        title = dt.strftime('%Y-%m-%d')
        t = ax.set_title(title, fontdict={'fontsize': 6})
        t.set_position([0.5, 0.95])
    hs_im = ax.imshow(dem_hs, vmin=hs_clim[0], vmax=hs_clim[1], cmap='gray')
    dem_im = ax.imshow(dem,
                       vmin=dem_clim[0],
                       vmax=dem_clim[1],
                       cmap='cpt_rainbow',
                       alpha=0.5)
    ax.set_facecolor('k')
    pltlib.hide_ticks(ax)

for ax in grid[i + 1:]:
Example #17
def compute_offset(dem1_ds,
                   dem2_ds,
                   dem2_fn,
                   mode='nuth',
                   max_offset_m=100,
                   remove_outliers=True,
                   apply_mask=True):
    #Make sure the input datasets have the same resolution/extent
    #Use projection of source DEM
    dem1_clip_ds, dem2_clip_ds = warplib.memwarp_multi([dem1_ds, dem2_ds], \
            res='max', extent='intersection', t_srs=dem2_ds)

    #Compute size of NCC and SAD search window in pixels
    res = float(geolib.get_res(dem1_clip_ds, square=True)[0])
    max_offset_px = (max_offset_m / res) + 1
    #print(max_offset_px)
    pad = (int(max_offset_px), int(max_offset_px))

    #This will be the updated geotransform for dem2
    dem2_gt = np.array(dem2_clip_ds.GetGeoTransform())

    #Load the arrays
    dem1 = iolib.ds_getma(dem1_clip_ds, 1)
    dem2 = iolib.ds_getma(dem2_clip_ds, 1)

    #Compute difference for unaligned inputs
    print("Elevation difference stats for uncorrected input DEMs")
    #Shouldn't need to worry about common mask here, as both inputs are ma
    diff_euler = dem2 - dem1

    static_mask = None
    if apply_mask:
        #Need dem2_fn here to find TOA fn
        static_mask = get_mask(dem2_clip_ds, dem2_fn)
        dem1 = np.ma.array(dem1, mask=static_mask)
        dem2 = np.ma.array(dem2, mask=static_mask)
        diff_euler = np.ma.array(diff_euler, mask=static_mask)
        static_mask = np.ma.getmaskarray(diff_euler)

    if diff_euler.count() == 0:
        sys.exit("No overlapping, unmasked pixels shared between input DEMs")

    #Compute stats for new masked difference map
    diff_stats = malib.print_stats(diff_euler)
    dz = diff_stats[5]

    #This needs further testing
    if remove_outliers:
        med = diff_stats[5]
        nmad = diff_stats[6]
        f = 3
        rmin = med - f * nmad
        rmax = med + f * nmad
        #Use IQR
        #rmin = diff_stats[7]
        #rmax = diff_stats[8]
        diff_euler = np.ma.masked_outside(diff_euler, rmin, rmax)
        #Should also apply to original dem1 and dem2 for sad and ncc

    print("Computing sub-pixel offset between DEMs using mode: %s" % mode)

    #By default, don't create output figure
    fig = None

    #Default horizontal shift is (0,0), in case the chosen mode doesn't set dx/dy
    dx = 0
    dy = 0

    #Sum of absolute differences
    if mode == "sad":
        m, int_offset, sp_offset = coreglib.compute_offset_sad(dem1,
                                                               dem2,
                                                               pad=pad)
        #Geotransform has negative y resolution, so don't need negative sign
        #np array is positive down
        #GDAL coordinates are positive up
        dx = sp_offset[1] * dem2_gt[1]
        dy = sp_offset[0] * dem2_gt[5]
    #Normalized cross-correlation of clipped, overlapping areas
    elif mode == "ncc":
        m, int_offset, sp_offset, fig = coreglib.compute_offset_ncc(dem1, dem2, \
                pad=pad, prefilter=False, plot=True)
        dx = sp_offset[1] * dem2_gt[1]
        dy = sp_offset[0] * dem2_gt[5]
    #Nuth and Kaab (2011)
    elif mode == "nuth":
        print("Computing slope and aspect")
        dem1_slope = geolib.gdaldem_mem_ds(dem1_clip_ds,
                                           processing='slope',
                                           returnma=True)
        dem1_aspect = geolib.gdaldem_mem_ds(dem1_clip_ds,
                                            processing='aspect',
                                            returnma=True)
        #Compute relationship between elevation difference, slope and aspect
        fit_param, fig = coreglib.compute_offset_nuth(diff_euler, dem1_slope,
                                                      dem1_aspect)
        #fit_param[0] is magnitude of shift vector
        #fit_param[1] is direction of shift vector
        #fit_param[2] is mean bias divided by tangent of mean slope
        #print(fit_param)
        dx = fit_param[0] * np.sin(np.deg2rad(fit_param[1]))
        dy = fit_param[0] * np.cos(np.deg2rad(fit_param[1]))
        #med_slope = malib.fast_median(dem1_slope)
        #dz = fit_param[2]*np.tan(np.deg2rad(med_slope))
    elif mode == "all":
        print("Not yet implemented")
        #Want to compare all methods, average offsets
        #m, int_offset, sp_offset = coreglib.compute_offset_sad(dem1, dem2)
        #m, int_offset, sp_offset = coreglib.compute_offset_ncc(dem1, dem2)
    #This is a hack to apply the computed median bias correction for shpclip area only
    elif mode == "none":
        print(
            "Skipping alignment, writing out DEM with median bias over static surfaces removed"
        )
        dst_fn = outprefix + '_med%0.1f.tif' % dz
        iolib.writeGTiff(dem2_orig + dz, dst_fn, dem2_ds)
        sys.exit()
    #Note: minus signs here since we are computing dz=(src-ref), but adjusting src
    return -dx, -dy, -dz, static_mask, fig