Example #1
def parse_res(res, src_ds_list=None, t_srs=None):
    """Parse arbitrary input res 

    Parameters
    ----------
    res : str or gdal.Dataset or filename or float
        Arbitrary input res 
    src_ds_list : list of gdal.Dataset objects, optional
        Needed if specifying 'first' or 'last'
    t_srs : osr.SpatialReference() object 
        Projection for res calculations, optional

    Returns
    -------
    res : float 
        Output resolution
        None if source resolution should be preserved
    """
    #Default to using first t_srs for res calculations
    #Assumes src_ds_list is not None
    t_srs = parse_srs(t_srs, src_ds_list)

    #Valid options for res
    res_str_list = [
        'first', 'last', 'min', 'max', 'mean', 'med', 'common_scale_factor'
    ]

    #Compute output resolution in t_srs
    if res in res_str_list and src_ds_list is not None:
        #Returns min, max, mean, med
        res_stats = geolib.get_res_stats(src_ds_list, t_srs=t_srs)
        if res == 'first':
            res = geolib.get_res(src_ds_list[0], t_srs=t_srs, square=True)[0]
        elif res == 'last':
            res = geolib.get_res(src_ds_list[-1], t_srs=t_srs, square=True)[0]
        elif res == 'min':
            res = res_stats[0]
        elif res == 'max':
            res = res_stats[1]
        elif res == 'mean':
            res = res_stats[2]
        elif res == 'med':
            res = res_stats[3]
        elif res == 'common_scale_factor':
            #Determine res to upsample min and downsample max by constant factor
            res = np.sqrt(res_stats[1] / res_stats[0]) * res_stats[0]
    elif res == 'source':
        res = None
    elif isinstance(res, gdal.Dataset):
        res = geolib.get_res(res, t_srs=t_srs, square=True)[0]
    elif isinstance(res, str) and os.path.exists(res):
        res = geolib.get_res(gdal.Open(res), t_srs=t_srs, square=True)[0]
    else:
        res = float(res)
    return res
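A note on the 'common_scale_factor' branch above: sqrt(res_max/res_min)*res_min reduces to the geometric mean of the min and max resolutions, so the finest input is downsampled and the coarsest upsampled by the same factor. A minimal check with plain numbers (no GDAL needed):

import numpy as np
res_min, res_max = 2.0, 8.0
res = np.sqrt(res_max / res_min) * res_min   #4.0 m
assert np.isclose(res, np.sqrt(res_min * res_max))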
Example #2
def main():
    parser = argparse.ArgumentParser(description="Utility to compute hypsometry for input DEM")
    parser.add_argument('-mask_fn', type=str, default=None, help='Glacier Polygon filename (mask.shp)')
    parser.add_argument('-bin_width', type=float, default=100.0, help='Elevation bin width (default: %(default)s)')
    parser.add_argument('dem_fn', type=str, help='Input DEM filename')
    args = parser.parse_args()

    #Input DEM
    dem_fn = args.dem_fn
    #Extract GDAL dataset from input dem_fn
    dem_ds = iolib.fn_getds(dem_fn)
    #Extract NumPy masked array from dem_ds
    print("Loading input DEM: %s" % args.dem_fn)
    dem = iolib.ds_getma(dem_ds)
    #Fill dem?
    #Extract DEM resolution (m)
    dem_res = geolib.get_res(dem_ds, square=True)[0]

    #Generate glacier mask from shp 
    if args.mask_fn is not None:
        print("Masking input DEM using: %s" % args.mask_fn)
        #This calls gdal_rasterize with parameters of dem_ds
        mask = geolib.shp2array(args.mask_fn, r_ds=dem_ds)
        #Apply mask to DEM
        dem = np.ma.array(dem, mask=mask)

    #Generate aed 
    print("Generating AED")
    bin_centers, bin_areas = aed(dem, dem_res, args.bin_width) 
    #Write out to csv
    csv_fn = os.path.splitext(dem_fn)[0]+'_aed.csv'
    write_aed(bin_centers, bin_areas, csv_fn)
    #Generate plot
    plot_dem_aed(dem, bin_centers, bin_areas)
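The aed() and write_aed() helpers are defined elsewhere in this script. A minimal sketch of what an area-elevation distribution function might look like, assuming a NumPy masked array input and square pixels (hypothetical implementation, not necessarily the one used here):

import numpy as np

def aed(dem, dem_res, bin_width=100.0):
    #Histogram valid elevations into fixed-width bins
    edges = np.arange(dem.min(), dem.max() + bin_width, bin_width)
    counts, _ = np.histogram(dem.compressed(), bins=edges)
    bin_centers = edges[:-1] + bin_width / 2.0
    #Convert pixel counts to bin areas (km^2)
    bin_areas = counts * dem_res**2 / 1E6
    return bin_centers, bin_areas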
Example #3
def iv(a, ax=None, clim=None, clim_perc=(2,98), cmap='cpt_rainbow', label=None, title=None, \
        ds=None, res=None, hillshade=False, scalebar=True):
    """
    Quick image viewer with standardized display settings
    """
    if ax is None:
        #ax = plt.subplot()
        f, ax = plt.subplots()
    ax.set_aspect('equal')
    if clim is None:
        clim = get_clim(a, clim_perc)
    cm = cmap_setndv(cmap, cmap)
    alpha = 1.0
    if hillshade:
        if ds is not None:
            hs = geolib.gdaldem_mem_ds(ds,
                                       processing='hillshade',
                                       computeEdges=True,
                                       returnma=True)
            b_cm = cmap_setndv('gray', cmap)
            #Set the overlay bad values to completely transparent, otherwise darkens the bg
            cm.set_bad(alpha=0)
            bg_clim_perc = (2, 98)
            bg_clim = get_clim(hs, bg_clim_perc)
            #bg_clim = (1, 255)
            bgplot = ax.imshow(hs, cmap=b_cm, clim=bg_clim)
            alpha = 0.5
    if scalebar:
        if ds is not None:
            #Get resolution at center of dataset
            ccoord = geolib.get_center(ds, t_srs=geolib.wgs_srs)
            #Compute resolution in local cartesian coordinates at center
            c_srs = geolib.localortho(*ccoord)
            res = geolib.get_res(ds, c_srs)[0]
        if res is not None:
            sb_loc = best_scalebar_location(a)
            add_scalebar(ax, res, location=sb_loc)
    imgplot = ax.imshow(a, cmap=cm, clim=clim, alpha=alpha, **imshow_kwargs)
    cbar_kwargs['extend'] = get_cbar_extend(a, clim=clim)
    cbar_kwargs['format'] = get_cbar_format(a)
    cbar = add_cbar(ax, imgplot, label=label)
    hide_ticks(ax)
    if title is not None:
        ax.set_title(title)
    plt.tight_layout()
    return ax
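The clim_perc stretch above amounts to a percentile clip of the array values. A hedged plain-NumPy equivalent of the get_clim/malib.calcperc helpers (hypothetical data):

import numpy as np
a = np.random.randn(100, 100)
clim = tuple(np.nanpercentile(a, (2, 98)))   #(lower, upper) display limits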
Example #4
def res_sort(img_list):
    """
    sort images based on resolution, finest resolution on top
    Parameters
    ----------
    img_list: list
        list of images to be sorted
    Returns
    ----------
    sorted_img_list: list
        list of sorted images with finest resolution on top
    """
    ds_list = [iolib.fn_getds(img) for img in img_list]
    res_list = [geolib.get_res(ds, square=True)[0] for ds in ds_list]
    #https://www.geeksforgeeks.org/python-sort-values-first-list-using-second-list
    zipped_pairs = zip(res_list, img_list)
    sorted_img_list = [x for _, x in sorted(zipped_pairs)]
    return sorted_img_list
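The zip/sort pattern above, demonstrated with plain values (illustrative filenames):

res_list = [3.0, 0.5, 2.0]
img_list = ['c.tif', 'a.tif', 'b.tif']
sorted_img_list = [x for _, x in sorted(zip(res_list, img_list))]
#['a.tif', 'b.tif', 'c.tif'] -- finest resolution first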
Example #5
def map_plot(site_list, ds):
    a = iolib.ds_getma(ds)
    clim = malib.calcperc(a, (2, 98))
    mX = site_list[:, 1]
    mY = site_list[:, 2]
    pX, pY = geolib.mapToPixel(mX, mY, ds.GetGeoTransform())
    #f, ax = plt.subplots(1, figsize=(6,6), subplot_kw={'aspect':'equal', 'adjustable':'box-forced'})
    f, ax = plt.subplots(1, figsize=(6, 6), subplot_kw={'aspect': 'equal'})
    im = ax.imshow(a, vmin=clim[0], vmax=clim[1], cmap='inferno')
    ax.set_facecolor('0.5')
    from imview.lib import pltlib
    pltlib.add_scalebar(ax, geolib.get_res(ds)[0])
    ax.scatter(pX, pY, s=16, facecolors='w', edgecolors='k')
    for i, lbl in enumerate(site_list[:, 0]):
        bbox = dict(boxstyle='round,pad=0.1', fc='k', alpha=0.7)
        ax.annotate(str(int(lbl)),
                    xy=(pX[i], pY[i]),
                    xytext=(0, 4),
                    textcoords='offset points',
                    fontsize=8,
                    color='w',
                    bbox=bbox)
    return f
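geolib.mapToPixel() inverts the affine geotransform to convert map coordinates to pixel coordinates. For a north-up raster (zero rotation terms) it reduces to the following standalone sketch (the library handles the general case):

def map_to_pixel(mX, mY, gt):
    #gt = (ulx, xres, xrot, uly, yrot, yres); assumes gt[2] == gt[4] == 0
    pX = (mX - gt[0]) / gt[1]
    pY = (mY - gt[3]) / gt[5]
    return pX, pY

gt = (500000.0, 2.0, 0.0, 4100000.0, 0.0, -2.0)   #hypothetical geotransform
print(map_to_pixel(500010.0, 4099990.0, gt))      #(5.0, 5.0)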
Example #6
ax.set_ylabel('Count')
fig_fn = 'snowex_gm_snowdepth_diff_hist.pdf'
f.savefig(fig_fn, dpi=300, bbox_inches='tight')

#Map plot of snow depth
f, ax = plt.subplots()
ax.set_aspect('equal')
ax.set_facecolor('k')
ax.imshow(hs, cmap='gray')
#clim = malib.calcperc(depth, (2,98))
clim = (0, 2)
im = ax.imshow(snowdepth, cmap='inferno', clim=clim, alpha=0.7)
ax.set_ylim(dem.shape[0], 0)
ax.set_xlim(0, dem.shape[1])
pltlib.add_cbar(ax, im, label='Snow Depth (m)')
pltlib.add_scalebar(ax, geolib.get_res(dem_ds)[0])
pltlib.hide_ticks(ax)
fig_fn = 'snowex_gm_snowdepth.png'
f.savefig(fig_fn, dpi=300, bbox_inches='tight')
#Now overlay pits
sc = ax.scatter(x,
                y,
                s=s,
                c=depth,
                cmap='inferno',
                vmin=clim[0],
                vmax=clim[1],
                edgecolors='k')
fig_fn = 'snowex_gm_snowdepth_pitoverlay.png'
f.savefig(fig_fn, dpi=300, bbox_inches='tight')
Example #7
    diff_euler_bincenters = []
    diff_euler_binvals = []
    diff_euler_bincount = []
    for i in range(edges.size - 1):
        print "%i of %i: %0.1f to %0.1f m" % (i, edges.size - 1, edges[i],
                                              edges[i + 1])
        idx = np.logical_and((ref_dem > edges[i]).data,
                             (ref_dem <= edges[i + 1]).data)
        bincenter = edges[i] + ((edges[i + 1] - edges[i]) / 2.0)
        diff_euler_bincenters.append(bincenter)
        dh_idx = dh[idx]
        diff_euler_binvals.append(np.median(dh_idx))
        diff_euler_bincount.append(dh_idx.count())
    diff_euler_binvals = np.ma.array(diff_euler_binvals)
    diff_euler_bincount = np.ma.array(diff_euler_bincount)
    res = geolib.get_res(dem1_ds, square=True)[0]
    diff_euler_binarea = (diff_euler_bincount * res**2) / 1E6
    #Threshold area for bin in km^2
    binarea_thresh = 0.1
    binarea_mask = ~((diff_euler_binarea > binarea_thresh).data)
    diff_euler_binvals = np.ma.array(diff_euler_binvals, mask=binarea_mask)
    diff_euler_binarea = np.ma.array(diff_euler_binarea, mask=binarea_mask)
    edges = np.ma.array(edges[:-1], mask=binarea_mask)

    import matplotlib.pyplot as plt
    fig = plt.figure(figsize=(5, 10))
    fig.add_subplot(211)
    #Plot negative in Red, positive in Blue
    plt.bar(edges, diff_euler_binvals, width=binwidth, color='k')
    if rates:
        plt.ylabel('Median Elevation Change Rate (m/yr)')
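Worked example of the bin-area conversion above: pixel count times pixel area (m^2), divided by 1E6 to give km^2 (hypothetical numbers):

count, res = 10000, 2.0
binarea_km2 = (count * res**2) / 1E6   #0.04 km^2, below the 0.1 km^2 binarea_thresh, so this bin would be masked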
Example #8
def compute_offset(ref_dem_ds, src_dem_ds, src_dem_fn, mode='nuth', remove_outliers=True, max_offset=100, \
        max_dz=100, slope_lim=(0.1, 40), mask_list=['glaciers',], plot=True):
    #Make sure the input datasets have the same resolution/extent
    #Use projection of source DEM
    ref_dem_clip_ds, src_dem_clip_ds = warplib.memwarp_multi([ref_dem_ds, src_dem_ds], \
            res='max', extent='intersection', t_srs=src_dem_ds, r='cubic')

    #Compute size of NCC and SAD search window in pixels
    res = float(geolib.get_res(ref_dem_clip_ds, square=True)[0])
    max_offset_px = (max_offset/res) + 1
    #print(max_offset_px)
    pad = (int(max_offset_px), int(max_offset_px))

    #This will be updated geotransform for src_dem
    src_dem_gt = np.array(src_dem_clip_ds.GetGeoTransform())

    #Load the arrays
    ref_dem = iolib.ds_getma(ref_dem_clip_ds, 1)
    src_dem = iolib.ds_getma(src_dem_clip_ds, 1)

    print("Elevation difference stats for uncorrected input DEMs (src - ref)")
    diff = src_dem - ref_dem

    static_mask = get_mask(src_dem_clip_ds, mask_list, src_dem_fn)
    diff = np.ma.array(diff, mask=static_mask)

    if diff.count() == 0:
        sys.exit("No overlapping, unmasked pixels shared between input DEMs")

    if remove_outliers:
        diff = outlier_filter(diff, f=3, max_dz=max_dz)

    #Want to use higher quality DEM, should determine automatically from original res/count
    #slope = get_filtered_slope(ref_dem_clip_ds, slope_lim=slope_lim)
    slope = get_filtered_slope(src_dem_clip_ds, slope_lim=slope_lim)

    print("Computing aspect")
    #aspect = geolib.gdaldem_mem_ds(ref_dem_clip_ds, processing='aspect', returnma=True, computeEdges=False)
    aspect = geolib.gdaldem_mem_ds(src_dem_clip_ds, processing='aspect', returnma=True, computeEdges=False)

    ref_dem_clip_ds = None
    src_dem_clip_ds = None

    #Apply slope filter to diff
    #Note that we combine masks from diff and slope in coreglib
    diff = np.ma.array(diff, mask=np.ma.getmaskarray(slope))

    #Get final mask after filtering
    static_mask = np.ma.getmaskarray(diff)

    #Compute stats for new masked difference map
    print("Filtered difference map")
    diff_stats = malib.print_stats(diff)
    dz = diff_stats[5]

    print("Computing sub-pixel offset between DEMs using mode: %s" % mode)

    #By default, don't create output figure
    fig = None

    #Default horizontal shift is (0,0)
    dx = 0
    dy = 0

    #Sum of absolute differences
    if mode == "sad":
        ref_dem = np.ma.array(ref_dem, mask=static_mask)
        src_dem = np.ma.array(src_dem, mask=static_mask)
        m, int_offset, sp_offset = coreglib.compute_offset_sad(ref_dem, src_dem, pad=pad)
        #Geotransform has negative y resolution, so don't need negative sign
        #np array is positive down
        #GDAL coordinates are positive up
        dx = sp_offset[1]*src_dem_gt[1]
        dy = sp_offset[0]*src_dem_gt[5]
    #Normalized cross-correlation of clipped, overlapping areas
    elif mode == "ncc":
        ref_dem = np.ma.array(ref_dem, mask=static_mask)
        src_dem = np.ma.array(src_dem, mask=static_mask)
        m, int_offset, sp_offset, fig = coreglib.compute_offset_ncc(ref_dem, src_dem, \
                pad=pad, prefilter=False, plot=plot)
        dx = sp_offset[1]*src_dem_gt[1]
        dy = sp_offset[0]*src_dem_gt[5]
    #Nuth and Kaab (2011)
    elif mode == "nuth":
        #Compute relationship between elevation difference, slope and aspect
        fit_param, fig = coreglib.compute_offset_nuth(diff, slope, aspect, plot=plot)
        if fit_param is None:
            print("Failed to calculate horizontal shift")
        else:
            #fit_param[0] is magnitude of shift vector
            #fit_param[1] is direction of shift vector
            #fit_param[2] is mean bias divided by tangent of mean slope
            #print(fit_param)
            dx = fit_param[0]*np.sin(np.deg2rad(fit_param[1]))
            dy = fit_param[0]*np.cos(np.deg2rad(fit_param[1]))
            med_slope = malib.fast_median(slope)
            nuth_dz = fit_param[2]*np.tan(np.deg2rad(med_slope))
            print('Median dz: %0.2f\nNuth dz: %0.2f' % (dz, nuth_dz))
            #dz = nuth_dz
    elif mode == "all":
        print("Not yet implemented")
        #Want to compare all methods, average offsets
        #m, int_offset, sp_offset = coreglib.compute_offset_sad(ref_dem, src_dem)
        #m, int_offset, sp_offset = coreglib.compute_offset_ncc(ref_dem, src_dem)
    elif mode == "none":
        print("Skipping alignment, writing out DEM with median bias over static surfaces removed")
        dst_fn = outprefix+'_med%0.1f.tif' % dz
        iolib.writeGTiff(src_dem_orig + dz, dst_fn, src_dem_ds)
        sys.exit()
    #Note: minus signs here since we are computing dz=(src-ref), but adjusting src
    return -dx, -dy, -dz, static_mask, fig
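The 'nuth' branch decomposes the fitted shift vector into easting/northing components. A worked example with a hypothetical fit result (magnitude, direction in degrees, bias/tan(slope)):

import numpy as np
fit_param = (5.0, 90.0, 0.2)   #hypothetical values
dx = fit_param[0] * np.sin(np.deg2rad(fit_param[1]))   #5.0 m (due east)
dy = fit_param[0] * np.cos(np.deg2rad(fit_param[1]))   #~0.0 m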
Example #9
def main():

    if len(sys.argv) != 2:
        sys.exit("Usage: %s dz.tif" % os.path.basename(sys.argv[0]))

    #This is mean density for N Cascades snow
    #rho = 0.5
    #Density of pure ice
    rho = 0.917

    #Clip negative values to 0
    filt = False

    src_fn = sys.argv[1]
    src_ds = iolib.fn_getds(src_fn)
    res = geolib.get_res(src_ds, square=True)[0]
    bma = iolib.ds_getma(src_ds)

    #Attempt to extract t1 and t2 from input filename
    ts = timelib.fn_getdatetime_list(src_fn)
    #Hardcode timestamps
    #ts = [datetime.datetime(2013,9,10), datetime.datetime(2014,5,14)]

    dt_yr = None
    if len(ts) == 2:
        dt = ts[1] - ts[0]
        year = datetime.timedelta(days=365.25)
        dt_yr = dt.total_seconds() / year.total_seconds()

    #Can add filter here to remove outliers, perc_fltr(0.01, 99.9)
    if filt:
        mask = np.ma.getmaskarray(bma)
        bma[bma < 0] = 0
        bma = np.ma.array(bma, mask=mask)

    #Print out stats
    print('\n')
    stats = malib.print_stats(bma)

    count = stats[0]
    area = res**2 * count
    mean = stats[3]
    med = stats[5]

    s_m3 = np.ma.sum(bma) * res**2
    s_km3 = s_m3 / 1E9
    s_mwe = mean * rho
    s_gt = s_km3 * rho
    s_mm = s_gt / 374

    if dt_yr is not None:
        print("%s to %s: %0.2f yr" % (ts[0], ts[1], dt_yr))
        print("%0.0f m^3 (%0.0f m^3/yr)" % (s_m3, s_m3 / dt_yr))
        print("%0.3f km^3 (%0.3f km^3/yr)" % (s_km3, s_km3 / dt_yr))
        print("Density: %0.3f g/cc" % rho)
        print("%0.3f GT (%0.3f GT/yr)" % (s_gt, s_gt / dt_yr))
        print("%0.6f mm SLR (%0.6f mm/yr)" % (s_mm, s_mm / dt_yr))
        print("%0.3f m.w.e. (%0.3f m.w.e./yr)" % (s_mwe, s_mwe / dt_yr))
    else:
        print("Area: %0.2f km2" % (area / 1E6))
        print("%0.0f m^3" % s_m3)
        print("%0.3f km^3" % s_km3)
        print("Density: %0.3f g/cc" % rho)
        print("%0.3f GT" % s_gt)
        print("%0.6f mm SLR" % s_mm)
        print("%0.3f m.w.e." % s_mwe)
Example #10
def dem_align(ref_dem,
              source_dem,
              max_displacement,
              outprefix,
              align,
              trans_only=False,
              threads=n_cpu):
    """
    This function implements the full DEM alignment workflow using ASP's pc_align and point2dem programs
    See relevant documentation here: https://stereopipeline.readthedocs.io/en/latest/tools/pc_align.html
    Parameters
    ----------
    ref_dem: str
        path to reference DEM for alignment
    source_dem: str
        path to source DEM to be aligned
    max_displacement: float
        Maximum expected displacement between input DEMs, useful for culling outliers before solving for shifts, default: 100 m
    outprefix: str
        prefix with which pc_align results will be saved (can be a path; the general convention for the repo is a path with a run prefix, e.g., aligned_to/run)
    align: str
        ICP alignment algorithm to use. default: point-to-plane
    trans_only: bool
        if True, compute translation only during point cloud optimization. Default: False
    threads: int
        number of threads to use for each stereo job
    """
    # this block checks whether the reference DEM or the source DEM is finer resolution
    # if the reference DEM is finer, then the source is aligned to the reference
    # if the source DEM is finer, then the reference is aligned to the source, and the source is corrected via the inverse of the source-to-reference transformation matrix.
    source_ds = iolib.fn_getds(source_dem)
    ref_ds = iolib.fn_getds(ref_dem)
    source_res = geolib.get_res(source_ds, square=True)[0]
    ref_res = geolib.get_res(ref_ds, square=True)[0]
    tr = source_res
    tsrs = source_ds.GetProjection()
    print(type(tsrs))
    if ref_res <= source_res:
        source = True
        pc_align_args = [ref_dem, source_dem]
        pc_id = 'trans_source.tif'
        pc_align_vec = '-transform.txt'
    else:
        source = False
        pc_align_args = [source_dem, ref_dem]
        pc_id = 'trans_reference.tif'
        pc_align_vec = '-inverse-transform.txt'
    print("Aligning clouds via the {} method".format(align))

    pc_align_opts = get_pc_align_opts(outprefix,
                                      max_displacement,
                                      align=align,
                                      source=source,
                                      trans_only=trans_only,
                                      threads=threads)
    pc_align_log = run_cmd('pc_align', pc_align_opts + pc_align_args)
    print(pc_align_log)
    # this try/except block checks for 2 things:
    # - Was the transformed point cloud produced?
    # - Was the maximum displacement greater than twice the max_displacement specified by the user?
    # The 2nd condition is implemented for tricky alignment of individual triplet DEMs to reference, as some small DEMs might be awkwardly displaced by > 1000 m.
    # If the above conditions are not met, the transformed point cloud will not be gridded into the final DEM.
    try:
        pc = glob.glob(outprefix + '*' + pc_id)[0]
        pc_log = sorted(glob.glob(outprefix + '*' + 'log-pc_align*.txt'))[
            -1]  # this will hopefully pull out latest transformation log
    except IndexError:
        print("Failed to find aligned point cloud file")
        sys.exit()
    max_disp = get_total_shift(pc_log)
    print("Maximum displacement is {}".format(max_disp))
    if max_disp <= 2 * max_displacement:
        grid = True
    else:
        grid = False

    if grid:
        point2dem_opts = get_point2dem_opts(tr, tsrs, threads=threads)
        point2dem_args = [pc]
        print("Saving aligned reference DEM at {}-DEM.tif".format(
            os.path.splitext(pc)[0]))
        p2dem_log = run_cmd('point2dem', point2dem_opts + point2dem_args)
        # create alignment vector with consistent name of alignment vector for camera alignment
        final_align_vector = os.path.join(os.path.dirname(outprefix),
                                          'alignment_vector.txt')
        pc_align_vec = glob.glob(os.path.join(outprefix + pc_align_vec))[0]
        print("Creating DEM alignment vector at {final_align_vector}")
        shutil.copy2(pc_align_vec, final_align_vector)
        print(p2dem_log)
    else:
        print(
            "Aligned cloud not produced, or the total shift applied to the cloud exceeded 2 times the specified max_displacement; gridding abandoned"
        )
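The gridding guard above, with plain numbers (hedged illustration):

max_displacement = 100.0
max_disp = 150.0                          #hypothetical total shift from the log
grid = max_disp <= 2 * max_displacement   #True: 150 <= 200, so the cloud is gridded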
Example #11
def main(argv=None):
    parser = getparser()
    args = parser.parse_args()

    #Should check that files exist
    ref_dem_fn = args.ref_fn
    src_dem_fn = args.src_fn

    mode = args.mode
    mask_list = args.mask_list
    max_offset = args.max_offset
    max_dz = args.max_dz
    slope_lim = tuple(args.slope_lim)
    tiltcorr = args.tiltcorr
    polyorder = args.polyorder
    res = args.res

    #Maximum number of iterations
    max_iter = args.max_iter

    #These are tolerances (in meters) to stop iteration
    tol = args.tol
    min_dx = tol
    min_dy = tol
    min_dz = tol

    outdir = args.outdir
    if outdir is None:
        outdir = os.path.splitext(src_dem_fn)[0] + '_dem_align'

    if tiltcorr:
        outdir += '_tiltcorr'
        tiltcorr_done = False
        #Relax tolerance for initial round of co-registration
        #tiltcorr_tol = 0.1
        #if tol < tiltcorr_tol:
        #    tol = tiltcorr_tol

    if not os.path.exists(outdir):
        os.makedirs(outdir)

    outprefix = '%s_%s' % (os.path.splitext(os.path.split(src_dem_fn)[-1])[0], \
            os.path.splitext(os.path.split(ref_dem_fn)[-1])[0])
    outprefix = os.path.join(outdir, outprefix)

    print("\nReference: %s" % ref_dem_fn)
    print("Source: %s" % src_dem_fn)
    print("Mode: %s" % mode)
    print("Output: %s\n" % outprefix)

    src_dem_ds = gdal.Open(src_dem_fn)
    ref_dem_ds = gdal.Open(ref_dem_fn)

    #Get local cartesian coordinate system
    #local_srs = geolib.localtmerc_ds(src_dem_ds)
    #Use original source dataset coordinate system
    #Potentially issues with distortion and xyz/tiltcorr offsets for DEM with large extent
    local_srs = geolib.get_ds_srs(src_dem_ds)
    #local_srs = geolib.get_ds_srs(ref_dem_ds)

    #Resample to common grid
    ref_dem_res = float(geolib.get_res(ref_dem_ds, t_srs=local_srs, square=True)[0])
    #Create a copy to be updated in place
    src_dem_ds_align = iolib.mem_drv.CreateCopy('', src_dem_ds, 0)
    src_dem_res = float(geolib.get_res(src_dem_ds, t_srs=local_srs, square=True)[0])
    src_dem_ds = None
    #Resample to user-specified resolution
    ref_dem_ds, src_dem_ds_align = warplib.memwarp_multi([ref_dem_ds, src_dem_ds_align], \
            extent='intersection', res=args.res, t_srs=local_srs, r='cubic')

    res = float(geolib.get_res(src_dem_ds_align, square=True)[0])
    print("\nReference DEM res: %0.2f" % ref_dem_res)
    print("Source DEM res: %0.2f" % src_dem_res)
    print("Resolution for coreg: %s (%0.2f m)\n" % (args.res, res))

    #Iteration number
    n = 1
    #Cumulative offsets
    dx_total = 0
    dy_total = 0
    dz_total = 0

    #Now iteratively update geotransform and vertical shift
    while True:
        print("*** Iteration %i ***" % n)
        dx, dy, dz, static_mask, fig = compute_offset(ref_dem_ds, src_dem_ds_align, src_dem_fn, mode, max_offset, \
                mask_list=mask_list, max_dz=max_dz, slope_lim=slope_lim, plot=True)
        xyz_shift_str_iter = "dx=%+0.2fm, dy=%+0.2fm, dz=%+0.2fm" % (dx, dy, dz)
        print("Incremental offset: %s" % xyz_shift_str_iter)

        dx_total += dx
        dy_total += dy
        dz_total += dz

        xyz_shift_str_cum = "dx=%+0.2fm, dy=%+0.2fm, dz=%+0.2fm" % (dx_total, dy_total, dz_total)
        print("Cumulative offset: %s" % xyz_shift_str_cum)
        #String to append to output filenames
        xyz_shift_str_cum_fn = '_%s_x%+0.2f_y%+0.2f_z%+0.2f' % (mode, dx_total, dy_total, dz_total)

        #Should make an animation of this converging
        if n == 1: 
            #static_mask_orig = static_mask
            if fig is not None:
                dst_fn = outprefix + '_%s_iter%02i_plot.png' % (mode, n)
                print("Writing offset plot: %s" % dst_fn)
                fig.gca().set_title("Incremental: %s\nCumulative: %s" % (xyz_shift_str_iter, xyz_shift_str_cum))
                fig.savefig(dst_fn, dpi=300)

        #Apply the horizontal shift to the original dataset
        src_dem_ds_align = coreglib.apply_xy_shift(src_dem_ds_align, dx, dy, createcopy=False)
        #Apply the vertical shift
        src_dem_ds_align = coreglib.apply_z_shift(src_dem_ds_align, dz, createcopy=False)

        n += 1
        print("\n")
        #If magnitude of shift in all directions is less than tol
        #if n > max_iter or (abs(dx) <= min_dx and abs(dy) <= min_dy and abs(dz) <= min_dz):
        #If magnitude of shift is less than tol
        dm = np.sqrt(dx**2 + dy**2 + dz**2)
        dm_total = np.sqrt(dx_total**2 + dy_total**2 + dz_total**2)

        if dm_total > max_offset:
            sys.exit("Total offset exceeded specified max_offset (%0.2f m). Consider increasing -max_offset argument" % max_offset)

        #Stop iteration
        if n > max_iter or dm < tol:

            if fig is not None:
                dst_fn = outprefix + '_%s_iter%02i_plot.png' % (mode, n)
                print("Writing offset plot: %s" % dst_fn)
                fig.gca().set_title("Incremental:%s\nCumulative:%s" % (xyz_shift_str_iter, xyz_shift_str_cum))
                fig.savefig(dst_fn, dpi=300)

            #Compute final elevation difference
            if True:
                ref_dem_clip_ds_align, src_dem_clip_ds_align = warplib.memwarp_multi([ref_dem_ds, src_dem_ds_align], \
                        res=res, extent='intersection', t_srs=local_srs, r='cubic')
                ref_dem_align = iolib.ds_getma(ref_dem_clip_ds_align, 1)
                src_dem_align = iolib.ds_getma(src_dem_clip_ds_align, 1)
                ref_dem_clip_ds_align = None

                diff_align = src_dem_align - ref_dem_align
                src_dem_align = None
                ref_dem_align = None

                #Get updated, final mask
                static_mask_final = get_mask(src_dem_clip_ds_align, mask_list, src_dem_fn)
                static_mask_final = np.logical_or(np.ma.getmaskarray(diff_align), static_mask_final)
                
                #Final stats, before outlier removal
                diff_align_compressed = diff_align[~static_mask_final]
                diff_align_stats = malib.get_stats_dict(diff_align_compressed, full=True)

                #Prepare filtered version for tiltcorr fit
                diff_align_filt = np.ma.array(diff_align, mask=static_mask_final)
                diff_align_filt = outlier_filter(diff_align_filt, f=3, max_dz=max_dz)
                #diff_align_filt = outlier_filter(diff_align_filt, perc=(12.5, 87.5), max_dz=max_dz)
                slope = get_filtered_slope(src_dem_clip_ds_align)
                diff_align_filt = np.ma.array(diff_align_filt, mask=np.ma.getmaskarray(slope))
                diff_align_filt_stats = malib.get_stats_dict(diff_align_filt, full=True)

            #Fit 2D polynomial to residuals and remove
            #To do: add support for along-track and cross-track artifacts
            if tiltcorr and not tiltcorr_done:
                print("\n************")
                print("Calculating 'tiltcorr' 2D polynomial fit to residuals with order %i" % polyorder)
                print("************\n")
                gt = src_dem_clip_ds_align.GetGeoTransform()

                #Need to apply the mask here, so we're only fitting over static surfaces
                #Note that the origmask=False will compute vals for all x and y indices, which is what we want
                vals, resid, coeff = geolib.ma_fitpoly(diff_align_filt, order=polyorder, gt=gt, perc=(0,100), origmask=False)
                #vals, resid, coeff = geolib.ma_fitplane(diff_align_filt, gt, perc=(12.5, 87.5), origmask=False)

                #Should write out coeff or grid with correction 

                vals_stats = malib.get_stats_dict(vals)

                #Want to have max_tilt check here
                #max_tilt = 4.0 #m
                #Should do percentage
                #vals.ptp() > max_tilt

                #Note: dimensions of ds and vals will be different as vals are computed for clipped intersection
                #Need to recompute planar offset for full src_dem_ds_align extent and apply
                xgrid, ygrid = geolib.get_xy_grids(src_dem_ds_align)
                valgrid = geolib.polyval2d(xgrid, ygrid, coeff) 
                #For results of ma_fitplane
                #valgrid = coeff[0]*xgrid + coeff[1]*ygrid + coeff[2]
                src_dem_ds_align = coreglib.apply_z_shift(src_dem_ds_align, -valgrid, createcopy=False)

                if True:
                    print("Creating plot of polynomial fit to residuals")
                    fig, axa = plt.subplots(1,2, figsize=(8, 4))
                    dz_clim = malib.calcperc_sym(vals, (2, 98))
                    ax = pltlib.iv(diff_align_filt, ax=axa[0], cmap='RdBu', clim=dz_clim, \
                            label='Residual dz (m)', scalebar=False)
                    ax = pltlib.iv(valgrid, ax=axa[1], cmap='RdBu', clim=dz_clim, \
                            label='Polyfit dz (m)', ds=src_dem_ds_align)
                    #if tiltcorr:
                        #xyz_shift_str_cum_fn += "_tiltcorr"
                    tiltcorr_fig_fn = outprefix + '%s_polyfit.png' % xyz_shift_str_cum_fn
                    print("Writing out figure: %s\n" % tiltcorr_fig_fn)
                    fig.savefig(tiltcorr_fig_fn, dpi=300)

                print("Applying tilt correction to difference map")
                diff_align -= vals

                #Should iterate until tilts are below some threshold
                #For now, only do one tiltcorr
                tiltcorr_done=True
                #Now use original tolerance, and number of iterations 
                tol = args.tol
                max_iter = n + args.max_iter
            else:
                break

    if True:
        #Write out aligned difference map for clipped extent with vertical offset removed
        align_diff_fn = outprefix + '%s_align_diff.tif' % xyz_shift_str_cum_fn
        print("Writing out aligned difference map with median vertical offset removed")
        iolib.writeGTiff(diff_align, align_diff_fn, src_dem_clip_ds_align)

    if True:
        #Write out filtered aligned difference map
        align_diff_filt_fn = outprefix + '%s_align_diff_filt.tif' % xyz_shift_str_cum_fn
        print("Writing out filtered aligned difference map with median vertical offset removed")
        iolib.writeGTiff(diff_align_filt, align_diff_filt_fn, src_dem_clip_ds_align)

    #Extract final center coordinates for intersection
    center_coord_ll = geolib.get_center(src_dem_clip_ds_align, t_srs=geolib.wgs_srs)
    center_coord_xy = geolib.get_center(src_dem_clip_ds_align)
    src_dem_clip_ds_align = None

    #Write out final aligned src_dem 
    align_fn = outprefix + '%s_align.tif' % xyz_shift_str_cum_fn
    print("Writing out shifted src_dem with median vertical offset removed: %s" % align_fn)
    #Open original uncorrected dataset at native resolution
    src_dem_ds = gdal.Open(src_dem_fn)
    src_dem_ds_align = iolib.mem_drv.CreateCopy('', src_dem_ds, 0)
    #Apply final horizontal and vertical shift to the original dataset
    #Note: potentially issues if we used a different projection during coregistration!
    src_dem_ds_align = coreglib.apply_xy_shift(src_dem_ds_align, dx_total, dy_total, createcopy=False)
    src_dem_ds_align = coreglib.apply_z_shift(src_dem_ds_align, dz_total, createcopy=False)
    if tiltcorr:
        xgrid, ygrid = geolib.get_xy_grids(src_dem_ds_align)
        valgrid = geolib.polyval2d(xgrid, ygrid, coeff) 
        #For results of ma_fitplane
        #valgrid = coeff[0]*xgrid + coeff[1]*ygrid + coeff[2]
        src_dem_ds_align = coreglib.apply_z_shift(src_dem_ds_align, -valgrid, createcopy=False)
    #Might be cleaner way to write out MEM ds directly to disk
    src_dem_full_align = iolib.ds_getma(src_dem_ds_align)
    iolib.writeGTiff(src_dem_full_align, align_fn, src_dem_ds_align)

    if True:
        #Output final aligned src_dem, masked so only best pixels are preserved
        #Useful if creating a new reference product
        #Can also use apply_mask.py 
        print("Applying filter to shiftec src_dem")
        align_diff_filt_full_ds = warplib.memwarp_multi_fn([align_diff_filt_fn,], res=src_dem_ds_align, extent=src_dem_ds_align, \
                t_srs=src_dem_ds_align)[0]
        align_diff_filt_full = iolib.ds_getma(align_diff_filt_full_ds)
        align_diff_filt_full_ds = None
        align_fn_masked = outprefix + '%s_align_filt.tif' % xyz_shift_str_cum_fn
        iolib.writeGTiff(np.ma.array(src_dem_full_align, mask=np.ma.getmaskarray(align_diff_filt_full)), \
                align_fn_masked, src_dem_ds_align)

    src_dem_full_align = None
    src_dem_ds_align = None

    #Compute original elevation difference
    if True:
        ref_dem_clip_ds, src_dem_clip_ds = warplib.memwarp_multi([ref_dem_ds, src_dem_ds], \
                res=res, extent='intersection', t_srs=local_srs, r='cubic')
        src_dem_ds = None
        ref_dem_ds = None
        ref_dem_orig = iolib.ds_getma(ref_dem_clip_ds)
        src_dem_orig = iolib.ds_getma(src_dem_clip_ds)
        #Needed for plotting
        ref_dem_hs = geolib.gdaldem_mem_ds(ref_dem_clip_ds, processing='hillshade', returnma=True, computeEdges=True)
        src_dem_hs = geolib.gdaldem_mem_ds(src_dem_clip_ds, processing='hillshade', returnma=True, computeEdges=True)
        diff_orig = src_dem_orig - ref_dem_orig
        #Only compute stats over valid surfaces
        static_mask_orig = get_mask(src_dem_clip_ds, mask_list, src_dem_fn)
        #Note: this doesn't include outlier removal or slope mask!
        static_mask_orig = np.logical_or(np.ma.getmaskarray(diff_orig), static_mask_orig)
        #For some reason, ASTER DEM diff have a spike near the 0 bin, could be an issue with masking?
        diff_orig_compressed = diff_orig[~static_mask_orig]
        diff_orig_stats = malib.get_stats_dict(diff_orig_compressed, full=True)

        #Prepare filtered version for comparison 
        diff_orig_filt = np.ma.array(diff_orig, mask=static_mask_orig)
        diff_orig_filt = outlier_filter(diff_orig_filt, f=3, max_dz=max_dz)
        #diff_orig_filt = outlier_filter(diff_orig_filt, perc=(12.5, 87.5), max_dz=max_dz)
        slope = get_filtered_slope(src_dem_clip_ds)
        diff_orig_filt = np.ma.array(diff_orig_filt, mask=np.ma.getmaskarray(slope))
        diff_orig_filt_stats = malib.get_stats_dict(diff_orig_filt, full=True)

        #Write out original difference map
        print("Writing out original difference map for common intersection before alignment")
        orig_diff_fn = outprefix + '_orig_diff.tif'
        iolib.writeGTiff(diff_orig, orig_diff_fn, ref_dem_clip_ds)
        src_dem_clip_ds = None
        ref_dem_clip_ds = None

    if True:
        align_stats_fn = outprefix + '%s_align_stats.json' % xyz_shift_str_cum_fn
        align_stats = {}
        align_stats['src_fn'] = src_dem_fn 
        align_stats['ref_fn'] = ref_dem_fn 
        align_stats['align_fn'] = align_fn 
        align_stats['res'] = {} 
        align_stats['res']['src'] = src_dem_res
        align_stats['res']['ref'] = ref_dem_res
        align_stats['res']['coreg'] = res
        align_stats['center_coord'] = {'lon':center_coord_ll[0], 'lat':center_coord_ll[1], \
                'x':center_coord_xy[0], 'y':center_coord_xy[1]}
        align_stats['shift'] = {'dx':dx_total, 'dy':dy_total, 'dz':dz_total, 'dm':dm_total}
        #This tiltcorr flag gets set to false, need better flag
        if tiltcorr:
            align_stats['tiltcorr'] = {}
            align_stats['tiltcorr']['coeff'] = coeff.tolist()
            align_stats['tiltcorr']['val_stats'] = vals_stats
        align_stats['before'] = diff_orig_stats
        align_stats['before_filt'] = diff_orig_filt_stats
        align_stats['after'] = diff_align_stats
        align_stats['after_filt'] = diff_align_filt_stats
        
        import json
        with open(align_stats_fn, 'w') as f:
            json.dump(align_stats, f)

    #Create output plot
    if True:
        print("Creating final plot")
        kwargs = {'interpolation':'none'}
        #f, axa = plt.subplots(2, 4, figsize=(11, 8.5))
        f, axa = plt.subplots(2, 4, figsize=(16, 8))
        for ax in axa.ravel()[:-1]:
            ax.set_facecolor('k')
            pltlib.hide_ticks(ax)
        dem_clim = malib.calcperc(ref_dem_orig, (2,98))
        axa[0,0].imshow(ref_dem_hs, cmap='gray', **kwargs)
        im = axa[0,0].imshow(ref_dem_orig, cmap='cpt_rainbow', clim=dem_clim, alpha=0.6, **kwargs)
        pltlib.add_cbar(axa[0,0], im, arr=ref_dem_orig, clim=dem_clim, label=None)
        pltlib.add_scalebar(axa[0,0], res=res)
        axa[0,0].set_title('Reference DEM')
        axa[0,1].imshow(src_dem_hs, cmap='gray', **kwargs)
        im = axa[0,1].imshow(src_dem_orig, cmap='cpt_rainbow', clim=dem_clim, alpha=0.6, **kwargs)
        pltlib.add_cbar(axa[0,1], im, arr=src_dem_orig, clim=dem_clim, label=None)
        axa[0,1].set_title('Source DEM')
        #axa[0,2].imshow(~static_mask_orig, clim=(0,1), cmap='gray')
        axa[0,2].imshow(~static_mask, clim=(0,1), cmap='gray', **kwargs)
        axa[0,2].set_title('Surfaces for co-registration')
        dz_clim = malib.calcperc_sym(diff_orig_compressed, (5, 95))
        im = axa[1,0].imshow(diff_orig, cmap='RdBu', clim=dz_clim)
        pltlib.add_cbar(axa[1,0], im, arr=diff_orig, clim=dz_clim, label=None)
        axa[1,0].set_title('Elev. Diff. Before (m)')
        im = axa[1,1].imshow(diff_align, cmap='RdBu', clim=dz_clim)
        pltlib.add_cbar(axa[1,1], im, arr=diff_align, clim=dz_clim, label=None)
        axa[1,1].set_title('Elev. Diff. After (m)')

        #tight_dz_clim = (-1.0, 1.0)
        tight_dz_clim = (-2.0, 2.0)
        #tight_dz_clim = (-10.0, 10.0)
        #tight_dz_clim = malib.calcperc_sym(diff_align_filt, (5, 95))
        im = axa[1,2].imshow(diff_align_filt, cmap='RdBu', clim=tight_dz_clim)
        pltlib.add_cbar(axa[1,2], im, arr=diff_align_filt, clim=tight_dz_clim, label=None)
        axa[1,2].set_title('Elev. Diff. After (m)')

        #Tried to insert Nuth fig here
        #ax_nuth.change_geometry(1,2,1)
        #f.axes.append(ax_nuth)

        bins = np.linspace(dz_clim[0], dz_clim[1], 128)
        axa[1,3].hist(diff_orig_compressed, bins, color='g', label='Before', alpha=0.5)
        axa[1,3].hist(diff_align_compressed, bins, color='b', label='After', alpha=0.5)
        axa[1,3].set_xlim(*dz_clim)
        axa[1,3].axvline(0, color='k', linewidth=0.5, linestyle=':')
        axa[1,3].set_xlabel('Elev. Diff. (m)')
        axa[1,3].set_ylabel('Count (px)')
        axa[1,3].set_title("Source - Reference")
        before_str = 'Before\nmed: %0.2f\nnmad: %0.2f' % (diff_orig_stats['med'], diff_orig_stats['nmad'])
        axa[1,3].text(0.05, 0.95, before_str, va='top', color='g', transform=axa[1,3].transAxes, fontsize=8)
        after_str = 'After\nmed: %0.2f\nnmad: %0.2f' % (diff_align_stats['med'], diff_align_stats['nmad'])
        axa[1,3].text(0.65, 0.95, after_str, va='top', color='b', transform=axa[1,3].transAxes, fontsize=8)

        #This is empty
        axa[0,3].axis('off')

        suptitle = '%s\nx: %+0.2fm, y: %+0.2fm, z: %+0.2fm' % (os.path.split(outprefix)[-1], dx_total, dy_total, dz_total)
        f.suptitle(suptitle)
        f.tight_layout()
        plt.subplots_adjust(top=0.90)

        fig_fn = outprefix + '%s_align.png' % xyz_shift_str_cum_fn
        print("Writing out figure: %s" % fig_fn)
        f.savefig(fig_fn, dpi=300)
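The convergence test in the iteration loop above is the 3-D shift magnitude. With hypothetical per-iteration shifts:

import numpy as np
dx, dy, dz = 0.02, -0.01, 0.03
dm = np.sqrt(dx**2 + dy**2 + dz**2)   #~0.037 m; the loop stops when dm < tol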
Example #12
def crop_sim_res_extent(img_list, outfol, vrt=False, rpc=False):
    """
    Warp images to common 'finest' resolution and intersecting extent
    This is useful for stereo processing of mapprojected SkySat image pairs

    Parameters
    ----------
    img_list: list
        list containing two images
    outfol: str
        path to folder where warped images will be saved
    vrt: bool
        Produce warped VRT instead of geotiffs if True
    rpc: bool
        Copy RPC information to warped images if True
    Returns
    -------
    out: list
        list containing the two warped images; the first entry (left image) is the image with finer resolution (more nadir) initially
        If the images do not intersect, None is returned instead
    """
    resample_alg = 'lanczos'
    img1 = img_list[0]
    img2 = img_list[1]
    img1_ds = iolib.fn_getds(img1)
    img2_ds = iolib.fn_getds(img2)
    res1 = geolib.get_res(img1_ds, square=True)[0]
    res2 = geolib.get_res(img2_ds, square=True)[0]
    # set left image as higher resolution, this is repeated for video, but
    # good for triplet with no gsd information
    if res1 < res2:
        l_img = img1
        r_img = img2
        res = res1
    else:
        l_img = img2
        r_img = img1
        res = res2
    # ASP stereo commands expect the input to be .tif/.tiff, and complain about .vrt
    # Try to save with the VRT driver but a .tif extension?
    l_img_warp = os.path.join(
        outfol,
        os.path.splitext(os.path.basename(l_img))[0] + '_warp.tif')
    r_img_warp = os.path.join(
        outfol,
        os.path.splitext(os.path.basename(r_img))[0] + '_warp.tif')
    if not (os.path.exists(l_img_warp)):
        # can turn on verbose during qa/qc
        # Better to turn off during large runs, writing takes time
        verbose = False
        if not os.path.exists(outfol):
            os.makedirs(outfol)
        try:
            #this will simply break and continue if the images do not intersect
            ds_list = warplib.memwarp_multi_fn([l_img, r_img],
                                               r=resample_alg,
                                               verbose=verbose,
                                               res='min',
                                               extent='intersection')
            if vrt:
                extent = geolib.ds_extent(ds_list[0])
                res = geolib.get_res(ds_list[0], square=True)
                vrt_options = gdal.BuildVRTOptions(resampleAlg='average',
                                                   resolution='user',
                                                   xRes=res[0],
                                                   yRes=res[1],
                                                   outputBounds=tuple(extent))
                l_vrt = gdal.BuildVRT(l_img_warp, [
                    l_img,
                ],
                                      options=vrt_options)
                r_vrt = gdal.BuildVRT(r_img_warp, [
                    r_img,
                ],
                                      options=vrt_options)
                # close vrt to save to disk
                l_vrt = None
                r_vrt = None
                out = [l_img_warp, r_img_warp]
            else:
                # Opting out of writing VRTs here, to prevent correlation
                # artifacts. GeoTiffs will be written out instead
                l_img_ma = iolib.ds_getma(ds_list[0])
                r_img_ma = iolib.ds_getma(ds_list[1])
                iolib.writeGTiff(l_img_ma, l_img_warp, ds_list[0])
                iolib.writeGTiff(r_img_ma, r_img_warp, ds_list[1])
                out = [l_img_warp, r_img_warp]
                del ds_list
                if rpc:
                    copy_rpc(l_img, l_img_warp)
                    copy_rpc(r_img, r_img_warp)
        except BaseException:
            out = None
    else:
        out = [l_img_warp, r_img_warp]
    return out
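The left/right ordering above reduces to picking the finer-resolution image as the left image. In compact form (illustrative filenames):

res1, res2 = 0.7, 0.9   #hypothetical GSDs (m)
l_img, r_img = ('img1.tif', 'img2.tif') if res1 < res2 else ('img2.tif', 'img1.tif')
res = min(res1, res2)   #left image is the finer of the pair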
Example #13
    z1 = np.ma.array(iolib.ds_getma(ds_list[0]), mask=glac_geom_mask)
    z2 = np.ma.array(z2, mask=glac_geom_mask)
    dz = z2 - z1
    if dz.count() == 0:
        print("No valid dz pixels")
        continue

    filter_outliers = True
    #Remove clearly bogus pixels
    if filter_outliers:
        #bad_perc = (0.1, 99.9)
        bad_perc = (1, 99)
        rangelim = malib.calcperc(dz, bad_perc)
        dz = np.ma.masked_outside(dz, *rangelim)

    ds_res = geolib.get_res(ds_list[0])
    valid_area = dz.count() * ds_res[0] * ds_res[1]
    valid_area_perc = valid_area / glac_area
    min_valid_area_perc = 0.80
    if valid_area_perc < min_valid_area_perc:
        print(
            "Not enough valid pixels: %0.1f%% of glacier polygon area"
            % (100 * valid_area_perc))
        continue

    #Rasterize NED source dates
    if site == 'conus':
        z1_date_r_ds = iolib.mem_drv.CreateCopy('', ds_list[0])
        gdal.RasterizeLayer(z1_date_r_ds, [1],
                            z1_date_shp_lyr,
                            options=["ATTRIBUTE=S_DATE_CLN"])
Example #14
def main2(args):
    #Should check that files exist
    dem1_fn = args.ref_fn
    dem2_fn = args.src_fn
    mode = args.mode
    apply_mask = not args.nomask
    max_offset_m = args.max_offset
    tiltcorr = args.tiltcorr

    #These are tolerances (in meters) to stop iteration
    tol = args.tol
    min_dx = tol
    min_dy = tol
    min_dz = tol

    #Maximum number of iterations
    max_n = 10

    outdir = args.outdir
    if outdir is None:
        outdir = os.path.splitext(dem2_fn)[0] + '_dem_align'

    if not os.path.exists(outdir):
        os.makedirs(outdir)

    outprefix = '%s_%s' % (os.path.splitext(os.path.split(dem2_fn)[-1])[0], \
            os.path.splitext(os.path.split(dem1_fn)[-1])[0])
    outprefix = os.path.join(outdir, outprefix)

    print("\nReference: %s" % dem1_fn)
    print("Source: %s" % dem2_fn)
    print("Mode: %s" % mode)
    print("Output: %s\n" % outprefix)

    dem2_ds = gdal.Open(dem2_fn, gdal.GA_ReadOnly)
    #Often the "ref" DEM is high-res lidar or similar
    #This is a shortcut to resample to match "source" DEM
    dem1_ds = warplib.memwarp_multi_fn([
        dem1_fn,
    ],
                                       res=dem2_ds,
                                       extent=dem2_ds,
                                       t_srs=dem2_ds)[0]
    #dem1_ds = gdal.Open(dem1_fn, gdal.GA_ReadOnly)

    #Create a copy to be updated in place
    dem2_ds_align = iolib.mem_drv.CreateCopy('', dem2_ds, 0)
    #dem2_ds_align = dem2_ds

    #Iteration number
    n = 1
    #Cumulative offsets
    dx_total = 0
    dy_total = 0
    dz_total = 0

    #Now iteratively update geotransform and vertical shift
    while True:
        print("*** Iteration %i ***" % n)
        dx, dy, dz, static_mask, fig = compute_offset(dem1_ds,
                                                      dem2_ds_align,
                                                      dem2_fn,
                                                      mode,
                                                      max_offset_m,
                                                      apply_mask=apply_mask)
        if n == 1:
            static_mask_orig = static_mask
        xyz_shift_str_iter = "dx=%+0.2fm, dy=%+0.2fm, dz=%+0.2fm" % (dx, dy,
                                                                     dz)
        print("Incremental offset: %s" % xyz_shift_str_iter)

        #Should make an animation of this converging
        if fig is not None:
            dst_fn = outprefix + '_%s_iter%i_plot.png' % (mode, n)
            print("Writing offset plot: %s" % dst_fn)
            fig.gca().set_title(xyz_shift_str_iter)
            fig.savefig(dst_fn, dpi=300, bbox_inches='tight', pad_inches=0.1)

        #Apply the horizontal shift to the original dataset
        dem2_ds_align = coreglib.apply_xy_shift(dem2_ds_align,
                                                dx,
                                                dy,
                                                createcopy=False)
        dem2_ds_align = coreglib.apply_z_shift(dem2_ds_align,
                                               dz,
                                               createcopy=False)

        dx_total += dx
        dy_total += dy
        dz_total += dz
        print("Cumulative offset: dx=%+0.2fm, dy=%+0.2fm, dz=%+0.2fm" %
              (dx_total, dy_total, dz_total))

        #Fit plane to residuals and remove
        #Might be better to do this after converging
        """
        if tiltcorr:
            print("Applying planar tilt correction")
            gt = dem2_ds_align.GetGeoTransform()
            #Need to compute diff_euler here
            #Copy portions of compute_offset, create new function 
            vals, resid, coeff = geolib.ma_fitplane(diff_euler_align, gt, perc=(4, 96))
            dem2_ds_align = coreglib.apply_z_shift(dem2_ds_align, -vals, createcopy=False)
        """

        n += 1
        print("\n")
        #If magnitude of shift in all directions is less than tol
        #if n > max_n or (abs(dx) <= min_dx and abs(dy) <= min_dy and abs(dz) <= min_dz):
        #If magnitude of shift is less than tol
        dm = np.sqrt(dx**2 + dy**2 + dz**2)
        if n > max_n or dm < tol:
            break

    #String to append to output filenames
    xyz_shift_str_cum = '_%s_x%+0.2f_y%+0.2f_z%+0.2f' % (mode, dx_total,
                                                         dy_total, dz_total)
    if tiltcorr:
        xyz_shift_str_cum += "_tiltcorr"

    #Compute original elevation difference
    if True:
        dem1_clip_ds, dem2_clip_ds = warplib.memwarp_multi([dem1_ds, dem2_ds], \
                res='max', extent='intersection', t_srs=dem2_ds)
        dem1_orig = iolib.ds_getma(dem1_clip_ds, 1)
        dem2_orig = iolib.ds_getma(dem2_clip_ds, 1)
        diff_euler_orig = dem2_orig - dem1_orig
        if not apply_mask:
            static_mask_orig = np.ma.getmaskarray(diff_euler_orig)
        diff_euler_orig_compressed = diff_euler_orig[~static_mask_orig]
        diff_euler_orig_stats = np.array(
            malib.print_stats(diff_euler_orig_compressed))

        #Write out original eulerian difference map
        print(
            "Writing out original euler difference map for common intersection before alignment"
        )
        dst_fn = outprefix + '_orig_dz_eul.tif'
        iolib.writeGTiff(diff_euler_orig, dst_fn, dem1_clip_ds)

    #Compute final elevation difference
    if True:
        dem1_clip_ds_align, dem2_clip_ds_align = warplib.memwarp_multi([dem1_ds, dem2_ds_align], \
                res='max', extent='intersection', t_srs=dem2_ds_align)
        dem1_align = iolib.ds_getma(dem1_clip_ds_align, 1)
        dem2_align = iolib.ds_getma(dem2_clip_ds_align, 1)
        diff_euler_align = dem2_align - dem1_align
        if not apply_mask:
            static_mask = np.ma.getmaskarray(diff_euler_align)
        diff_euler_align_compressed = diff_euler_align[~static_mask]
        diff_euler_align_stats = np.array(
            malib.print_stats(diff_euler_align_compressed))

        #Fit plane to residuals and remove
        if tiltcorr:
            print("Applying planar tilt correction")
            gt = dem1_clip_ds_align.GetGeoTransform()
            #Need to apply the mask here, so we're only fitting over static surfaces
            #Note that the origmask=False will compute vals for all x and y indices, which is what we want
            vals, resid, coeff = geolib.ma_fitplane(np.ma.array(diff_euler_align, mask=static_mask), \
                    gt, perc=(4, 96), origmask=False)
            #Remove planar offset from difference map
            diff_euler_align -= vals
            #Remove planar offset from aligned dem2
            #Note: dimensions of ds and vals will be different as vals are computed for clipped intersection
            #Recompute planar offset for dem2_ds_align extent
            xgrid, ygrid = geolib.get_xy_grids(dem2_ds_align)
            vals = coeff[0] * xgrid + coeff[1] * ygrid + coeff[2]
            dem2_ds_align = coreglib.apply_z_shift(dem2_ds_align,
                                                   -vals,
                                                   createcopy=False)
            if not apply_mask:
                static_mask = np.ma.getmaskarray(diff_euler_align)
            diff_euler_align_compressed = diff_euler_align[~static_mask]
            diff_euler_align_stats = np.array(
                malib.print_stats(diff_euler_align_compressed))
            print("Creating fitplane plot")
            fig, ax = plt.subplots(figsize=(6, 6))
            fitplane_clim = malib.calcperc(vals, (2, 98))
            im = ax.imshow(vals, cmap='cpt_rainbow', clim=fitplane_clim)
            res = float(geolib.get_res(dem2_clip_ds, square=True)[0])
            pltlib.add_scalebar(ax, res=res)
            pltlib.hide_ticks(ax)
            pltlib.add_cbar(ax, im, label='Fit plane residuals (m)')
            fig.tight_layout()
            dst_fn1 = outprefix + '%s_align_dz_eul_fitplane.png' % xyz_shift_str_cum
            print("Writing out figure: %s" % dst_fn1)
            fig.savefig(dst_fn1, dpi=300, bbox_inches='tight', pad_inches=0.1)

        #Compute higher-order fits?
        #Could also attempt to model along-track and cross-track artifacts

        #Write out aligned eulerian difference map for clipped extent with vertical offset removed
        dst_fn = outprefix + '%s_align_dz_eul.tif' % xyz_shift_str_cum
        print(
            "Writing out aligned difference map with median vertical offset removed"
        )
        iolib.writeGTiff(diff_euler_align, dst_fn, dem1_clip_ds)

    #Write out aligned dem2 with vertical offset removed
    if True:
        dst_fn2 = outprefix + '%s_align.tif' % xyz_shift_str_cum
        print(
            "Writing out shifted dem2 with median vertical offset removed: %s"
            % dst_fn2)
        #Might be cleaner way to write out MEM ds directly to disk
        dem2_align = iolib.ds_getma(dem2_ds_align)
        iolib.writeGTiff(dem2_align, dst_fn2, dem2_ds_align)
        dem2_ds_align = None

    #Create output plot
    if True:
        print("Creating final plot")
        dem1_hs = geolib.gdaldem_mem_ma(dem1_orig, dem1_clip_ds, returnma=True)
        dem2_hs = geolib.gdaldem_mem_ma(dem2_orig, dem2_clip_ds, returnma=True)
        f, axa = plt.subplots(2, 3, figsize=(11, 8.5))
        for ax in axa.ravel()[:-1]:
            ax.set_facecolor('k')
            pltlib.hide_ticks(ax)
        dem_clim = malib.calcperc(dem1_orig, (2, 98))
        axa[0, 0].imshow(dem1_hs, cmap='gray')
        axa[0, 0].imshow(dem1_orig,
                         cmap='cpt_rainbow',
                         clim=dem_clim,
                         alpha=0.6)
        res = float(geolib.get_res(dem1_clip_ds, square=True)[0])
        pltlib.add_scalebar(axa[0, 0], res=res)
        axa[0, 0].set_title('Reference DEM')
        axa[0, 1].imshow(dem2_hs, cmap='gray')
        axa[0, 1].imshow(dem2_orig,
                         cmap='cpt_rainbow',
                         clim=dem_clim,
                         alpha=0.6)
        axa[0, 1].set_title('Source DEM')
        axa[0, 2].imshow(~static_mask_orig, clim=(0, 1), cmap='gray')
        axa[0, 2].set_title('Surfaces for co-registration')
        dz_clim = malib.calcperc_sym(diff_euler_orig_compressed, (5, 95))
        im = axa[1, 0].imshow(diff_euler_orig, cmap='RdBu', clim=dz_clim)
        pltlib.add_cbar(axa[1, 0], im, label=None)
        axa[1, 0].set_title('Elev. Diff. Before (m)')
        im = axa[1, 1].imshow(diff_euler_align, cmap='RdBu', clim=dz_clim)
        pltlib.add_cbar(axa[1, 1], im, label=None)
        axa[1, 1].set_title('Elev. Diff. After (m)')

        #Tried to insert Nuth fig here
        #ax_nuth.change_geometry(1,2,1)
        #f.axes.append(ax_nuth)

        bins = np.linspace(dz_clim[0], dz_clim[1], 128)
        axa[1, 2].hist(diff_euler_orig_compressed,
                       bins,
                       color='g',
                       label='Before',
                       alpha=0.5)
        axa[1, 2].hist(diff_euler_align_compressed,
                       bins,
                       color='b',
                       label='After',
                       alpha=0.5)
        axa[1, 2].axvline(0, color='k', linewidth=0.5, linestyle=':')
        axa[1, 2].set_xlabel('Elev. Diff. (m)')
        axa[1, 2].set_ylabel('Count (px)')
        axa[1, 2].set_title("Source - Reference")
        #axa[1,2].legend(loc='upper right')
        #before_str = 'Before\nmean: %0.2f\nstd: %0.2f\nmed: %0.2f\nnmad: %0.2f' % tuple(diff_euler_orig_stats[np.array((3,4,5,6))])
        #after_str = 'After\nmean: %0.2f\nstd: %0.2f\nmed: %0.2f\nnmad: %0.2f' % tuple(diff_euler_align_stats[np.array((3,4,5,6))])
        before_str = 'Before\nmed: %0.2f\nnmad: %0.2f' % tuple(
            diff_euler_orig_stats[np.array((5, 6))])
        axa[1, 2].text(0.05,
                       0.95,
                       before_str,
                       va='top',
                       color='g',
                       transform=axa[1, 2].transAxes)
        after_str = 'After\nmed: %0.2f\nnmad: %0.2f' % tuple(
            diff_euler_align_stats[np.array((5, 6))])
        axa[1, 2].text(0.65,
                       0.95,
                       after_str,
                       va='top',
                       color='b',
                       transform=axa[1, 2].transAxes)

        suptitle = '%s\nx: %+0.2fm, y: %+0.2fm, z: %+0.2fm' % (
            os.path.split(outprefix)[-1], dx_total, dy_total, dz_total)
        f.suptitle(suptitle)
        f.tight_layout()
        plt.subplots_adjust(top=0.90)

        dst_fn = outprefix + '%s_align.png' % xyz_shift_str_cum
        print("Writing out figure: %s" % dst_fn)
        f.savefig(dst_fn, dpi=300, bbox_inches='tight', pad_inches=0.1)

        #Removing residual planar tilt can introduce additional slope/aspect dependent offset
        #Want to run another round of main dem_align after removing planar tilt
        if tiltcorr:
            print("\n Rerunning after applying tilt correction \n")
            #Create copy of original arguments
            import copy
            args2 = copy.copy(args)
            #Use aligned, tilt-corrected DEM as input src_fn for second round
            args2.src_fn = dst_fn2
            #Assume we've already corrected most of the tilt during first round (also prevents endless loop)
            args2.tiltcorr = False
            main2(args2)
Example #15
def compute_offset(dem1_ds,
                   dem2_ds,
                   dem2_fn,
                   mode='nuth',
                   max_offset_m=100,
                   remove_outliers=True,
                   apply_mask=True):
    #Make sure the input datasets have the same resolution/extent
    #Use projection of source DEM
    dem1_clip_ds, dem2_clip_ds = warplib.memwarp_multi([dem1_ds, dem2_ds], \
            res='max', extent='intersection', t_srs=dem2_ds)

    #Compute size of NCC and SAD search window in pixels
    res = float(geolib.get_res(dem1_clip_ds, square=True)[0])
    max_offset_px = (max_offset_m / res) + 1
    #print(max_offset_px)
    pad = (int(max_offset_px), int(max_offset_px))

    #This will be the updated geotransform for dem2
    dem2_gt = np.array(dem2_clip_ds.GetGeoTransform())

    #Load the arrays
    dem1 = iolib.ds_getma(dem1_clip_ds, 1)
    dem2 = iolib.ds_getma(dem2_clip_ds, 1)

    #Compute difference for unaligned inputs
    print("Elevation difference stats for uncorrected input DEMs")
    #Shouldn't need to worry about common mask here, as both inputs are ma
    diff_euler = dem2 - dem1

    static_mask = None
    if apply_mask:
        #Need dem2_fn here to find TOA fn
        static_mask = get_mask(dem2_clip_ds, dem2_fn)
        dem1 = np.ma.array(dem1, mask=static_mask)
        dem2 = np.ma.array(dem2, mask=static_mask)
        diff_euler = np.ma.array(diff_euler, mask=static_mask)
        static_mask = np.ma.getmaskarray(diff_euler)

    if diff_euler.count() == 0:
        sys.exit("No overlapping, unmasked pixels shared between input DEMs")

    #Compute stats for new masked difference map
    diff_stats = malib.print_stats(diff_euler)
    dz = diff_stats[5]

    #This needs further testing
    if remove_outliers:
        med = diff_stats[5]
        nmad = diff_stats[6]
        f = 3
        rmin = med - f * nmad
        rmax = med + f * nmad
        #Use IQR
        #rmin = diff_stats[7]
        #rmax = diff_stats[8]
        diff_euler = np.ma.masked_outside(diff_euler, rmin, rmax)
        #Should also apply to original dem1 and dem2 for sad and ncc
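        #Worked example (hypothetical numbers): med = 0.2 m, nmad = 1.5 m, f = 3
        #gives a valid range of (0.2 - 4.5, 0.2 + 4.5) = (-4.3, 4.7) m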

    print("Computing sub-pixel offset between DEMs using mode: %s" % mode)

    #By default, don't create output figure
    fig = None

    #Sum of absolute differences
    if mode == "sad":
        m, int_offset, sp_offset = coreglib.compute_offset_sad(dem1,
                                                               dem2,
                                                               pad=pad)
        #Geotransform has negative y resolution, so don't need negative sign
        #np array is positive down
        #GDAL coordinates are positive up
        dx = sp_offset[1] * dem2_gt[1]
        dy = sp_offset[0] * dem2_gt[5]
    #Normalized cross-correlation of clipped, overlapping areas
    elif mode == "ncc":
        m, int_offset, sp_offset, fig = coreglib.compute_offset_ncc(dem1, dem2, \
                pad=pad, prefilter=False, plot=True)
        dx = sp_offset[1] * dem2_gt[1]
        dy = sp_offset[0] * dem2_gt[5]
    #Nuth and Kaab (2011)
    elif mode == "nuth":
        print("Computing slope and aspect")
        dem1_slope = geolib.gdaldem_mem_ds(dem1_clip_ds,
                                           processing='slope',
                                           returnma=True)
        dem1_aspect = geolib.gdaldem_mem_ds(dem1_clip_ds,
                                            processing='aspect',
                                            returnma=True)
        #Compute relationship between elevation difference, slope and aspect
        fit_param, fig = coreglib.compute_offset_nuth(diff_euler, dem1_slope,
                                                      dem1_aspect)
        #fit_param[0] is magnitude of shift vector
        #fit_param[1] is direction of shift vector
        #fit_param[2] is mean bias divided by tangent of mean slope
        #print(fit_param)
        dx = fit_param[0] * np.sin(np.deg2rad(fit_param[1]))
        dy = fit_param[0] * np.cos(np.deg2rad(fit_param[1]))
        #med_slope = malib.fast_median(dem1_slope)
        #dz = fit_param[2]*np.tan(np.deg2rad(med_slope))
    elif mode == "all":
        print("Not yet implemented")
        #Want to compare all methods, average offsets
        #m, int_offset, sp_offset = coreglib.compute_offset_sad(dem1, dem2)
        #m, int_offset, sp_offset = coreglib.compute_offset_ncc(dem1, dem2)
    #This is a hack to apply the computed median bias correction for the shpclip area only
    #Note: outprefix and dem2_orig are assumed to be defined at module level here
    elif mode == "none":
        print(
            "Skipping alignment, writing out DEM with median bias over static surfaces removed"
        )
        dst_fn = outprefix + '_med%0.1f.tif' % dz
        iolib.writeGTiff(dem2_orig + dz, dst_fn, dem2_ds)
        sys.exit()
    #Note: minus signs here since we are computing dz=(src-ref), but adjusting src
    return -dx, -dy, -dz, static_mask, fig
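
A minimal standalone sketch of the shift-vector decomposition used in the 'nuth' branch above, with hypothetical fit values; it assumes fit_param holds (magnitude, direction in degrees, bias term) as described in the comments.

import numpy as np

#Hypothetical Nuth and Kaab fit: 4.2 m shift toward azimuth 135 degrees
fit_param = (4.2, 135.0, 0.3)
dx = fit_param[0] * np.sin(np.deg2rad(fit_param[1]))  #easting component, ~+2.97 m
dy = fit_param[0] * np.cos(np.deg2rad(fit_param[1]))  #northing component, ~-2.97 m
#compute_offset negates the values on return: dz=(src-ref), but the shift is applied to src
print("Apply to source DEM: dx=%+0.2f m, dy=%+0.2f m" % (-dx, -dy))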
Example #16
def main():
    parser = getparser()
    args = parser.parse_args()

    fn = args.fn

    #This is mean density for N Cascades snow
    #rho = 0.5
    #Density of pure ice
    rho = args.rho
    #If number is in kg/m^3 rather than g/cc
    if rho > 10.:
        rho /= 1000.

    #Clip negative values to 0
    filt = False

    src_ds = iolib.fn_getds(fn)
    res = geolib.get_res(src_ds, square=True)[0]
    bma = iolib.ds_getma(src_ds)

    #Attempt to extract t1 and t2 from input filename
    ts = timelib.fn_getdatetime_list(fn)
    #Hardcode timestamps
    #ts = [datetime.datetime(2013,9,10), datetime.datetime(2014,5,14)]

    dt_yr = None
    if len(ts) == 2:
        dt = ts[1] - ts[0]
        year = datetime.timedelta(days=365.25)
        dt_yr = dt.total_seconds() / year.total_seconds()

    #Can add filter here to remove outliers, perc_fltr(0.01, 99.9)
    if filt:
        mask = np.ma.getmaskarray(bma)
        bma[bma < 0] = 0
        bma = np.ma.array(bma, mask=mask)

    #Print out stats
    print('\n')
    stats = malib.print_stats(bma)
    print('\n')

    count = stats[0]
    area = res**2 * count
    mean = stats[3]
    med = stats[5]

    s_m3 = np.ma.sum(bma) * res**2
    s_km3 = s_m3 / 1E9
    s_mwe = mean * rho
    s_gt = s_km3 * rho
    #s_mm = s_gt/374
    #https://climatesanity.wordpress.com/conversion-factors-for-ice-and-water-mass-and-volume/
    s_mm = s_gt / 360

    if dt_yr is not None:
        print("%s to %s: %0.2f yr" % (ts[0], ts[1], dt_yr))
        print("%0.0f m^3 (%0.0f m^3/yr)" % (s_m3, s_m3 / dt_yr))
        print("%0.3f km^3 (%0.3f km^3/yr)" % (s_km3, s_km3 / dt_yr))
        print("Density: %0.3f g/cc" % rho)
        print("%0.3f GT (%0.3f GT/yr)" % (s_gt, s_gt / dt_yr))
        print("%0.6f mm SLR (%0.6f mm/yr)" % (s_mm, s_mm / dt_yr))
        print("%0.3f m.w.e. (%0.3f m.w.e./yr)" % (s_mwe, s_mwe / dt_yr))
    else:
        print("Area: %0.2f km2" % (area / 1E6))
        print("%0.0f m^3" % s_m3)
        print("%0.3f km^3" % s_km3)
        print("Density: %0.3f g/cc" % rho)
        print("%0.3f GT" % s_gt)
        print("%0.6f mm SLR" % s_mm)
        print("%0.3f m.w.e." % s_mwe)
    print('\n')
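
A short worked example of the unit conversions above, with a hypothetical volume change; the 360 Gt per mm of sea level follows the conversion reference linked in the code, and 1 g/cc equals 1 Gt/km^3.

#Hypothetical: mean elevation change of -1.5 m over 2e8 m^2 of valid pixels
s_m3 = -1.5 * 2e8        #volume change, m^3
s_km3 = s_m3 / 1E9       #-0.3 km^3
rho = 0.917              #density of pure ice, g/cc (equivalently Gt/km^3)
s_gt = s_km3 * rho       #-0.275 Gt
s_mm = s_gt / 360        #~360 Gt of water raises sea level by 1 mm
print("%0.3f km^3, %0.3f Gt, %0.6f mm SLE" % (s_km3, s_gt, s_mm))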
Example #17
    dem2_ts = timelib.fn_getdatetime_list(dz_fn)[-1]
    outprefix = os.path.splitext(os.path.split(dz_fn)[1])[0]

outprefix = os.path.join(args.outdir, outprefix)

#Calculate water year
wy = dem1_ts.year
if dem1_ts.month >= 10:
    wy = dem1_ts.year + 1

#These need to be updated in geolib to use gdaldem API
hs = geolib.gdaldem_mem_ds(dem1_ds, processing='hillshade', returnma=True)
hs_clim = (1,255)

dem_clim = malib.calcperc(dem1, (1,99))
res = geolib.get_res(dem1_ds)[0]

if args.density is None:
    #Attempt to extract from nearby SNOTEL sites for dem_ts
    #Attempt to use model
    #Last resort, use constant value
    rho_s = 0.5
    #rho_s = 0.4
    #rho_s = 0.36
else:
    rho_s = args.density

#Convert snow depth to swe
swe = dz * rho_s

if args.filter:
    print("Filtering SWE map")
    #Median filter to remove artifacts
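
A minimal check of the water-year logic above (USGS convention: the water year runs 1 October through 30 September and is labeled by the calendar year in which it ends); the dates are hypothetical.

from datetime import datetime

def water_year(ts):
    #Oct-Dec dates belong to the following calendar year's water year
    wy = ts.year
    if ts.month >= 10:
        wy = ts.year + 1
    return wy

print(water_year(datetime(2015, 11, 2)))   #2016
print(water_year(datetime(2016, 5, 14)))   #2016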
Example #18
File: vmap.py Project: whigg/vmap
def main():
    parser = getparser()
    args = parser.parse_args()
    if args.seedmode == 'existing_velocity':
        if args.vx_fn is None or args.vy_fn is None:
            parser.error('"-seedmode existing_velocity" requires "-vx_fn" and "-vy_fn"')

    print('\n%s' % datetime.now())
    print('%s UTC\n' % datetime.utcnow())

    align = args.align
    seedmode = args.seedmode
    spr = args.refinement
    erode = args.erode
    #Correlator tile timeout
    #With proper seeding, correlation should be very fast
    #timeout = 360 
    timeout = 1200 
    threads = args.threads

    kernel = (args.kernel, args.kernel)
    #SGM correlator
    if spr > 3:
        #kernel = (7,7)
        kernel = (11,11)
        erode = 0

    #Smooth the output F.tif 
    smoothF = args.filter 

    res = args.tr
    #Resample input to something easier to work with
    #res = 4.0

    #Open input files
    fn1 = args.fn1
    fn2 = args.fn2 

    if not iolib.fn_check(fn1) or not iolib.fn_check(fn2):
        sys.exit("Unable to locate input files")

    if args.outdir is not None:
        outdir = args.outdir
    else:
        outdir = '%s__%s_vmap_%sm_%ipx_spm%i' % (os.path.splitext(os.path.split(fn1)[1])[0], \
                os.path.splitext(os.path.split(fn2)[1])[0], res, kernel[0], spr)

    #Note, can encounter filename length issues in boost, just use vmap prefix
    outprefix = '%s/vmap' % (outdir)
    if not os.path.exists(outdir):
        os.makedirs(outdir)

    #Check to see if inputs have geolocation and projection information
    ds1 = iolib.fn_getds(fn1)
    ds2 = iolib.fn_getds(fn2)

    if geolib.srs_check(ds1) and geolib.srs_check(ds2):
        ds1_clip_fn = os.path.join(outdir, os.path.splitext(os.path.basename(fn1))[0]+'_warp.tif')
        ds2_clip_fn = os.path.join(outdir, os.path.splitext(os.path.basename(fn2))[0]+'_warp.tif')

        if not os.path.exists(ds1_clip_fn) or not os.path.exists(ds2_clip_fn):
            #This should write out files to new subdir
            ds1_clip, ds2_clip = warplib.diskwarp_multi_fn([fn1, fn2], extent='intersection', res=res, r='average', outdir=outdir)
            ds1_clip = None
            ds2_clip = None
            #However, if inputs have identical extent/res/proj, then link to original files
            if not os.path.exists(ds1_clip_fn):
                os.symlink(os.path.abspath(fn1), ds1_clip_fn)
            if not os.path.exists(ds2_clip_fn):
                os.symlink(os.path.abspath(fn2), ds2_clip_fn)
            align = 'None'

        #Mask support - limit correlation only to rock/ice surfaces, no water/veg
        #This masks input images - guarantee we won't waste time correlating over vegetation
        #TODO: Add support to load arbitrary raster or shp mask
        if args.mask_input:
            ds1_masked_fn = os.path.splitext(ds1_clip_fn)[0]+'_masked.tif'
            ds2_masked_fn = os.path.splitext(ds2_clip_fn)[0]+'_masked.tif'

            if not os.path.exists(ds1_masked_fn) or not os.path.exists(ds2_masked_fn):
                #Load NLCD or bareground mask
                from demcoreg.dem_mask import get_lulc_mask

                ds1_clip = iolib.fn_getds(ds1_clip_fn)
                lulc_mask_fn = os.path.join(outdir, 'lulc_mask.tif')
                #if not os.path.exists(nlcd_mask_fn):
                lulc_mask = get_lulc_mask(ds1_clip, mask_glaciers=False, filter='not_forest')
                iolib.writeGTiff(lulc_mask, lulc_mask_fn, ds1_clip) 
                ds1_clip = None

                #Now apply to original images 
                #This could be problematic for huge inputs, see apply_mask.py
                #lulc_mask = lulc_mask.astype(int)
                for fn in (ds1_clip_fn, ds2_clip_fn):
                    ds = iolib.fn_getds(fn)
                    a = iolib.ds_getma(ds)
                    a = np.ma.array(a, mask=~(lulc_mask))
                    if a.count() > 0:
                        out_fn = os.path.splitext(fn)[0]+'_masked.tif'
                        iolib.writeGTiff(a,out_fn,ds)
                        a = None
                    else:
                        sys.exit("No unmasked pixels over bare earth")
            ds1_clip_fn = ds1_masked_fn
            ds2_clip_fn = ds2_masked_fn
    else:
        ds1_clip_fn = fn1
        ds2_clip_fn = fn2
        #Now let user specify alignment methods as option - don't hardcode
        #align = 'Homography'
        #align = 'AffineEpipolar'
    ds1 = None
    ds2 = None

    #Should have extra kwargs option here
    stereo_opt = get_stereo_opt(threads=threads, kernel=kernel, timeout=timeout, \
            erode=erode, spr=spr, align=align)
    
    #Stereo arguments
    #Latest version of ASP should accept tif without camera models
    #stereo_args = [ds1_clip_fn, ds2_clip_fn, outprefix]
    #Nope - still need to provide dummy camera models, and they must be unique files
    #Use the dummy.tsai file bundled in the vmap repo
    dummy_tsai = os.path.join(os.path.split(os.path.realpath(__file__))[0], 'dummy.tsai')
    dummy_tsai2 = os.path.splitext(dummy_tsai)[0]+'2.tsai'
    if not os.path.exists(dummy_tsai2):
        os.symlink(dummy_tsai, dummy_tsai2)
    stereo_args = [ds1_clip_fn, ds2_clip_fn, dummy_tsai, dummy_tsai2, outprefix]

    #Run stereo_pprc
    if not os.path.exists(outprefix+'-R_sub.tif'):
        run_cmd('stereo_pprc', stereo_opt+stereo_args, msg='0: Preprocessing')
        #Copy proj info to outputs, this should happen automatically now?
        for ext in ('L', 'R', 'L_sub', 'R_sub', 'lMask', 'rMask', 'lMask_sub', 'rMask_sub'):
            geolib.copyproj(ds1_clip_fn, '%s-%s.tif' % (outprefix,ext))

    #Prepare seeding for stereo_corr
    #TODO: these are untested after refactoring
    if not os.path.exists(outprefix+'-D_sub.tif'):
        #Don't need to do anything for default seed-mode 1
        if seedmode == 'sparse_disp':
            #Sparse correlation of full-res images
            stereo_opt.extend(['--corr-seed-mode', '3'])
            sparse_disp_opt = []
            sparse_disp_opt.extend(['--Debug', '--coarse', '512', '--fine', '256', '--no_epipolar_fltr']) 
            sparse_disp_opt.extend(['-P', str(threads)])
            sparse_disp_args = [outprefix+'-L.tif', outprefix+'-R.tif', outprefix]
            run_cmd('sparse_disp', sparse_disp_opt+sparse_disp_args, msg='0.5: D_sub generation')
        elif seedmode == 'existing_velocity':
            #User-input low-res velocity maps for seeding
            #TODO: Add functions that fetch best available velocities for Ant/GrIS or user-defined low-res velocities
            #Automatically query GoLive velocities here
            vx_fn = args.vx_fn 
            vy_fn = args.vy_fn 
            #Check for existence

            #HMA seeding (note: these hardcoded paths override the user-specified vx_fn/vy_fn)
            vdir = '/nobackup/deshean/rpcdem/hma/velocity_jpl_amaury_2013-2015'
            vx_fn = os.path.join(vdir, 'PKH_WRS2_B8_2013_2015_snr5_n1_r170_res12.x_vel.TIF')
            vy_fn = os.path.join(vdir, 'PKH_WRS2_B8_2013_2015_snr5_n1_r170_res12.y_vel.TIF')

            if os.path.exists(vx_fn) and os.path.exists(vy_fn):
                ds1_clip = iolib.fn_getds(ds1_clip_fn)
                ds1_res = geolib.get_res(ds1_clip, square=True)[0]

                #Compute L_sub res - use this for output dimensions
                L_sub_fn = outprefix+'-L_sub.tif' 
                L_sub_ds = gdal.Open(L_sub_fn)
                L_sub_x_scale = float(ds1_clip.RasterXSize) / L_sub_ds.RasterXSize
                L_sub_y_scale = float(ds1_clip.RasterYSize) / L_sub_ds.RasterYSize
                L_sub_scale = np.max([L_sub_x_scale, L_sub_y_scale])
                L_sub_res = ds1_res * L_sub_scale

                #Since we are likely upsampling here, use cubicspline
                vx_ds_clip, vy_ds_clip = warplib.memwarp_multi_fn([vx_fn, vy_fn], extent=ds1_clip, \
                        t_srs=ds1_clip, res=L_sub_res, r='cubicspline')

                ds1_clip = None

                #Get vx and vy arrays
                vx = iolib.ds_getma(vx_ds_clip)
                vy = iolib.ds_getma(vy_ds_clip)

                #Determine time interval between inputs
                #Used to scale the known low-res velocities
                t_factor = get_t_factor_fn(ds1_clip_fn, ds2_clip_fn, ds=vx_ds_clip)

                if t_factor is not None:
                    #Compute expected offset in scaled pixels 
                    dx = (vx*t_factor)/L_sub_res
                    dy = (vy*t_factor)/L_sub_res
                    #Note: Joughin and Rignot's values are positive y up!
                    #ASP is positive y down, so need to multiply these values by -1
                    #dy = -(vy*t_factor)/L_sub_res

                    #Should smooth/fill dx and dy

                    #If absolute search window is only 30x30
                    #Don't seed, just use fixed search window 
                    #search_window_area_thresh = 900
                    search_window_area_thresh = 0 
                    search_window = np.array([dx.min(), dy.min(), dx.max(), dy.max()])
                    dx_p = calcperc(dx, perc=(0.5, 99.5))
                    dy_p = calcperc(dy, perc=(0.5, 99.5))
                    search_window = np.array([dx_p[0], dy_p[0], dx_p[1], dy_p[1]])
                    search_window_area = (search_window[2]-search_window[0]) * (search_window[3]-search_window[1])
                    if search_window_area < search_window_area_thresh:
                        stereo_opt.extend(['--corr-seed-mode', '0'])
                        stereo_opt.append('--corr-search')
                        stereo_opt.extend([str(x) for x in search_window])
                        #pad_perc=0.1
                        #stereo_opt.extend(['--corr-sub-seed-percent', str(pad_perc)]
                    #Otherwise, generate a D_sub map from low-res velocity
                    else:
                        stereo_opt.extend(['--corr-seed-mode', '3'])
                        #This is relative to the D_sub scaled disparities
                        d_sub_fn = L_sub_fn.split('-L_sub')[0]+'-D_sub.tif' 
                        gen_d_sub(d_sub_fn, dx, dy)

    #If the above didn't generate a D_sub.tif for seeding, run stereo_corr to generate a low-res D_sub.tif
    if not os.path.exists(outprefix+'-D_sub.tif'):
        newopt = ['--compute-low-res-disparity-only',]
        run_cmd('stereo_corr', newopt+stereo_opt+stereo_args, msg='1.1: Low-res Correlation')
    #Copy projection info to D_sub
    geolib.copyproj(outprefix+'-L_sub.tif', outprefix+'-D_sub.tif')
      
    #Mask D_sub to limit correlation over bare earth surfaces
    #This _should_ be a better approach than masking input images, but stereo_corr doesn't honor D_sub
    #Still need to mask input images before stereo_pprc
    #Left this in here for reference, or if this changes in ASP
    if False:
        D_sub_ds = gdal.Open(outprefix+'-D_sub.tif', gdal.GA_Update)

        #Mask support - limit correlation only to rock/ice surfaces, no water/veg
        from demcoreg.dem_mask import get_nlcd, mask_nlcd
        nlcd_fn = get_nlcd()
        nlcd_ds = warplib.diskwarp_multi_fn([nlcd_fn,], extent=D_sub_ds, res=D_sub_ds, t_srs=D_sub_ds, r='near', outdir=outdir)[0]
        #validmask = mask_nlcd(nlcd_ds, valid='rock+ice')
        validmask = mask_nlcd(nlcd_ds, valid='not_forest', mask_glaciers=False)
        nlcd_mask_fn = os.path.join(outdir, 'nlcd_validmask.tif')
        iolib.writeGTiff(validmask, nlcd_mask_fn, nlcd_ds) 

        #Now apply to D_sub (band 3 is valid mask)
        #validmask = validmask.astype(int)
        for b in (1,2,3):
            dsub = iolib.ds_getma(D_sub_ds, b)
            dsub = np.ma.array(dsub, mask=~(validmask))
            D_sub_ds.GetRasterBand(b).WriteArray(dsub.filled())
        D_sub_ds = None

    #OK, finally run stereo_corr full-res integer correlation with appropriate seeding
    if not os.path.exists(outprefix+'-D.tif'):
        run_cmd('stereo_corr', stereo_opt+stereo_args, msg='1: Correlation')
        geolib.copyproj(ds1_clip_fn, outprefix+'-D.tif')

    #Run stereo_rfne
    if spr > 0:
        if not os.path.exists(outprefix+'-RD.tif'):
            run_cmd('stereo_rfne', stereo_opt+stereo_args, msg='2: Refinement')
            geolib.copyproj(ds1_clip_fn, outprefix+'-RD.tif')
        d_fn = make_ln(outdir, outprefix, '-RD.tif')
    else:
        ln_fn = outprefix+'-RD.tif'
        if os.path.lexists(ln_fn):
            os.remove(ln_fn)
        os.symlink(os.path.split(outprefix)[1]+'-D.tif', ln_fn)

    #Run stereo_fltr
    if not os.path.exists(outprefix+'-F.tif'):
        run_cmd('stereo_fltr', stereo_opt+stereo_args, msg='3: Filtering')
        geolib.copyproj(ds1_clip_fn, outprefix+'-F.tif')

    d_fn = make_ln(outdir, outprefix, '-F.tif')

    if smoothF and not os.path.exists(outprefix+'-F_smooth.tif'):
        print('Smoothing F.tif')
        from pygeotools.lib import filtlib 
        #Fill holes and smooth F
        F_fill_fn = outprefix+'-F_smooth.tif'
        F_ds = gdal.Open(outprefix+'-F.tif', gdal.GA_ReadOnly)
        #import dem_downsample_fill
        #F_fill_ds = dem_downsample_fill.gdalfill_ds(F_fill_ds)
        print('Creating F_smooth.tif')
        F_fill_ds = iolib.gtif_drv.CreateCopy(F_fill_fn, F_ds, 0, options=iolib.gdal_opt)
        F_ds = None
        for n in (1, 2):
            print('Smoothing band %i' % n)
            b = F_fill_ds.GetRasterBand(n)
            b_fill_bma = iolib.b_getma(b)
            #b_fill_bma = iolib.b_getma(dem_downsample_fill.gdalfill(b))
            #Filter extreme values (careful, could lose areas of valid data with fastest v)
            #b_fill_bma = filtlib.perc_fltr(b_fill_bma, perc=(0.01, 99.99))
            #These filters remove extreme values and fill data gaps
            #b_fill_bma = filtlib.median_fltr_skimage(b_fill_bma, radius=7, erode=0)
            #b_fill_bma = filtlib.median_fltr(b_fill_bma, fsize=7, origmask=True)
            #Gaussian filter
            b_fill_bma = filtlib.gauss_fltr_astropy(b_fill_bma, size=9)
            b.WriteArray(b_fill_bma)
        F_fill_ds = None
        d_fn = make_ln(outdir, outprefix, '-F_smooth.tif')

    print('\n%s' % datetime.now())
    print('%s UTC\n' % datetime.utcnow())

    #If time interval is specified, convert pixel displacements to rates
    if args.dt != 'none':
        #Check if vm.tif already exists
        #Should probably just overwrite by default
        #if os.path.exists(os.path.splitext(d_fn)[0]+'_vm.tif'):
        #    print("\nFound existing velocity magnitude map!\n"
        #else:
        #Generate output velocity products and figure
        #Requires that vmap repo is in PATH
        cmd = ['disp2v.py', d_fn]
        #Note: this will attempt to automatically determine control surfaces
        #disp2v.py will accept arbitrary mask, could pass through here
        if args.remove_offsets:
            cmd.append('-remove_offsets')
        cmd.extend(['-dt', args.dt])
        print("Converting disparities to velocities")
        print(cmd)
        subprocess.call(cmd)
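
A standalone sketch of the velocity-seeding arithmetic in the 'existing_velocity' branch above: known velocities (m/yr) times the time separation (yr), divided by the L_sub resolution (m/px), give expected disparities in L_sub pixels. All values are hypothetical, and np.percentile stands in for the calcperc helper.

import numpy as np

vx = np.array([[120.0, 80.0], [60.0, 20.0]])   #known x velocity, m/yr
vy = np.array([[-30.0, -10.0], [-5.0, 0.0]])   #known y velocity, m/yr
t_factor = 0.75     #time separation between the image pair, yr
L_sub_res = 40.0    #resolution of the low-res L_sub image, m/px

#Expected disparities in L_sub pixels
dx = (vx * t_factor) / L_sub_res
dy = (vy * t_factor) / L_sub_res
#Percentile-clipped search window (xmin, ymin, xmax, ymax), as in the snippet above
search_window = [np.percentile(dx, 0.5), np.percentile(dy, 0.5),
                 np.percentile(dx, 99.5), np.percentile(dy, 99.5)]
print(search_window)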
Example #19
def warp(src_ds, res=None, extent=None, t_srs=None, r='cubic', driver=mem_drv, dst_fn=None, dst_ndv=None, verbose=True):
    """Warp an input dataset with predetermined arguments specifying output res/extent/srs

    This is the function that actually calls gdal.ReprojectImage
    
    Parameters
    ----------
    src_ds : gdal.Dataset object
        Dataset to be warped
    res : float
        Desired output resolution
    extent : list of float
        Desired output extent in t_srs coordinate system
    t_srs : osr.SpatialReference()
        Desired output spatial reference
    r : str
        Desired resampling algorithm
    driver : GDAL Driver to use for warp 
        Either MEM or GTiff
    dst_fn : str
        Output filename (for disk warp)
    dst_ndv : float
        Desired output NoData Value

    Returns
    -------
    dst_ds : gdal.Dataset object
        Warped dataset (either in memory or on disk)

    """
    src_srs = geolib.get_ds_srs(src_ds)
    
    if t_srs is None:
        t_srs = geolib.get_ds_srs(src_ds)
    
    src_gt = src_ds.GetGeoTransform()
    #Note: get_res returns [x_res, y_res]
    #Could just use gt here and average x_res and y_res
    src_res = geolib.get_res(src_ds, t_srs=t_srs, square=True)[0]

    if res is None:
        res = src_res

    if extent is None:
        extent = geolib.ds_geom_extent(src_ds, t_srs=t_srs)
    
    #Note: GDAL Lanczos creates block artifacts
    #Wait for gdalwarp to support gaussian resampling
    #Want to use Lanczos for downsampling
    #if src_res < res:
    #    gra = gdal.GRA_Lanczos
    #See http://blog.codinghorror.com/better-image-resizing/
    # Suggests cubic for downsampling, bilinear for upsampling
    #    gra = gdal.GRA_Cubic
    #Cubic for upsampling
    #elif src_res >= res:
    #    gra = gdal.GRA_Bilinear

    gra = parse_rs_alg(r)

    #At this point, the resolution and extent values must be float
    #Extent must be list
    res = float(res)
    extent = [float(i) for i in extent]

    #Might want to move this to memwarp_multi, keep memwarp basic w/ gdal.GRA types

    #Create progress function
    prog_func = None
    if verbose:
        prog_func = gdal.TermProgress
    
    if dst_fn is None:
        #This is a dummy fn if only in mem, but can be accessed later via GetFileList()
        #Actually, no, it doesn't look like the filename survives
        dst_fn = ''
    
    #Compute output image dimensions
    dst_nl = int(round((extent[3] - extent[1])/res))
    dst_ns = int(round((extent[2] - extent[0])/res))
    #dst_nl = int(math.ceil((extent[3] - extent[1])/res))
    #dst_ns = int(math.ceil((extent[2] - extent[0])/res))
    #dst_nl = int(math.floor((extent[3] - extent[1])/res))
    #dst_ns = int(math.floor((extent[2] - extent[0])/res))
    if verbose:
        print('nl: %i ns: %i res: %0.3f' % (dst_nl, dst_ns, res))
    #Create output dataset
    src_b = src_ds.GetRasterBand(1)
    src_dt = src_b.DataType
    src_nl = src_ds.RasterYSize
    src_ns = src_ds.RasterXSize

    dst_ds = driver.Create(dst_fn, dst_ns, dst_nl, src_ds.RasterCount, src_dt) 

    dst_ds.SetProjection(t_srs.ExportToWkt())
    #Might be an issue to use src_gt rotation terms here with arbitrary extent/res
    dst_gt = [extent[0], res, src_gt[2], extent[3], src_gt[4], -res]
    dst_ds.SetGeoTransform(dst_gt)
   
    #This will smooth the input before downsampling to prevent aliasing, fill gaps
    #Pretty inefficient, as we need to create another intermediate dataset
    gauss = False 

    for n in range(1, src_ds.RasterCount+1):
        if dst_ndv is None:
            src_b = src_ds.GetRasterBand(n)
            src_ndv = iolib.get_ndv_b(src_b)
            dst_ndv = src_ndv
        b = dst_ds.GetRasterBand(n)
        b.SetNoDataValue(dst_ndv)
        b.Fill(dst_ndv)

        if gauss:
            from pygeotools.lib import filtlib
            #src_a = src_b.GetVirtualMemArray()
            #Compute resampling ratio to determine filter window size
            res_ratio = float(res)/src_res
            if verbose:
                print("Resampling factor: %0.3f" % res_ratio)
            #Might be more efficient to do iterative gauss filter with size 3, rather than larger windows
            f_size = math.floor(res_ratio/2.)*2+1
            #This is conservative to avoid filling holes with noise
            #f_size = math.floor(res_ratio/2.)*2-1
            if f_size <= 1:
                continue

            if verbose:
                print("Smoothing window size: %i" % f_size)
            #Create temp dataset to store filtered array - avoid overwriting original
            temp_ds = driver.Create('', src_ns, src_nl, src_ds.RasterCount, src_dt) 
            temp_ds.SetProjection(src_srs.ExportToWkt())
            temp_ds.SetGeoTransform(src_gt)
            temp_b = temp_ds.GetRasterBand(n)
            temp_b.SetNoDataValue(dst_ndv)
            temp_b.Fill(dst_ndv)

            src_a = iolib.b_getma(src_b)
            src_a = filtlib.gauss_fltr_astropy(src_a, size=f_size)
            #Want to run with maskfill, so only fills gaps, without expanding isolated points
            temp_b.WriteArray(src_a)
            src_ds = temp_ds
            
            #In theory, NN should be fine since we already smoothed.  In practice, cubic still provides slightly better results
            #gra = gdal.GRA_NearestNeighbour
    
    """
    if not verbose:
        #Suppress GDAL progress bar
        orig_stdout = sys.stdout
        sys.stdout = open(os.devnull, 'w')
    """

    #Note: default maxerror=0.0, second 0.0 argument
    gdal.ReprojectImage(src_ds, dst_ds, src_srs.ExportToWkt(), t_srs.ExportToWkt(), gra, 0.0, 0.0, prog_func)

    """
    if not verbose:
        sys.stdout.close()
        sys.stdout = orig_stdout
    """

    #Note: this is now done in diskwarp
    #Write out to disk
    #if driver != mem_drv:
    #    dst_ds.FlushCache()

    #Return GDAL dataset object in memory
    return dst_ds
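
A worked example of the output-grid arithmetic in warp() above: image dimensions are rounded from the extent and resolution, and the geotransform anchors the upper-left corner with a negative y resolution. The values are hypothetical and the rotation terms are assumed zero.

#Hypothetical target: 30 m grid over extent [xmin, ymin, xmax, ymax]
extent = [500000.0, 4090000.0, 530000.0, 4120000.0]
res = 30.0
dst_ns = int(round((extent[2] - extent[0]) / res))   #samples (columns): 1000
dst_nl = int(round((extent[3] - extent[1]) / res))   #lines (rows): 1000
#GDAL geotransform: (ulx, x_res, x_rot, uly, y_rot, -y_res)
dst_gt = [extent[0], res, 0.0, extent[3], 0.0, -res]
print(dst_ns, dst_nl, dst_gt)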
Example #20
def bma_fig(fig,
            bma,
            cmap='cpt_rainbow',
            clim=None,
            clim_perc=(2, 98),
            bg=None,
            bg_perc=(2, 98),
            n_subplt=1,
            subplt=1,
            label=None,
            title=None,
            contour_int=None,
            contour_fn=None,
            alpha=0.5,
            ticks=False,
            scalebar=None,
            ds=None,
            shp=None,
            imshow_kwargs={'interpolation': 'nearest'},
            cbar_kwargs={'orientation': 'vertical'},
            **kwargs):
    #We don't use the kwargs; they're just here to simplify parsing in main

    if clim is None:
        clim = pltlib.get_clim(bma, clim_perc=clim_perc)

    print("Colorbar limits: %0.3f %0.3f" % (clim[0], clim[1]))

    #Link all subplots for zoom/pan
    sharex = sharey = None
    if len(fig.get_axes()) > 0:
        sharex = sharey = fig.get_axes()[0]

    #Hack to catch situations with only 1 subplot, but a subplot number > 1
    if n_subplt == 1:
        subplt = 1

    #One row, multiple columns
    ax = fig.add_subplot(1, n_subplt, subplt, sharex=sharex, sharey=sharey)
    #This occupies the full figure
    #ax = fig.add_axes([0., 0., 1., 1., ])

    #ax.patch.set_facecolor('black')
    ax.patch.set_facecolor('white')

    #Set appropriate nodata value color
    cmap_name = cmap
    cmap = pltlib.cmap_setndv(cmap_name)

    #ax.set_title("Band %i" % subplt, fontsize=10)
    if title is not None:
        ax.set_title(title)

    #If a background image is provided, plot it first
    if bg is not None:
        #Note, alpha=1 is opaque, 0 completely transparent
        #alpha = 0.6
        bg_perc = (4, 96)
        bg_alpha = 1.0
        #bg_clim = malib.calcperc(bg, bg_perc)
        bg_clim = (1, 255)
        bg_cmap_name = 'gray'
        bg_cmap = pltlib.cmap_setndv(bg_cmap_name, cmap_name)
        #bg_cmap = plt.get_cmap(bg_cmap_name)
        #if 'inferno' in cmap_name:
        #    bg_cmap.set_bad('0.5', alpha=1)
        #else:
        #    bg_cmap.set_bad('k', alpha=1)
        #Set the overlay bad values to completely transparent, otherwise darkens the bg
        cmap.set_bad(alpha=0)
        bgplot = ax.imshow(bg, cmap=bg_cmap, clim=bg_clim, alpha=bg_alpha)
        imgplot = ax.imshow(bma,
                            alpha=alpha,
                            cmap=cmap,
                            clim=clim,
                            **imshow_kwargs)
    else:
        imgplot = ax.imshow(bma, cmap=cmap, clim=clim, **imshow_kwargs)

    gt = None
    if ds is not None:
        gt = np.array(ds.GetGeoTransform())
        gt_scale_factor = min(
            np.array([ds.RasterYSize, ds.RasterXSize]) /
            np.array(bma.shape, dtype=float))
        gt[1] *= gt_scale_factor
        gt[5] *= gt_scale_factor
        ds_srs = geolib.get_ds_srs(ds)
        if ticks:
            scale_ticks(ax, ds)
        else:
            pltlib.hide_ticks(ax)
        xres = geolib.get_res(ds)[0]
    else:
        pltlib.hide_ticks(ax)
    #This forces the black line outlining the image subplot to snap to the actual image dimensions
    #Deprecated in matplotlib 2.2
    #ax.set_adjustable('box-forced')

    if cbar_kwargs:
        #Should set the format based on dtype of input data
        #cbar_kwargs['format'] = '%i'
        #cbar_kwargs['format'] = '%0.1f'
        #cbar_kwargs['orientation'] = 'horizontal'

        #Determine whether we need to add extend triangles to colorbar
        cbar_kwargs['extend'] = pltlib.get_cbar_extend(bma, clim)

        #Add the colorbar to the axes
        cbar = pltlib.add_cbar(ax,
                               imgplot,
                               label=label,
                               cbar_kwargs=cbar_kwargs)

    #Plot contours every contour_int interval and update colorbar appropriately
    if contour_int is not None:
        if contour_fn is not None:
            contour_bma = iolib.fn_getma(contour_fn)
            contour_bma_clim = malib.calcperc(contour_bma)
        else:
            contour_bma = bma
            contour_bma_clim = clim

        #PIG bed ridge contours
        #bma_clim = (-1300, -300)
        #Jak front shear margin contours
        #bma_clim = (2000, 4000)
        contour_bma_clim = (100, 250)
        cstart = int(np.floor(contour_bma_clim[0] / contour_int)) * contour_int
        cend = int(np.ceil(contour_bma_clim[1] / contour_int)) * contour_int

        #Turn off dashed negative (beds are below sea level)
        #matplotlib.rcParams['contour.negative_linestyle'] = 'solid'

        clvl = np.arange(cstart, cend + 1, contour_int)
        contour_prop = {
            'levels': clvl,
            'linestyles': '-',
            'linewidths': 0.5,
            'alpha': 1.0
        }
        #contours = ax.contour(contour_bma, colors='k', **contour_prop)
        #contour_cmap = 'gray'
        contour_cmap = 'gray_r'
        #This prevents white contours
        contour_cmap_clim = (0, contour_bma_clim[-1])
        contours = ax.contour(contour_bma, cmap=contour_cmap, vmin=contour_cmap_clim[0], \
                vmax=contour_cmap_clim[-1], **contour_prop)

        #Add labels
        ax.clabel(contours,
                  inline=True,
                  inline_spacing=0,
                  fontsize=4,
                  fmt='%i')

        #Update the cbar with contour locations
        #cbar.add_lines(contours)
        #cbar.set_ticks(contours.levels)

    #Plot shape overlay, moved code to pltlib
    if shp is not None:
        pltlib.shp_overlay(ax, ds, shp, gt=gt, color='k')

    if scalebar:
        scale_ticks(ax, ds)
        sb_loc = pltlib.best_scalebar_location(bma)
        #Force scalebar position
        #sb_loc = 'lower right'
        pltlib.add_scalebar(ax, xres, location=sb_loc)
        if not ticks:
            pltlib.hide_ticks(ax)

    #Set up interactive display
    global gbma
    gbma = bma
    global ggt
    ggt = gt

    #Clicking on a subplot will make it active for z-coordinate display
    fig.canvas.mpl_connect('button_press_event', onclick)
    fig.canvas.mpl_connect('axes_enter_event', enter_axis)

    #Add support for interactive z-value display
    ax.format_coord = format_coord
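
A small sketch of the contour-level snapping used above: the limits are expanded outward to multiples of the contour interval so the first and last contours fall on round values. The numbers are hypothetical.

import numpy as np

contour_int = 50
contour_bma_clim = (120.0, 487.0)   #hypothetical data limits
cstart = int(np.floor(contour_bma_clim[0] / contour_int)) * contour_int   #100
cend = int(np.ceil(contour_bma_clim[1] / contour_int)) * contour_int      #500
clvl = np.arange(cstart, cend + 1, contour_int)   #[100, 150, ..., 500]
print(clvl)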
Example #21
def warp_multi(src_ds_list, res='first', extent='intersection', t_srs='first', r='cubic', warptype=memwarp, outdir=None, dst_ndv=None, verbose=True, debug=False):
    """This parses and checks inputs, then calls desired warp function with appropriate arguments for each input ds
    
    Parameters
    ----------
    src_ds_list : list of gdal.Dataset objects
        List of original datasets to be warped
    res : arbitrary type
        Desired output resolution
    extent : arbitrary type
        Desired output extent
    t_srs : arbitrary type
        Desired output spatial reference
    r : str
        Desired resampling algorithm
    warptype : function
        Desired warp type (write to memory or disk)
    outdir : str
        Desired output directory (for disk warp)
    dst_ndv : float
        Desired output NoData Value
    verbose : bool 
        Print warp parameters
    debug : bool 
        Print extra information for debugging purposes

    Returns
    -------
    out_ds_list : list of gdal.Dataset objects
        List of warped datasets (either in memory or on disk)
    """
    #Type cast arguments as str for evaluation
    #Avoid path errors
    #res = str(res)
    #extent = str(extent)
    #t_srs = str(t_srs)

    #Parse the input
    t_srs = parse_srs(t_srs, src_ds_list)
    res = parse_res(res, src_ds_list, t_srs)
    extent = parse_extent(extent, src_ds_list, t_srs)

    if verbose:
        print("\nWarping all inputs to the following:")
        print("Resolution: %s" % res)
        print("Extent: %s" % str(extent))
        print("Projection: '%s'" % t_srs.ExportToProj4())
        print("Resampling alg: %s\n" % r)  

    out_ds_list = []
    for i, ds in enumerate(src_ds_list):
        fn_list = ds.GetFileList()
        fn = '[memory]'
        if fn_list is not None:
            fn = fn_list[0]
        if verbose:
            print("%i of %i: %s" % (i+1, len(src_ds_list), fn))

        #If input srs are different, must warp
        ds_t_srs = geolib.get_ds_srs(ds)
        srscheck = bool(t_srs.IsSame(ds_t_srs))
       
        if debug:
            print('\n%s' % ds_t_srs.ExportToWkt())
            print('%s\n' % t_srs.ExportToWkt())
            print('srscheck: %s\n' % srscheck)

        rescheck = False
        extentcheck = False

        #if srscheck:
        #Extract info from ds to see if warp is necessary
        ds_res = geolib.get_res(ds, square=True)[0]
        ds_extent = geolib.ds_extent(ds)

        #Note: these checks are necessary to handle rounding and precision issues
        #Round extent and res to nearest mm
        precision = 1E-3
        #Or if t_srs has units of degrees
        if ds_t_srs.IsGeographic():
            precision = 1E-8

        rescheck = (res is None) or geolib.res_compare(res, ds_res, precision=precision)
        extentcheck = (extent is None) or geolib.extent_compare(extent, ds_extent, precision=precision)

        if debug:
            print('\n%s, %s\n' % (ds_res, res)) 
            print('%s' % ds_extent)
            print('%s\n' % extent) 
            print('rescheck: %s' % rescheck)
            print('extentcheck: %s\n' % extentcheck)

        #If the ds passes all three, it is identical to desired output, short circuit
        if rescheck and extentcheck and srscheck:
            out_ds_list.append(ds)
        else:
            dst_ds = warptype(ds, res, extent, t_srs, r, outdir, dst_ndv=dst_ndv, verbose=verbose)
            out_ds_list.append(dst_ds)

    return out_ds_list
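
A minimal sketch of the short-circuit test in warp_multi() above, with simple stand-ins for geolib.res_compare and geolib.extent_compare (the actual helpers may differ): values that agree to within the stated precision are treated as identical, so the dataset is passed through unwarped.

def res_compare(res1, res2, precision=1E-3):
    #True if the two resolutions agree to within the stated precision
    return abs(res1 - res2) < precision

def extent_compare(e1, e2, precision=1E-3):
    #True if all four extent coordinates agree to within the stated precision
    return all(abs(a - b) < precision for a, b in zip(e1, e2))

#Hypothetical: a resampled output that differs only by floating-point noise
print(res_compare(30.0, 30.0000000001))                        #True
print(extent_compare([0, 0, 90, 90], [0, 0, 90, 90.0000001]))  #True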
Example #22
def main():
    parser = getparser()
    args = parser.parse_args()

    t_unit = args.dt
    plot = args.plot
    remove_offsets = args.remove_offsets
    mask_fn = args.mask_fn
    if mask_fn is not None:
        remove_offsets = True

    #Input is 3-band disparity map, extract bands directly
    src_fn = args.disp_fn
    if not iolib.fn_check(src_fn):
        sys.exit("Unable to locate input file: %s" % src_fn)

    src_ds = iolib.fn_getds(src_fn)
    if src_ds.RasterCount != 3:
        sys.exit("Input file must be ASP disparity map (3 bands: x, y, mask)")
    #Extract pixel resolution
    h_res, v_res = geolib.get_res(src_ds)

    #Horizontal scale factor
    #If running on disparity_view output (gdal_translate -outsize 5% 5% F.tif F_5.tif)
    #h_res /= 20
    #v_res /= 20

    #Load horizontal and vertical disparities
    h = iolib.ds_getma(src_ds, bnum=1)
    v = iolib.ds_getma(src_ds, bnum=2)

    #ASP output has northward motion as negative values in band 2
    v *= -1

    t1, t2 = timelib.fn_getdatetime_list(src_fn)
    dt = t2 - t1
    #Default t_factor is in 1/years
    t_factor = timelib.get_t_factor(t1, t2)

    #Input timestamp arrays if inputs are mosaics
    if False:
        t1_fn = ''
        t2_fn = ''
        if os.path.exists(t1_fn) and os.path.exists(t2_fn):
            t_factor = timelib.get_t_factor_fn(t1_fn, t2_fn)
        if t_factor is None:
            sys.exit("Unable to determine input timestamps")

    if t_unit == 'day':
        t_factor *= 365.25

    print("Input dates:")
    print(t1)
    print(t2)
    print(dt)
    print(t_factor, t_unit)

    #Scale values for polar stereographic distortion
    srs = geolib.get_ds_srs(src_ds)
    proj_scale_factor = 1.0
    #Want to scale to get correct distances for polar stereographic
    if srs.IsSame(geolib.nps_srs) or srs.IsSame(geolib.sps_srs):
        proj_scale_factor = geolib.scale_ps_ds(src_ds)

    #Convert disparity values in pixels to m/t_unit
    h_myr = h * h_res * proj_scale_factor / t_factor
    h = None
    v_myr = v * v_res * proj_scale_factor / t_factor
    v = None

    #Velocity Magnitude
    m = np.ma.sqrt(h_myr**2 + v_myr**2)
    print("Velocity Magnitude stats")
    malib.print_stats(m)

    #Remove x and y offsets over control surfaces
    offset_str = ''
    if remove_offsets:
        if mask_fn is None:
            from demcoreg.dem_mask import get_mask
            print(
                "\nUsing demcoreg to prepare mask of stable control surfaces\n"
            )
            #TODO: Accept mask_list as in demcoreg
            #mask_list = args.mask_list
            # for now keep it simple, limit to non-glacier surfaces
            mask_list = [
                'glaciers',
            ]
            mask = get_mask(src_ds, mask_list=mask_list, dem_fn=src_fn)
        else:
            print("\nWarping input raster mask")
            #This can be from previous dem_mask.py run (e.g. *rockmask.tif)
            mask_ds = warplib.memwarp_multi_fn([
                mask_fn,
            ],
                                               res=src_ds,
                                               extent=src_ds,
                                               t_srs=src_ds)[0]
            mask = iolib.ds_getma(mask_ds)
            #The default from ds_getma is a masked array, so need to isolate boolean mask
            #Assume input is 0 for masked, 1 for unmasked (valid control surface)
            mask = mask.filled().astype('bool')
            #This should work, as the *rockmask.py is 1 for unmasked, 0 for masked, with ndv=0
            #mask = np.ma.getmaskarray(mask)
            #Vector mask - untested
            if os.path.splitext(mask_fn)[1] == '.shp':
                mask = geolib.shp2array(mask_fn, src_ds)

        print("\nRemoving median x and y offset over static control surfaces")
        h_myr_count = h_myr.count()
        h_myr_static_count = np.ma.array(h_myr, mask=mask).count()
        h_myr_mad, h_myr_med = malib.mad(np.ma.array(h_myr, mask=mask),
                                         return_med=True)
        v_myr_mad, v_myr_med = malib.mad(np.ma.array(v_myr, mask=mask),
                                         return_med=True)

        print("Static pixel count: %i (%0.1f%%)" %
              (h_myr_static_count,
               100 * float(h_myr_static_count) / h_myr_count))
        print("median (+/-NMAD)")
        print("x velocity offset: %0.2f (+/-%0.2f) m/%s" %
              (h_myr_med, h_myr_mad, t_unit))
        print("y velocity offset: %0.2f (+/-%0.2f) m/%s" %
              (v_myr_med, v_myr_mad, t_unit))
        h_myr -= h_myr_med
        v_myr -= v_myr_med
        offset_str = '_offsetcorr_h%0.2f_v%0.2f' % (h_myr_med, v_myr_med)
        #Velocity Magnitude
        m = np.ma.sqrt(h_myr**2 + v_myr**2)
        print("Velocity Magnitude stats after correction")
        malib.print_stats(m)

    if plot:
        fig_fn = os.path.splitext(src_fn)[0] + '.png'
        label = 'Velocity (m/%s)' % t_unit
        f, ax = make_plot(m, fig_fn, label)
        plotvec(h_myr, v_myr)
        plt.tight_layout()
        plt.savefig(fig_fn,
                    dpi=300,
                    bbox_inches='tight',
                    pad_inches=0,
                    edgecolor='none')

    print("Writing out files")
    gt = src_ds.GetGeoTransform()
    proj = src_ds.GetProjection()
    dst_fn = os.path.splitext(src_fn)[0] + '_vm%s.tif' % offset_str
    iolib.writeGTiff(m, dst_fn, create=True, gt=gt, proj=proj)
    dst_fn = os.path.splitext(src_fn)[0] + '_vx%s.tif' % offset_str
    iolib.writeGTiff(h_myr, dst_fn, create=True, gt=gt, proj=proj)
    dst_fn = os.path.splitext(src_fn)[0] + '_vy%s.tif' % offset_str
    iolib.writeGTiff(v_myr, dst_fn, create=True, gt=gt, proj=proj)
    src_ds = None
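
A worked example of the pixels-to-velocity conversion above, with hypothetical numbers: disparities in pixels are scaled by the pixel size and divided by the time separation.

import numpy as np

h_px, v_px = 12.5, -4.0    #hypothetical disparities, px (ASP bands 1 and 2)
h_res, v_res = 15.0, 15.0  #pixel size, m
t_factor = 1.2             #time separation, yr
proj_scale_factor = 1.0    #non-polar projection, no distortion correction

#ASP stores northward motion as negative in band 2, so flip the sign first
v_px *= -1
h_myr = h_px * h_res * proj_scale_factor / t_factor   #156.2 m/yr
v_myr = v_px * v_res * proj_scale_factor / t_factor   #50.0 m/yr
m = np.sqrt(h_myr**2 + v_myr**2)                      #velocity magnitude, m/yr
print("vx: %0.1f, vy: %0.1f, vm: %0.1f m/yr" % (h_myr, v_myr, m))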
Example #23
def bma_fig(fig, bma, cmap='cpt_rainbow', clim=None, clim_perc=(2,98), bg=None, bg_perc=(2,98), n_subplt=1, subplt=1, label=None, title=None, cint=None, alpha=0.5, ticks=False, scalebar=None, ds=None, shp=None, imshow_kwargs={'interpolation':'nearest'}, cbar_kwargs={'extend':'both', 'orientation':'vertical', 'shrink':0.7, 'fraction':0.12, 'pad':0.02}, **kwargs):
    #We don't use the kwargs; they're just here to simplify parsing in main
    
    if clim is None:
        clim = malib.calcperc(bma, clim_perc)
        #Deal with masked cases
        if clim[0] == clim[1]:
            if clim[0] > bma.fill_value:
                clim = (bma.fill_value, clim[0])
            else:
                clim = (clim[0], bma.fill_value)
        print "Colorbar limits (%0.1f-%0.1f%%): %0.3f %0.3f" % (clim_perc[0], clim_perc[1], clim[0], clim[1])
    else:
        print "Colorbar limits: %0.3f %0.3f" % (clim[0], clim[1])

    #Link all subplots for zoom/pan
    sharex = sharey = None
    if len(fig.get_axes()) > 0:
        sharex = sharey = fig.get_axes()[0]

    #Hack to catch situations with only 1 subplot, but a subplot number > 1
    if n_subplt == 1:
        subplt = 1

    #One row, multiple columns
    ax = fig.add_subplot(1, n_subplt, subplt, sharex=sharex, sharey=sharey)
    #This occupies the full figure
    #ax = fig.add_axes([0., 0., 1., 1., ])

    #ax.patch.set_facecolor('black')
    ax.patch.set_facecolor('white')

    cmap_name = cmap
    cmap = plt.get_cmap(cmap_name)
    if 'inferno' in cmap_name:
        #Use a gray background
        cmap.set_bad('0.5', alpha=1)
    else:
        #This sets the nodata background to opaque black
        cmap.set_bad('k', alpha=1)
        #cmap.set_bad('w', alpha=1)

    #ax.set_title("Band %i" % subplt, fontsize=10)
    if title is not None:
        ax.set_title(title)

    #If a background image is provided, plot it first
    if bg is not None:
        #Note, 1 is opaque, 0 completely transparent
        #alpha = 0.6
        #bg_perc = (4,96)
        bg_perc = (0.05, 99.95)
        #bg_perc = (1, 99)
        bg_alpha = 1.0
        #bg_alpha = 0.5 
        bg_clim = malib.calcperc(bg, bg_perc)
        bg_cmap_name = 'gray'
        bg_cmap = plt.get_cmap(bg_cmap_name)
        if 'inferno' in cmap_name:
            bg_cmap.set_bad('0.5', alpha=1)
        else:
            bg_cmap.set_bad('k', alpha=1)
        #Set the overlay bad values to completely transparent, otherwise darkens the bg
        cmap.set_bad(alpha=0)
        bgplot = ax.imshow(bg, cmap=bg_cmap, clim=bg_clim, alpha=bg_alpha)
        imgplot = ax.imshow(bma, alpha=alpha, cmap=cmap, clim=clim, **imshow_kwargs)
    else:
        imgplot = ax.imshow(bma, cmap=cmap, clim=clim, **imshow_kwargs)
 
    gt = None
    if ds is not None:
        gt = np.array(ds.GetGeoTransform())
        gt_scale_factor = min(np.array([ds.RasterYSize, ds.RasterXSize])/np.array(bma.shape,dtype=float))
        gt[1] *= gt_scale_factor
        gt[5] *= gt_scale_factor
        ds_srs = geolib.get_ds_srs(ds)
        if ticks:
            scale_ticks(ax, ds)
        else:
            pltlib.hide_ticks(ax)
        xres = geolib.get_res(ds)[0]
    else:
        pltlib.hide_ticks(ax)
    #This forces the black line outlining the image subplot to snap to the actual image dimensions
    ax.set_adjustable('box-forced')

    cbar = True 
    if cbar:
        #Had to turn off the ax=ax for overlay to work
        #cbar = fig.colorbar(imgplot, ax=ax, extend='both', shrink=0.5) 
        #Should set the format based on dtype of input data 
        #cbar_kwargs['format'] = '%i'
        #cbar_kwargs['format'] = '%0.1f'
        #cbar_kwargs['orientation'] = 'horizontal'
        #cbar_kwargs['shrink'] = 0.8

        cbar = pltlib.add_cbar(ax, imgplot, label=label, cbar_kwargs=cbar_kwargs)
   
    #Plot contours every cint interval and update colorbar appropriately
    if cint is not None:
        #Note: bma_c (the contour source array) is assumed to be defined at module level
        if bma_c is not None:
            bma_clim = malib.calcperc(bma_c)
            #PIG bed ridge contours
            #bma_clim = (-1300, -300)
            #Jak front shear margin contours
            #bma_clim = (2000, 4000)
            cstart = int(np.floor(bma_clim[0] / cint)) * cint 
            cend = int(np.ceil(bma_clim[1] / cint)) * cint
        else:
            #cstart = int(np.floor(bma.min() / cint)) * cint 
            #cend = int(np.ceil(bma.max() / cint)) * cint
            cstart = int(np.floor(clim[0] / cint)) * cint 
            cend = int(np.ceil(clim[1] / cint)) * cint

        #Turn off dashed negative (beds are below sea level)
        #matplotlib.rcParams['contour.negative_linestyle'] = 'solid'

        clvl = np.arange(cstart, cend+1, cint)
        #contours = ax.contour(bma_c, colors='k', levels=clvl, alpha=0.5)
        contours = ax.contour(bma_c, cmap='gray', linestyles='--', levels=clvl, alpha=1.0)

        #Update the cbar with contour locations
        cbar.add_lines(contours)
        cbar.set_ticks(contours.levels)

    #Plot shape overlay, moved code to pltlib
    if shp is not None:
        pltlib.shp_overlay(ax, ds, shp, gt=gt)

    if scalebar:
        scale_ticks(ax, ds)
        pltlib.add_scalebar(ax, xres)
        if not ticks:
            pltlib.hide_ticks(ax)

    #imgplot.set_cmap(cmap)
    #imgplot.set_clim(clim)
  
    global gbma
    gbma = bma
    global ggt
    ggt = gt

    #Clicking on a subplot will make it active for z-coordinate display
    fig.canvas.mpl_connect('button_press_event', onclick)
    fig.canvas.mpl_connect('axes_enter_event', enter_axis)
    
    #Add support for interactive z-value display 
    ax.format_coord = format_coord
Example #24
def mb_calc(gf, z1_date=z1_date, z2_date=z2_date, verbose=verbose):
    #print("\n%i of %i: %s\n" % (n+1, len(glacfeat_list), gf.feat_fn))
    print(gf.feat_fn)

    #This should already be handled by earlier attribute filter, but RGI area could be wrong
    #24k shp has area in m^2, RGI in km^2
    #if gf.glac_area/1E6 < min_glac_area:
    if gf.glac_area < min_glac_area:
        if verbose:
            print("Glacier area below %0.1f km2 threshold" % min_glac_area)
        return None

    #Warp everything to common res/extent/proj
    ds_list = warplib.memwarp_multi_fn([z1_fn, z2_fn], res='min', \
            extent=gf.glac_geom_extent, t_srs=aea_srs, verbose=verbose)

    if site == 'conus':
        #Add prism datasets
        prism_fn_list = [prism_ppt_annual_fn, prism_tmean_annual_fn]
        prism_fn_list.extend([
            prism_ppt_summer_fn, prism_ppt_winter_fn, prism_tmean_summer_fn,
            prism_tmean_winter_fn
        ])
        ds_list.extend(warplib.memwarp_multi_fn(prism_fn_list, res=ds_list[0], \
                extent=gf.glac_geom_extent, t_srs=aea_srs, verbose=verbose))

    if site == 'hma':
        #Add debris cover datasets
        #Should tar this up, and extract only necessary file
        #Downloaded from: http://mountainhydrology.org/data-nature-2017/
        kra_nature_dir = '/nobackup/deshean/data/Kraaijenbrink_hma/regions/out'
        #This assumes that numbers are identical between RGI50 and RGI60
        debris_class_fn = os.path.join(
            kra_nature_dir, 'RGI50-%s/classification.tif' % gf.glacnum)
        debris_thick_fn = os.path.join(
            kra_nature_dir, 'RGI50-%s/debris-thickness-50cm.tif' % gf.glacnum)
        ice_thick_fn = os.path.join(kra_nature_dir,
                                    'RGI50-%s/ice-thickness.tif' % gf.glacnum)
        hma_fn_list = []
        if os.path.exists(debris_class_fn):
            hma_fn_list.append(debris_class_fn)
        if os.path.exists(debris_thick_fn):
            hma_fn_list.append(debris_thick_fn)
        if os.path.exists(ice_thick_fn):
            hma_fn_list.append(ice_thick_fn)
        if len(hma_fn_list) > 0:
            #Add velocity
            hma_fn_list.extend([vx_fn, vy_fn])
            ds_list.extend(warplib.memwarp_multi_fn(hma_fn_list, res=ds_list[0], \
                    extent=gf.glac_geom_extent, t_srs=aea_srs, verbose=verbose))

    #Check to see if z2 is empty, as z1 should be continuous
    gf.z2 = iolib.ds_getma(ds_list[1])
    if gf.z2.count() == 0:
        if verbose:
            print("No z2 pixels")
        return None

    glac_geom_mask = geolib.geom2mask(gf.glac_geom, ds_list[0])
    gf.z1 = np.ma.array(iolib.ds_getma(ds_list[0]), mask=glac_geom_mask)
    #Apply SRTM penetration correction
    if z1_srtm_penetration_corr:
        gf.z1 = srtm_corr(gf.z1)
    if z2_srtm_penetration_corr:
        gf.z2 = srtm_corr(gf.z2)
    gf.z2 = np.ma.array(gf.z2, mask=glac_geom_mask)
    gf.dz = gf.z2 - gf.z1
    if gf.dz.count() == 0:
        if verbose:
            print("No valid dz pixels")
        return None

    #Should add better filtering here
    #Elevation dependent abs. threshold filter?

    filter_outliers = True
    #Remove clearly bogus pixels
    if filter_outliers:
        bad_perc = (0.1, 99.9)
        #bad_perc = (1, 99)
        rangelim = malib.calcperc(gf.dz, bad_perc)
        gf.dz = np.ma.masked_outside(gf.dz, *rangelim)

    gf.res = geolib.get_res(ds_list[0])
    valid_area = gf.dz.count() * gf.res[0] * gf.res[1]
    valid_area_perc = valid_area / gf.glac_area
    if valid_area_perc < min_valid_area_perc:
        if verbose:
            print(
                "Not enough valid pixels. %0.1f%% percent of glacier polygon area"
                % (100 * valid_area_perc))
        return None

    #Filter dz - throw out abs differences >150 m

    #Compute dz, volume change, mass balance and stats
    gf.z1_stats = malib.get_stats(gf.z1)
    gf.z2_stats = malib.get_stats(gf.z2)
    z2_elev_med = gf.z2_stats[5]
    z2_elev_p16 = gf.z2_stats[11]
    z2_elev_p84 = gf.z2_stats[12]

    #Calculate stats for aspect and slope using z2
    #Requires GDAL 2.1+
    gf.z2_aspect = np.ma.array(geolib.gdaldem_mem_ds(ds_list[1],
                                                     processing='aspect',
                                                     returnma=True),
                               mask=glac_geom_mask)
    gf.z2_aspect_stats = malib.get_stats(gf.z2_aspect)
    z2_aspect_med = gf.z2_aspect_stats[5]
    gf.z2_slope = np.ma.array(geolib.gdaldem_mem_ds(ds_list[1],
                                                    processing='slope',
                                                    returnma=True),
                              mask=glac_geom_mask)
    gf.z2_slope_stats = malib.get_stats(gf.z2_slope)
    z2_slope_med = gf.z2_slope_stats[5]

    #Rasterize source dates
    if z1_date is None:
        z1_date = get_date_a(ds_list[0], z1_date_shp_lyr, glac_geom_mask,
                             z1_datefield)
        gf.t1 = z1_date.mean()
    else:
        gf.t1 = z1_date

    if z2_date is None:
        z2_date = get_date_a(ds_list[0], z2_date_shp_lyr, glac_geom_mask,
                             z2_datefield)
        #Attempt to use YYYYMMDD string
        #z2_dta = np.datetime64(z2_date.astype("S8").tolist())
        gf.t2 = z2_date.mean()
    else:
        gf.t2 = z2_date

    if isinstance(gf.t1, datetime):
        gf.t1 = timelib.dt2decyear(gf.t1)

    if isinstance(gf.t2, datetime):
        gf.t2 = timelib.dt2decyear(gf.t2)

    gf.t1 = float(gf.t1)
    gf.t2 = float(gf.t2)

    #Calculate dt grids
    #gf.dt = z2_date - z1_date
    #gf.dt = gf.dt.mean()
    #This should be decimal years
    gf.dt = gf.t2 - gf.t1
    #if isinstance(gf.dt, timedelta):
    #    gf.dt = gf.dt.total_seconds()/timelib.spy
    #Calculate dh/dt, in m/yr
    gf.dhdt = gf.dz / gf.dt
    gf.dhdt_stats = malib.get_stats(gf.dhdt)
    dhdt_mean = gf.dhdt_stats[3]
    dhdt_med = gf.dhdt_stats[5]

    #Density values relative to water (1000 kg/m^3)
    rho_i = 0.91  #ice
    rho_s = 0.50  #snow
    rho_f = 0.60  #firn

    #Huss (2013) recommends 850 +/- 60 kg/m^3 for geodetic volume-to-mass conversion
    rho_is = 0.85
    rho_sigma = 0.06

    #Could estimate ELA from hypsometry and a typical AAR
    #For now, assume ELA is the mean surface elevation
    gf.z1_ela = gf.z1_stats[3]
    gf.z2_ela = gf.z2_stats[3]
    #Note: in theory, the ELA should rise with mass loss
    #In practice, using the mean elevation over a fixed polygon, this ELA estimate drops as the surface thins
    if verbose:
        print("ELA(t1): %0.1f" % gf.z1_ela)
        print("ELA(t2): %0.1f" % gf.z2_ela)

    min_ela = min(gf.z1_ela, gf.z2_ela)
    max_ela = max(gf.z1_ela, gf.z2_ela)

    #Calculate mass balance map from dhdt
    gf.mb = gf.dhdt * rho_is
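    #dh/dt (m/yr) scaled by density relative to water gives mb in m w.e./yr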
    """
    # This attempted to assign different densities above and below ELA
    if gf.z1_ela is None:
        gf.mb = gf.dhdt * rho_is
    else:
        #Initialize with average density
        gf.mb = gf.dhdt*(rho_is + rho_f)/2.
        #Everything that is above ELA at t2 is elevation change over firn, use firn density
        accum_mask = (gf.z2 > gf.z2_ela).filled(0).astype(bool)
        gf.mb[accum_mask] = (gf.dhdt*rho_f)[accum_mask]
        #Everything that is below ELA at t1 is elevation change over ice, use ice density
        abl_mask = (gf.z1 <= gf.z1_ela).filled(0).astype(bool)
        gf.mb[abl_mask] = (gf.dhdt*rho_is)[abl_mask]
        #Everything in between, use average of ice and firn density
        #mb[(z1 > z1_ela) || (z2 <= z2_ela)] = dhdt*(rhois + rho_f)/2.
        #Linear ramp
        #rho_f + z2*((rho_is - rho_f)/(z2_ela - z1_ela))
        #mb = np.where(dhdt < ela, dhdt*rho_i, dhdt*rho_s)
    """

    #Use this for winter balance
    #mb = dhdt * rho_s

    gf.mb_stats = malib.get_stats(gf.mb)
    gf.mb_mean = gf.mb_stats[3]

    #Calculate uncertainty of total elevation change
    #TODO: Better spatial distribution characterization
    #Add slope-dependent component here
    dz_sigma = np.sqrt(z1_sigma**2 + z2_sigma**2)
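    #z1 and z2 errors assumed independent, so they combine in quadrature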
    #Uncertainty of dh/dt
    dhdt_sigma = dz_sigma / gf.dt

    #This is mb uncertainty map
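    #Relative-error propagation for a product: sigma_mb/|mb| = sqrt((sigma_rho/rho)^2 + (sigma_dhdt/dhdt)^2)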
    gf.mb_sigma = np.ma.abs(gf.mb) * np.sqrt((rho_sigma / rho_is)**2 +
                                             (dhdt_sigma / gf.dhdt)**2)
    gf.mb_sigma_stats = malib.get_stats(gf.mb_sigma)
    #This is average mb uncertainty
    gf.mb_mean_sigma = gf.mb_sigma_stats[3]

    #Now calculate mb for entire polygon
    area_sigma_perc = 0.09
    gf.mb_mean_totalarea = gf.mb_mean * gf.glac_area
    #Already have area uncertainty as percentage, just use directly
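    #Relative mb and area uncertainties combine in quadrature for the area-integrated total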
    gf.mb_mean_totalarea_sigma = np.ma.abs(gf.mb_mean_totalarea) * np.sqrt(
        (gf.mb_mean_sigma / gf.mb_mean)**2 + area_sigma_perc**2)

    #Total volume change rate: sum over valid pixels times pixel area, in m^3 w.e./yr
    mb_sum = np.sum(gf.mb) * gf.res[0] * gf.res[1]

    outlist = [gf.glacnum, gf.cx, gf.cy, z2_elev_med, z2_elev_p16, z2_elev_p84, z2_slope_med, z2_aspect_med, \
            gf.mb_mean, gf.mb_mean_sigma, gf.glac_area, gf.mb_mean_totalarea, gf.mb_mean_totalarea_sigma, \
            gf.t1, gf.t2, gf.dt]

    if site == 'conus':
        prism_ppt_annual = np.ma.array(iolib.ds_getma(ds_list[2]),
                                       mask=glac_geom_mask) / 1000.
        prism_ppt_annual_stats = malib.get_stats(prism_ppt_annual)
        prism_ppt_annual_mean = prism_ppt_annual_stats[3]

        prism_tmean_annual = np.ma.array(iolib.ds_getma(ds_list[3]),
                                         mask=glac_geom_mask)
        prism_tmean_annual_stats = malib.get_stats(prism_tmean_annual)
        prism_tmean_annual_mean = prism_tmean_annual_stats[3]

        outlist.extend([prism_ppt_annual_mean, prism_tmean_annual_mean])

        #PRISM summer value is mean monthly precip; multiply by number of months for cumulative total
        #Assumes a 4-month summer, with the remaining 8 months treated as winter
        n_summer = 4
        prism_ppt_summer = n_summer * np.ma.array(iolib.ds_getma(ds_list[4]),
                                                  mask=glac_geom_mask) / 1000.
        prism_ppt_summer_stats = malib.get_stats(prism_ppt_summer)
        prism_ppt_summer_mean = prism_ppt_summer_stats[3]

        n_winter = 8
        prism_ppt_winter = n_winter * np.ma.array(iolib.ds_getma(ds_list[5]),
                                                  mask=glac_geom_mask) / 1000.
        prism_ppt_winter_stats = malib.get_stats(prism_ppt_winter)
        prism_ppt_winter_mean = prism_ppt_winter_stats[3]

        prism_tmean_summer = np.ma.array(iolib.ds_getma(ds_list[6]),
                                         mask=glac_geom_mask)
        prism_tmean_summer_stats = malib.get_stats(prism_tmean_summer)
        prism_tmean_summer_mean = prism_tmean_summer_stats[3]

        prism_tmean_winter = np.ma.array(iolib.ds_getma(ds_list[7]),
                                         mask=glac_geom_mask)
        prism_tmean_winter_stats = malib.get_stats(prism_tmean_winter)
        prism_tmean_winter_mean = prism_tmean_winter_stats[3]

        outlist.extend([
            prism_ppt_summer_mean, prism_ppt_winter_mean,
            prism_tmean_summer_mean, prism_tmean_winter_mean
        ])

    if site == 'hma':
        #Classes are: 1 = clean ice, 2 = debris, 3 = pond
        #Load up debris cover maps, ice thickness
        if len(ds_list) > 2:
            gf.debris_class = np.ma.array(iolib.ds_getma(ds_list[2]),
                                          mask=glac_geom_mask)
            gf.debris_thick = np.ma.array(iolib.ds_getma(ds_list[3]),
                                          mask=glac_geom_mask)
            #Load ice thickness from glabtop2
            gf.H = np.ma.array(iolib.ds_getma(ds_list[4]), mask=glac_geom_mask)
            #Load surface velocity maps from Dehecq
            gf.vx = np.ma.array(iolib.ds_getma(ds_list[5]),
                                mask=glac_geom_mask)
            gf.vy = np.ma.array(iolib.ds_getma(ds_list[6]),
                                mask=glac_geom_mask)
            gf.vm = np.ma.sqrt(gf.vx**2 + gf.vy**2)
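            #Velocity magnitude (speed) from component grids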
            v_col_factor = 0.8
            #Should smooth velocities and handle data gaps before differencing
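            #Note: np.gradient assumes unit (pixel) spacing, so this divergence is per pixel;
            #dividing by grid spacing (gf.res) would give units of 1/yr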
            gf.divU = np.gradient(v_col_factor * gf.vx)[1] + np.gradient(
                v_col_factor * gf.vy)[0]
            gf.divQ = gf.H * gf.divU
            #Compute debris/pond/clean percentages for entire polygon
            if gf.debris_class.count() > 0:
                gf.perc_clean = 100. * (gf.debris_class == 1).sum() / gf.debris_class.count()
                gf.perc_debris = 100. * (gf.debris_class == 2).sum() / gf.debris_class.count()
                gf.perc_pond = 100. * (gf.debris_class == 3).sum() / gf.debris_class.count()
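            #Assumes gf.perc_* have default values when debris_class is empty; otherwise the extend below fails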
            outlist.extend([
                gf.H.mean(),
                gf.debris_thick.mean(), gf.perc_debris, gf.perc_pond,
                gf.perc_clean
            ])

    if verbose:
        print('Mean mb: %0.2f +/- %0.2f mwe/yr' % (gf.mb_mean, gf.mb_mean_sigma))
        print('Sum/Area mb: %0.2f mwe/yr' % (mb_sum / gf.glac_area))
        print('Mean mb * Area: %0.2f +/- %0.2f m^3 w.e./yr' %
              (gf.mb_mean_totalarea, gf.mb_mean_totalarea_sigma))
        print('Sum mb: %0.2f m^3 w.e./yr' % mb_sum)
        #print('-------------------------------')

    #Write to master list
    #out.append(outlist)
    #Write to temporary file
    #writer.writerow(outlist)
    #f.flush()

    if writeout and (gf.glac_area / 1E6 > min_glac_area_writeout):
        out_dz_fn = os.path.join(outdir, gf.feat_fn + '_dz.tif')
        iolib.writeGTiff(gf.dz, out_dz_fn, ds_list[0])

        out_z1_fn = os.path.join(outdir, gf.feat_fn + '_z1.tif')
        iolib.writeGTiff(gf.z1, out_z1_fn, ds_list[0])

        out_z2_fn = os.path.join(outdir, gf.feat_fn + '_z2.tif')
        iolib.writeGTiff(gf.z2, out_z2_fn, ds_list[0])

        temp_fn = os.path.join(outdir, gf.feat_fn + '_z2_aspect.tif')
        iolib.writeGTiff(gf.z2_aspect, temp_fn, ds_list[0])

        temp_fn = os.path.join(outdir, gf.feat_fn + '_z2_slope.tif')
        iolib.writeGTiff(gf.z2_slope, temp_fn, ds_list[0])

        #Need to fix this - write out constant date arrays regardless of source
        #out_z1_date_fn = os.path.join(outdir, gf.feat_fn+'_ned_date.tif')
        #iolib.writeGTiff(z1_date, out_z1_date_fn, ds_list[0])

        if site == 'conus':
            out_prism_ppt_annual_fn = os.path.join(
                outdir, gf.feat_fn + '_precip_annual.tif')
            iolib.writeGTiff(prism_ppt_annual, out_prism_ppt_annual_fn,
                             ds_list[0])
            out_prism_tmean_annual_fn = os.path.join(
                outdir, gf.feat_fn + '_tmean_annual.tif')
            iolib.writeGTiff(prism_tmean_annual, out_prism_tmean_annual_fn,
                             ds_list[0])

            out_prism_ppt_summer_fn = os.path.join(
                outdir, gf.feat_fn + '_precip_summer.tif')
            iolib.writeGTiff(prism_ppt_summer, out_prism_ppt_summer_fn,
                             ds_list[0])
            out_prism_ppt_winter_fn = os.path.join(
                outdir, gf.feat_fn + '_precip_winter.tif')
            iolib.writeGTiff(prism_ppt_winter, out_prism_ppt_winter_fn,
                             ds_list[0])

            out_prism_tmean_summer_fn = os.path.join(
                outdir, gf.feat_fn + '_tmean_summer.tif')
            iolib.writeGTiff(prism_tmean_summer, out_prism_tmean_summer_fn,
                             ds_list[0])
            out_prism_tmean_winter_fn = os.path.join(
                outdir, gf.feat_fn + '_tmean_winter.tif')
            iolib.writeGTiff(prism_tmean_winter, out_prism_tmean_winter_fn,
                             ds_list[0])

        if site == 'hma':
            if gf.H is not None:
                temp_fn = os.path.join(outdir, gf.feat_fn + '_H.tif')
                iolib.writeGTiff(gf.H, temp_fn, ds_list[0])

            if gf.debris_thick is not None:
                temp_fn = os.path.join(outdir,
                                       gf.feat_fn + '_debris_thick.tif')
                iolib.writeGTiff(gf.debris_thick, temp_fn, ds_list[0])

            if gf.debris_class is not None:
                temp_fn = os.path.join(outdir,
                                       gf.feat_fn + '_debris_class.tif')
                iolib.writeGTiff(gf.debris_class, temp_fn, ds_list[0])

            if gf.vm is not None:
                temp_fn = os.path.join(outdir, gf.feat_fn + '_vm.tif')
                iolib.writeGTiff(gf.vm, temp_fn, ds_list[0])

            if gf.divQ is not None:
                temp_fn = os.path.join(outdir, gf.feat_fn + '_divQ.tif')
                iolib.writeGTiff(gf.divQ, temp_fn, ds_list[0])

    #Do AED for all
    #Compute mb using scaled AED vs. polygon
    #Check for valid pixel count vs. feature area, fill if appropriate

    if mb_plot and (gf.glac_area / 1E6 > min_glac_area_writeout):
        z_bin_edges = hist_plot(gf, outdir)
        gf.z1_hs = geolib.gdaldem_mem_ds(ds_list[0],
                                         processing='hillshade',
                                         returnma=True)
        gf.z2_hs = geolib.gdaldem_mem_ds(ds_list[1],
                                         processing='hillshade',
                                         returnma=True)
        map_plot(gf, z_bin_edges, outdir)

    return outlist, gf