Code example #1
def make_dem_mosaic_index_ts(index_tif_fn):
    index_txt_fn = index_tif_fn + '-index-map.txt'
    out_fn = os.path.splitext(index_tif_fn)[0] + '_ts.tif'

    if not os.path.exists(index_tif_fn):
        print("Unable to find input file: %s" % index_tif_fn)
        return False
    if not os.path.exists(index_txt_fn):
        print("Unable to find input file: %s" % index_txt_fn)
        return False
    if os.path.exists(out_fn):
        print("Existing ts output found: %s" % out_fn)
        return True

    #Load dem_mosaic index tif
    index_tif_ds = iolib.fn_getds(index_tif_fn)
    index_tif = iolib.ds_getma(index_tif_ds)

    #Create output array
    #index_ts_tif = np.zeros(index_tif.shape, dtype=np.float32)
    #index_ts_tif = np.ma.masked_all_like(index_tif)
    index_ts_tif = np.ma.masked_all(index_tif.shape, dtype=np.float32)

    #Read in dem_mosaic index txt file - should be "fn, index" for each record
    index_txt = np.atleast_2d(np.genfromtxt(index_txt_fn, dtype=str))

    #Extract Python datetime object from filename (should pull out YYYYMMDD_HHMM)
    index_ts = [timelib.fn_getdatetime(fn) for fn in index_txt[:, 0]]

    #Convert to desired output timestamp format
    #Python ordinal
    #index_ts = timelib.dt2o(index_ts)
    #YYYYMMDD integer
    #index_ts = [ts.strftime('%Y%m%d') for ts in index_ts]
    #Decimal year
    index_ts = [timelib.dt2decyear(ts) for ts in index_ts]

    for n, dt in enumerate(index_ts):
        index_ts_tif[index_tif == n] = dt

    iolib.writeGTiff(index_ts_tif, out_fn, index_tif_ds, ndv=0)
    return True
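
#A minimal usage sketch (hypothetical filename, not from the original source):
#the matching '-index-map.txt' is located automatically, and a '*_ts.tif'
#decimal-year raster is written alongside the index.
make_dem_mosaic_index_ts('mosaic-tile-0-index.tif')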
Code example #2

if __name__ == '__main__':
    files = RasterSource(
        '/Volumes/warehouse/projects/UofU/geohackweek/galcier_data/khumbu_DEM_32m/'
    ).file_list()

    raster_list = warplib.memwarp_multi_fn(files,
                                           extent='union',
                                           res='min',
                                           t_srs=files[0])

    band_data = [iolib.ds_getma(i).filled(np.nan) for i in raster_list]

    x, y = geolib.get_xy_grids(raster_list[0])
    time_list = np.array([timelib.fn_getdatetime(fn) for fn in files])

    x_band_data = xr.DataArray(np.stack(band_data),
                               coords={
                                   'lat': y[:, 0],
                                   'lon': x[0, :],
                                   'time': time_list
                               },
                               dims=('time', 'lat', 'lon'),
                               name='elevation')

    x_band_data = x_band_data.sortby('time')

    x_band_data['mean'] = x_band_data.mean(dim='time')
    x_band_data['count'] = x_band_data.count(dim='time')
    # shade = geolib.gdaldem_mem_ma(
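
    #A hedged follow-up sketch (not in the original source): with the stack
    #sorted by time, net elevation change is the last epoch minus the first.
    dh = x_band_data.isel(time=-1) - x_band_data.isel(time=0)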
Code example #3
#dem_list = [iolib.ds_getma(i) for i in ds_list]

import matplotlib.pyplot as plt
plt.imshow(dem_2007)

plt.imshow(dem_2009)

titles = ['2007', '2009']
clim = malib.calcperc(dem_list[0], (2,98))
plot_panels(2, dem_list, clim, titles, 'inferno', 'Elevation (m WGS84)', fn='dem.png')

# It's sometimes possible to get datetimes from TIFF metadata, but these DEMs don't have any datetimes assigned.

# We can get the datetimes from the original (unmerged) files, though.

[timelib.fn_getdatetime(fn) for fn in 
 [
    '/Users/elischwat/Downloads/datasetsA/lewis_2009/dtm/lewis_2009_dtm_44.tif',
    '/Users/elischwat/Downloads/datasetsA/rainier_2007/rainier_2007_dtm_12.tif',
    '/Users/elischwat/Downloads/datasetsA/rainier_2007/rainier_2007_dtm_13.tif',
    '/Users/elischwat/Downloads/datasetsA/rainier_2007/rainier_2007_dtm_14.tif',
    '/Users/elischwat/Downloads/datasetsA/rainier_2007/rainier_2007_dtm_7.tif',
    '/Users/elischwat/Downloads/datasetsA/rainier_2007/rainier_2007_dtm_8.tif',
    '/Users/elischwat/Downloads/datasetsA/rainier_2007/rainier_2007_dtm_9.tif'
 ]]

#Timestamps for the two merged DEMs (hardcoded here)
t_list = np.array([
    datetime.datetime(2007, 4, 22, 0, 0),
    datetime.datetime(2009, 4, 22, 0, 0)])
#Compute time differences, convert to decimal years
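#(The listing is truncated here; a hedged completion following the same
#pattern used in the rainier_dem.py example below:)
dt_list = [timelib.timedelta2decyear(d) for d in np.diff(t_list)]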
Code example #4
File: dem_gallery.py Project: 3d-fun/gmbtools
#dem_clim = (1766, 3247)
#SBB
#dem_clim = (2934, 3983)
hs_clim = (1, 255)

for i, dem_fn in enumerate(dem_fn_list):
    ax = grid[i]
    print(dem_fn)
    dem_ds = iolib.fn_getds(dem_fn)
    dem = iolib.ds_getma_sub(dem_ds)
    dem_hs_fn = os.path.splitext(dem_fn)[0] + '_hs_az315.tif'
    if os.path.exists(dem_hs_fn):
        dem_hs = iolib.fn_getma_sub(dem_hs_fn)
    else:
        dem_hs = geolib.gdaldem_mem_ds(dem_ds, 'hillshade', returnma=True)
    dt = timelib.fn_getdatetime(dem_fn)
    if dt is not None:
        title = dt.strftime('%Y-%m-%d')
        t = ax.set_title(title, fontdict={'fontsize': 6})
        t.set_position([0.5, 0.95])
    hs_im = ax.imshow(dem_hs, vmin=hs_clim[0], vmax=hs_clim[1], cmap='gray')
    dem_im = ax.imshow(dem,
                       vmin=dem_clim[0],
                       vmax=dem_clim[1],
                       cmap='cpt_rainbow',
                       alpha=0.5)
    ax.set_facecolor('k')
    pltlib.hide_ticks(ax)

for ax in grid[i + 1:]:
    ax.axis('off')
Code example #5
    if os.path.exists(t1_fn) and os.path.exists(t2_fn):
        constant_dt = False
        print "Preparing timestamp arrays"
        t1_ds, t2_ds = warplib.memwarp_multi_fn([t1_fn, t2_fn],
                                                extent=dem1_ds,
                                                res=dem1_ds)
        print "Loading timestamps into masked arrays"
        t1 = iolib.ds_getma(t1_ds)
        t2 = iolib.ds_getma(t2_ds)
        #Compute dt in days, then convert to years
        t_factor = t2 - t1
        t_factor /= 365.25
    else:
        from datetime import datetime, timedelta
        from pygeotools.lib import timelib
        t1 = timelib.fn_getdatetime(dem1_fn)
        t2 = timelib.fn_getdatetime(dem2_fn)
        if t1 is not None and t2 is not None and t1 != t2:
            constant_dt = True
            dt = t2 - t1
            #Might be better to do this with dateutil - not sure about leap years
            #from dateutil.relativedelta import relativedelta
            #dt = relativedelta(dt1, dt2))
            #dt.years
            year = timedelta(days=365.25)
            t_factor = abs(dt.total_seconds() / year.total_seconds())
            print "Time differences is %s, dh/%0.3f" % (dt, t_factor)
        else:
            print "Unable to extract timestamps for input images"
            rates = False
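
#A standalone sketch (hypothetical dates, not from the original source)
#showing what the constant-dt branch above computes:
from datetime import datetime, timedelta
dt = datetime(2015, 9, 1) - datetime(2007, 8, 1)
t_factor = abs(dt.total_seconds() / timedelta(days=365.25).total_seconds())
print("Time difference is %s, dh/%0.3f" % (dt, t_factor))  #~8.085 yr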
Code example #6
            if iolib.fn_check(fn):
                temp.append(fn)
                f.write(fn + '\n')
            else:
                print("Unable to find %s" % fn)
        f = None

        if make_stacks:
            dem_fn_list = temp

            #print "Generating stack"
            #s = malib.DEMStack(fn_list=dem_fn_list, outdir=stackdir, res=res, extent=site_extent, srs=dst_srs, trend=True)

            dem_fn_list = np.array(dem_fn_list)
            dem_dt_list = np.array(
                [timelib.fn_getdatetime(i) for i in dem_fn_list])

            #Make annual and seasonal products
            if True:
                #These are OrderedDict
                summer_dict = timelib.dt_filter_rel_annual_idx(
                    dem_dt_list, min_rel_dt=(8, 1), max_rel_dt=(10, 31))
                spring_dict = timelib.dt_filter_rel_annual_idx(
                    dem_dt_list, min_rel_dt=(4, 1), max_rel_dt=(6, 15))
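                #Hedged sketch (assumption, not in the original source: each
                #OrderedDict appears to map year -> index array into the
                #dem_fn_list/dem_dt_list arrays):
                for year, idx in summer_dict.items():
                    print(year, dem_fn_list[idx])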
Code example #7
File: dem_mask.py Project: dshean/demcoreg
def get_mask(dem_ds,
             mask_list,
             dem_fn=None,
             writeout=False,
             outdir=None,
             args=None):
    mask_list = check_mask_list(mask_list)
    if not mask_list or 'none' in mask_list:
        newmask = False
    else:
        #Basename for output files
        if outdir is not None:
            if not os.path.exists(outdir):
                os.makedirs(outdir)
        else:
            outdir = os.path.split(os.path.realpath(dem_fn))[0]

        if dem_fn is not None:
            #Extract DEM timestamp
            dem_dt = timelib.fn_getdatetime(dem_fn)
            out_fn_base = os.path.join(
                outdir,
                os.path.splitext(os.path.split(dem_fn)[-1])[0])

        if args is None:
            #Get default values
            parser = getparser()
            args = parser.parse_args([
                '',
            ])

        newmask = True

        if 'glaciers' in mask_list:
            icemask = get_icemask(dem_ds)
            if writeout:
                out_fn = out_fn_base + '_ice_mask.tif'
                print("Writing out %s" % out_fn)
                iolib.writeGTiff(icemask, out_fn, src_ds=dem_ds)
            newmask = np.logical_and(icemask, newmask)

        #Need to process NLCD separately, with nearest neighbor interpolation
        if 'nlcd' in mask_list and args.nlcd_filter != 'none':
            rs = 'near'
            nlcd_ds = gdal.Open(get_nlcd_fn())
            nlcd_ds_warp = warplib.memwarp_multi(
                [nlcd_ds], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r=rs)[0]
            out_fn = None
            if writeout:
                out_fn = out_fn_base + '_nlcd.tif'
            nlcdmask = get_nlcd_mask(nlcd_ds_warp,
                                     filter=args.nlcd_filter,
                                     out_fn=out_fn)
            if writeout:
                out_fn = os.path.splitext(out_fn)[0] + '_mask.tif'
                print("Writing out %s" % out_fn)
                iolib.writeGTiff(nlcdmask, out_fn, src_ds=dem_ds)
            newmask = np.logical_and(nlcdmask, newmask)

        if 'bareground' in mask_list and args.bareground_thresh > 0:
            bareground_ds = gdal.Open(get_bareground_fn())
            bareground_ds_warp = warplib.memwarp_multi(
                [bareground_ds], res=dem_ds, extent=dem_ds, t_srs=dem_ds,
                r='cubicspline')[0]
            out_fn = None
            if writeout:
                out_fn = out_fn_base + '_bareground.tif'
            baregroundmask = get_bareground_mask(
                bareground_ds_warp,
                bareground_thresh=args.bareground_thresh,
                out_fn=out_fn)
            if writeout:
                out_fn = os.path.splitext(out_fn)[0] + '_mask.tif'
                print("Writing out %s" % out_fn)
                iolib.writeGTiff(baregroundmask, out_fn, src_ds=dem_ds)
            newmask = np.logical_and(baregroundmask, newmask)

        if 'snodas' in mask_list and args.snodas_thresh > 0:
            #Get SNODAS snow depth products for DEM timestamp
            snodas_min_dt = datetime(2003, 9, 30)
            if dem_dt >= snodas_min_dt:
                snodas_ds = get_snodas_ds(dem_dt)
                if snodas_ds is not None:
                    snodas_ds_warp = warplib.memwarp_multi(
                        [snodas_ds], res=dem_ds, extent=dem_ds, t_srs=dem_ds,
                        r='cubicspline')[0]
                    #snow depth values are mm, convert to meters
                    snodas_depth = iolib.ds_getma(snodas_ds_warp) / 1000.
                    if snodas_depth.count() > 0:
                        print(
                            "Applying SNODAS snow depth filter (masking values >= %0.2f m)"
                            % args.snodas_thresh)
                        out_fn = None
                        if writeout:
                            out_fn = out_fn_base + '_snodas_depth.tif'
                            print("Writing out %s" % out_fn)
                            iolib.writeGTiff(snodas_depth,
                                             out_fn,
                                             src_ds=dem_ds)
                        snodas_mask = np.ma.masked_greater(
                            snodas_depth, args.snodas_thresh)
                        snodas_mask = ~(np.ma.getmaskarray(snodas_mask))
                        if writeout:
                            out_fn = os.path.splitext(out_fn)[0] + '_mask.tif'
                            print("Writing out %s" % out_fn)
                            iolib.writeGTiff(snodas_mask,
                                             out_fn,
                                             src_ds=dem_ds)
                        newmask = np.logical_and(snodas_mask, newmask)
                    else:
                        print(
                            "SNODAS grid for input location and timestamp is empty"
                        )

        #These tiles cover CONUS
        #tile_list=('h08v04', 'h09v04', 'h10v04', 'h08v05', 'h09v05')
        if 'modscag' in mask_list and args.modscag_thresh > 0:
            modscag_min_dt = datetime(2000, 2, 24)
            if dem_dt < modscag_min_dt:
                print("Warning: DEM timestamp (%s) is before earliest MODSCAG timestamp (%s)" \
                        % (dem_dt, modscag_min_dt))
            else:
                tile_list = get_modis_tile_list(dem_ds)
                print(tile_list)
                pad_days = 7
                modscag_fn_list = get_modscag_fn_list(dem_dt,
                                                      tile_list=tile_list,
                                                      pad_days=pad_days)
                if modscag_fn_list:
                    modscag_ds = proc_modscag(modscag_fn_list,
                                              extent=dem_ds,
                                              t_srs=dem_ds)
                    modscag_ds_warp = warplib.memwarp_multi(
                        [modscag_ds], res=dem_ds, extent=dem_ds, t_srs=dem_ds,
                        r='cubicspline')[0]
                    print(
                        "Applying MODSCAG fractional snow cover percent filter (masking values >= %0.1f%%)"
                        % args.modscag_thresh)
                    modscag_fsca = iolib.ds_getma(modscag_ds_warp)
                    out_fn = None
                    if writeout:
                        out_fn = out_fn_base + '_modscag_fsca.tif'
                        print("Writing out %s" % out_fn)
                        iolib.writeGTiff(modscag_fsca, out_fn, src_ds=dem_ds)
                    modscag_mask = (modscag_fsca.filled(0) >=
                                    args.modscag_thresh)
                    modscag_mask = ~(modscag_mask)
                    if writeout:
                        out_fn = os.path.splitext(out_fn)[0] + '_mask.tif'
                        print("Writing out %s" % out_fn)
                        iolib.writeGTiff(modscag_mask, out_fn, src_ds=dem_ds)
                    newmask = np.logical_and(modscag_mask, newmask)

        #Use reflectance values to estimate snowcover
        if 'toa' in mask_list:
            #Use top of atmosphere scaled reflectance values (0-1)
            toa_ds = gdal.Open(get_toa_fn(dem_fn))
            toa_ds_warp = warplib.memwarp_multi(
                [toa_ds], res=dem_ds, extent=dem_ds, t_srs=dem_ds)[0]
            toa_mask = get_toa_mask(toa_ds_warp, args.toa_thresh)
            if writeout:
                out_fn = out_fn_base + '_toa_mask.tif'
                print("Writing out %s" % out_fn)
                iolib.writeGTiff(toa_mask, out_fn, src_ds=dem_ds)
            newmask = np.logical_and(toa_mask, newmask)

        if False:
            #Filter based on expected snowline
            #Simplest approach uses altitude cutoff
            max_elev = 1500
            newdem = np.ma.masked_greater(dem, max_elev)
            newmask = np.ma.getmaskarray(newdem)

        print(
            "Generating final mask to use for reference surfaces, and applying to input DEM"
        )
        #Now invert to use to create final masked array
        #True (1) represents "invalid" pixel to match numpy ma convention
        newmask = ~newmask

        #Dilate the mask
        if args.dilate is not None:
            niter = args.dilate
            print("Dilating mask with %i iterations" % niter)
            from scipy import ndimage
            newmask = ~(ndimage.binary_dilation(~newmask, iterations=niter))

    return newmask
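
#A hedged usage sketch (hypothetical filename, not from the original source):
#build a glaciers-only mask for an existing DEM and write the products out.
from osgeo import gdal
dem_fn = 'dem.tif'  #hypothetical input; must exist on disk
dem_ds = gdal.Open(dem_fn)
mask = get_mask(dem_ds, ['glaciers'], dem_fn=dem_fn, writeout=True)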
Code example #8
File: compute_dz.py Project: whigg/demcoreg
def main():
    parser = getparser()
    args = parser.parse_args()

    #This is output ndv, avoid using 0 for differences
    diffndv = -9999

    dem1_fn = args.fn1
    dem2_fn = args.fn2

    if dem1_fn == dem2_fn:
        sys.exit('Input filenames are identical')

    fn_list = [dem1_fn, dem2_fn]

    print("Warping DEMs to same res/extent/proj")
    #This will check input param for validity, could do beforehand
    dem1_ds, dem2_ds = warplib.memwarp_multi_fn(fn_list, extent=args.te, res=args.tr, t_srs=args.t_srs)

    outdir = args.outdir
    if outdir is None:
        outdir = os.path.dirname(os.path.abspath(dem1_fn))

    if not os.path.exists(outdir):
        os.makedirs(outdir)

    outprefix = os.path.splitext(os.path.split(dem1_fn)[1])[0]+'_'+os.path.splitext(os.path.split(dem2_fn)[1])[0]

    print("Loading input DEMs into masked arrays")
    dem1 = iolib.ds_getma(dem1_ds, 1)
    dem2 = iolib.ds_getma(dem2_ds, 1)

    #Compute dz/dt rates if possible, in m/yr
    rates = True 
    if rates:
        #Extract basename
        #This was a hack to work with timestamp array filenames that have geoid offset applied
        adj = ''
        if '-adj' in dem1_fn:
            adj = '-adj' 
        dem1_fn_base = re.sub(adj, '', os.path.splitext(dem1_fn)[0]) 
        dem2_fn_base = re.sub(adj, '', os.path.splitext(dem2_fn)[0]) 

        #Attempt to load timestamp arrays (for mosaics) if present
        t1_fn = dem1_fn_base+'_ts.tif'
        t2_fn = dem2_fn_base+'_ts.tif'
        if os.path.exists(t1_fn) and os.path.exists(t2_fn):
            print("Preparing timestamp arrays")
            t1_ds, t2_ds = warplib.memwarp_multi_fn([t1_fn, t2_fn], extent=dem1_ds, res=dem1_ds)
            print("Loading timestamps into masked arrays")
            t1 = iolib.ds_getma(t1_ds)
            t2 = iolib.ds_getma(t2_ds)
            #Compute dt in days, then convert to years
            t_factor = t2 - t1
            t_factor /= 365.25
        else:
            #Attempt to extract timestamps from input filenames
            t1 = timelib.fn_getdatetime(dem1_fn)
            t2 = timelib.fn_getdatetime(dem2_fn)
            if t1 is not None and t2 is not None and t1 != t2:  
                dt = t2 - t1
                year = timedelta(days=365.25)
                t_factor = abs(dt.total_seconds()/year.total_seconds()) 
                print("Time differences is %s, dh/%0.3f" % (dt, t_factor))
            else:
                print("Unable to extract timestamps for input images")
                rates = False

    #Compute relative elevation difference with Eulerian approach 
    print("Computing eulerian elevation difference")
    diff_euler = dem2 - dem1

    #Check to make sure inputs actually intersect
    #if not np.any(~dem1.mask*~dem2.mask):
    if diff_euler.count() == 0:
        sys.exit("No valid overlap between input DEMs")

    if True:
        print("Eulerian elevation difference stats:")
        diff_euler_stats = malib.print_stats(diff_euler)
        diff_euler_med = diff_euler_stats[5]

    if True:
        print("Writing Eulerian elevation difference map")
        dst_fn = os.path.join(outdir, outprefix+'_dz_eul.tif')
        print(dst_fn)
        iolib.writeGTiff(diff_euler, dst_fn, dem1_ds, ndv=diffndv)
        if rates:
            print("Writing Eulerian rate map")
            dst_fn = os.path.join(outdir, outprefix+'_dz_eul_rate.tif')
            print(dst_fn)
            iolib.writeGTiff(diff_euler/t_factor, dst_fn, dem1_ds, ndv=diffndv)

    if False:
        print("Writing Eulerian relative elevation difference map")
        diff_euler_rel = diff_euler - diff_euler_med
        dst_fn = os.path.join(outdir, outprefix+'_dz_eul_rel.tif')
        print(dst_fn)
        iolib.writeGTiff(diff_euler_rel, dst_fn, dem1_ds, ndv=diffndv)

    if False:
        print("Writing out DEM2 with median elevation difference removed")
        dst_fn = os.path.splitext(dem2_fn)[0]+'_med%0.2f.tif' % diff_euler_med
        print(dst_fn)
        iolib.writeGTiff(dem2 - diff_euler_med, dst_fn, dem1_ds, ndv=diffndv)

    if False:
        print("Writing Eulerian elevation difference percentage map")
        diff_euler_perc = 100.0*diff_euler/dem1
        dst_fn = os.path.join(outdir, outprefix+'_dz_eul_perc.tif')
        print(dst_fn)
        iolib.writeGTiff(diff_euler_perc, dst_fn, dem1_ds, ndv=diffndv)
Code example #9
File: dem_mask.py Project: whigg/demcoreg
def main():
    parser = getparser()
    args = parser.parse_args()

    #Write out all mask products for the input DEM
    writeall = True

    mask_glaciers = True
    if args.no_icemask:
        mask_glaciers = False

    #Define top-level directory containing DEM
    topdir = os.getcwd()

    #This directory should contain nlcd grid, glacier outlines
    datadir = iolib.get_datadir()

    dem_fn = args.dem_fn
    dem_ds = gdal.Open(dem_fn)
    print(dem_fn)

    #Extract DEM timestamp
    dem_dt = timelib.fn_getdatetime(dem_fn)

    #This will hold datasets for memwarp and output processing
    ds_dict = OrderedDict()
    ds_dict['dem'] = dem_ds

    ds_dict['lulc'] = None
    #lulc_source = get_lulc_source(dem_ds)
    #lulc_ds_full = get_lulc_ds_full(dem_ds)
    #ds_dict['lulc'] = lulc_ds_full

    ds_dict['snodas'] = None
    if args.snodas:
        #Get SNODAS snow depth products for DEM timestamp
        snodas_min_dt = datetime(2003, 9, 30)
        if dem_dt >= snodas_min_dt:
            snodas_outdir = os.path.join(datadir, 'snodas')
            if not os.path.exists(snodas_outdir):
                os.makedirs(snodas_outdir)
            snodas_ds = get_snodas(dem_dt, snodas_outdir)
            if snodas_ds is not None:
                ds_dict['snodas'] = snodas_ds

    ds_dict['modscag'] = None
    #Get MODSCAG products for DEM timestamp
    #These tiles cover CONUS
    #tile_list=('h08v04', 'h09v04', 'h10v04', 'h08v05', 'h09v05')
    if args.modscag:
        modscag_min_dt = datetime(2000, 2, 24)
        if dem_dt < modscag_min_dt:
            print("\nWarning: DEM timestamp (%s) is before earliest MODSCAG timestamp (%s)\nSkipping..." \
                    % (dem_dt, modscag_min_dt))
        else:
            tile_list = get_modis_tile_list(dem_ds)
            print(tile_list)
            pad_days = 7
            modscag_outdir = os.path.join(datadir, 'modscag')
            if not os.path.exists(modscag_outdir):
                os.makedirs(modscag_outdir)
            modscag_fn_list = get_modscag(dem_dt, modscag_outdir, tile_list,
                                          pad_days)
            if modscag_fn_list:
                modscag_ds = proc_modscag(modscag_fn_list,
                                          extent=dem_ds,
                                          t_srs=dem_ds)
                ds_dict['modscag'] = modscag_ds

    #TODO: need to clean this up
    #Better error handling
    #Disabled for now
    #Use reflectance values to estimate snowcover
    ds_dict['toa'] = None
    if args.toa:
        #Use top of atmosphere scaled reflectance values (0-1)
        toa_ds = get_toa_ds(dem_fn)
        ds_dict['toa'] = toa_ds

    #Cull all of the None ds from the ds_dict
    #Note: iterate over a copy of the keys, since entries are deleted in the loop
    for k in list(ds_dict.keys()):
        if ds_dict[k] is None:
            del ds_dict[k]

    #Warp all masks to DEM extent/res
    #Note: use cubicspline here to avoid artifacts with negative values
    if len(ds_dict) > 0:
        ds_list = warplib.memwarp_multi(list(ds_dict.values()),
                                        res=dem_ds,
                                        extent=dem_ds,
                                        t_srs=dem_ds,
                                        r='cubicspline')
        #Update
        for n, key in enumerate(ds_dict.keys()):
            ds_dict[key] = ds_list[n]

    #lulc_ds_warp = get_lulc_ds_warp(dem_ds)
    #ds_dict['lulc'] = lulc_ds_warp

    print(' ')
    #Need better handling of ds order based on input ds here

    dem = iolib.ds_getma(ds_dict['dem'])

    #Initialize the mask
    #True (1) represents "valid" unmasked pixel, False (0) represents "invalid" pixel to be masked
    newmask = ~(np.ma.getmaskarray(dem))

    #Basename for output files
    out_fn_base = os.path.splitext(dem_fn)[0]

    #Generate a rockmask
    if args.filter == 'none' or args.bareground_thresh == 0:
        print("Skipping LULC filter")
    else:
        #Note: these now have RGI glacier polygons removed
        #if 'lulc' in ds_dict.keys():
        #We are almost always going to want LULC mask
        rockmask = get_lulc_mask(dem_ds, mask_glaciers=mask_glaciers, \
                filter=args.filter, bareground_thresh=args.bareground_thresh, out_fn=out_fn_base)
        if writeall:
            out_fn = out_fn_base + '_rockmask.tif'
            print("Writing out %s\n" % out_fn)
            iolib.writeGTiff(rockmask, out_fn, src_ds=ds_dict['dem'])
        newmask = np.logical_and(rockmask, newmask)

    if 'snodas' in ds_dict.keys():
        #SNODAS snow depth filter
        snodas_thresh = args.snodas_thresh
        #snow depth values are mm, convert to meters
        snodas_depth = iolib.ds_getma(ds_dict['snodas']) / 1000.
        if snodas_depth.count() > 0:
            print(
                "Applying SNODAS snow depth filter (masking values >= %0.2f m)"
                % snodas_thresh)
            if writeall:
                out_fn = out_fn_base + '_snodas_depth.tif'
                print("Writing out %s" % out_fn)
                iolib.writeGTiff(snodas_depth, out_fn, src_ds=ds_dict['dem'])
            snodas_mask = np.ma.masked_greater(snodas_depth, snodas_thresh)
            #This should be 1 for valid surfaces with no snow, 0 for snowcovered surfaces
            snodas_mask = ~(np.ma.getmaskarray(snodas_mask))
            if writeall:
                out_fn = out_fn_base + '_snodas_mask.tif'
                print("Writing out %s\n" % out_fn)
                iolib.writeGTiff(snodas_mask, out_fn, src_ds=ds_dict['dem'])
            newmask = np.logical_and(snodas_mask, newmask)
        else:
            print(
                "SNODAS grid for input location and timestamp is empty!\nSkipping...\n"
            )

    if 'modscag' in ds_dict.keys():
        #MODSCAG percent snowcover
        modscag_thresh = args.modscag_thresh
        print(
            "Applying MODSCAG fractional snow cover percent filter (masking values >= %0.1f%%)"
            % modscag_thresh)
        modscag_perc = iolib.ds_getma(ds_dict['modscag'])
        if writeall:
            out_fn = out_fn_base + '_modscag_perc.tif'
            print("Writing out %s" % out_fn)
            iolib.writeGTiff(modscag_perc, out_fn, src_ds=ds_dict['dem'])
        modscag_mask = (modscag_perc.filled(0) >= modscag_thresh)
        #This should be 1 for valid surfaces with no snow, 0 for snowcovered surfaces
        modscag_mask = ~(modscag_mask)
        if writeall:
            out_fn = out_fn_base + '_modscag_mask.tif'
            print("Writing out %s\n" % out_fn)
            iolib.writeGTiff(modscag_mask, out_fn, src_ds=ds_dict['dem'])
        newmask = np.logical_and(modscag_mask, newmask)

    if 'toa' in ds_dict.keys():
        #TOA reflectance filter
        #This should be 1 for valid surfaces, 0 for snowcovered surfaces
        toa_mask = get_toa_mask(ds_dict['toa'], args.toa_thresh)
        if writeall:
            out_fn = out_fn_base + '_toamask.tif'
            print("Writing out %s\n" % out_fn)
            iolib.writeGTiff(toa_mask, out_fn, src_ds=ds_dict['dem'])
        newmask = np.logical_and(toa_mask, newmask)

    if False:
        #Filter based on expected snowline
        #Simplest approach uses altitude cutoff
        max_elev = 1500
        newdem = np.ma.masked_greater(dem, max_elev)
        newmask = np.ma.getmaskarray(newdem)

    print(
        "Generating final mask to use for reference surfaces, and applying to input DEM"
    )
    #Now invert to use to create final masked array
    newmask = ~newmask

    #Dilate the mask
    if args.dilate is not None:
        niter = args.dilate
        print("Dilating mask with %i iterations" % niter)
        from scipy import ndimage
        newmask = ~(ndimage.binary_dilation(~newmask, iterations=niter))

    #Check that we have enough pixels, good distribution

    #Apply mask to original DEM - use these surfaces for co-registration
    newdem = np.ma.array(dem, mask=newmask)

    min_validpx_count = 100
    min_validpx_std = 10
    validpx_count = newdem.count()
    validpx_std = newdem.std()
    print("\n%i valid pixels in output ref.tif" % validpx_count)
    print("%0.2f m std output ref.tif\n" % validpx_std)
    #if (validpx_count > min_validpx_count) and (validpx_std > min_validpx_std):
    if (validpx_count > min_validpx_count):
        #Write out final mask
        out_fn = out_fn_base + '_ref.tif'
        print("Writing out %s\n" % out_fn)
        iolib.writeGTiff(newdem, out_fn, src_ds=ds_dict['dem'])
    else:
        print("Not enough valid pixels!")
Code example #10
File: swe.py Project: jmichellehu/snowtools
    parser.add_argument('-dem1_fn', type=str, help='DEM(t1) filename')
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('-dem2_fn', type=str, default=None, help='DEM(t2) filename')
    group.add_argument('-dz_fn', type=str, default=None, help='Snow depth map filename')
    density_choices = ['sturm', 'snotel', 'constant']
    parser.add_argument('-density', nargs=1, type=float, default=None, help='Specify density (g/cc)')
    parser.add_argument('-filter', action='store_true', help='Filter SWE map to remove blunders, smooth, and fill gaps')
    parser.add_argument('-prism', action='store_true', help='Include PRISM precip in analysis/plots')
    return parser

#def main():
parser = getparser()
args = parser.parse_args()

dem1_fn = args.dem1_fn
dem1_ts = timelib.fn_getdatetime(dem1_fn)
res = 'min'
save = True 
#For testing
#res = 64

if args.dem2_fn is not None:
    dem2_fn = args.dem2_fn
    print("Warping DEMs to same res/extent/proj")
    #This will check input param for validity, could do beforehand
    dem1_ds, dem2_ds = warplib.memwarp_multi_fn([dem1_fn, dem2_fn], extent='intersection', res=res, t_srs='first')
    print("Loading input DEMs into masked arrays")
    dem1 = iolib.ds_getma(dem1_ds, 1)
    dem2 = iolib.ds_getma(dem2_ds, 1)
    dem2_ts = timelib.fn_getdatetime(dem2_fn)
    dz = dem2 - dem1
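
    #Hedged follow-up sketch (assumption, not shown in the original snippet):
    #with dz in meters of snow depth and a bulk density in g/cc (relative to
    #water), SWE in meters water equivalent is their product.
    rho = 0.35  #hypothetical bulk snow density (g/cc)
    swe = dz * rho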
Code example #11
File: rainier_dem.py Project: whigg/raster
titles = ['1970', '2008', '2015']
clim = malib.calcperc(dem_list[0], (2, 98))
plot3panel(dem_list,
           clim,
           titles,
           'inferno',
           'Elevation (m WGS84)',
           fn='dem.png')

#ddem_1970_2015 = dem_1970 - dem_2015
#ddem_2008_2015 = dem_2008 - dem_2015
#ddem_1970_2008 = dem_1970 - dem_2008

#Extract timestamps from filenames
t_list = np.array([timelib.fn_getdatetime(fn) for fn in dem_fn_list])
#Compute time differences, convert to decimal years
dt_list = [timelib.timedelta2decyear(d) for d in np.diff(t_list)]
dt_list.append(dt_list[0] + dt_list[1])

#Calculate elevation difference for each time period
dh_list = [dem_2008 - dem_1970, dem_2015 - dem_2008, dem_2015 - dem_1970]
titles = [
    '1970 to 2008 (%0.1f yr)' % dt_list[0],
    '2008 to 2015 (%0.1f yr)' % dt_list[1],
    '1970 to 2015 (%0.1f yr)' % dt_list[2]
]
plot3panel(dh_list, (-30, 30),
           titles,
           'RdBu',
           'Elevation Change (m)',
Code example #12
def parse_pc_align_log(fn):
    import re
    error_dict = None
    #Determine log filename
    import glob
    log_fn = glob.glob(fn.rsplit('-DEM', 1)[0]+'*.log')
    if not log_fn:
        log_fn = glob.glob(fn.rsplit('-DEM', 1)[0]+'*align/*.log')

    if not log_fn:
        print "Failed to locate align log for %s" % fn
    else:
        log_fn = log_fn[0]
        print(log_fn)
        f = open(log_fn)

        error_dict = {}
        error_dict['File'] = fn
        error_dict['Date'] = timelib.fn_getdatetime(fn)

        #This handles cases where no sampling was performed
        error_dict['Input Sampled 16th Percentile Error'] = np.nan 
        error_dict['Input Sampled Median Error'] = np.nan 
        error_dict['Input Sampled 84th Percentile Error'] = np.nan 
        error_dict['Input Sampled Error Spread'] = np.nan 
        error_dict['Output Sampled 16th Percentile Error'] = np.nan 
        error_dict['Output Sampled Median Error'] = np.nan 
        error_dict['Output Sampled 84th Percentile Error'] = np.nan 
        error_dict['Output Sampled Error Spread'] = np.nan 
        #error_dict['Translation vector (North-East-Down, meters)'] = [np.nan, np.nan, np.nan]

        #Set default reference type to point
        error_dict['Ref type'] = 'point'

        temp = []
        for line in f:
            key = 'Loaded points'
            if key in line:
                temp.append(int(re.split(':', line.rstrip())[1]))
            key = 'Number of errors'
            if key in line:
                error_dict[key] = int(re.split(':', line.rstrip())[1])
            key = 'Input: error percentile'
            if key in line:
                line_a = re.split(': |, ', line.rstrip())
                error_dict['Input 16th Percentile Error'] = float(line_a[3])
                error_dict['Input Median Error'] = float(line_a[5])
                error_dict['Input 84th Percentile Error'] = float(line_a[7])
            """
            key = 'Input: error mean'
            if key in line:
                line_a = re.split(': |, ', line.rstrip())
                error_dict['Input Mean Error'] = float(line_a[2])
                error_dict['Input Std Error'] = float(line_a[4])
            """
            #This pulls the line 
            #Input: mean of smallest errors: 25%: 7.82061, 50%: 9.71931, 75%: 10.9917, 100%: 12.2715
            #Want the final value
            key = 'Input: mean'
            if key in line:
                line_a = re.split(': |, ', line.rstrip())
                error_dict['Input Mean Error'] = float(line_a[-1])
            key = 'Output: error percentile'
            if key in line:
                line_a = re.split(': |, ', line.rstrip())
                error_dict['Output 16th Percentile Error'] = float(line_a[3])
                error_dict['Output Median Error'] = float(line_a[5])
                error_dict['Output 84th Percentile Error'] = float(line_a[7])
            """
            key = 'Output: error mean'
            if key in line:
                line_a = re.split(': |, ', line.rstrip())
                error_dict['Output Mean Error'] = float(line_a[2])
                error_dict['Output Std Error'] = float(line_a[4])
            """
            key = 'Output: mean'
            if key in line:
                line_a = re.split(': |, ', line.rstrip())
                error_dict['Output Mean Error'] = float(line_a[-1])
            key = 'Translation vector (Cartesian, meters)'
            #Previous versions of pc_align output this
            #key = 'Translation vector (meters)'
            if key in line:
                error_dict['Translation vector (Cartesian, meters)'] = list(float(i) for i in re.split(r'Vector3\(', line.rstrip())[1][:-1].split(','))
                #error_dict['Translation vector (meters)'] = list(float(i) for i in re.split(r'Vector3\(', line.rstrip())[1][:-1].split(','))
            key = 'Translation vector (North-East-Down, meters)'
            if key in line:
                error_dict['Translation vector (North-East-Down, meters)'] = list(float(i) for i in re.split(r'Vector3\(', line.rstrip())[1][:-1].split(','))
            key = 'Translation vector magnitude (meters)'
            if key in line:
                error_dict[key] = float(re.split(':', line.rstrip())[1])
            key = 'Translation vector (lat,lon,z)'
            if key in line:
                error_dict[key] = list(float(i) for i in re.split(r'Vector3\(', line.rstrip())[1][:-1].split(','))
                shift_proj = shift_ll2proj(fn, error_dict[key])
                key = 'Translation vector (Proj meters)'
                error_dict[key] = shift_proj

            #This is the output from the point sampling post-alignment
            key = 'Error percentiles'
            if key in line:
                #This is a hack to capture both sampling of input and output
                if 'Output Sampled 16th Percentile Error' in error_dict:
                    error_dict['Input Sampled 16th Percentile Error'] = error_dict['Output Sampled 16th Percentile Error']
                    error_dict['Input Sampled Median Error'] = error_dict['Output Sampled Median Error']
                    error_dict['Input Sampled 84th Percentile Error'] = error_dict['Output Sampled 84th Percentile Error']
                    error_dict['Input Sampled Error Spread'] = error_dict['Output Sampled Error Spread']
                line_a = re.split(': |, ', line.rstrip())
                error_dict['Output Sampled 16th Percentile Error'] = float(line_a[2])
                error_dict['Output Sampled Median Error'] = float(line_a[4])
                error_dict['Output Sampled 84th Percentile Error'] = float(line_a[6])
                error_dict['Output Sampled Error Spread'] = float(line_a[6]) - float(line_a[2])
            #key = 'compute_dh'
            #Note: these are not computed for absolute values by compute_dh
            key = 'count:'
            if key in line:
                error_dict['Ref type'] = 'grid'
                #This is a hack to capture both sampling of input and output
                if 'Output Sampled 16th Percentile Error' in error_dict:
                    error_dict['Input Sampled 16th Percentile Error'] = error_dict['Output Sampled 16th Percentile Error']
                    error_dict['Input Sampled Median Error'] = error_dict['Output Sampled Median Error']
                    error_dict['Input Sampled 84th Percentile Error'] = error_dict['Output Sampled 84th Percentile Error']
                    error_dict['Input Sampled Error Spread'] = error_dict['Output Sampled Error Spread']
                #Assume the following format for stats:
                #count: 349835 min: -51.39 max: 22.00 mean: 0.29 std: 0.49 med: 0.28 mad: 0.37 \
                #q1: 0.04 q2: 0.54 iqr: 0.50 mode: 0.29 p16: -0.07 p84: 0.66 spread: 0.37
                line_a = re.split(': | ', line.rstrip())
                error_dict['Output Sampled 16th Percentile Error'] = float(line_a[23])
                error_dict['Output Sampled Median Error'] = float(line_a[11])
                error_dict['Output Sampled 84th Percentile Error'] = float(line_a[25])
                error_dict['Output Sampled Error Spread'] = float(line_a[25]) - float(line_a[23])
            key = 'Mean error'
            if key in line:
                if 'Output Sampled Mean Error' in error_dict:
                    error_dict['Input Sampled Mean Error'] = error_dict['Output Sampled Mean Error']
                error_dict['Output Sampled Mean Error'] = float(re.split(':', line.rstrip())[1])
            key = 'RMSE'
            if key in line:
                if 'Output Sampled RMSE' in error_dict:
                    error_dict['Input Sampled RMSE'] = error_dict['Output Sampled RMSE']
                error_dict['Output Sampled RMSE'] = float(re.split(':', line.rstrip())[1])
            key = 'Absolute Median Error'
            if key in line:
                if 'Output Absolute Median Error' in error_dict:
                    error_dict['Input Absolute Median Error'] = error_dict['Output Absolute Median Error']
                error_dict['Output Absolute Median Error'] = float(re.split(':', line.rstrip())[1])
                
        error_dict['Source points'] = temp[0] 
        error_dict['Reference points'] = temp[1] 
    
    return error_dict
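
#A hedged usage sketch (hypothetical filename, not from the original source):
error_dict = parse_pc_align_log('run/run-DEM.tif')
if error_dict is not None:
    print(error_dict.get('Translation vector magnitude (meters)'))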
Code example #13
def main():
    parser = getparser()
    args = parser.parse_args()

    #Define top-level directory containing raster
    topdir = os.getcwd()

    #This directory will store SNODAS products
    #Use centralized directory, default is $HOME/data/
    #datadir = iolib.get_datadir()
    datadir = args.datadir
    if not os.path.exists(datadir):
        os.makedirs(datadir)

    fn = args.fn
    ds = gdal.Open(fn)
    print(fn)

    #Extract timestamp from input filename
    dt = timelib.fn_getdatetime(fn)
    #If date is specified, extract timestamp
    if args.date is not None:
        dt = timelib.fn_getdatetime(args.date)

    out_fn_base = os.path.splitext(fn)[0]

    snodas_min_dt = datetime(2003, 9, 30)
    if dt is None:
        sys.exit("Unable to extract timestamp from input filename")
    if dt < snodas_min_dt:
        sys.exit("Timestamp is earlier than valid SNODAS model range")

    #snow depth values are mm, convert to meters
    snodas_outdir = os.path.join(datadir, 'snodas')
    if not os.path.exists(snodas_outdir):
        os.makedirs(snodas_outdir)
    snodas_ds_full = get_snodas(dt, snodas_outdir)
    snodas_ds = warplib.memwarp_multi(
        [snodas_ds_full], res='source', extent=ds, t_srs=ds,
        r='cubicspline')[0]
    snodas_depth = iolib.ds_getma(snodas_ds) / 1000.

    if snodas_depth.count() > 0:
        #Write out at original resolution
        out_fn = out_fn_base + '_snodas_depth.tif'
        print("Writing out %s" % out_fn)
        iolib.writeGTiff(snodas_depth, out_fn, src_ds=snodas_ds)

        #Warp to match input raster
        #Note: use cubicspline here to avoid artifacts with negative values
        ds_out = warplib.memwarp_multi(
            [snodas_ds], res=ds, extent=ds, t_srs=ds, r='cubicspline')[0]

        #Write out warped version
        snodas_depth = iolib.ds_getma(ds_out) / 1000.
        out_fn = out_fn_base + '_snodas_depth_warp.tif'
        print("Writing out %s" % out_fn)
        iolib.writeGTiff(snodas_depth, out_fn, src_ds=ds_out)
    else:
        print("SNODAS grid for input location and timestamp is empty!")
Code example #14
def main():
    parser = getparser()
    args = parser.parse_args()

    #This is output ndv, avoid using 0 for differences
    diffndv = -9999

    r1_fn = args.fn1
    r2_fn = args.fn2

    if r1_fn == r2_fn:
        sys.exit('Input filenames are identical')

    fn_list = [r1_fn, r2_fn]

    outdir = args.outdir
    if outdir is None:
        outdir = os.path.dirname(os.path.abspath(r1_fn))

    if not os.path.exists(outdir):
        os.makedirs(outdir)

    outprefix = os.path.splitext(os.path.split(r1_fn)[1])[0]+'_'+os.path.splitext(os.path.split(r2_fn)[1])[0]

    #Compute dz/dt rate if possible, in m/yr
    if args.rate:
        #Extract basename
        #This was a hack to work with timestamp array filenames that have geoid offset applied
        adj = ''
        if '-adj' in r1_fn:
            adj = '-adj' 
        r1_fn_base = re.sub(adj, '', os.path.splitext(r1_fn)[0]) 
        r2_fn_base = re.sub(adj, '', os.path.splitext(r2_fn)[0]) 

        #Attempt to load ordinal timestamp arrays (for mosaics) if present
        """
        import glob
        t1_fn = glob.glob(r1_fn_base+'*_ts*.tif')
        t2_fn = glob.glob(r2_fn_base+'*_ts*.tif')
        t_unit = 'year'
        """
        t1_fn = r1_fn_base+'_ts.tif'
        t2_fn = r2_fn_base+'_ts.tif'
        t_unit = 'day'
        if not os.path.exists(t1_fn) and not os.path.exists(t2_fn):
            #Try to find processed output from r_mosaic index
            #These are decimal years
            t1_fn = r1_fn_base+'index_ts.tif'
            t2_fn = r2_fn_base+'index_ts.tif'
            t_unit = 'year'
        print(t1_fn, t2_fn)
        if os.path.exists(t1_fn) and os.path.exists(t2_fn):
            fn_list.extend([t1_fn, t2_fn])
        else:
            #Attempt to extract timestamps from input filenames
            t1 = timelib.fn_getdatetime(r1_fn)
            t2 = timelib.fn_getdatetime(r2_fn)
            if t1 is not None and t2 is not None and t1 != t2:  
                dt = t2 - t1
                year = timedelta(days=365.25)
                t_factor = abs(dt.total_seconds()/year.total_seconds()) 
                print("Time differences is %s, dh/%0.3f" % (dt, t_factor))
            else:
                print("Unable to extract timestamps for input images")
                args.rate = False


    print("Warping rasters to same res/extent/proj")
    #This will check input param for validity, could do beforehand
    ds_list = warplib.memwarp_multi_fn(fn_list, extent=args.te, res=args.tr, t_srs=args.t_srs, r='cubic')
    r1_ds = ds_list[0]
    r2_ds = ds_list[1]

    print("Loading input rasters into masked arrays")
    r1 = iolib.ds_getma(r1_ds, 1)
    r2 = iolib.ds_getma(r2_ds, 1)

    #Compute relative difference 
    print("Computing raster difference")
    diff = r2 - r1

    #Check to make sure inputs actually intersect
    if diff.count() == 0:
        sys.exit("No valid overlap between input rasters")

    if len(fn_list) == 4:
        t1_ds = ds_list[2]
        t2_ds = ds_list[3]
        print("Loading timestamps into masked arrays")
        t1 = iolib.ds_getma(t1_ds)
        t2 = iolib.ds_getma(t2_ds)
        #Compute dt in years 
        t_factor = t2 - t1
        if t_unit == 'day':
            t_factor /= 365.25

    if True:
        print("Raster difference stats:")
        diff_stats = malib.print_stats(diff)
        diff_med = diff_stats[5]

    if True:
        print("Writing raster difference map")
        dst_fn = os.path.join(outdir, outprefix+'_diff.tif')
        print(dst_fn)
        iolib.writeGTiff(diff, dst_fn, r1_ds, ndv=diffndv)
        if args.rate:
            print("Writing rate map")
            dst_fn = os.path.join(outdir, outprefix+'_diff_rate.tif')
            print(dst_fn)
            iolib.writeGTiff(diff/t_factor, dst_fn, r1_ds, ndv=diffndv)
            if len(fn_list) == 4:
                print("Writing time difference map")
                dst_fn = os.path.join(outdir, outprefix+'_diff_dt.tif')
                print(dst_fn)
                iolib.writeGTiff(t_factor, dst_fn, r1_ds, ndv=diffndv)

    if False:
        print("Writing relative raster difference map")
        diff_rel = diff - diff_med
        dst_fn = os.path.join(outdir, outprefix+'_diff_rel.tif')
        print(dst_fn)
        iolib.writeGTiff(diff_rel, dst_fn, r1_ds, ndv=diffndv)

    if False:
        print("Writing out raster2 with median difference removed")
        dst_fn = os.path.splitext(r2_fn)[0]+'_med%0.2f.tif' % diff_med
        print(dst_fn)
        iolib.writeGTiff(r2 - diff_med, dst_fn, r1_ds, ndv=diffndv)

    if False:
        print("Writing raster difference percentage map (relative to raster1)")
        diff_perc = 100.0*diff/r1
        dst_fn = os.path.join(outdir, outprefix+'_diff_perc.tif')
        print(dst_fn)
        iolib.writeGTiff(diff_perc, dst_fn, r1_ds, ndv=diffndv)