Example #1
#Note: a pad default is added here ('glas' = ICESat footprint pad in geolib.sample),
#since the sample() calls below require it
def get_raster_idx(x_vect, y_vect, pt_srs, ras_ds, max_slope=20, pad='glas'):
    """Get raster index corresponding to the set of X,Y locations
    """
    #Convert input xy coordinates to raster coordinates
    mX_fltr, mY_fltr, mZ = geolib.cT_helper(x_vect, y_vect, 0, pt_srs, geolib.get_ds_srs(ras_ds))
    pX_fltr, pY_fltr = geolib.mapToPixel(mX_fltr, mY_fltr, ras_ds.GetGeoTransform())
    pX_fltr = np.atleast_1d(pX_fltr)
    pY_fltr = np.atleast_1d(pY_fltr)

    #Sample raster
    #This returns median and mad for ICESat footprint
    samp = geolib.sample(ras_ds, mX_fltr, mY_fltr, pad=pad)
    samp_idx = ~(np.ma.getmaskarray(samp[:,0]))
    npts = samp_idx.nonzero()[0].size

    #Optional slope filter (disabled here)
    if False:
        print("Applying slope filter, masking points with slope > %0.1f" % max_slope)
        slope_ds = geolib.gdaldem_mem_ds(ras_ds, processing='slope', returnma=False)
        slope_samp = geolib.sample(slope_ds, mX_fltr, mY_fltr, pad=pad)
        slope_samp_idx = (slope_samp[:,0] <= max_slope).data
        samp_idx = np.logical_and(slope_samp_idx, samp_idx)

    return samp, samp_idx, npts, pX_fltr, pY_fltr
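A minimal usage sketch (hypothetical raster filename and coordinates; assumes pygeotools is installed and the pad default noted above):

import numpy as np
from pygeotools.lib import iolib, geolib

ras_ds = iolib.fn_getds('dem.tif')  #hypothetical raster
x = np.array([-121.70, -121.65])    #lon values
y = np.array([46.85, 46.90])        #lat values
samp, samp_idx, npts, pX, pY = get_raster_idx(x, y, geolib.wgs_srs, ras_ds)
print("Valid samples: %i of %i" % (npts, x.size))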
Example #2
#! /usr/bin/env python

#Extract sample values at training point locations

import numpy as np
from pygeotools.lib import iolib, geolib

pt_fn = 'trainingpts_utm_xycode.csv'
r_fn = 'NOAA2017_DEM-0000014848-0000000000.tif'
r_ds = iolib.fn_getds(r_fn)
#xy_srs = geolib.wgs_srs
xcol = 0
ycol = 1

print("Loading points: %s" % pt_fn)
#This needs to return a header
pts = iolib.readcsv(pt_fn)

out_samp = []
#Sample bands 1-4 at each point
for b in range(1, 5):
    samp = geolib.sample(r_ds,
                         pts[:, xcol],
                         pts[:, ycol],
                         bn=b,
                         pad=1,
                         count=True)
    out_samp.append(samp)

#Not finished...
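#One possible completion (a sketch, not the author's code): stack the per-band
#sample arrays next to the input coordinates and write a CSV
out = np.ma.hstack([pts[:, [xcol, ycol]]] + out_samp)
out_fn = 'trainingpts_samples.csv'  #hypothetical output name
np.savetxt(out_fn, out.filled(-9999), fmt='%0.6f', delimiter=',')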
Example #3
#Module-level imports needed by this excerpt (assumed; see the getparser() sketch
#after this example)
import os
import sys
import h5py
import numpy as np
from osgeo import gdal
from pygeotools.lib import iolib, geolib, timelib
from demcoreg import dem_mask

def main():
    parser = getparser()
    args = parser.parse_args()

    fn = args.fn
    sitename = args.sitename
    #User-specified output extent
    #Note: not checked, untested
    if args.extent is not None:
        if args.extent == 'read':
            import get_raster_extent
            extent = get_raster_extent.get_lat_lon_extent(args.refdem_fn)
        else:
            extent = (args.extent).split()
            extent = [float(item) for item in extent]
    else:
        extent = (geolib.site_dict[sitename]).extent
    if args.refdem_fn is not None:
        refdem_fn = args.refdem_fn
    else:
        refdem_fn = (geolib.site_dict[sitename]).refdem_fn
    
    #Max elevation difference between shot and sampled DEM
    max_z_DEM_diff = 200
    #Max elevation std for sampled DEM values in padded window around shot
    max_DEMhiresArElv_std = 50.0

    f = h5py.File(fn, 'r')
    t = f.get('Data_40HZ/Time/d_UTCTime_40')[:]

    #pyt0 = datetime(1, 1, 1, 0, 0)
    #utct0 = datetime(1970, 1, 1, 0, 0)
    #t0 = datetime(2000, 1, 1, 12, 0, 0)
    #offset_s = (t0 - utct0).total_seconds()
    offset_s = 946728000.0
    t += offset_s
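    #Sketch: the hard-coded offset matches the commented derivation above
    #(30 yr + 7 leap days + 12 h between the Unix and J2000 epochs):
    #from datetime import datetime
    #assert (datetime(2000, 1, 1, 12) - datetime(1970, 1, 1)).total_seconds() == offset_s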
    dt = timelib.np_utc2dt(t)
    dt_o = timelib.dt2o(dt)
    #dts = timelib.np_print_dt(dt)
    #dt_decyear = timelib.np_dt2decyear(dt)
    dt_int = np.array([ts.strftime('%Y%m%d') for ts in dt], dtype=np.int64)

    lat = np.ma.masked_equal(f.get('Data_40HZ/Geolocation/d_lat')[:], 1.7976931348623157e+308)
    lon = np.ma.masked_equal(f.get('Data_40HZ/Geolocation/d_lon')[:], 1.7976931348623157e+308)
    #lon360to180 not working for Beiluhe, Tibetan Plateau, so disabled:
    # lon = geolib.lon360to180(lon)
    z = np.ma.masked_equal(f.get('Data_40HZ/Elevation_Surfaces/d_elev')[:], 1.7976931348623157e+308)

    print('Input: %i' % z.count())
    print('max lat %f, min lat %f' % (lat.max(), lat.min()))
    print('max lon %f, min lon %f' % (lon.max(), lon.min()))

    #Now spatial filter - should do this up front
    x = lon
    y = lat

    xmin, xmax, ymin, ymax = extent
    #This is True if point is within extent
    valid_idx = ((x >= xmin) & (x <= xmax) & (y >= ymin) & (y <= ymax))

    #Prepare output array
    #out = np.ma.vstack([dt_decyear, dt_o, dt_int, lat, lon, z]).T
    out = np.ma.vstack([dt_o, dt_int, lat, lon, z]).T
    #Create a mask to ensure all values are valid for each point
    mask = ~(np.any(np.ma.getmaskarray(out), axis=1))
    mask *= valid_idx
    out = out[mask]
    valid_idx = ~(np.any(np.ma.getmaskarray(out), axis=1))

    #Column indices for lon, lat, z
    xcol = 3
    ycol = 2
    zcol = 4

    if out.shape[0] == 0:
        sys.exit("No points within specified extent\n")
    else:
        print("Spatial filter: %i" % out.shape[0])

    #out_fmt = ['%0.8f', '%0.8f', '%i', '%0.6f', '%0.6f', '%0.2f'] 
    #out_hdr = ['dt_decyear, dt_ordinal', 'dt_YYYYMMDD', 'lat', 'lon', 'z_WGS84']
    out_fmt = ['%0.8f', '%i', '%0.6f', '%0.6f', '%0.2f'] 
    out_hdr = ['dt_ordinal', 'dt_YYYYMMDD', 'lat', 'lon', 'z_WGS84']

    """
    ICESat-1 filters
    """
    #Saturation Correction Flag
    #Values are 0 to 4: not_saturated, inconsequential, applicable, not_computed, not_applicable
    sat_corr_flg = f.get('Data_40HZ/Quality/sat_corr_flg')[mask]
    #valid_idx *= (sat_corr_flg < 2)

    #Correction to elevation for saturated waveforms
    #Notes suggest this might not be desirable over land
    satElevCorr = np.ma.masked_equal(f.get('Data_40HZ/Elevation_Corrections/d_satElevCorr')[mask], 1.7976931348623157e+308)
    #z[sat_corr_flg < 3] += satElevCorr.filled(0.0)[sat_corr_flg < 3]
    out[:,zcol] += satElevCorr.filled(0.0)

    #Correction to elevation based on post flight analysis for biases determined for each campaign
    ElevBiasCorr = np.ma.masked_equal(f.get('Data_40HZ/Elevation_Corrections/d_ElevBiasCorr')[mask], 1.7976931348623157e+308)
    out[:,zcol] += ElevBiasCorr.filled(0.0)

    #Surface elevation (T/P ellipsoid) minus surface elevation (WGS84 ellipsoid).
    #Approximately 0.7 m, so WGS is lower; need to subtract from d_elev
    deltaEllip = np.ma.masked_equal(f.get('Data_40HZ/Geophysical/d_deltaEllip')[mask], 1.7976931348623157e+308)
    out[:,zcol] -= deltaEllip

    #Drop points where the corrected elevation is masked
    valid_idx *= ~(np.ma.getmaskarray(out[:,zcol]))
    print("z corrections: %i" % valid_idx.nonzero()[0].size)

    if False:
        #Reflectivity, not corrected for atmospheric effects
        reflctUC = np.ma.masked_equal(f.get('Data_40HZ/Reflectivity/d_reflctUC')[mask], 1.7976931348623157e+308)
        #This was minimum used for ice sheets
        min_reflctUC = 0.025
        valid_idx *= (reflctUC > min_reflctUC).data
        print("reflctUC: %i" % valid_idx.nonzero()[0].size)

    if False:
        #Standard deviation of the difference between the functional fit and the received echo,
        #using alternate parameters. Taken directly from GLA05 parameter d_wfFitSDev_1
        LandVar = np.ma.masked_equal(f.get('Data_40HZ/Elevation_Surfaces/d_LandVar')[mask], 1.7976931348623157e+308)
        #This was max used for ice sheets
        max_LandVar = 0.04
        valid_idx *= (LandVar < max_LandVar).data
        print("LandVar: %i" % valid_idx.nonzero()[0].size)

    if True:
        #Flag indicating whether the elevations on this record should be used.
        #0 = valid, 1 = not valid
        elev_use_flg = f.get('Data_40HZ/Quality/elev_use_flg')[mask].astype(bool)
        valid_idx *= ~elev_use_flg
        print("elev_use_flg: %i" % valid_idx.nonzero()[0].size)

    if False:
        #Cloud contamination; Indicates if Gain > flag value, indicating probable cloud contamination.
        elv_cloud_flg = f.get('Data_40HZ/Elevation_Flags/elv_cloud_flg')[mask].astype(bool)
        valid_idx *= ~elv_cloud_flg
        print("elv_cloud_flg: %i" % valid_idx.nonzero()[0].size)

    if False: 
        #Full resolution 1064 Quality Flag; 0 - 12 indicate Cloud detected
        FRir_qa_flg = f.get('Data_40HZ/Atmosphere/FRir_qa_flg')[mask]
        #FRir_qa_flg is a plain ndarray here, so no .data needed
        valid_idx *= (FRir_qa_flg == 15)
        print("FRir_qa_flg: %i" % valid_idx.nonzero()[0].size)

    if False:
        #This is elevation extracted from SRTM30
        DEM_elv = np.ma.masked_equal(f.get('Data_40HZ/Geophysical/d_DEM_elv')[mask], 1.7976931348623157e+308)
        z_DEM_diff = np.abs(out[:,zcol] - DEM_elv)
        valid_idx *= (z_DEM_diff < max_z_DEM_diff).data
        print("z_DEM_diff: %i" % valid_idx.nonzero()[0].size)

        #d_DEMhiresArElv is a 9-element array of high-resolution DEM values
        #The array index is the position of the DEM value relative to the spot; index 5 is the footprint center
        DEMhiresArElv = np.ma.masked_equal(f.get('Data_40HZ/Geophysical/d_DEMhiresArElv')[mask], 1.7976931348623157e+308)
        DEMhiresArElv_std = np.ma.std(DEMhiresArElv, axis=1)
        valid_idx *= (DEMhiresArElv_std < max_DEMhiresArElv_std).data
        print("max_DEMhiresArElv_std: %i" % valid_idx.nonzero()[0].size)
        #Compute slope

    #Apply cumulative filter to output
    out = out[valid_idx]

    out_fn = os.path.splitext(fn)[0]+'_%s.csv' % sitename
    print("Writing out %i records to: %s\n" % (out.shape[0], out_fn))
    out_fmt_str = ', '.join(out_fmt)
    out_hdr_str = ', '.join(out_hdr)
    np.savetxt(out_fn, out, fmt=out_fmt_str, delimiter=',', header=out_hdr_str)
    iolib.writevrt(out_fn, x='lon', y='lat')

    #Extract our own DEM values - should be better than default GLAS reference DEM stats
    if True:
        print("Loading reference DEM: %s" % refdem_fn)
        dem_ds = gdal.Open(refdem_fn)
        print("Converting coords for DEM")
        dem_mX, dem_mY = geolib.ds_cT(dem_ds, out[:,xcol], out[:,ycol], geolib.wgs_srs)
        print("Sampling")
        dem_samp = geolib.sample(dem_ds, dem_mX, dem_mY, pad='glas')
        abs_dem_z_diff = np.abs(out[:,zcol] - dem_samp[:,0])

        #Reset valid_idx here: out was compressed above, so the old index no longer matches
        valid_idx = ~(np.ma.getmaskarray(abs_dem_z_diff))
        print("Valid DEM extract: %i" % valid_idx.nonzero()[0].size)
        valid_idx *= (abs_dem_z_diff < max_z_DEM_diff).data
        print("Valid abs DEM diff: %i" % valid_idx.nonzero()[0].size)
        valid_idx *= (dem_samp[:,1] < max_DEMhiresArElv_std).data
        print("Valid DEM mad: %i" % valid_idx.nonzero()[0].size)

        if valid_idx.nonzero()[0].size == 0:
            sys.exit("No valid points remain")

        out = np.ma.hstack([out, dem_samp])
        out_fmt.extend(['%0.2f', '%0.2f'])
        out_hdr.extend(['z_refdem_med_WGS84', 'z_refdem_nmad'])

        #Apply cumulative filter to output
        out = out[valid_idx]

        out_fn = os.path.splitext(out_fn)[0]+'_refdemfilt.csv'
        print("Writing out %i records to: %s\n" % (out.shape[0], out_fn))
        out_fmt_str = ', '.join(out_fmt)
        out_hdr_str = ', '.join(out_hdr)
        np.savetxt(out_fn, out, fmt=out_fmt_str, delimiter=',', header=out_hdr_str)
        iolib.writevrt(out_fn, x='lon', y='lat')

    #This will sample land-use/land-cover or percent bareground products
    #Can be used to isolate points over exposed rock
    #if args.rockfilter: 
    if False:
        #This should automatically identify appropriate LULC source based on refdem extent
        lulc_source = dem_mask.get_lulc_source(dem_ds)
        #Looks like NED extends beyond NLCD; force use of NLCD for conus
        #if sitename == 'conus':
        #    lulc_source = 'nlcd'
        lulc_ds = dem_mask.get_lulc_ds_full(dem_ds, lulc_source)
        print("Converting coords for LULC")
        lulc_mX, lulc_mY = geolib.ds_cT(lulc_ds, out[:,xcol], out[:,ycol], geolib.wgs_srs)
        print("Sampling LULC: %s" % lulc_source)
        #Note: want to make sure we're not interpolating integer values for NLCD
        #Should be safe with pad=0; even with pad>0, sample takes the median, not the mean
        lulc_samp = geolib.sample(lulc_ds, lulc_mX, lulc_mY, pad=0)
        l = lulc_samp[:,0].data
        if lulc_source == 'nlcd':
            #This passes rock and ice pixels
            valid_idx = np.logical_or((l==31),(l==12))
        elif lulc_source == 'bareground':
            #This preserves pixels with bareground percentage >= 85%
            minperc = 85
            valid_idx = (l >= minperc)
        else:
            print("Unknown LULC source")
        print("LULC: %i" % valid_idx.nonzero()[0].size)
        if l.ndim == 1:
            l = l[:,np.newaxis]
        out = np.ma.hstack([out, l])
        out_fmt.append('%i')
        out_hdr.append('lulc')

        #Apply cumulative filter to output
        out = out[valid_idx]

        out_fn = os.path.splitext(out_fn)[0]+'_lulcfilt.csv'
        print("Writing out %i records to: %s\n" % (out.shape[0], out_fn))
        out_fmt_str = ', '.join(out_fmt)
        out_hdr_str = ', '.join(out_hdr)
        np.savetxt(out_fn, out, fmt=out_fmt_str, delimiter=',', header=out_hdr_str)
        iolib.writevrt(out_fn, x='lon', y='lat')
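Example #3's getparser() is not shown; here is a minimal sketch consistent with the args.* attributes used in main() (argument names and defaults are assumptions):

import argparse

def getparser():
    parser = argparse.ArgumentParser(description='Filter GLAS GLAH14 shots and sample a reference DEM')
    parser.add_argument('fn', help='GLAH14 HDF5 filename')
    parser.add_argument('-sitename', default=None, help='site key in geolib.site_dict')
    parser.add_argument('-extent', default=None, help="'xmin xmax ymin ymax', or 'read' to derive from refdem_fn")
    parser.add_argument('-refdem_fn', default=None, help='reference DEM filename')
    return parser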
Example #4
        print("Loading Masked DEM: %s" % dem_mask_fn)
        dem_mask_ds = gdal.Open(dem_mask_fn) 
        dem_mask = iolib.ds_getma(dem_mask_ds) 
    else:
        dem_mask_ds = dem_ds
        dem_mask = dem_ma

    #Convert input xy coordinates to raster coordinates
    mX_fltr, mY_fltr, mZ = geolib.cT_helper(x_fltr, y_fltr, 0, pt_srs, geolib.get_ds_srs(dem_mask_ds))
    pX_fltr, pY_fltr = geolib.mapToPixel(mX_fltr, mY_fltr, dem_mask_ds.GetGeoTransform())
    pX_fltr = np.atleast_1d(pX_fltr)
    pY_fltr = np.atleast_1d(pY_fltr)

    #Sample raster
    #This returns median and mad for ICESat footprint
    samp = geolib.sample(dem_mask_ds, mX_fltr, mY_fltr, pad=pad)
    samp_idx = ~(np.ma.getmaskarray(samp[:,0]))
    npts = samp_idx.nonzero()[0].size
    if npts < min_pts:
        print("Not enough points after sampling valud pixels, post bareground mask (%i < %i)" % (npts, min_pts))
        continue
       
    if True:
        print("Applying slope filter, masking points with slope > %0.1f" % max_slope)
        slope_ds = geolib.gdaldem_mem_ds(dem_mask_ds, processing='slope', returnma=False)
        slope_samp = geolib.sample(slope_ds, mX_fltr, mY_fltr, pad=pad)
        slope_samp_idx = (slope_samp[:,0] <= max_slope).data
        samp_idx = np.logical_and(slope_samp_idx, samp_idx)

    npts = samp_idx.nonzero()[0].size
    if npts < min_pts:
        #Assumed completion, mirroring the earlier min_pts check
        print("Not enough points after slope filter (%i < %i)" % (npts, min_pts))
        continue
Example #5
#Required imports for this excerpt (pts, xcol, ycol, zcol, xy_srs, r_fn and pt_fn
#are defined earlier in the original script; see Example #2)
import sys
import numpy as np
from osgeo import gdal
from pygeotools.lib import geolib, malib
#Assume that all of our points are preselected to fall within the raster extent
#Could add an option for a spatial filter; see filter_glas.py

r_ds = gdal.Open(r_fn)

print("\nInput raster: %s" % r_fn)
print("Input points: %s" % pt_fn)
print("\nSampling %i points\n" % pts.shape[0])

#This returns median and mad arrays for all values within pad
#count=True also returns the number of pixels in each sample
#Use 'glas' here to sample the ~70 m GLAS spot
samp = geolib.sample(r_ds,
                     pts[:, xcol],
                     pts[:, ycol],
                     xy_srs=xy_srs,
                     pad='glas',
                     count=True)
samp_idx = ~(np.ma.getmaskarray(samp[:, 0]))
nsamp = samp_idx.nonzero()[0].size

if nsamp == 0:
    sys.exit("No valid samples")
else:
    pts_mask = np.ma.hstack([
        pts[samp_idx], samp[samp_idx],
        (pts[samp_idx, zcol] - samp[samp_idx, 0])[:, np.newaxis]
    ])

    print("Sample difference (raster - point) statistics:")
    malib.print_stats(pts_mask[:, -1])
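    #A possible follow-up (sketch): write the joined point/sample table out,
    #as in Example #3 (hypothetical output name)
    out_fn = 'pts_with_samples.csv'
    np.savetxt(out_fn, pts_mask.filled(-9999), delimiter=',')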
Example #6
#Required imports (assumed); dem_fn, hs_fn, and the site array b (x, y in columns
#1-2, probe depth in 3, density in 4) are defined earlier in the original script
import numpy as np
from pygeotools.lib import iolib, geolib, warplib

snowdepth_fn = '/Users/dshean/Documents/UW/SnowEx/preliminary_snowdepth_20170606/snowdepth_tif/snowdepth_20170201-20170317_mos-tile-0_filt5px.tif'

#Load and clip to common extent
dem_ds, hs_ds, snowdepth_ds = warplib.memwarp_multi_fn(
    [dem_fn, hs_fn, snowdepth_fn], extent='union')
dem = iolib.ds_getma(dem_ds)
hs = iolib.ds_getma(hs_ds)
snowdepth = iolib.ds_getma(snowdepth_ds)

#Pixel coordinates of sample sites
x, y = geolib.mapToPixel(b[:, 1], b[:, 2], dem_ds.GetGeoTransform())
depth = b[:, 3]
rho = b[:, 4]

#Sample the snow depth raster at the measurement sites
samp = geolib.sample(snowdepth_ds, b[:, 1], b[:, 2], pad=5)

#Throw out samples with significant roughness (high mad/median) over the sampled area
samp_perc_thresh = 0.3
samp_mask = (samp[:, 1] / samp[:, 0]) > samp_perc_thresh
depth_diff = depth - samp[:, 0]
depth_diff[samp_mask] = np.ma.masked
#Sort by decreasing absolute depth difference
idx = np.ma.argsort(np.abs(depth_diff))[::-1]
x = x[idx]
y = y[idx]
depth = depth[idx]
rho = rho[idx]
samp = samp[idx]
depth_diff = depth_diff[idx]

#Scatter point size
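#A plotting sketch (matplotlib assumed; the truncated "Scatter point size" comment
#suggests marker size was set next, so s here is a placeholder):
import matplotlib.pyplot as plt
f, ax = plt.subplots()
ax.imshow(hs, cmap='gray')
sc = ax.scatter(x, y, c=depth_diff, s=16, cmap='RdBu', vmin=-1, vmax=1)
f.colorbar(sc, label='Measured depth - sampled depth (m)')
plt.show()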