Code Example #1
File: ndinterp.py Project: whigg/iceflow
from datetime import timedelta

import numpy as np
from pygeotools.lib import timelib

def pad_stack(s, dt_offset=timedelta(365.25)):
    #Pad the stack in time: duplicate the first/last slices and offset their dates by dt_offset
    o = s.ma_stack.shape
    new_ma_stack = np.ma.vstack((s.ma_stack[0:1], s.ma_stack, s.ma_stack[-1:]))
    new_date_list = np.ma.hstack((s.date_list[0:1] - dt_offset, s.date_list,
                                  s.date_list[-1:] + dt_offset))
    new_date_list_o = timelib.dt2o(new_date_list)
    return new_ma_stack, new_date_list_o
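A minimal usage sketch for pad_stack, assuming a pygeotools DEMStack loaded from an existing stack file (the filename below is hypothetical):

from pygeotools.lib import malib

stack = malib.DEMStack(stack_fn='velocity_stack.npz', save=False)  #hypothetical file
padded_ma, padded_dates_o = pad_stack(stack)
#One duplicated slice is added at each end, offset by ~1 year,
#so time interpolation near the first/last epochs stays bounded
print(padded_ma.shape[0], stack.ma_stack.shape[0] + 2)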
Code Example #2
#Imports assumed by this excerpt; getparser() and dem_mask are defined or
#imported elsewhere in the original script
import os
import sys

import h5py
import numpy as np
from osgeo import gdal
from pygeotools.lib import timelib, geolib, iolib

def main():
    parser = getparser()
    args = parser.parse_args()

    fn = args.fn
    sitename = args.sitename
    #User-specified output extent
    #Note: not checked, untested
    if args.extent is not None:
        if args.extent == 'read':
            import get_raster_extent
            extent = get_raster_extent.get_lat_lon_extent(args.refdem_fn)
        else:
            extent = (args.extent).split()
            extent = [float(item) for item in extent]
    else:
        extent = (geolib.site_dict[sitename]).extent
    if args.refdem_fn is not None:
        refdem_fn = args.refdem_fn
    else:
        refdem_fn = (geolib.site_dict[sitename]).refdem_fn
    
    #Max elevation difference between shot and sampled DEM
    max_z_DEM_diff = 200
    #Max elevation std for sampled DEM values in padded window around shot
    max_DEMhiresArElv_std = 50.0

    f = h5py.File(fn, 'r')
    t = f.get('Data_40HZ/Time/d_UTCTime_40')[:]

    #pyt0 = datetime(1, 1, 1, 0, 0)
    #utct0 = datetime(1970, 1, 1, 0, 0)
    #t0 = datetime(2000, 1, 1, 12, 0, 0)
    #offset_s = (t0 - utct0).total_seconds()
    offset_s = 946728000.0
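    #Sanity check on the constant above: 1970-01-01 to 2000-01-01 spans 10957 days
    #(30*365 + 7 leap days), so 10957*86400 + 12*3600 = 946728000.0 s, i.e. the
    #GLAS epoch (2000-01-01 12:00 UTC) relative to the Unix epoch expected by timelib.np_utc2dt below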
    t += offset_s
    dt = timelib.np_utc2dt(t)
    dt_o = timelib.dt2o(dt)
    #dts = timelib.np_print_dt(dt)
    #dt_decyear = timelib.np_dt2decyear(dt)
    dt_int = np.array([ts.strftime('%Y%m%d') for ts in dt], dtype=np.int64)

    lat = np.ma.masked_equal(f.get('Data_40HZ/Geolocation/d_lat')[:], 1.7976931348623157e+308)
    lon = np.ma.masked_equal(f.get('Data_40HZ/Geolocation/d_lon')[:], 1.7976931348623157e+308)
    # not working for beiluhe, Tibetan Plateau
    # lon = geolib.lon360to180(lon)
    z = np.ma.masked_equal(f.get('Data_40HZ/Elevation_Surfaces/d_elev')[:], 1.7976931348623157e+308)

    print('Input: %i' % z.count())
    print('max lat %f, min lat %f' % (lat.max(), lat.min()))
    print('max lon %f, min lon %f' % (lon.max(), lon.min()))

    #Now spatial filter - should do this up front
    x = lon
    y = lat

    xmin, xmax, ymin, ymax = extent
    #This is True if point is within extent
    valid_idx = ((x >= xmin) & (x <= xmax) & (y >= ymin) & (y <= ymax))

    #Prepare output array
    #out = np.ma.vstack([dt_decyear, dt_o, dt_int, lat, lon, z]).T
    out = np.ma.vstack([dt_o, dt_int, lat, lon, z]).T
    #Create a mask to ensure all four values are valid for each point
    mask = ~(np.any(np.ma.getmaskarray(out), axis=1))
    mask *= valid_idx
    out = out[mask]
    valid_idx = ~(np.any(np.ma.getmaskarray(out), axis=1))

    #Column indices of lon, lat, and z in the output array
    xcol = 3
    ycol = 2
    zcol = 4

    if out.shape[0] == 0:
        sys.exit("No points within specified extent\n")
    else:
        print("Spatial filter: %i" % out.shape[0])

    #out_fmt = ['%0.8f', '%0.8f', '%i', '%0.6f', '%0.6f', '%0.2f'] 
    #out_hdr = ['dt_decyear, dt_ordinal', 'dt_YYYYMMDD', 'lat', 'lon', 'z_WGS84']
    out_fmt = ['%0.8f', '%i', '%0.6f', '%0.6f', '%0.2f'] 
    out_hdr = ['dt_ordinal', 'dt_YYYYMMDD', 'lat', 'lon', 'z_WGS84']

    """
    ICESat-1 filters
    """
    #Saturation Correction Flag
    #Values 0 to 5: not_saturated, inconsequential, applicable, not_computed, not_applicable
    sat_corr_flg = f.get('Data_40HZ/Quality/sat_corr_flg')[mask]
    #valid_idx *= (sat_corr_flg < 2)

    #Correction to elevation for saturated waveforms
    #Notes suggest this might not be desirable over land
    satElevCorr = np.ma.masked_equal(f.get('Data_40HZ/Elevation_Corrections/d_satElevCorr')[mask], 1.7976931348623157e+308)
    #z[sat_corr_flg < 3] += satElevCorr.filled(0.0)[sat_corr_flg < 3]
    out[:,zcol] += satElevCorr.filled(0.0)

    #Correction to elevation based on post flight analysis for biases determined for each campaign
    ElevBiasCorr = np.ma.masked_equal(f.get('Data_40HZ/Elevation_Corrections/d_ElevBiasCorr')[mask], 1.7976931348623157e+308)
    out[:,zcol] += ElevBiasCorr.filled(0.0)

    #Surface elevation (T/P ellipsoid) minus surface elevation (WGS84 ellipsoid).
    #Approximately 0.7 m, so WGS is lower; need to subtract from d_elev
    deltaEllip = np.ma.masked_equal(f.get('Data_40HZ/Geophysical/d_deltaEllip')[mask], 1.7976931348623157e+308)
    out[:,zcol] -= deltaEllip
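    #e.g. a hypothetical shot with d_elev = 5000.00 m (T/P) and deltaEllip = 0.70 m
    #ends up at 4999.30 m relative to the WGS84 ellipsoid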

    #valid_idx is True for valid shots; drop any z that became masked by the corrections above
    valid_idx *= ~(np.ma.getmaskarray(out[:,zcol]))
    print("z corrections: %i" % valid_idx.nonzero()[0].size)

    if False:
        #Reflectivity, not corrected for atmospheric effects
        reflctUC = np.ma.masked_equal(f.get('Data_40HZ/Reflectivity/d_reflctUC')[mask], 1.7976931348623157e+308)
        #This was minimum used for ice sheets
        min_reflctUC = 0.025
        valid_idx *= (reflctUC > min_reflctUC).data
        print("reflctUC: %i" % valid_idx.nonzero()[0].size)

    if False:
        #The standard deviation of the difference between the functional fit and the received echo
        #using alternate parameters. It is directly taken from GLA05 parameter d_wfFitSDev_1
        LandVar = np.ma.masked_equal(f.get('Data_40HZ/Elevation_Surfaces/d_LandVar')[mask], 1.7976931348623157e+308)
        #This was max used for ice sheets
        max_LandVar = 0.04
        valid_idx *= (LandVar < max_LandVar).data
        print("LandVar: %i" % valid_idx.nonzero()[0].size)

    if True:
        #Flag indicating whether the elevations on this record should be used.
        #0 = valid, 1 = not valid
        elev_use_flg = f.get('Data_40HZ/Quality/elev_use_flg')[mask].astype(bool)
        valid_idx *= ~elev_use_flg
        print("elev_use_flg: %i" % valid_idx.nonzero()[0].size)

    if False:
        #Cloud contamination; Indicates if Gain > flag value, indicating probable cloud contamination.
        elv_cloud_flg = f.get('Data_40HZ/Elevation_Flags/elv_cloud_flg')[mask].astype(bool)
        valid_idx *= ~elv_cloud_flg
        print("elv_cloud_flg: %i" % valid_idx.nonzero()[0].size)

    if False: 
        #Full resolution 1064 Quality Flag; 0 - 12 indicate Cloud detected
        FRir_qa_flg = f.get('Data_40HZ/Atmosphere/FRir_qa_flg')[mask]
        valid_idx *= (FRir_qa_flg == 15).data
        print("FRir_qa_flg: %i" % valid_idx.nonzero()[0].size)

    if False:
        #This is elevation extracted from SRTM30
        DEM_elv = np.ma.masked_equal(f.get('Data_40HZ/Geophysical/d_DEM_elv')[mask], 1.7976931348623157e+308)
        z_DEM_diff = np.abs(out[:,zcol] - DEM_elv)
        valid_idx *= (z_DEM_diff < max_z_DEM_diff).data
        print("z_DEM_diff: %i" % valid_idx.nonzero()[0].size)

        #d_DEMhiresArElv is a 9 element array of high resolution DEM values. The array index corresponds to the position of the DEM value relative to the spot. (5) is the footprint center.
        DEMhiresArElv = np.ma.masked_equal(f.get('Data_40HZ/Geophysical/d_DEMhiresArElv')[mask], 1.7976931348623157e+308)
        DEMhiresArElv_std = np.ma.std(DEMhiresArElv, axis=1)
        valid_idx *= (DEMhiresArElv_std < max_DEMhiresArElv_std).data
        print("max_DEMhiresArElv_std: %i" % valid_idx.nonzero()[0].size)
        #Compute slope

    #Apply cumulative filter to output
    out = out[valid_idx]

    out_fn = os.path.splitext(fn)[0]+'_%s.csv' % sitename
    print("Writing out %i records to: %s\n" % (out.shape[0], out_fn))
    out_fmt_str = ', '.join(out_fmt)
    out_hdr_str = ', '.join(out_hdr)
    np.savetxt(out_fn, out, fmt=out_fmt_str, delimiter=',', header=out_hdr_str)
    iolib.writevrt(out_fn, x='lon', y='lat')

    #Extract our own DEM values - should be better than default GLAS reference DEM stats
    if True:
        print("Loading reference DEM: %s" % refdem_fn)
        dem_ds = gdal.Open(refdem_fn)
        print("Converting coords for DEM")
        dem_mX, dem_mY = geolib.ds_cT(dem_ds, out[:,xcol], out[:,ycol], geolib.wgs_srs)
        print("Sampling")
        dem_samp = geolib.sample(dem_ds, dem_mX, dem_mY, pad='glas')
        abs_dem_z_diff = np.abs(out[:,zcol] - dem_samp[:,0])

        valid_idx *= ~(np.ma.getmaskarray(abs_dem_z_diff))
        print("Valid DEM extract: %i" % valid_idx.nonzero()[0].size)
        valid_idx *= (abs_dem_z_diff < max_z_DEM_diff).data
        print("Valid abs DEM diff: %i" % valid_idx.nonzero()[0].size)
        valid_idx *= (dem_samp[:,1] < max_DEMhiresArElv_std).data
        print("Valid DEM mad: %i" % valid_idx.nonzero()[0].size)

        if valid_idx.nonzero()[0].size == 0:
            sys.exit("No valid points remain")

        out = np.ma.hstack([out, dem_samp])
        out_fmt.extend(['%0.2f', '%0.2f'])
        out_hdr.extend(['z_refdem_med_WGS84', 'z_refdem_nmad'])

        #Apply cumulative filter to output
        out = out[valid_idx]

        out_fn = os.path.splitext(out_fn)[0]+'_refdemfilt.csv'
        print("Writing out %i records to: %s\n" % (out.shape[0], out_fn))
        out_fmt_str = ', '.join(out_fmt)
        out_hdr_str = ', '.join(out_hdr)
        np.savetxt(out_fn, out, fmt=out_fmt_str, delimiter=',', header=out_hdr_str)
        iolib.writevrt(out_fn, x='lon', y='lat')

    #This will sample land-use/land-cover or percent bareground products
    #Can be used to isolate points over exposed rock
    #if args.rockfilter: 
    if False:
        #This should automatically identify appropriate LULC source based on refdem extent
        lulc_source = dem_mask.get_lulc_source(dem_ds)
        #Looks like NED extends beyond NLCD; force use of NLCD for conus
        #if sitename == 'conus':
        #    lulc_source = 'nlcd'
        lulc_ds = dem_mask.get_lulc_ds_full(dem_ds, lulc_source)
        print("Converting coords for LULC")
        lulc_mX, lulc_mY = geolib.ds_cT(lulc_ds, out[:,xcol], out[:,ycol], geolib.wgs_srs)
        print("Sampling LULC: %s" % lulc_source)
        #Note: want to make sure we're not interpolating integer values for NLCD
        #Should be safe with pad=0, even with pad>0, should take median, not mean
        lulc_samp = geolib.sample(lulc_ds, lulc_mX, lulc_mY, pad=0)
        l = lulc_samp[:,0].data
        if lulc_source == 'nlcd':
            #This passes rock and ice pixels
            valid_idx = np.logical_or((l==31),(l==12))
        elif lulc_source == 'bareground':
            #This preserves pixels with bareground percentage >= 85%
            minperc = 85
            valid_idx = (l >= minperc)
        else:
            print("Unknown LULC source")
        print("LULC: %i" % valid_idx.nonzero()[0].size)
        if l.ndim == 1:
            l = l[:,np.newaxis]
        out = np.ma.hstack([out, l])
        out_fmt.append('%i')
        out_hdr.append('lulc')

        #Apply cumulative filter to output
        out = out[valid_idx]

        out_fn = os.path.splitext(out_fn)[0]+'_lulcfilt.csv'
        print("Writing out %i records to: %s\n" % (out.shape[0], out_fn))
        out_fmt_str = ', '.join(out_fmt)
        out_hdr_str = ', '.join(out_hdr)
        np.savetxt(out_fn, out, fmt=out_fmt_str, delimiter=',', header=out_hdr_str)
        iolib.writevrt(out_fn, x='lon', y='lat')
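The quality screening above repeatedly narrows a single boolean index with in-place multiplication, so each test can only remove shots. A self-contained sketch of that idiom with synthetic values (not GLAS fields):

import numpy as np

z = np.ma.masked_invalid([10.0, np.nan, 250.0, 30.0, 40.0])
valid_idx = ~np.ma.getmaskarray(z)      #start from the unmasked points
valid_idx *= (z < 200).filled(False)    #each filter can only shrink the valid set
valid_idx *= (z > 0).filled(False)
print(valid_idx.nonzero()[0].size)      #-> 3 points survive
z_filt = z[valid_idx]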
Code Example #3
import os
import sys
import numpy as np
from datetime import datetime
from pygeotools.lib import timelib, iolib

#SRTM, then systematic timestamps
dt_list = [datetime(2000,2,11), datetime(2000,5,31), datetime(2009,5,31), datetime(2018,5,31)]
stack_fn=sys.argv[1]

#Use tif on disk if available
trend_fn=os.path.splitext(stack_fn)[0]+'_trend.tif'
intercept_fn=os.path.splitext(stack_fn)[0]+'_intercept.tif'
#Otherwise, one would load the stack and compute trend/intercept (fallback not implemented here)

trend_ds = iolib.fn_getds(trend_fn)
#Trend is presumably stored in m/yr; divide by 365.25 to get m/day, matching the ordinal (day) timestamps below
trend = iolib.ds_getma(trend_ds)/365.25
intercept = iolib.fn_getma(intercept_fn)

#Can vectorize
#dt_list_o = timelib.dt2o(dt_list)
#z_list = trend*dt_list_o[:,None,None]+intercept

for dt in dt_list:
    dt_o = timelib.dt2o(dt)
    z = trend*dt_o+intercept
    out_fn=os.path.splitext(stack_fn)[0]+'_%s.tif' % dt.strftime('%Y%m%d')
    print("Writing out: %s" % out_fn)
    iolib.writeGTiff(z, out_fn, trend_ds)
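The commented-out vectorization above relies on broadcasting a 1D array of ordinal dates against the 2D trend/intercept grids. A small synthetic sketch of that broadcast (shapes only, not tied to the rasters loaded above):

import numpy as np

trend = np.ma.ones((4, 5)) * 0.01           #m/day, synthetic
intercept = np.ma.zeros((4, 5))             #m, synthetic
dt_list_o = np.array([730000., 730365.25])  #ordinal dates

#One (time, row, col) cube instead of a per-date loop
z_list = trend * dt_list_o[:, None, None] + intercept
print(z_list.shape)  #-> (2, 4, 5)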
Code Example #4
    print("Output pixel count: %s" % trend_filt.count())

    #Gaussian filter (smooth)
    #trend_filt = filtlib.gauss_fltr_astropy(trend_filt, size=size, origmask=True, fill_interior=True)
    trend_filt = filtlib.gauss_fltr_astropy(trend_filt, size=size)
    print("Output pixel count: %s" % trend_filt.count())

    trend_fn=out_fn+'_trend_%spx_filt.tif' % size
    print("Writing out: %s" % trend_fn)
    iolib.writeGTiff(trend_filt*365.25, trend_fn, trend_ds)

    #Update intercept using new filtered slope values
    #Need to update for different periods?
    #dt_pivot = timelib.mean_date(datetime(2000,5,31), datetime(2009,5,31))
    #dt_pivot = timelib.mean_date(datetime(2009,5,31), datetime(2018,5,31))
    dt_pivot = timelib.dt2o(datetime(2009, 5, 31))
    intercept_filt = dt_pivot * (trend - trend_filt) + intercept
    intercept_fn=out_fn+'_intercept_%spx_filt.tif' % size
    print("Writing out: %s" % intercept_fn)
    iolib.writeGTiff(intercept_filt*365.25, intercept_fn, trend_ds)

    trend = trend_filt
    intercept = intercept_filt

for dt in dt_list:
    dt_o = timelib.dt2o(dt)
    z = trend*dt_o+intercept
    #Remove any values outside global limits
    #Could also do a local range filter here
    z = filtlib.range_fltr(z, zlim)
    print("Output pixel count: %s" % z.count())
Code Example #5
from datetime import datetime

import numpy as np
import matplotlib.pyplot as plt

from pygeotools.lib import timelib, malib, iolib, warplib, geolib
from imview.lib import pltlib

#Output from snowex_pit_proc.py
csv_fn = 'snowex_pit_out.csv'

#a = np.genfromtxt(csv_fn, delimiter=',', names=True, dtype=None)
#b = a['datetime', 'x_utm13n', 'y_utm13n', 'depth_m', 'density_kgm3']

a = np.loadtxt(csv_fn, delimiter=',', skiprows=1, dtype=object)
b = a[:, [2, 3, 4, 5, 6]]
b[:, 0] = timelib.dt2o(
    [datetime.strptime(x, '%Y-%m-%d %H:%M:%S') for x in b[:, 0]])
b = np.ma.fix_invalid(b.astype(float))

#This should be moved to snowex_pit_proc.py
xlim = (100000, 300000)
b[:, 1][(b[:, 1] > xlim[1]) | (b[:, 1] < xlim[0])] = np.ma.masked
ylim = (4100000, 4500000)
b[:, 2][(b[:, 2] > ylim[1]) | (b[:, 2] < ylim[0])] = np.ma.masked

#Only pass pits with valid x and y coord
b = b[b[:, 1:3].count(axis=1) == 2]

#Stereo2SWE preliminary products
dem_fn = '/Users/dshean/Documents/UW/SnowEx/preliminary_mos_20170504/gm_8m-tile-0.tif'
hs_fn = '/Users/dshean/Documents/UW/SnowEx/preliminary_mos_20170504/gm_8m-tile-0_hs_az315.tif'
#snowdepth_fn = '/Users/dshean/Documents/UW/SnowEx/preliminary_snowdepth_20170606/snowdepth_20170201-20170317_mos-tile-0.tif'
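The coordinate screening above masks values outside a plausible UTM range and then keeps only rows where both x and y survive. A compact variant of that pattern using np.ma.masked_outside, with synthetic values:

import numpy as np

b = np.ma.array([[150000., 4200000.],
                 [999999999., 4200000.],   #x out of range
                 [250000., 4400000.]])
xlim, ylim = (100000, 300000), (4100000, 4500000)
b[:, 0] = np.ma.masked_outside(b[:, 0], *xlim)
b[:, 1] = np.ma.masked_outside(b[:, 1], *ylim)
#Keep rows where both coordinates remain unmasked
b = b[b.count(axis=1) == 2]
print(b.shape[0])  #-> 2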
Code Example #6
File: ndinterp.py Project: whigg/iceflow
#Imports assumed by this excerpt of ndinterp.py; the helper functions
#rangenorm(), apply_mask(), and dto_interp() are defined elsewhere in the same file
import os
import sys
import pickle
import multiprocessing as mp
from copy import deepcopy
from datetime import datetime

import numpy as np
import scipy.interpolate
from pygeotools.lib import malib, geolib, timelib

def main():

    if len(sys.argv) < 2:
        sys.exit("Usage: %s stack.npz [mask.tif]" %
                 os.path.basename(sys.argv[0]))

    #This will attempt to load cached files on disk
    load_existing = False
    #Limit spatial interpolation to input mask
    clip_to_mask = True

    #This expects a DEMStack object, see pygeotools/lib/malib.py or pygeotools/make_stack.py
    stack_fn = sys.argv[1]
    #Expects shp polygon as valid mask, in same projection as input raster
    mask_fn = sys.argv[2]

    stack = malib.DEMStack(stack_fn=stack_fn,
                           save=False,
                           trend=True,
                           med=True,
                           stats=True)
    #Get times of original obs
    t = stack.date_list_o.data
    #Round to integer days, but keep float dtype so the in-place ±0.1 day nudge below takes effect
    t = t.astype(int).astype(float)
    t[0] -= 0.1
    t[-1] += 0.1

    if clip_to_mask:
        m = geolib.shp2array(mask_fn, res=stack.res, extent=stack.extent)
        #Expand mask - hardcoded to 6 km
        import scipy.ndimage
        it = int(np.ceil(6000. / stack.res))
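        #e.g. a hypothetical 500 m grid gives it = ceil(6000/500) = 12 dilation iterations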
        m = ~(scipy.ndimage.binary_dilation(~m, iterations=it))
        apply_mask(stack.ma_stack, m)

    #This is used from here on out
    test = stack.ma_stack
    test_ptp = stack.dt_stack_ptp
    test_source = np.array(stack.source)
    res = stack.res
    gt = np.copy(stack.gt)

    #Probably don't need full-res stack
    if True:
        stride = 2
        test = test[:, ::stride, ::stride]
        test_ptp = test_ptp[::stride, ::stride]
        res *= stride
        print("Using a stride of %i (%0.1f m)" % (stride, res))
        gt[[1, 5]] *= stride

    print("Orig shape: ", test.shape)
    #Check to make sure all t have valid data
    tcount = test.reshape(test.shape[0],
                          test.shape[1] * test.shape[2]).count(axis=1)
    validt_idx = (tcount > 0).nonzero()[0]
    test = test[validt_idx]
    test_source = test_source[validt_idx]
    t = t[validt_idx]
    print("New shape: ", test.shape)

    y, x = (test.count(axis=0) > 1).nonzero()
    x = x.astype(int)
    y = y.astype(int)
    #vm_t = test.reshape(test.shape[0], test.shape[1]*test.shape[2])
    vm_t = test[:, y, x]
    vm_t_flat = vm_t.ravel()
    idx = ~np.ma.getmaskarray(vm_t_flat)
    #These are values
    VM = vm_t_flat[idx]

    #Determine scaling factors for x and y coords
    #Should be the same for both
    xy_scale = max(x.ptp(), y.ptp())
    xy_offset = min(x.min(), y.min())

    #This scales t to encourage interpolation along the time axis rather than spatial axis
    t_factor = 16.
    t_scale = t.ptp() * t_factor
    t_offset = t.min()

    xn = rangenorm(x, xy_offset, xy_scale)
    yn = rangenorm(y, xy_offset, xy_scale)
    tn = rangenorm(t, t_offset, t_scale)

    X = np.tile(xn, t.size)[idx]
    Y = np.tile(yn, t.size)[idx]
    T = np.repeat(tn, x.size)[idx]
    #These are coords
    pts = np.vstack((X, Y, T)).T
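    #Layout note (illustrative, not from the original): vm_t has shape (t.size, npts)
    #and ravel() walks it timestep-major, so np.tile repeats the spatial vector once
    #per timestep while np.repeat repeats each timestep once per point, e.g.
    #x = [x0, x1], t = [t0, t1, t2] -> X = [x0, x1, x0, x1, x0, x1]
    #                                  T = [t0, t0, t1, t1, t2, t2]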

    #Step size in days
    #ti_dt = 91.3125
    #ti_dt = 121.75
    ti_dt = 365.25

    #Set min and max times for interpolation
    #ti = np.arange(t.min(), t.max(), ti_dt)
    ti_min = timelib.dt2o(datetime(2008, 1, 1))
    ti_max = timelib.dt2o(datetime(2015, 1, 1))

    #Interpolate at these times
    ti = np.arange(ti_min, ti_max, ti_dt)
    #Annual
    #ti = timelib.dt2o([datetime(2008,1,1), datetime(2009,1,1), datetime(2010,1,1), datetime(2011,1,1), datetime(2012,1,1), datetime(2013,1,1), datetime(2014,1,1), datetime(2015,1,1)])

    tin = rangenorm(ti, t_offset, t_scale)
    """
    #Never got this working efficiently, but preserved for reference
    #Radial basis function interpolation
    #Need to normalize to input cube  
    print "Running Rbf interpolation for %i points" % X.size
    rbfi = scipy.interpolate.Rbf(Xn,Yn,Tn,VM, function='linear', smooth=0.1)
    #rbfi = scipy.interpolate.Rbf(Xn,Yn,Tn,VM, function='gaussian', smooth=0.000001)
    #rbfi = scipy.interpolate.Rbf(Xn,Yn,Tn,VM, function='inverse', smooth=0.00001)
    print "Sampling result at %i points" % xin.size
    vmi_rbf = rbfi(xin, yin, tin.repeat(x.size))
    vmi_rbf_ma[:,y,x] = np.ma.fix_invalid(vmi_rbf.reshape((ti.size, x.shape[0])))
    """

    #Attempt to load cached interpolation function
    int_fn = '%s_LinearNDint_%i_%i.pck' % (os.path.splitext(stack_fn)[0],
                                           test.shape[1], test.shape[2])
    print(int_fn)
    if load_existing and os.path.exists(int_fn):
        print("Loading pickled interpolation function: %s" % int_fn)
        f = open(int_fn, 'rb')
        linNDint = pickle.load(f)
    else:
        #NearestND interpolation (fast)
        #print "Running NearestND interpolation for %i points" % X.size
        #NearNDint = scipy.interpolate.NearestNDInterpolator(pts, VM, rescale=True)
        #LinearND interpolation
        print("Running LinearND interpolation for %i points" % X.size)
        #Note: this breaks qhull for lots of input points
        linNDint = scipy.interpolate.LinearNDInterpolator(pts,
                                                          VM,
                                                          rescale=False)
        print("Saving pickled interpolation function: %s" % int_fn)
        f = open(int_fn, 'wb')
        pickle.dump(linNDint, f, protocol=2)
        f.close()

    vmi_fn = '%s_%iday.npy' % (os.path.splitext(int_fn)[0], ti_dt)
    if load_existing and os.path.exists(vmi_fn):
        print('Loading existing interpolated stack: %s' % vmi_fn)
        vmi_ma = np.ma.fix_invalid(np.load(vmi_fn))
    else:
        #Once tesselation is complete, sample each timestep in parallel
        print("Sampling %i points at %i timesteps, %i total" %
              (x.size, ti.size, x.size * ti.size))
        #Prepare array to hold output
        vmi_ma = np.ma.masked_all((ti.size, test.shape[1], test.shape[2]))
        """
        #This does all points at once
        #vmi = linNDint(ptsi)
        #vmi_ma[:,y,x] = np.ma.fix_invalid(vmi.reshape((ti.size, x.shape[0])))
        #This does interpolation serially by timestep
        for n, i in enumerate(ti):
            print n, i, timelib.o2dt(i)
            vmi_ma[n,y,x] = linNDint(x, y, i.repeat(x.size)).T
        """
        #Parallel processing
        pool = mp.Pool(processes=None)
        results = [
            pool.apply_async(dto_interp, args=(linNDint, xn, yn, i))
            for i in tin
        ]
        results = [p.get() for p in results]
        results.sort()
        for n, r in enumerate(results):
            t_rescale = r[0] * t_scale + t_offset
            print(n, t_rescale, timelib.o2dt(t_rescale))
            vmi_ma[n, y, x] = r[1]

        vmi_ma = np.ma.fix_invalid(vmi_ma)
        print('Saving interpolated stack: %s' % vmi_fn)
        np.save(vmi_fn, vmi_ma.filled(np.nan))

    origt = False
    if origt:
        print("Sampling %i points at %i original timesteps" % (x.size, t.size))
        vmi_ma_origt = np.ma.masked_all((t.size, test.shape[1], test.shape[2]))
        #Parallel (sample at normalized coords, consistent with the main loop above)
        pool = mp.Pool(processes=None)
        results = [
            pool.apply_async(dto_interp, args=(linNDint, xn, yn, i)) for i in tn
        ]
        results = [p.get() for p in results]
        results.sort()
        for n, r in enumerate(results):
            t_rescale = r[0] * t_scale + t_offset
            print(n, t_rescale, timelib.o2dt(t_rescale))
            vmi_ma_origt[n, y, x] = r[1]
        vmi_ma_origt = np.ma.fix_invalid(vmi_ma_origt)
        #print 'Saving interpolated stack: %s' % vmi_fn
        #np.save(vmi_fn, vmi_ma.filled(np.nan))

    #Write out a proper stack, for use by stack_melt and flux gate mass budget
    if True:
        out_stack = deepcopy(stack)
        out_stack.stats = False
        out_stack.trend = False
        out_stack.datestack = False
        out_stack.write_stats = False
        out_stack.write_trend = False
        out_stack.write_datestack = False
        out_stack.ma_stack = vmi_ma
        out_stack.stack_fn = os.path.splitext(vmi_fn)[0] + '.npz'
        out_stack.date_list_o = np.ma.array(ti)
        out_stack.date_list = np.ma.array(timelib.o2dt(ti))
        out_fn_list = [
            timelib.print_dt(i) + '_LinearNDint.tif'
            for i in out_stack.date_list
        ]
        out_stack.fn_list = out_fn_list
        out_stack.error = np.zeros_like(out_stack.date_list_o)
        out_stack.source = np.repeat('LinearNDint', ti.size)
        out_stack.gt = gt
        out_stack.res = res
        out_stack.savestack()

    sys.exit()
    """
    #Other interpolation methods
    #vmi = scipy.interpolate.griddata(pts, VM, ptsi, method='linear', rescale=True)

    #Kriging
    #Should explore this more - likely the best option
    #http://connor-johnson.com/2014/03/20/simple-kriging-in-python/
    #http://resources.esri.com/help/9.3/arcgisengine/java/gp_toolref/geoprocessing_with_3d_analyst/using_kriging_in_3d_analyst.htm

    #PyKrige does moving window Kriging, but only in 2D
    #https://github.com/bsmurphy/PyKrige/pull/5

    #Could do tiled kriging with overlap in parallel
    #Split along x and y direction, preserve all t
    #Need to generate semivariogram globally though, then pass to each tile
    #See malib sliding_window
    wx = wy = 30
    wz = test.shape[0]
    overlap = 0.5
    dwx = dwy = int(overlap*wx)
    gp_slices = malib.nanfill(test, malib.sliding_window, ws=(wz,wy,wx), ss=(0,dwy,dwx))

    vmi_gp_ma = np.ma.masked_all((ti.size, test.shape[1], test.shape[2]))
    vmi_gp_mse_ma = np.ma.masked_all((ti.size, test.shape[1], test.shape[2]))

    out = []
    for i in gp_slices:
        y, x = (i.count(axis=0) > 0).nonzero()
        x = x.astype(int)
        y = y.astype(int)
        vm_t = test[:,y,x]
        vm_t_flat = vm_t.ravel()
        idx = ~np.ma.getmaskarray(vm_t_flat)
        #These are values
        VM = vm_t_flat[idx]

        #These are coords
        X = np.tile(x, t.size)[idx]
        Y = np.tile(y, t.size)[idx]
        T = np.repeat(t, x.size)[idx]
        pts = np.vstack((X,Y,T)).T

        xi = np.tile(x, ti.size)
        yi = np.tile(y, ti.size)
        ptsi = np.array((xi, yi, ti.repeat(x.size))).T

        #gp = GaussianProcess(regr='linear', verbose=True, normalize=True, theta0=0.1, nugget=2)
        gp = GaussianProcess(regr='linear', verbose=True, normalize=True, nugget=2)
        gp.fit(pts, VM)
        vmi_gp, vmi_gp_mse = gp.predict(ptsi, eval_MSE=True)
        vmi_gp_ma = np.ma.masked_all((ti.size, i.shape[1], i.shape[2]))
        vmi_gp_ma[:,y,x] = np.array(vmi_gp.reshape((ti.size, x.shape[0])))
        vmi_gp_mse_ma = np.ma.masked_all((ti.size, i.shape[1], i.shape[2]))
        vmi_gp_mse_ma[:,y,x] = np.array(vmi_gp_mse.reshape((ti.size, x.shape[0])))
        out.append(vmi_gp_ma)
    #Now combine intelligently

    print "Gaussian Process regression"
    pts2d_vm = vm_t[1]
    pts2d = np.vstack((x,y))[~(np.ma.getmaskarray(pts2d_vm))].T
    pts2di = np.vstack((x,y)).T
    gp = GaussianProcess(regr='linear', verbose=True, normalize=True, theta0=0.1, nugget=1)
    gp.fit(pts, VM)
    print "Gaussian Process prediction"
    vmi_gp, vmi_gp_mse = gp.predict(ptsi, eval_MSE=True)
    print "Converting to stack"
    vmi_gp_ma = np.ma.masked_all((ti.size, test.shape[1], test.shape[2]))
    vmi_gp_ma[:,y,x] = np.array(vmi_gp.reshape((ti.size, x.shape[0])))
    vmi_gp_mse_ma = np.ma.masked_all((ti.size, test.shape[1], test.shape[2]))
    vmi_gp_mse_ma[:,y,x] = np.array(vmi_gp_mse.reshape((ti.size, x.shape[0])))
    sigma = np.sqrt(vmi_gp_mse_ma)
    """
    """