Code example #1
def raster_shpclip(r_fn, shp_fn, extent='raster'):
    r_ds = iolib.fn_getds(r_fn)
    r_srs = geolib.get_ds_srs(r_ds)
    r_extent = geolib.ds_extent(r_ds)

    shp_ds = ogr.Open(shp_fn)
    lyr = shp_ds.GetLayer()
    shp_srs = lyr.GetSpatialRef()
    shp_extent = lyr.GetExtent()

    #Define the output srs - could be set to either the raster or shp srs
    #Could accept this as a command-line argument
    out_srs = r_srs

    if extent == 'raster':
        out_extent = r_extent 
    elif extent == 'shp':
        out_extent = shp_extent

    r_ds = warplib.memwarp(r_ds, extent=out_extent, t_srs=out_srs, r='cubic')
    r = iolib.ds_getma(r_ds)

    mask = geolib.shp2array(shp_fn, r_ds)

    r = np.ma.array(r, mask=mask)
    return r
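
A minimal usage sketch for the function above, assuming the standard pygeotools imports and placeholder filenames (dem.tif and glaciers.shp are hypothetical):

import numpy as np
from osgeo import ogr
from pygeotools.lib import iolib, geolib, warplib

#Clip a DEM to glacier polygons, keeping the raster extent
dem_clip = raster_shpclip('dem.tif', 'glaciers.shp', extent='raster')
print("%i unmasked pixels after clipping" % dem_clip.count())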
Code example #2
def main():
    parser = argparse.ArgumentParser(description="Utility to compute hypsometry for input DEM")
    parser.add_argument('-mask_fn', type=str, default=None, help='Glacier Polygon filename (mask.shp)')
    parser.add_argument('-bin_width', type=float, default=100.0, help='Elevation bin width (default: %(default)s)')
    parser.add_argument('dem_fn', type=str, help='Input DEM filename')
    args = parser.parse_args()

    #Input DEM
    dem_fn = args.dem_fn
    #Extract GDAL dataset from input dem_fn
    dem_ds = iolib.fn_getds(dem_fn)
    #Extract NumPy masked array from dem_ds
    print("Loading input DEM: %s" % args.dem_fn)
    dem = iolib.ds_getma(dem_ds)
    #Fill dem?
    #Extract DEM resolution (m)
    dem_res = geolib.get_res(dem_ds, square=True)[0]

    #Generate glacier mask from shp 
    if args.mask_fn is not None:
        print("Masking input DEM using: %s" % args.mask_fn)
        #This calls gdal_rasterize with parameters of dem_ds
        mask = geolib.shp2array(args.mask_fn, r_ds=dem_ds)
        #Apply mask to DEM
        dem = np.ma.array(dem, mask=mask)

    #Generate aed 
    print("Generating AED")
    bin_centers, bin_areas = aed(dem, dem_res, args.bin_width) 
    #Write out to csv
    csv_fn = os.path.splitext(dem_fn)[0]+'_aed.csv'
    write_aed(bin_centers, bin_areas, csv_fn)
    #Generate plot
    plot_dem_aed(dem, bin_centers, bin_areas)
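
The aed() and write_aed() helpers are not shown in this excerpt. A minimal sketch of an area-elevation distribution consistent with the call signature above (an assumption, not the project's implementation):

import numpy as np

def aed(dem, res, bin_width=100.0):
    """Sketch of an area-elevation distribution: bin DEM elevations
    and convert pixel counts to areas, given square pixel size res (m)"""
    bin_edges = np.arange(dem.min(), dem.max() + bin_width, bin_width)
    counts, bin_edges = np.histogram(dem.compressed(), bins=bin_edges)
    bin_centers = bin_edges[:-1] + bin_width / 2.0
    #Pixel count per bin times pixel area (m^2)
    bin_areas = counts * res**2
    return bin_centers, bin_areas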
Code example #3
File: dem_mask.py  Project: dshean/demcoreg
def get_icemask(ds, glac_shp_fn=None):
    """Generate glacier polygon raster mask for input Dataset res/extent
    """
    print("Masking glaciers")
    if glac_shp_fn is None:
        glac_shp_fn = get_glacier_poly()

    if not os.path.exists(glac_shp_fn):
        sys.exit("Unable to locate glacier shp: %s" % glac_shp_fn)
    else:
        print("Found glacier shp: %s" % glac_shp_fn)

    #All of the proj/extent handling should now occur in shp2array
    icemask = geolib.shp2array(glac_shp_fn, ds)
    return icemask
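
Hypothetical usage, following the mask convention implied by example #1 (shp2array returns True outside the polygons); dem_ds and dem as in example #2:

icemask = get_icemask(dem_ds)
#Keep only glacier pixels
dem_glac = np.ma.array(dem, mask=icemask)
#Or keep only non-glacier (control) surfaces
dem_ctrl = np.ma.array(dem, mask=~icemask)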
Code example #4
def get_icemask(ds, glac_shp_fn=None, erode=False):
    """Generate glacier polygon raster mask for input Dataset res/extent
    """
    print("Masking glaciers")
    if glac_shp_fn is None:
        glac_shp_fn = get_glacier_poly()

    if not os.path.exists(glac_shp_fn):
        sys.exit("Unable to locate glacier shp: %s" % glac_shp_fn)
    else:
        print("Found glacier shp: %s" % glac_shp_fn)

    #All of the proj/extent handling should now occur in shp2array
    icemask = geolib.shp2array(glac_shp_fn, ds)
    if erode:
        #Erode the mask edges by a fixed number of pixels
        n_iter = 33
        icemask = ndimage.morphology.binary_erosion(icemask, iterations=n_iter)
    return icemask
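
Each binary_erosion iteration shrinks the unmasked region by one pixel, so n_iter pixels corresponds to roughly n_iter times the grid resolution in meters. A sketch that derives the iteration count from a buffer distance (erode_mask and buffer_m are hypothetical names):

import numpy as np
from scipy import ndimage

def erode_mask(mask, res, buffer_m=1000.0):
    """Erode a boolean mask by approximately buffer_m meters,
    given square pixel size res (m)"""
    n_iter = int(np.ceil(buffer_m / res))
    return ndimage.binary_erosion(mask, iterations=n_iter)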
Code example #5
titles = ['2007 to 2009 (%0.1f yr)' % dt_list[0]]
plot_panels(1, dh_list, (-30, 30), titles, 'RdBu', 'Elevation Change (m)', fn='dem_dh.png')

#Calculate annual rate of change
dhdt_list = np.ma.array(dh_list)/np.array(dt_list)[:,np.newaxis,np.newaxis]
plot_panels(1,dhdt_list, (-3, 3), titles, 'RdBu', 'Elevation Change Rate (m/yr)', fn='dem_dhdt.png')

sns.distplot(dhdt_list.compressed())

# Remove glacier areas by getting a shapefile with glaciers, creating a mask from it, and inverting the mask

# +
#Let's mask out the glaciers using polygons from the Randolph Glacier Inventory (RGI)
shp_fn = '/Users/elischwat/data/rgi60/regions/02_rgi60_WesternCanadaUS.shp'
#Create binary mask from polygon shapefile to match our warped raster datasets
shp_mask = geolib.shp2array(shp_fn, ds_list[0])

#Invert the mask since we want to focus on non-glacier areas
shp_mask = ~shp_mask

#Now apply the mask to each array 
dhdt_list_shpclip = [np.ma.array(dhdt, mask=shp_mask) for dhdt in dhdt_list]
plot_panels(1, dhdt_list_shpclip, (-3, 3), titles, 'RdBu', 'Elevation Change Rate (m/yr)', fn='dem_dhdt_shpclip.png')
# -

# To integrate the rates over all pixels, which are 1 x 1 m,
# simply sum the data to estimate the net volumetric change rate.
#
# In m^3/yr:

dhdt_list_shpclip[0].compressed().sum()
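
# If the pixels are not 1 x 1 m, scale the sum by the pixel area instead.
# A sketch using geolib.get_res as in the earlier examples:

#Generalize the integration to arbitrary pixel size
px_res = geolib.get_res(ds_list[0], square=True)[0]
px_area = px_res**2  #m^2
#Net volumetric change rate in m^3/yr
dhdt_list_shpclip[0].sum() * px_area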
Code example #6
File: disp2v.py  Project: dshean/vmap
def main():
    parser = getparser()
    args = parser.parse_args()

    t_unit = args.dt
    plot = args.plot
    remove_offsets = args.remove_offsets
    mask_fn = args.mask_fn
    if mask_fn is not None:
        remove_offsets = True

    #Input is 3-band disparity map, extract bands directly
    src_fn = args.disp_fn
    if not iolib.fn_check(src_fn):
        sys.exit("Unable to locate input file: %s" % src_fn)

    src_ds = iolib.fn_getds(src_fn)
    if src_ds.RasterCount != 3:
        sys.exit("Input file must be ASP disparity map (3 bands: x, y, mask)")
    #Extract pixel resolution
    h_res, v_res = geolib.get_res(src_ds)

    #Horizontal scale factor
    #If running on disparity_view output (gdal_translate -outsize 5% 5% F.tif F_5.tif)
    #h_res /= 20
    #v_res /= 20

    #Load horizontal and vertical disparities
    h = iolib.ds_getma(src_ds, bnum=1)
    v = iolib.ds_getma(src_ds, bnum=2)

    #ASP output has northward motion as negative values in band 2
    v *= -1

    t1, t2 = timelib.fn_getdatetime_list(src_fn)
    dt = t2 - t1
    #Default t_factor is in 1/years
    t_factor = timelib.get_t_factor(t1, t2)

    #Input timestamp arrays if inputs are mosaics
    if False:
        t1_fn = ''
        t2_fn = ''
        if os.path.exists(t1_fn) and os.path.exists(t2_fn):
            t_factor = timelib.get_t_factor_fn(t1_fn, t2_fn)
        if t_factor is None:
            sys.exit("Unable to determine input timestamps")

    if t_unit == 'day':
        t_factor *= 365.25

    print("Input dates:")
    print(t1)
    print(t2)
    print(dt)
    print(t_factor, t_unit)

    #Scale values for polar stereographic distortion
    srs = geolib.get_ds_srs(src_ds)
    proj_scale_factor = 1.0
    #Want to scale to get correct distances for polar stereographic
    if srs.IsSame(geolib.nps_srs) or srs.IsSame(geolib.sps_srs):
        proj_scale_factor = geolib.scale_ps_ds(src_ds)

    #Convert disparity values in pixels to m/t_unit
    h_myr = h * h_res * proj_scale_factor / t_factor
    h = None
    v_myr = v * v_res * proj_scale_factor / t_factor
    v = None

    #Velocity Magnitude
    m = np.ma.sqrt(h_myr**2 + v_myr**2)
    print("Velocity Magnitude stats")
    malib.print_stats(m)

    #Remove x and y offsets over control surfaces
    offset_str = ''
    if remove_offsets:
        if mask_fn is None:
            from demcoreg.dem_mask import get_mask
            print("\nUsing demcoreg to prepare mask of stable control surfaces\n")
            #TODO: Accept mask_list as in demcoreg
            #mask_list = args.mask_list
            #For now keep it simple, limit to non-glacier surfaces
            mask_list = ['glaciers',]
            mask = get_mask(src_ds, mask_list=mask_list, dem_fn=src_fn)
        else:
            #Vector mask - untested
            if os.path.splitext(mask_fn)[1] == '.shp':
                mask = geolib.shp2array(mask_fn, src_ds)
            else:
                print("\nWarping input raster mask")
                #This can be from previous dem_mask.py run (e.g. *rockmask.tif)
                mask_ds = warplib.memwarp_multi_fn([mask_fn,], res=src_ds, extent=src_ds, t_srs=src_ds)[0]
                mask = iolib.ds_getma(mask_ds)
                #The default from ds_getma is a masked array, so need to isolate boolean mask
                #Assume input is 0 for masked, 1 for unmasked (valid control surface)
                mask = mask.filled().astype('bool')
                #This should work, as the *rockmask.tif is 1 for unmasked, 0 for masked, with ndv=0
                #mask = np.ma.getmaskarray(mask)

        print("\nRemoving median x and y offset over static control surfaces")
        h_myr_count = h_myr.count()
        h_myr_static_count = np.ma.array(h_myr, mask=mask).count()
        h_myr_mad, h_myr_med = malib.mad(np.ma.array(h_myr, mask=mask),
                                         return_med=True)
        v_myr_mad, v_myr_med = malib.mad(np.ma.array(v_myr, mask=mask),
                                         return_med=True)

        print("Static pixel count: %i (%0.1f%%)" %
              (h_myr_static_count,
               100 * float(h_myr_static_count) / h_myr_count))
        print("median (+/-NMAD)")
        print("x velocity offset: %0.2f (+/-%0.2f) m/%s" %
              (h_myr_med, h_myr_mad, t_unit))
        print("y velocity offset: %0.2f (+/-%0.2f) m/%s" %
              (v_myr_med, v_myr_mad, t_unit))
        h_myr -= h_myr_med
        v_myr -= v_myr_med
        offset_str = '_offsetcorr_h%0.2f_v%0.2f' % (h_myr_med, v_myr_med)
        #Velocity Magnitude
        m = np.ma.sqrt(h_myr**2 + v_myr**2)
        print("Velocity Magnitude stats after correction")
        malib.print_stats(m)

    if plot:
        fig_fn = os.path.splitext(src_fn)[0] + '.png'
        label = 'Velocity (m/%s)' % t_unit
        f, ax = make_plot(m, fig_fn, label)
        plotvec(h_myr, v_myr)
        plt.tight_layout()
        plt.savefig(fig_fn,
                    dpi=300,
                    bbox_inches='tight',
                    pad_inches=0,
                    edgecolor='none')

    print("Writing out files")
    gt = src_ds.GetGeoTransform()
    proj = src_ds.GetProjection()
    dst_fn = os.path.splitext(src_fn)[0] + '_vm%s.tif' % offset_str
    iolib.writeGTiff(m, dst_fn, create=True, gt=gt, proj=proj)
    dst_fn = os.path.splitext(src_fn)[0] + '_vx%s.tif' % offset_str
    iolib.writeGTiff(h_myr, dst_fn, create=True, gt=gt, proj=proj)
    dst_fn = os.path.splitext(src_fn)[0] + '_vy%s.tif' % offset_str
    iolib.writeGTiff(v_myr, dst_fn, create=True, gt=gt, proj=proj)
    src_ds = None
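
The offset-removal step above can be isolated as a small helper. A minimal sketch, reusing the malib.mad signature from this example (remove_median_offset is a hypothetical name):

import numpy as np
from pygeotools.lib import malib

def remove_median_offset(vel, static_mask):
    """Subtract the median velocity over static control surfaces.
    static_mask is True over pixels to exclude from the statistics"""
    mad, med = malib.mad(np.ma.array(vel, mask=static_mask), return_med=True)
    return vel - med, med, mad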
Code example #7
File: ndinterp.py  Project: whigg/iceflow
def main():

    if len(sys.argv) < 2:
        sys.exit("Usage: %s stack.npz [mask.shp]" %
                 os.path.basename(sys.argv[0]))

    #This will attempt to load cached files on disk
    load_existing = False
    #Limit spatial interpolation to input mask
    clip_to_mask = True

    #This expects a DEMStack object, see pygeotools/lib/malib.py or pygeotools/make_stack.py
    stack_fn = sys.argv[1]
    #Expects shp polygon as valid mask, in same projection as input raster
    mask_fn = None
    if len(sys.argv) > 2:
        mask_fn = sys.argv[2]
    else:
        clip_to_mask = False

    stack = malib.DEMStack(stack_fn=stack_fn,
                           save=False,
                           trend=True,
                           med=True,
                           stats=True)
    #Get times of original obs
    t = stack.date_list_o.data
    t = t.astype(int).astype(float)
    #Pad the endpoints slightly so the original timestamps fall inside the hull
    t[0] -= 0.1
    t[-1] += 0.1

    if clip_to_mask:
        m = geolib.shp2array(mask_fn, res=stack.res, extent=stack.extent)
        #Expand mask - hardcoded to 6 km
        import scipy.ndimage
        it = int(np.ceil(6000. / stack.res))
        m = ~(scipy.ndimage.morphology.binary_dilation(~m, iterations=it))
        apply_mask(stack.ma_stack, m)

    #This is used from here on out
    test = stack.ma_stack
    test_ptp = stack.dt_stack_ptp
    test_source = np.array(stack.source)
    res = stack.res
    gt = np.copy(stack.gt)

    #Probably don't need full-res stack
    if True:
        stride = 2
        test = test[:, ::stride, ::stride]
        test_ptp = test_ptp[::stride, ::stride]
        res *= stride
        print("Using a stride of %i (%0.1f m)" % (stride, res))
        gt[[1, 5]] *= stride

    print("Orig shape: ", test.shape)
    #Check to make sure all t have valid data
    tcount = test.reshape(test.shape[0],
                          test.shape[1] * test.shape[2]).count(axis=1)
    validt_idx = (tcount > 0).nonzero()[0]
    test = test[validt_idx]
    test_source = test_source[validt_idx]
    t = t[validt_idx]
    print("New shape: ", test.shape)

    y, x = (test.count(axis=0) > 1).nonzero()
    x = x.astype(int)
    y = y.astype(int)
    #vm_t = test.reshape(test.shape[0], test.shape[1]*test.shape[2])
    vm_t = test[:, y, x]
    vm_t_flat = vm_t.ravel()
    idx = ~np.ma.getmaskarray(vm_t_flat)
    #These are values
    VM = vm_t_flat[idx]

    #Determine scaling factors for x and y coords
    #Should be the same for both
    xy_scale = max(x.ptp(), y.ptp())
    xy_offset = min(x.min(), y.min())

    #This scales t to encourage interpolation along the time axis rather than spatial axis
    t_factor = 16.
    t_scale = t.ptp() * t_factor
    t_offset = t.min()

    xn = rangenorm(x, xy_offset, xy_scale)
    yn = rangenorm(y, xy_offset, xy_scale)
    tn = rangenorm(t, t_offset, t_scale)

    X = np.tile(xn, t.size)[idx]
    Y = np.tile(yn, t.size)[idx]
    T = np.repeat(tn, x.size)[idx]
    #These are coords
    pts = np.vstack((X, Y, T)).T

    #Step size in days
    #ti_dt = 91.3125
    #ti_dt = 121.75
    ti_dt = 365.25

    #Set min and max times for interpolation
    #ti = np.arange(t.min(), t.max(), ti_dt)
    ti_min = timelib.dt2o(datetime(2008, 1, 1))
    ti_max = timelib.dt2o(datetime(2015, 1, 1))

    #Interpolate at these times
    ti = np.arange(ti_min, ti_max, ti_dt)
    #Annual
    #ti = timelib.dt2o([datetime(2008,1,1), datetime(2009,1,1), datetime(2010,1,1), datetime(2011,1,1), datetime(2012,1,1), datetime(2013,1,1), datetime(2014,1,1), datetime(2015,1,1)])

    tin = rangenorm(ti, t_offset, t_scale)
    """
    #Never got this working efficiently, but preserved for reference
    #Radial basis function interpolation
    #Need to normalize to input cube  
    print "Running Rbf interpolation for %i points" % X.size
    rbfi = scipy.interpolate.Rbf(Xn,Yn,Tn,VM, function='linear', smooth=0.1)
    #rbfi = scipy.interpolate.Rbf(Xn,Yn,Tn,VM, function='gaussian', smooth=0.000001)
    #rbfi = scipy.interpolate.Rbf(Xn,Yn,Tn,VM, function='inverse', smooth=0.00001)
    print "Sampling result at %i points" % xin.size
    vmi_rbf = rbfi(xin, yin, tin.repeat(x.size))
    vmi_rbf_ma[:,y,x] = np.ma.fix_invalid(vmi_rbf.reshape((ti.size, x.shape[0])))
    """

    #Attempt to load cached interpolation function
    int_fn = '%s_LinearNDint_%i_%i.pck' % (os.path.splitext(stack_fn)[0],
                                           test.shape[1], test.shape[2])
    print(int_fn)
    if load_existing and os.path.exists(int_fn):
        print("Loading pickled interpolation function: %s" % int_fn)
        f = open(int_fn, 'rb')
        linNDint = pickle.load(f)
    else:
        #NearestND interpolation (fast)
        #print "Running NearestND interpolation for %i points" % X.size
        #NearNDint = scipy.interpolate.NearestNDInterpolator(pts, VM, rescale=True)
        #LinearND interpolation
        print("Running LinearND interpolation for %i points" % X.size)
        #Note: this breaks qhull for lots of input points
        linNDint = scipy.interpolate.LinearNDInterpolator(pts,
                                                          VM,
                                                          rescale=False)
        print("Saving pickled interpolation function: %s" % int_fn)
        f = open(int_fn, 'wb')
        pickle.dump(linNDint, f, protocol=2)
        f.close()

    vmi_fn = '%s_%iday.npy' % (os.path.splitext(int_fn)[0], ti_dt)
    if load_existing and os.path.exists(vmi_fn):
        print('Loading existing interpolated stack: %s' % vmi_fn)
        vmi_ma = np.ma.fix_invalid(np.load(vmi_fn))
    else:
        #Once tesselation is complete, sample each timestep in parallel
        print("Sampling %i points at %i timesteps, %i total" %
              (x.size, ti.size, x.size * ti.size))
        #Prepare array to hold output
        vmi_ma = np.ma.masked_all((ti.size, test.shape[1], test.shape[2]))
        """
        #This does all points at once
        #vmi = linNDint(ptsi)
        #vmi_ma[:,y,x] = np.ma.fix_invalid(vmi.reshape((ti.size, x.shape[0])))
        #This does interpolation serially by timestep
        for n, i in enumerate(ti):
            print n, i, timelib.o2dt(i)
            vmi_ma[n,y,x] = linNDint(x, y, i.repeat(x.size)).T
        """
        #Parallel processing
        pool = mp.Pool(processes=None)
        results = [
            pool.apply_async(dto_interp, args=(linNDint, xn, yn, i))
            for i in tin
        ]
        results = [p.get() for p in results]
        results.sort()
        for n, r in enumerate(results):
            t_rescale = r[0] * t_scale + t_offset
            print(n, t_rescale, timelib.o2dt(t_rescale))
            vmi_ma[n, y, x] = r[1]

        vmi_ma = np.ma.fix_invalid(vmi_ma)
        print('Saving interpolated stack: %s' % vmi_fn)
        np.save(vmi_fn, vmi_ma.filled(np.nan))

    origt = False
    if origt:
        print("Sampling %i points at %i original timesteps" % (x.size, t.size))
        vmi_ma_origt = np.ma.masked_all((t.size, test.shape[1], test.shape[2]))
        #Parallel
        pool = mp.Pool(processes=None)
        results = [
            pool.apply_async(dto_interp, args=(linNDint, x, y, i)) for i in t
        ]
        results = [p.get() for p in results]
        results.sort()
        for n, r in enumerate(results):
            print(n, r[0], timelib.o2dt(r[0]))
            vmi_ma_origt[n, y, x] = r[1]
        vmi_ma_origt = np.ma.fix_invalid(vmi_ma_origt)
        #print 'Saving interpolated stack: %s' % vmi_fn
        #np.save(vmi_fn, vmi_ma.filled(np.nan))

    #Write out a proper stack, for use by stack_melt and flux gate mass budget
    if True:
        out_stack = deepcopy(stack)
        out_stack.stats = False
        out_stack.trend = False
        out_stack.datestack = False
        out_stack.write_stats = False
        out_stack.write_trend = False
        out_stack.write_datestack = False
        out_stack.ma_stack = vmi_ma
        out_stack.stack_fn = os.path.splitext(vmi_fn)[0] + '.npz'
        out_stack.date_list_o = np.ma.array(ti)
        out_stack.date_list = np.ma.array(timelib.o2dt(ti))
        out_fn_list = [
            timelib.print_dt(i) + '_LinearNDint.tif'
            for i in out_stack.date_list
        ]
        out_stack.fn_list = out_fn_list
        out_stack.error = np.zeros_like(out_stack.date_list_o)
        out_stack.source = np.repeat('LinearNDint', ti.size)
        out_stack.gt = gt
        out_stack.res = res
        out_stack.savestack()

    sys.exit()
    """
    #Other interpolation methods
    #vmi = scipy.interpolate.griddata(pts, VM, ptsi, method='linear', rescale=True)

    #Kriging
    #Should explore this more - likely the best option
    #http://connor-johnson.com/2014/03/20/simple-kriging-in-python/
    #http://resources.esri.com/help/9.3/arcgisengine/java/gp_toolref/geoprocessing_with_3d_analyst/using_kriging_in_3d_analyst.htm

    #PyKrige does moving window Kriging, but only in 2D
    #https://github.com/bsmurphy/PyKrige/pull/5

    #Could do tiled kriging with overlap in parallel
    #Split along x and y direction, preserve all t
    #Need to generate semivariogram globally though, then pass to each tile
    #See malib sliding_window
    wx = wy = 30
    wz = test.shape[0]
    overlap = 0.5
    dwx = dwy = int(overlap*wx)
    gp_slices = malib.nanfill(test, malib.sliding_window, ws=(wz,wy,wx), ss=(0,dwy,dwx))

    vmi_gp_ma = np.ma.masked_all((ti.size, test.shape[1], test.shape[2]))
    vmi_gp_mse_ma = np.ma.masked_all((ti.size, test.shape[1], test.shape[2]))

    out = []
    for i in gp_slices:
        y, x = (i.count(axis=0) > 0).nonzero()
        x = x.astype(int)
        y = y.astype(int)
        vm_t = test[:,y,x]
        vm_t_flat = vm_t.ravel()
        idx = ~np.ma.getmaskarray(vm_t_flat)
        #These are values
        VM = vm_t_flat[idx]

        #These are coords
        X = np.tile(x, t.size)[idx]
        Y = np.tile(y, t.size)[idx]
        T = np.repeat(t, x.size)[idx]
        pts = np.vstack((X,Y,T)).T

        xi = np.tile(x, ti.size)
        yi = np.tile(y, ti.size)
        ptsi = np.array((xi, yi, ti.repeat(x.size))).T

        #gp = GaussianProcess(regr='linear', verbose=True, normalize=True, theta0=0.1, nugget=2)
        gp = GaussianProcess(regr='linear', verbose=True, normalize=True, nugget=2)
        gp.fit(pts, VM)
        vmi_gp, vmi_gp_mse = gp.predict(ptsi, eval_MSE=True)
        vmi_gp_ma = np.ma.masked_all((ti.size, i.shape[1], i.shape[2]))
        vmi_gp_ma[:,y,x] = np.array(vmi_gp.reshape((ti.size, x.shape[0])))
        vmi_gp_mse_ma = np.ma.masked_all((ti.size, i.shape[1], i.shape[2]))
        vmi_gp_mse_ma[:,y,x] = np.array(vmi_gp_mse.reshape((ti.size, x.shape[0])))
        out.append(vmi_gp_ma)
    #Now combine intelligently

    print "Gaussian Process regression"
    pts2d_vm = vm_t[1]
    pts2d = np.vstack((x,y))[~(np.ma.getmaskarray(pts2d_vm))].T
    pts2di = np.vstack((x,y)).T
    gp = GaussianProcess(regr='linear', verbose=True, normalize=True, theta0=0.1, nugget=1)
    gp.fit(pts, VM)
    print "Gaussian Process prediction"
    vmi_gp, vmi_gp_mse = gp.predict(ptsi, eval_MSE=True)
    print "Converting to stack"
    vmi_gp_ma = np.ma.masked_all((ti.size, test.shape[1], test.shape[2]))
    vmi_gp_ma[:,y,x] = np.array(vmi_gp.reshape((ti.size, x.shape[0])))
    vmi_gp_mse_ma = np.ma.masked_all((ti.size, test.shape[1], test.shape[2]))
    vmi_gp_mse_ma[:,y,x] = np.array(vmi_gp_mse.reshape((ti.size, x.shape[0])))
    sigma = np.sqrt(vmi_gp_mse_ma)
    """
    """