# ===== Example 1 =====
# Load velocity records
# NOTE(review): ximage_*/yimage_*/imagetime_* and velocity_cutoff appear to be
# defined earlier in the file -- confirm before running this chunk standalone.
# Build 100 m grids spanning the Helheim image extent.
xvel_HG = np.arange(np.min(ximage_HG), np.max(ximage_HG), 100)
yvel_HG = np.arange(np.min(yimage_HG), np.max(yimage_HG), 100)
vx,vy = vellib.inversion_3D('Helheim',xvel_HG,yvel_HG,imagetime_HG,\
        dir_velocity_out='none',blur=False)
# Speed magnitude from the two velocity components.
vel_HG = np.sqrt(vx**2 + vy**2)

# Same for Kanger on its own 100 m grid.
xvel_KG = np.arange(np.min(ximage_KG), np.max(ximage_KG), 100)
yvel_KG = np.arange(np.min(yimage_KG), np.max(yimage_KG), 100)
vx,vy = vellib.inversion_3D('Kanger',xvel_KG,yvel_KG,imagetime_KG,\
        dir_velocity_out='none',blur=False)
vel_KG = np.sqrt(vx**2 + vy**2)
del vx, vy

# Load masks
# Everything more than 38 km east of a per-glacier reference x coordinate is
# forced into the mask (mask value 1 == masked); the speed grids are then
# wrapped as masked arrays.
xmask,ymask,mask = masklib.load_grid('Helheim',np.min(xvel_HG),np.max(xvel_HG),\
        np.min(yvel_HG),np.max(yvel_HG),100,icefront_time=imagetime_HG)
mask[:, xmask > 277000 + 38e3] = 1
vel_masked_HG = np.ma.masked_array(vel_HG, mask)

xmask,ymask,mask = masklib.load_grid('Kanger',np.min(xvel_KG),np.max(xvel_KG),\
        np.min(yvel_KG),np.max(yvel_KG),100,icefront_time=imagetime_KG)
mask[:, xmask > 462000 + 38e3] = 1
vel_masked_KG = np.ma.masked_array(vel_KG, mask)
del vel_KG, vel_HG, xmask, ymask, mask

# Load min velocities for showing region where velocity is above cutoff
# NOTE(review): ind_cutoff_grid/ind_cutoff from the Kanger call are
# immediately overwritten by the Helheim call -- only the Helheim indices
# survive; confirm this is intentional.
x_cutoff_KG, y_cutoff_KG, vsurfini_cutoff_KG, ind_cutoff_grid, ind_cutoff  = inverselib.get_velocity_cutoff('Kanger',\
        velocity_cutoff=velocity_cutoff,model_dir='INV_SSA_ModelT')
x_cutoff_HG, y_cutoff_HG, vsurfini_cutoff_HG, ind_cutoff_grid, ind_cutoff  = inverselib.get_velocity_cutoff('Helheim',\
        velocity_cutoff=velocity_cutoff,model_dir='INV_SSA_ModelT')
# ===== Example 2 =====
# Surface gradients, ice thickness, and driving stress for each DEM epoch.
# All grids are (len(y), len(x), len(timedem)) and start out NaN so that
# cells failing the mask/thickness checks below stay NaN (the original code
# built them with np.zeros and then overwrote every element with NaN; one of
# those fills even used an inconsistent `[:, :, ]` slice -- np.full is the
# idiomatic, equivalent initialization).
# NOTE(review): x, y, timedem, zs, zbed_int, rho_i, g and glacier are assumed
# to be defined earlier in the file -- confirm before running standalone.
dhdx = np.full([len(y), len(x), len(timedem)], np.nan)
dhdy = np.full([len(y), len(x), len(timedem)], np.nan)
H = np.full([len(y), len(x), len(timedem)], np.nan)
taud = np.full([len(y), len(x), len(timedem)], np.nan)
taud_u = np.full([len(y), len(x), len(timedem)], np.nan)
# Velocity components from the inversion at the last DEM time.
u, v = vellib.inversion_3D(glacier, x, y, timedem[-1])
for k in range(0, len(timedem)):
    # Ice mask for the current DEM date; grid spacing taken from y.
    xmask, ymask, zmask = masklib.load_grid(glacier,
                                            x[0],
                                            x[-1],
                                            y[0],
                                            y[-1],
                                            y[1] - y[0],
                                            icefront_time=timedem[k],
                                            ice=1)

    # Centered differences on interior points only; the boundary ring and any
    # non-ice or below-bed cells remain NaN.
    for i in range(1, len(x) - 1):
        for j in range(1, len(y) - 1):
            if (zmask[j, i] == 1) and (zs[j, i, k] > zbed_int[j, i]):
                dhdx[j, i, k] = (zs[j, i + 1, k] -
                                 zs[j, i - 1, k]) / (x[i + 1] - x[i - 1])
                dhdy[j, i, k] = (zs[j + 1, i, k] -
                                 zs[j - 1, i, k]) / (y[j + 1] - y[j - 1])
                H[j, i, k] = zs[j, i, k] - zbed_int[j, i]
                # Driving stress: tau_d = rho_i * g * H * |grad(zs)|
                taud[j, i,
                     k] = rho_i * g * H[j, i, k] * np.sqrt((dhdx[j, i, k])**2 +
                                                           (dhdy[j, i, k])**2)
# ===== Example 3 =====
    # Build a 100 m grid spanning the satellite image extent (ximage/yimage
    # are assumed to be provided by the enclosing function -- TODO confirm)
    # and load the matching velocity record.
    xvel = np.arange(np.min(ximage), np.max(ximage), 100)
    yvel = np.arange(np.min(yimage), np.max(yimage), 100)
    vx, vy = vellib.inversion_3D(glacier,
                                 xvel,
                                 yvel,
                                 imagetime,
                                 dir_velocity_out='none',
                                 blur=False)
    # Speed magnitude; the components are no longer needed afterwards.
    vel = np.sqrt(vx**2 + vy**2)
    del vx, vy

    # Load mask
    # Ice mask for a fixed icefront date (4 July 2014); mask==1 pixels are
    # hidden in the masked array below.
    xmask, ymask, mask = masklib.load_grid(
        glacier,
        np.min(xvel),
        np.max(xvel),
        np.min(yvel),
        np.max(yvel),
        100,
        icefront_time=datelib.date_to_fracyear(2014, 7, 4))
    vel_masked = np.ma.masked_array(vel, mask)

    fig = plt.figure(figsize=(2.5, 2.5))

    # Reversed cubehelix colormap; speed is plotted in km/yr (vel is
    # presumably in m/yr -- verify against vellib).
    cx = cubehelix.cmap(start=1.2, rot=-1.1, reverse=True, minLight=0.1, sat=2)
    p = plt.imshow(
        vel_masked / 1e3,
        extent=[np.min(xvel),
                np.max(xvel),
                np.min(yvel),
                np.max(yvel)],
        origin='lower',
# ===== Example 4 =====
# File: vellib.py  Project: tarynblack/big3
def variability(glacier, time1, time2):
    """Quantify TSX velocity variability for a glacier between two times.

    Loads every TSX "track*" velocity mosaic for `glacier` onto a common
    100 m grid, masks each epoch with an ice mask, and computes per-pixel
    statistics (linear trend, detrended range, observation count) for the
    mosaics with time1 < t < time2.

    Parameters
    ----------
    glacier : str
        'Helheim' or 'Kanger' (selects the fixed output-grid extent).
    time1, time2 : float
        Fractional-year limits of the analysis window; time1 is also used
        as the icefront time when a mask geotiff has to be generated.

    Returns
    -------
    x, y : 1-D arrays of grid coordinates (m).
    velgrid_mask : (ny, nx, nt) speeds with non-ice pixels set to NaN,
        sorted by time.
    veltrend : (ny, nx) linear speed trend; NaN where coverage was
        insufficient or the trend was insignificant (p > 0.05).
    veldetrend : (ny, nx, nt) speeds with the per-pixel trend removed.
    velrange : (ny, nx) max minus min of the detrended speeds.
    velcount : (ny, nx) number of non-NaN observations per pixel.
    veltrend_error : (ny, nx) standard error of the trend slope.
    time : (nt,) sorted fractional-year times of the kept mosaics.
    """

    DIR_TSX = os.path.join(os.getenv("DATA_HOME"),
                           "Velocity/TSX/" + glacier + "/")

    # Fixed output extents (polar-stereographic meters) for each glacier.
    if glacier == 'Helheim':
        xmin = 270000.0
        xmax = 354900.0
        ymin = -2601000.0
        ymax = -2541000.0
    elif glacier == 'Kanger':
        xmin = 457000.0
        xmax = 517000.0
        ymin = -2319100.0
        ymax = -2247100.0
    else:
        # Previously an unknown glacier caused a NameError further down;
        # fail fast with a clear message instead.
        raise ValueError("Unknown glacier name: " + glacier)

    # Common 100 m output grid and its (y, x) coordinate pairs (used by the
    # interpolation fallback below).
    dx = dy = 100.
    nx = int(np.ceil((xmax - xmin) / dx) + 1)
    x = np.linspace(xmin, (nx - 1) * dx + xmin, nx)
    ny = int(np.ceil((ymax - ymin) / dx) + 1)
    y = np.linspace(ymin, (ny - 1) * dy + ymin, ny)
    xgrid, ygrid = np.meshgrid(x, y)
    coords = np.column_stack([ygrid.flatten(), xgrid.flatten()])

    #################
    # LOAD TSX Data #
    #################

    DIRs = os.listdir(DIR_TSX)

    # Get number of velocity files
    nt = 0
    for DIR in DIRs:
        if DIR.startswith('track'):
            nt = nt + 1

    # Set up variables
    velgrid = np.zeros([ny, nx, nt])
    mask = np.zeros([ny, nx, nt])
    velgrid_mask = np.zeros([ny, nx, nt])
    time = np.zeros(nt)
    ergrid = np.zeros([ny, nx, nt])

    # Load velocity and mask
    count = 0
    for j in range(0, len(DIRs)):
        DIR = DIRs[j]
        if DIR.startswith('track'):
            # Load velocity
            x1, y1, v1, vx1, vy1, ex1, ey1, time_file, interval1 = geodatlib.readvelocity(
                DIR_TSX, DIR, "mosaicOffsets")

            time[count] = time_file
            year, month, day = datelib.fracyear_to_date(time_file)

            # Indices of the source grid covering the output extent.
            xind1 = np.argmin(abs(x1 - xmin))
            xind2 = np.argmin(abs(x1 - xmax)) + 1
            yind1 = np.argmin(abs(y1 - ymin))
            yind2 = np.argmin(abs(y1 - ymax)) + 1

            # Load velocity
            try:
                # If the input and output grids have the same dimensions...
                velgrid[:, :, count] = v1[yind1:yind2, xind1:xind2]
            except ValueError:
                # ...otherwise the broadcast fails and we interpolate onto
                # the output grid instead. (Was a bare `except:`; a shape
                # mismatch raises ValueError, and anything else should
                # propagate rather than be silently swallowed.)
                f_dem = scipy.interpolate.RegularGridInterpolator(
                    [y1, x1],
                    v1,
                    bounds_error=False,
                    method='linear',
                    fill_value=float('nan'))
                v_flatten = f_dem(coords)

                # Reshape to grid
                velgrid[:, :, count] = np.reshape(v_flatten, (ny, nx))

            # Load mask: cached as a geotiff next to the data, generated on
            # first use with icefront_time=time1.
            date = "%04d%02d%02d" % (year, month, day)
            maskfile = DIR_TSX + 'TIF/' + DIR + '_' + date + '_' + 'mask.tif'
            if os.path.isfile(maskfile):
                xmask, ymask, mask[:, :, count] = geotifflib.read(maskfile)
            else:
                xmask, ymask, mask[:, :, count] = masklib.load_grid(
                    glacier, xmin, xmax, ymin, ymax, dx, icefront_time=time1)
                geotifflib.write_from_grid(xmask, ymask,
                                           np.flipud(mask[:, :, count]),
                                           float('nan'), maskfile)

            # Copy the speeds and NaN-out masked (non-ice) pixels.
            velgrid_mask[:, :, count] = np.array(velgrid[:, :, count])
            velgrid_mask[mask[:, :, count] == 1, count] = float('nan')

            count = count + 1

    del count, maskfile, date, xind1, yind1, xind2, yind2, year, month, x1, y1, vx1, vy1, ex1, ey1, time_file, interval1

    # Throw out obvious outliers
    ind = np.where(velgrid > 16.0e3)
    velgrid[ind[0], ind[1], ind[2]] = float('nan')
    velgrid_mask[ind[0], ind[1], ind[2]] = float('nan')
    print("Throwing out velocities above 16 km/yr to deal with outliers in Kanger record")

    # Only keep data that falls between time1 and time2, and sort that data by time
    sortind = np.argsort(time)
    time = time[sortind]
    velgrid_mask = velgrid_mask[:, :, sortind]
    velgrid = velgrid[:, :, sortind]

    ind = np.where((time > time1) & (time < time2))[0]
    velgrid_mask = velgrid_mask[:, :, ind]
    time = time[ind]
    velgrid = velgrid[:, :, ind]

    # Get average and std values
    velmean = np.nanmean(velgrid_mask, axis=2)

    # Get linear trends: per-pixel least-squares fit, NaN-initialized so
    # pixels that never qualify stay NaN.
    veltrend = np.zeros_like(velmean)
    veltrend_time1 = np.zeros_like(velmean)
    veltrend_time2 = np.zeros_like(velmean)
    veltrend_count = np.zeros_like(velmean)
    veltrend_p = np.zeros_like(velmean)
    veltrend_error = np.zeros_like(velmean)
    veltrend_r = np.zeros_like(velmean)
    veltrend_intercept = np.zeros_like(velmean)
    veltrend_p[:, :] = float('nan')
    veltrend[:, :] = float('nan')
    veltrend_error[:, :] = float('nan')
    veltrend_r[:, :] = float('nan')
    veltrend_intercept[:, :] = float('nan')
    for j in range(0, len(y)):
        for i in range(0, len(x)):
            nonnan = np.where((~(np.isnan(velgrid_mask[j, i, :]))))[0]
            # Require >75% temporal coverage and data spanning the full
            # [time1, time2] window before fitting a trend.
            if len(nonnan) > 0.75 * len(time):
                if (np.floor(np.min(time[nonnan])) == time1) and np.ceil(
                        np.max(time[nonnan])) == time2:
                    slope, intercept, r, p, std_err = stats.linregress(
                        time[nonnan], velgrid_mask[j, i, nonnan])
                    veltrend_count[j, i] = len(nonnan)
                    veltrend[j, i] = slope
                    veltrend_p[j, i] = p
                    veltrend_error[j, i] = std_err
                    veltrend_time1[j, i] = np.min(time[nonnan])
                    veltrend_time2[j, i] = np.max(time[nonnan])
                    veltrend_r[j, i] = r
                    veltrend_intercept[j, i] = intercept

    # Detrend velocity timeseries
    veldetrend = np.zeros_like(velgrid_mask)
    for i in range(0, len(time)):
        trend = veltrend_intercept + time[i] * veltrend
        veldetrend[:, :, i] = velgrid_mask[:, :, i] - trend

    # Calculate range of observed values
    velrange = np.zeros_like(velmean)
    velrange[:, :] = float('nan')
    for i in range(0, len(x)):
        for j in range(0, len(y)):
            nonnan = np.where(~(np.isnan(veldetrend[j, i, :])))[0]
            if len(nonnan) > 1:
                velrange[j, i] = np.max(veldetrend[j, i, nonnan]) - np.min(
                    veldetrend[j, i, nonnan])

    # Remove insignificant trends
    ind = np.where(veltrend_p > 0.05)
    veltrend[ind] = float('nan')
    veltrend_error[ind] = float('nan')

    # Get number of nonnan velocities for each pixel
    velcount = np.zeros([ny, nx])
    for j in range(0, ny):
        for i in range(0, nx):
            nonnan = len(np.where(~(np.isnan(velgrid_mask[j, i, :])))[0])
            velcount[j, i] = nonnan

    # time was already sorted above; this re-sort is a harmless no-op kept
    # for safety before returning.
    sortind = np.argsort(time)
    velgrid_mask = velgrid_mask[:, :, sortind]
    time = time[sortind]

    return x, y, velgrid_mask, veltrend, veldetrend, velrange, velcount, veltrend_error, time