Example 1
def mad_fltr(dem, mad_sigma=2):
    """Median absolute deviation * factor filter
    """
    med = np.ma.median(dem)
    mad = malib.mad(dem)
    rangelim = (med - mad_sigma * mad, med + mad_sigma * mad)
    print('Excluding values outside of range defined by {0} mad sigma: {1:0.1f} to {2:0.1f} m'.format(mad_sigma, *rangelim))
    out = range_fltr(dem, rangelim)
    return out
Example 2
def mad_fltr(dem, n=3):
    """Median absolute deviation * factor filter

    Robust outlier removal
    """
    mad, med = malib.mad(dem, return_med=True)
    print('Excluding values outside of range: {1:0.3f} +/- {0}*{2:0.3f}'.format(n, med, mad))
    rangelim = (med - n*mad, med + n*mad)
    out = range_fltr(dem, rangelim)
    return out
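Both variants implement the same robust filter: mask everything outside median +/- n*MAD. Below is a minimal self-contained sketch of that idea using plain NumPy masked arrays; the nmad helper is a hypothetical stand-in for malib.mad (assumed here to apply the usual 1.4826 normalization), and np.ma.masked_outside stands in for range_fltr.

import numpy as np

def nmad(a, c=1.4826):
    """Normalized median absolute deviation (hypothetical stand-in for malib.mad)."""
    a = np.ma.masked_invalid(a)
    med = np.ma.median(a)
    return c * np.ma.median(np.ma.abs(a - med))

def mad_fltr_sketch(dem, n=3):
    """Mask values outside median +/- n*NMAD."""
    dem = np.ma.masked_invalid(dem)
    med = np.ma.median(dem)
    sigma = nmad(dem)
    rangelim = (med - n * sigma, med + n * sigma)
    print('Excluding values outside of range: %0.3f to %0.3f' % rangelim)
    return np.ma.masked_outside(dem, *rangelim)

# Example: synthetic DEM with a gross outlier
dem = np.random.normal(1000, 5, (100, 100))
dem[0, 0] = 9999.0
out = mad_fltr_sketch(dem)
print(dem.size - out.count(), 'values masked')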
Example 3
def make_plot(x, y, yerr=None, c='k', ms=4, label=None, abs=False):
    y_mean = y.mean()
    y_std = y.std()
    y_med = np.ma.median(y)
    y_nmad = malib.mad(y)
    #plt.plot(x, y, label=label, color=c, marker='o', linestyle='None')
    plt.scatter(x, y, label=label, color=c, marker='o', s=ms)
    if yerr is not None:
        plt.errorbar(x, y, yerr=yerr, color=c, linestyle='None', elinewidth=0.5, capsize=np.sqrt(ms), alpha=0.5)
    plt.axhline(y_med, color=c, linestyle='--', alpha=0.5)
    plt.axhline(y_med + y_nmad, color=c, linestyle=':', alpha=0.5)
    plt.axhline(y_med - y_nmad, color=c, linestyle=':', alpha=0.5)
    plt.axhline(0, color='k', linewidth=0.5, linestyle='-', alpha=0.5)
    ax = plt.gca()
    plt.minorticks_on()
    #ax.tick_params(axis='y',which='minor',left='on')
    if abs:
        ax.set_ylim(bottom=0.0)
Example 4
        z1_bin_areas = z1_bin_counts * ds_res[0] * ds_res[1] / 1E6
        z2_bin_counts, z2_bin_edges = np.histogram(z2, bins=z_bin_edges)
        z2_bin_areas = z2_bin_counts * ds_res[0] * ds_res[1] / 1E6

        #dz_bin_edges, dz_bin_centers = get_bins(dz, 1.)
        #dz_bin_counts, dz_bin_edges = np.histogram(dz, bins=dz_bin_edges)
        #dz_bin_areas = dz_bin_counts * ds_res * ds_res / 1E6
        dz_bin_med = np.ma.masked_all_like(z1_bin_areas)
        dz_bin_mad = np.ma.masked_all_like(z1_bin_areas)
        idx = np.digitize(z1, z_bin_edges)
        for bin_n in range(z_bin_centers.size):
            dz_bin_samp = mb[(idx == bin_n + 1)]
            #dz_bin_samp = dhdt[(idx == n+1)]
            if dz_bin_samp.count() > 0:
                dz_bin_med[bin_n] = malib.fast_median(dz_bin_samp)
                dz_bin_mad[bin_n] = malib.mad(dz_bin_samp)
                #Or use mean/std instead of the robust stats:
                #dz_bin_med[bin_n] = dz_bin_samp.mean()
                #dz_bin_mad[bin_n] = dz_bin_samp.std()

        print("Generating map plot")
        f, axa = plt.subplots(1, 3, figsize=(10, 7.5))
        f.suptitle(feat_fn)
        alpha = 1.0
        hs = True
        if hs:
            z1_hs = geolib.gdaldem_wrapper(out_z1_fn,
                                           product='hs',
                                           returnma=True)
            z2_hs = geolib.gdaldem_wrapper(out_z2_fn,
                                           product='hs',
                                           returnma=True)
Example 5
        plt.figure()
        ax = plt.gca()
        ax.set_xlabel('Mass balance (m we/yr)')
        ax.set_ylabel('Number of glaciers')
        hist_clim = (-1.0, 1.0)
        ax.set_xlim(*hist_clim)
        glac_df_mb['mb_mwea'].hist(range=hist_clim,
                                   bins=256,
                                   label='Before outlier filter')

    outlier_perc = (0.01, 0.99)
    #outlier_perc = (0.001, 0.999)
    #outlier_clim = (glac_df_mb['mb_mwea'].quantile(outlier_perc[0]), glac_df_mb['mb_mwea'].quantile(outlier_perc[1]))
    std_f = 3.0
    #outlier_clim = glac_df_mb['mb_mwea'].mean() - std_f*glac_df_mb['mb_mwea'].std()
    outlier_clim = glac_df_mb['mb_mwea'].median() - std_f * malib.mad(
        glac_df_mb['mb_mwea'].values)
    outlier_clim = (outlier_clim, -outlier_clim)
    print("Removing outliers (%0.2f, %0.2f)" % (outlier_clim))
    #inlier_idx = np.abs(glac_df_mb['mb_mwea'] - glac_df_mb['mb_mwea'].mean()) <= (3*glac_df_mb['mb_mwea'].std())
    inlier_idx = (glac_df_mb['mb_mwea'] >=
                  outlier_clim[0]) & (glac_df_mb['mb_mwea'] <= outlier_clim[1])

    print("%i records before outlier removal" % (glac_df_mb.shape[0]))
    glac_df_mb = glac_df_mb[inlier_idx]
    print("%i records after outlier removal" % (glac_df_mb.shape[0]))

    if plot:
        glac_df_mb['mb_mwea'].hist(range=hist_clim,
                                   bins=256,
                                   label='After outlier filter')
        ax.axvline(0, linewidth=0.5, color='k')
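The clip above is symmetric about zero rather than about the median, which assumes the mass-balance distribution is roughly centered on zero. A hedged pandas sketch of the same filter follows; the column name mb_mwea is reused from the example, and nmad is a hypothetical stand-in for malib.mad.

import numpy as np
import pandas as pd

def nmad(a, c=1.4826):
    """Hypothetical stand-in for malib.mad (normalized MAD)."""
    a = np.asarray(a, dtype=float)
    med = np.nanmedian(a)
    return c * np.nanmedian(np.abs(a - med))

# Synthetic mass-balance table
glac_df_mb = pd.DataFrame({'mb_mwea': np.random.normal(-0.3, 0.4, 1000)})
std_f = 3.0
lim = glac_df_mb['mb_mwea'].median() - std_f * nmad(glac_df_mb['mb_mwea'].values)
outlier_clim = (lim, -lim)  # symmetric about zero, as in the example above
inlier_idx = glac_df_mb['mb_mwea'].between(*outlier_clim)
print("%i of %i records retained" % (inlier_idx.sum(), glac_df_mb.shape[0]))
glac_df_mb = glac_df_mb[inlier_idx]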
Example 6
def hist_plot(gf, outdir, bin_width=10.0):
    #print("Generating histograms")
    #Create bins for full range of input data and specified bin width

    #NOTE: these counts/areas are for valid pixels only
    #Not necessarily a true representation of actual glacier hypsometry
    #Need a void-filled DEM for this

    z_bin_edges, z_bin_centers = malib.get_bins(gf.z1, bin_width)
    z1_bin_counts, z1_bin_edges = np.histogram(gf.z1, bins=z_bin_edges)
    z1_bin_areas = z1_bin_counts * gf.res[0] * gf.res[1] / 1E6
    #RGI standard is integer thousandths of the glacier's total area
    #Should check to make sure sum of bin areas equals total area
    z1_bin_areas_perc = 100. * z1_bin_areas / np.sum(z1_bin_areas)

    z2_bin_counts, z2_bin_edges = np.histogram(gf.z2, bins=z_bin_edges)
    z2_bin_areas = z2_bin_counts * gf.res[0] * gf.res[1] / 1E6
    z2_bin_areas_perc = 100. * z2_bin_areas / np.sum(z2_bin_areas)

    #Create arrays to store output
    mb_bin_med = np.ma.masked_all_like(z1_bin_areas)
    mb_bin_mad = np.ma.masked_all_like(z1_bin_areas)
    mb_bin_mean = np.ma.masked_all_like(z1_bin_areas)
    mb_bin_std = np.ma.masked_all_like(z1_bin_areas)
    dz_bin_med = np.ma.masked_all_like(z1_bin_areas)
    dz_bin_mad = np.ma.masked_all_like(z1_bin_areas)
    dz_bin_mean = np.ma.masked_all_like(z1_bin_areas)
    dz_bin_std = np.ma.masked_all_like(z1_bin_areas)
    if gf.debris_class is not None:
        perc_clean = np.ma.masked_all_like(z1_bin_areas)
        perc_debris = np.ma.masked_all_like(z1_bin_areas)
        perc_pond = np.ma.masked_all_like(z1_bin_areas)
        debris_thick_med = np.ma.masked_all_like(z1_bin_areas)
        debris_thick_mad = np.ma.masked_all_like(z1_bin_areas)

    #Loop through each bin and extract stats
    idx = np.digitize(gf.z1, z_bin_edges)
    for bin_n in range(z_bin_centers.size):
        mb_bin_samp = gf.mb[(idx == bin_n + 1)]
        if mb_bin_samp.count() > 0:
            mb_bin_med[bin_n] = malib.fast_median(mb_bin_samp)
            mb_bin_mad[bin_n] = malib.mad(mb_bin_samp)
            mb_bin_mean[bin_n] = mb_bin_samp.mean()
            mb_bin_std[bin_n] = mb_bin_samp.std()
        dz_bin_samp = gf.dhdt[(idx == bin_n + 1)]
        if dz_bin_samp.count() > 0:
            dz_bin_med[bin_n] = malib.fast_median(dz_bin_samp)
            dz_bin_mad[bin_n] = malib.mad(dz_bin_samp)
            dz_bin_mean[bin_n] = dz_bin_samp.mean()
            dz_bin_std[bin_n] = dz_bin_samp.std()
        if gf.debris_class is not None:
            debris_class_bin_samp = gf.debris_class[(idx == bin_n + 1)]
            if debris_class_bin_samp.count() > 0:
                perc_clean[bin_n] = 100. * (
                    debris_class_bin_samp
                    == 1).sum() / debris_class_bin_samp.count()
                perc_debris[bin_n] = 100. * (
                    debris_class_bin_samp
                    == 2).sum() / debris_class_bin_samp.count()
                perc_pond[bin_n] = 100. * (
                    debris_class_bin_samp
                    == 3).sum() / debris_class_bin_samp.count()

            debris_thick_bin_samp = gf.debris_thick[(idx == bin_n + 1)]
            debris_thick_med[bin_n] = malib.fast_median(debris_thick_bin_samp)
            debris_thick_mad[bin_n] = malib.mad(debris_thick_bin_samp)

    outbins_header = 'bin_center_elev_m, z1_bin_count_valid, z1_bin_area_valid_km2, z1_bin_area_perc, z2_bin_count_valid, z2_bin_area_valid_km2, z2_bin_area_perc, dhdt_bin_med_ma, dhdt_bin_mad_ma, dhdt_bin_mean_ma, dhdt_bin_std_ma, mb_bin_med_mwea, mb_bin_mad_mwea, mb_bin_mean_mwea, mb_bin_std_mwea'
    fmt = '%0.1f, %i, %0.3f, %0.2f, %i, %0.3f, %0.2f, %0.2f, %0.2f, %0.2f, %0.2f, %0.2f, %0.2f, %0.2f, %0.2f'
    outbins = [
        z_bin_centers, z1_bin_counts, z1_bin_areas, z1_bin_areas_perc,
        z2_bin_counts, z2_bin_areas, z2_bin_areas_perc, dz_bin_med, dz_bin_mad,
        dz_bin_mean, dz_bin_std, mb_bin_med, mb_bin_mad, mb_bin_mean,
        mb_bin_std
    ]

    if gf.debris_class is not None:
        outbins_header += ', debris_thick_med_m, debris_thick_mad_m, perc_debris, perc_pond, perc_clean'
        fmt += ', %0.2f, %0.2f, %0.2f, %0.2f, %0.2f'
        debris_thick_med[debris_thick_med == -(np.inf)] = 0.00
        debris_thick_mad[debris_thick_mad == -(np.inf)] = 0.00
        outbins.extend([
            debris_thick_med, debris_thick_mad, perc_debris, perc_pond,
            perc_clean
        ])

    #print(len(outbins), len(fmt.split(',')), len(outbins_header.split(',')))
    outbins = np.ma.array(outbins).T.astype('float32')
    np.ma.set_fill_value(outbins, -9999.0)
    outbins_fn = os.path.join(outdir, gf.feat_fn + '_mb_bins.csv')
    #print(outbins.shape)
    np.savetxt(outbins_fn,
               outbins,
               fmt=fmt,
               delimiter=',',
               header=outbins_header)

    #print("Generating aed plot")
    #f,axa = plt.subplots(1,2, figsize=(6, 6))
    f, axa = plt.subplots(1, 3, figsize=(10, 7.5))
    f.suptitle(gf.feat_fn)
    axa[0].plot(z1_bin_areas, z_bin_centers, label='%0.2f' % gf.t1)
    axa[0].plot(z2_bin_areas, z_bin_centers, label='%0.2f' % gf.t2)
    axa[0].axhline(gf.z1_ela, ls=':', c='C0')
    axa[0].axhline(gf.z2_ela, ls=':', c='C1')
    axa[0].legend(prop={'size': 8}, loc='upper right')
    axa[0].set_ylabel('Elevation (m WGS84)')
    axa[0].set_xlabel(r'Area $\mathregular{km^2}$')
    pltlib.minorticks_on(axa[0])
    axa[1].axvline(0, lw=1.0, c='k')
    axa[1].axvline(gf.mb_mean,
                   lw=0.5,
                   ls=':',
                   c='k',
                   label='%0.2f m w.e./yr' % gf.mb_mean)
    axa[1].legend(prop={'size': 8}, loc='upper right')
    axa[1].plot(mb_bin_med, z_bin_centers, color='k')
    axa[1].fill_betweenx(z_bin_centers,
                         mb_bin_med - mb_bin_mad,
                         mb_bin_med + mb_bin_mad,
                         color='k',
                         alpha=0.1)
    axa[1].fill_betweenx(z_bin_centers,
                         0,
                         mb_bin_med,
                         where=(mb_bin_med < 0),
                         color='r',
                         alpha=0.2)
    axa[1].fill_betweenx(z_bin_centers,
                         0,
                         mb_bin_med,
                         where=(mb_bin_med > 0),
                         color='b',
                         alpha=0.2)
    #axa[1].set_ylabel('Elevation (m WGS84)')
    #axa[1].set_xlabel('dh/dt (m/yr)')
    axa[1].set_xlabel('mb (m w.e./yr)')
    pltlib.minorticks_on(axa[1])
    #Hide y-axis labels
    axa[1].axes.yaxis.set_ticklabels([])
    #axa[1].set_xlim(-2.0, 2.0)
    #axa[1].set_xlim(-8.0, 8.0)
    axa[1].set_xlim(-3.0, 3.0)
    if gf.debris_class is not None:
        axa[2].errorbar(debris_thick_med * 100.,
                        z_bin_centers,
                        xerr=debris_thick_mad * 100,
                        color='k',
                        fmt='o',
                        ms=3,
                        label='Thickness',
                        alpha=0.6)
        axa[2].plot(perc_debris,
                    z_bin_centers,
                    color='sienna',
                    label='Debris Coverage')
        axa[2].plot(perc_pond,
                    z_bin_centers,
                    color='turquoise',
                    label='Pond Coverage')
        axa[2].set_xlim(0, 100)
        pltlib.minorticks_on(axa[2])
        axa[2].legend(prop={'size': 8}, loc='upper right')
        axa[2].set_xlabel('Debris thickness (cm), coverage (%)')
        axa[2].yaxis.tick_right()
        axa[2].yaxis.set_label_position("right")
    plt.tight_layout()
    #Make room for suptitle
    plt.subplots_adjust(top=0.95)
    #print("Saving aed plot")
    fig_fn = os.path.join(outdir, gf.feat_fn + '_mb_aed.png')
    plt.savefig(fig_fn, bbox_inches='tight', dpi=300)
    plt.close(f)
    return z_bin_edges
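Stripped of the debris handling and the CSV/plot output, the binned-statistics core of hist_plot reduces to an np.digitize loop. A minimal self-contained sketch is below; the array names elev/values are illustrative, and an unscaled MAD is used in place of malib.mad.

import numpy as np

def binned_stats_sketch(elev, values, bin_width=10.0):
    """Median and (unscaled) MAD of `values` in fixed-width elevation bins."""
    edges = np.arange(np.nanmin(elev), np.nanmax(elev) + bin_width, bin_width)
    centers = edges[:-1] + bin_width / 2.0
    med = np.ma.masked_all(centers.shape)
    mad = np.ma.masked_all(centers.shape)
    idx = np.digitize(elev, edges)
    for bin_n in range(centers.size):
        samp = np.ma.masked_invalid(values[idx == bin_n + 1])
        if samp.count() > 0:
            med[bin_n] = np.ma.median(samp)
            mad[bin_n] = np.ma.median(np.ma.abs(samp - med[bin_n]))
    return centers, med, mad

# Example with synthetic elevations and dh/dt values
elev = np.random.uniform(4000, 5000, 10000)
dhdt = np.random.normal(-0.5, 0.3, 10000)
centers, dz_med, dz_mad = binned_stats_sketch(elev, dhdt)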
Example 7
def main():
    parser = getparser()
    args = parser.parse_args()

    t_unit = args.dt
    plot = args.plot
    remove_offsets = args.remove_offsets
    mask_fn = args.mask_fn
    if mask_fn is not None:
        remove_offsets = True

    #Input is 3-band disparity map, extract bands directly
    src_fn = args.disp_fn
    if not iolib.fn_check(src_fn):
        sys.exit("Unable to locate input file: %s" % src_fn)

    src_ds = iolib.fn_getds(src_fn)
    if src_ds.RasterCount != 3:
        sys.exit("Input file must be ASP disparity map (3 bands: x, y, mask)")
    #Extract pixel resolution
    h_res, v_res = geolib.get_res(src_ds)

    #Horizontal scale factor
    #If running on disparity_view output (gdal_translate -outsize 5% 5% F.tif F_5.tif)
    #h_res /= 20
    #v_res /= 20

    #Load horizontal and vertical disparities
    h = iolib.ds_getma(src_ds, bnum=1)
    v = iolib.ds_getma(src_ds, bnum=2)

    #ASP output has northward motion as negative values in band 2
    v *= -1

    t1, t2 = timelib.fn_getdatetime_list(src_fn)
    dt = t2 - t1
    #Default t_factor is in 1/years
    t_factor = timelib.get_t_factor(t1, t2)

    #Input timestamp arrays if inputs are mosaics
    if False:
        t1_fn = ''
        t2_fn = ''
        if os.path.exists(t1_fn) and os.path.exists(t2_fn):
            t_factor = timelib.get_t_factor_fn(t1_fn, t2_fn)
        if t_factor is None:
            sys.exit("Unable to determine input timestamps")

    if t_unit == 'day':
        t_factor *= 365.25

    print("Input dates:")
    print(t1)
    print(t2)
    print(dt)
    print(t_factor, t_unit)

    #Scale values for polar stereographic distortion
    srs = geolib.get_ds_srs(src_ds)
    proj_scale_factor = 1.0
    #Want to scale to get correct distances for polar stereographic
    if srs.IsSame(geolib.nps_srs) or srs.IsSame(geolib.sps_srs):
        proj_scale_factor = geolib.scale_ps_ds(src_ds)

    #Convert disparity values in pixels to m/t_unit
    h_myr = h * h_res * proj_scale_factor / t_factor
    h = None
    v_myr = v * v_res * proj_scale_factor / t_factor
    v = None

    #Velocity Magnitude
    m = np.ma.sqrt(h_myr**2 + v_myr**2)
    print("Velocity Magnitude stats")
    malib.print_stats(m)

    #Remove x and y offsets over control surfaces
    offset_str = ''
    if remove_offsets:
        if mask_fn is None:
            from demcoreg.dem_mask import get_mask
            print(
                "\nUsing demcoreg to prepare mask of stable control surfaces\n"
            )
            #TODO: Accept mask_list as in demcoreg
            #mask_list = args.mask_list
            # for now keep it simple, limit to non-glacier surfaces
            mask_list = [
                'glaciers',
            ]
            mask = get_mask(src_ds, mask_list=mask_list, dem_fn=src_fn)
        else:
            print("\nWarping input raster mask")
            #This can be from previous dem_mask.py run (e.g. *rockmask.tif)
            mask_ds = warplib.memwarp_multi_fn([
                mask_fn,
            ],
                                               res=src_ds,
                                               extent=src_ds,
                                               t_srs=src_ds)[0]
            mask = iolib.ds_getma(mask_ds)
            #The default from ds_getma is a masked array, so need to isolate boolean mask
            #Assume input is 0 for masked, 1 for unmasked (valid control surface)
            mask = mask.filled().astype('bool')
            #This should work, as the *rockmask.py is 1 for unmasked, 0 for masked, with ndv=0
            #mask = np.ma.getmaskarray(mask)
            #Vector mask - untested
            if os.path.splitext(mask_fn)[1] == '.shp':
                mask = geolib.shp2array(mask_fn, src_ds)

        print("\nRemoving median x and y offset over static control surfaces")
        h_myr_count = h_myr.count()
        h_myr_static_count = np.ma.array(h_myr, mask=mask).count()
        h_myr_mad, h_myr_med = malib.mad(np.ma.array(h_myr, mask=mask),
                                         return_med=True)
        v_myr_mad, v_myr_med = malib.mad(np.ma.array(v_myr, mask=mask),
                                         return_med=True)

        print("Static pixel count: %i (%0.1f%%)" %
              (h_myr_static_count,
               100 * float(h_myr_static_count) / h_myr_count))
        print("median (+/-NMAD)")
        print("x velocity offset: %0.2f (+/-%0.2f) m/%s" %
              (h_myr_med, h_myr_mad, t_unit))
        print("y velocity offset: %0.2f (+/-%0.2f) m/%s" %
              (v_myr_med, v_myr_mad, t_unit))
        h_myr -= h_myr_med
        v_myr -= v_myr_med
        offset_str = '_offsetcorr_h%0.2f_v%0.2f' % (h_myr_med, v_myr_med)
        #Velocity Magnitude
        m = np.ma.sqrt(h_myr**2 + v_myr**2)
        print("Velocity Magnitude stats after correction")
        malib.print_stats(m)

    if plot:
        fig_fn = os.path.splitext(src_fn)[0] + '.png'
        label = 'Velocity (m/%s)' % t_unit
        f, ax = make_plot(m, fig_fn, label)
        plotvec(h_myr, v_myr)
        plt.tight_layout()
        plt.savefig(fig_fn,
                    dpi=300,
                    bbox_inches='tight',
                    pad_inches=0,
                    edgecolor='none')

    print("Writing out files")
    gt = src_ds.GetGeoTransform()
    proj = src_ds.GetProjection()
    dst_fn = os.path.splitext(src_fn)[0] + '_vm%s.tif' % offset_str
    iolib.writeGTiff(m, dst_fn, create=True, gt=gt, proj=proj)
    dst_fn = os.path.splitext(src_fn)[0] + '_vx%s.tif' % offset_str
    iolib.writeGTiff(h_myr, dst_fn, create=True, gt=gt, proj=proj)
    dst_fn = os.path.splitext(src_fn)[0] + '_vy%s.tif' % offset_str
    iolib.writeGTiff(v_myr, dst_fn, create=True, gt=gt, proj=proj)
    src_ds = None
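The disparity-to-velocity conversion above is a simple per-pixel scaling. The sketch below uses hypothetical values for pixel size, projection scale factor, and time separation in place of the geolib/timelib lookups.

import numpy as np

# Hypothetical inputs: disparities in pixels, 15 m pixels, 1-year separation
h_px = np.ma.masked_invalid(np.random.normal(2.0, 0.5, (100, 100)))       # band 1: x disparity (px)
v_px = -1 * np.ma.masked_invalid(np.random.normal(1.0, 0.5, (100, 100)))  # band 2, sign flipped as above
h_res, v_res = 15.0, 15.0        # pixel size (m)
proj_scale_factor = 1.0          # only != 1 for polar stereographic grids
t_factor = 1.0                   # time separation in years

# Convert pixel offsets to m/yr and compute velocity magnitude
h_myr = h_px * h_res * proj_scale_factor / t_factor
v_myr = v_px * v_res * proj_scale_factor / t_factor
m = np.ma.sqrt(h_myr**2 + v_myr**2)
print("Median velocity magnitude: %0.2f m/yr" % np.ma.median(m))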
Example 8
def sample_stack(ex, ey, geoid_offset=False, pad=3):
    if ex > m.shape[2]-1 or ey > m.shape[1]-1:
        print "Input coordinates are outside stack extent:"
        print ex, ey
        print m.shape
        v = None
    else:
        print "Sampling with pad: %i" % pad
        if pad == 0:
            v = m[:,ey,ex]
        else:
            window_x = np.around(np.clip([ex-pad, ex+pad+1], 0, m.shape[2]-1)).astype(int)
            window_y = np.around(np.clip([ey-pad, ey+pad+1], 0, m.shape[1]-1)).astype(int)
            print(window_x)
            print(window_y)
            v = m[:,window_y[0]:window_y[1],window_x[0]:window_x[1]].reshape(m.shape[0], np.ptp(window_x)*np.ptp(window_y))
            #v = v.mean(axis=1)
            v = np.ma.median(v, axis=1)
        if v.count() == 0:
            print "No valid values"
        else:
            mx, my = geolib.pixelToMap(ex, ey, gt)
            print(ex, ey, mx, my)
            print("Count: %i" % v.count())
            #Hack to get elevations relative to geoid
            #Note: this can be added multiple times if clicked quickly
            if geoid_offset:
                #geoid_offset = geolib.sps2geoid(mx, my, 0.0)[2]
                geoid_offset = geolib.nps2geoid(mx, my, 0.0)[2]
                print "Removing geoid offset: %0.1f" % geoid_offset
                v += geoid_offset
        #Should filter here
        #RS1 has some values that are many 1000s of m/yr below neighbors
        if filter_outliers:
            if True:
                med = malib.fast_median(v)
                mad = malib.mad(v)
                min_v = med - mad*4
                f_idx = (v < min_v).filled(False)
                if np.any(f_idx):
                    print(med, mad)
                    print("Outliers removed by absolute filter: (val < %0.1f)" % min_v)
                    print(timelib.o2dt(d[f_idx]))
                    print(v[f_idx])
                    v[f_idx] = np.ma.masked
            if True:
                v_idx = (~np.ma.getmaskarray(v)).nonzero()[0]
                #This tries to maintain fixed window in time
                f = filtlib.rolling_fltr(v, size=7)
                #This uses fixed number of neighbors
                f = filtlib.rolling_fltr(v[v_idx], size=7)
                #f_diff = np.abs(f - v)
                #Note: the issue is usually that the velocity values are too low
                #f_diff = f - v
                f_diff = f - v[v_idx]
                diff_thresh = 2000
                #f_idx = (f_diff > diff_thresh).filled(False)
                #f_idx = (f_diff < diff_thresh).filled(False)
                f_idx = np.zeros_like(v.data).astype(bool)
                f_idx[v_idx] = (f_diff > diff_thresh)
                if np.any(f_idx):
                    print "Outliers removed by rolling median filter: (val < %0.1f)" % diff_thresh
                    print timelib.o2dt(d[f_idx])
                    print v[f_idx]
                    v[f_idx] = np.ma.masked
    return v
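The rolling-median test above relies on filtlib.rolling_fltr. For a plain 1-D array the same idea can be sketched with scipy.ndimage.median_filter, flagging samples that fall far below their local median; the window size and threshold reuse the values from the example.

import numpy as np
from scipy.ndimage import median_filter

def flag_low_outliers(v, size=7, diff_thresh=2000.0):
    """Flag samples that fall more than diff_thresh below their rolling median."""
    f = median_filter(v, size=size, mode='nearest')
    return (f - v) > diff_thresh

# Example: a velocity series with one spuriously low sample
v = np.full(50, 3000.0)
v[20] = -500.0
print(np.nonzero(flag_low_outliers(v))[0])   # -> [20]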
Example 9
def hist_plot(gf, outdir, bin_width=10.0):
    #print("Generating histograms")
    z_bin_edges, z_bin_centers = get_bins(gf.z1, bin_width)
    z1_bin_counts, z1_bin_edges = np.histogram(gf.z1, bins=z_bin_edges)
    z1_bin_areas = z1_bin_counts * gf.res[0] * gf.res[1] / 1E6
    #RGI standard is integer thousandths of the glacier's total area
    #Should check to make sure sum of bin areas equals total area
    z1_bin_areas_perc = 100. * z1_bin_areas / np.sum(z1_bin_areas)
    z2_bin_counts, z2_bin_edges = np.histogram(gf.z2, bins=z_bin_edges)
    z2_bin_areas = z2_bin_counts * gf.res[0] * gf.res[1] / 1E6
    z2_bin_areas_perc = 100. * z2_bin_areas / np.sum(z2_bin_areas)

    #dz_bin_edges, dz_bin_centers = get_bins(dz, 1.)
    #dz_bin_counts, dz_bin_edges = np.histogram(dz, bins=dz_bin_edges)
    #dz_bin_areas = dz_bin_counts * res * res / 1E6
    mb_bin_med = np.ma.masked_all_like(z1_bin_areas)
    mb_bin_mad = np.ma.masked_all_like(z1_bin_areas)
    dz_bin_med = np.ma.masked_all_like(z1_bin_areas)
    dz_bin_mad = np.ma.masked_all_like(z1_bin_areas)
    idx = np.digitize(gf.z1, z_bin_edges)
    for bin_n in range(z_bin_centers.size):
        mb_bin_samp = gf.mb[(idx == bin_n + 1)]
        if mb_bin_samp.count() > 0:
            mb_bin_med[bin_n] = malib.fast_median(mb_bin_samp)
            mb_bin_mad[bin_n] = malib.mad(mb_bin_samp)
            #Or use mean/std instead of the robust stats:
            #mb_bin_med[bin_n] = mb_bin_samp.mean()
            #mb_bin_mad[bin_n] = mb_bin_samp.std()
        dz_bin_samp = gf.dhdt[(idx == bin_n + 1)]
        if dz_bin_samp.count() > 0:
            dz_bin_med[bin_n] = malib.fast_median(dz_bin_samp)
            dz_bin_mad[bin_n] = malib.mad(dz_bin_samp)
            #Or use mean/std instead of the robust stats:
            #dz_bin_med[bin_n] = dz_bin_samp.mean()
            #dz_bin_mad[bin_n] = dz_bin_samp.std()

    #Should also export original dh/dt numbers, not mb
    #outbins_header = 'bin_center_elev, bin_count, dhdt_bin_med, dhdt_bin_mad, mb_bin_med, mb_bin_mad'
    #outbins = np.ma.dstack([z_bin_centers, z1_bin_counts, dz_bin_med, dz_bin_mad, mb_bin_med, mb_bin_mad]).astype('float32')[0]
    #fmt='%0.2f'
    outbins_header = 'bin_center_elev_m, z1_bin_count_valid, z1_bin_area_valid_km2, z1_bin_area_perc, z2_bin_count_valid, z2_bin_area_valid_km2, z2_bin_area_perc, dhdt_bin_med_ma, dhdt_bin_mad_ma, mb_bin_med_mwe, mb_bin_mad_mwe'
    fmt = '%0.1f, %i, %0.3f, %0.2f, %i, %0.3f, %0.2f, %0.2f, %0.2f, %0.2f, %0.2f'
    outbins = np.ma.dstack([
        z_bin_centers, z1_bin_counts, z1_bin_areas, z1_bin_areas_perc,
        z2_bin_counts, z2_bin_areas, z2_bin_areas_perc, dz_bin_med, dz_bin_mad,
        mb_bin_med, mb_bin_mad
    ]).astype('float32')[0]
    np.ma.set_fill_value(outbins, -9999.0)
    outbins_fn = os.path.join(outdir, gf.feat_fn + '_mb_bins.csv')
    np.savetxt(outbins_fn,
               outbins,
               fmt=fmt,
               delimiter=',',
               header=outbins_header)

    #print("Generating aed plot")
    f, axa = plt.subplots(1, 2, figsize=(6, 6))
    f.suptitle(gf.feat_fn)
    axa[0].plot(z1_bin_areas, z_bin_centers, label='%0.2f' % gf.t1)
    axa[0].plot(z2_bin_areas, z_bin_centers, label='%0.2f' % gf.t2)
    axa[0].axhline(gf.z1_ela, ls=':', c='C0')
    axa[0].axhline(gf.z2_ela, ls=':', c='C1')
    axa[0].legend(prop={'size': 8}, loc='upper right')
    axa[0].set_ylabel('Elevation (m WGS84)')
    axa[0].set_xlabel(r'Area $\mathregular{km^2}$')
    axa[0].minorticks_on()
    axa[1].yaxis.tick_right()
    axa[1].yaxis.set_label_position("right")
    axa[1].axvline(0, lw=1.0, c='k')
    axa[1].axvline(gf.mb_mean,
                   lw=0.5,
                   ls=':',
                   c='k',
                   label='%0.2f m w.e./yr' % gf.mb_mean)
    axa[1].legend(prop={'size': 8}, loc='upper right')
    axa[1].plot(mb_bin_med, z_bin_centers, color='k')
    axa[1].fill_betweenx(z_bin_centers,
                         0,
                         mb_bin_med,
                         where=(mb_bin_med < 0),
                         color='r',
                         alpha=0.2)
    axa[1].fill_betweenx(z_bin_centers,
                         0,
                         mb_bin_med,
                         where=(mb_bin_med > 0),
                         color='b',
                         alpha=0.2)
    #axa[1].set_ylabel('Elevation (m WGS84)')
    #axa[1].set_xlabel('dh/dt (m/yr)')
    axa[1].set_xlabel('mb (m w.e./yr)')
    axa[1].minorticks_on()
    axa[1].set_xlim(-2.0, 2.0)
    #axa[1].set_xlim(-8.0, 8.0)
    plt.tight_layout()
    #Make room for suptitle
    plt.subplots_adjust(top=0.95)
    #print("Saving aed plot")
    fig_fn = os.path.join(outdir, gf.feat_fn + '_mb_aed.png')
    plt.savefig(fig_fn, bbox_inches='tight', dpi=300)
    return z_bin_edges
Example 10
def make_plot3d(x, y, z, title=None, orthogonal_fig=True):
    cmean = np.mean([x, y, z], axis=1)
    cstd = np.std([x, y, z], axis=1)
    cmed = np.median([x, y, z], axis=1)
    cnmad = malib.mad([x, y, z], axis=1)
    x_corr = x - cmean[0]
    y_corr = y - cmean[1]
    z_corr = z - cmean[2]

    ce90 = geolib.CE90(x, y)
    ce90_corr = geolib.CE90(x_corr, y_corr)
    le90 = geolib.LE90(z)
    le90_corr = geolib.LE90(z_corr)

    coefs = [ce90, ce90, le90]
    #maxdim = np.ceil(np.max([np.max(np.abs([x, y, z])), ce90, le90]))
    maxdim = np.ceil(np.max([np.percentile(np.abs([x, y, z]), 99), ce90,
                             le90]))

    if orthogonal_fig:
        from matplotlib.patches import Ellipse
        #fig_ortho, axa = plt.subplots(1, 3, sharex=True, sharey=True, figsize=(10,5))
        fig_ortho, axa = plt.subplots(1, 3, figsize=(10, 5))
        title = 'Co-registration Translation Vector Components, n=%i\n' % x.shape[
            0]
        title += 'mean: (%0.2f, %0.2f, %0.2f), std: (%0.2f, %0.2f, %0.2f)\n' % (
            tuple(cmean) + tuple(cstd))
        title += 'med: (%0.2f, %0.2f, %0.2f), nmad: (%0.2f, %0.2f, %0.2f)\n' % (
            tuple(cmed) + tuple(cnmad))
        title += 'CE90: %0.2f (Bias-corrected: %0.2f), LE90: %0.2f (Bias-corrected: %0.2f)' % (
            ce90, ce90_corr, le90, le90_corr)
        plt.suptitle(title)

        dot_prop = {
            'color': 'k',
            'linestyle': 'None',
            'marker': '.',
            'ms': 3,
            'label': 'ICP correction vector',
            'alpha': 0.5
        }
        mean_prop = {
            'color': 'r',
            'linestyle': 'None',
            'marker': 'o',
            'label': 'Mean'
        }

        for ax in axa:
            ax.set_xlim(-maxdim, maxdim)
            ax.set_ylim(-maxdim, maxdim)
            ax.minorticks_on()
            ax.set_aspect('equal')

        axa[0].plot(x, y, **dot_prop)
        axa[0].plot(cmean[0], cmean[1], **mean_prop)
        axa[0].set_xlabel('X offset (m)')
        axa[0].set_ylabel('Y offset (m)')
        e = Ellipse((0, 0), 2 * ce90, 2 * ce90, linewidth=0, alpha=0.1)
        axa[0].add_artist(e)
        axa[0].legend(prop={'size': 8}, numpoints=1, loc='upper left')

        axa[1].plot(x, z, **dot_prop)
        axa[1].plot(cmean[0], cmean[2], **mean_prop)
        axa[1].set_xlabel('X offset (m)')
        axa[1].set_ylabel('Z offset (m)')
        e = Ellipse((0, 0), 2 * ce90, 2 * le90, linewidth=0, alpha=0.1)
        axa[1].add_artist(e)

        axa[2].plot(y, z, **dot_prop)
        axa[2].plot(cmean[1], cmean[2], **mean_prop)
        axa[2].set_xlabel('Y offset (m)')
        axa[2].set_ylabel('Z offset (m)')
        e = Ellipse((0, 0), 2 * ce90, 2 * le90, linewidth=0, alpha=0.1)
        axa[2].add_artist(e)

        plt.tight_layout()

        #Note: postscript doesn't properly handle transparency
        fig_fn = '%s_translation_vec_local_meters_orthogonal.pdf' % out_fn_prefix
        plt.savefig(fig_fn, dpi=600, bbox_inches='tight')
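For reference, a hedged sketch of the common empirical CE90/LE90 definitions (90th percentile of horizontal radial error and of absolute vertical error); geolib.CE90/geolib.LE90 may use a different, parametric formulation.

import numpy as np

def ce90_sketch(x, y):
    """Empirical CE90: radius containing 90% of horizontal offsets."""
    return np.percentile(np.sqrt(np.asarray(x)**2 + np.asarray(y)**2), 90)

def le90_sketch(z):
    """Empirical LE90: 90th percentile of absolute vertical offsets."""
    return np.percentile(np.abs(np.asarray(z)), 90)

# Example with synthetic co-registration offsets
rng = np.random.default_rng(0)
x, y, z = rng.normal(0, 1, (3, 500))
print(ce90_sketch(x, y), le90_sketch(z))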