# These snippets assume numpy (np), pandas (pd), scipy, matplotlib.pyplot (plt) and sys are
# imported, along with the authors' helpers: nmad (normalized median absolute deviation; a
# minimal sketch is given below), ft.wls_matrix (weighted least squares) and
# ot.neff_circ / ot.std_err (error propagation with spatial correlation).
def bin_dh_zsc_by_season(dh, zsc, date):
    """Median and NMAD of elevation differences (dh) and z-scores (zsc) per calendar month."""

    season_month_bins = np.arange(1, 13, 1)

    mon = pd.DatetimeIndex(date).month.values

    med_dh = []
    nmad_dh = []
    med_zsc = []
    nmad_zsc = []
    mid_bin = []
    for i in range(len(season_month_bins)):
        ind = (mon == season_month_bins[i])
        if np.count_nonzero(ind) > 0:
            # ind = np.logical_and(mon >= season_month_bins[i], mon < season_month_bins[i + 1])
            med_dh.append(np.nanmedian(dh[ind]))
            nmad_dh.append(nmad(dh[ind]))
            med_zsc.append(np.nanmedian(zsc[ind]))
            nmad_zsc.append(nmad(zsc[ind]))

            mid_bin.append(season_month_bins[i])

    return [
        np.array(mid_bin),
        np.array(med_dh),
        np.array(nmad_dh),
        np.array(med_zsc),
        np.array(nmad_zsc)
    ]
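
# The nmad() helper used throughout these snippets is not included in the excerpts. A minimal
# sketch, consistent with the inline computation in ddem_hypso further below (1.4826 times the
# median absolute deviation, NaN-aware):
def nmad(x):
    # normalized median absolute deviation: robust estimate of the dispersion of x
    x = np.asarray(x, dtype=float)
    return 1.4826 * np.nanmedian(np.abs(x - np.nanmedian(x)))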
Example #2
def bin_valid_df_by_season(df, var='dh', weight_ib=1./40):
    """Monthly median and NMAD of a validation variable, weighting each ICESat (ICS)
    point by 1 and each IceBridge (IB) point by weight_ib."""

    date = df.t
    season_month_bins = np.arange(1, 13, 1)
    mon = pd.DatetimeIndex(date).month.values

    med, std, mid_bin, ns_ics, ns_ib = ([] for i in range(5))
    for i in range(len(season_month_bins)):
        ind = (mon == season_month_bins[i])
        df_ind = df[ind]
        nics = np.count_nonzero(df_ind.sensor == 'ICS')
        nib = np.count_nonzero(df_ind.sensor == 'IB')
        ns_ics.append(nics)
        ns_ib.append(nib)
        # med.append(np.nanmedian(df_ind[var].values))
        # std.append(nmad(df_ind[var].values))
        if nics != 0 or nib != 0:
            med.append(np.nansum((np.nanmedian(df_ind[df_ind.sensor == 'ICS'][var]) * nics,
                   np.nanmedian(df_ind[df_ind.sensor == 'IB'][var]) * nib * weight_ib)) / (nics + nib * weight_ib))
            std.append(np.nansum((nmad(df_ind[df_ind.sensor == 'ICS'][var]) * nics,
                                      nmad(df_ind[df_ind.sensor == 'IB'][var]) * nib * weight_ib)) / (
                                       nics + nib * weight_ib))
        else:
            med.append(np.nan)
            std.append(np.nan)

        mid_bin.append(season_month_bins[i])

    df_out = pd.DataFrame()
    df_out = df_out.assign(seas_dec=mid_bin,ns_ics=ns_ics,ns_ib=ns_ib)
    df_out['med_'+var]=med
    df_out['nmad_'+var]=std

    return df_out
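
# A minimal usage sketch of the ICS/IB weighting (synthetic values, not from the original
# validation data): ICESat ('ICS') points count with weight 1 and IceBridge ('IB') points
# with weight_ib = 1/40, so each monthly value is a weighted average of the per-sensor medians.
_df_demo = pd.DataFrame({
    't': pd.to_datetime(['2003-03-15', '2003-03-20', '2009-03-10', '2009-03-12']),
    'dh': [0.5, 0.7, -0.2, -0.4],
    'sensor': ['ICS', 'ICS', 'IB', 'IB'],
})
# med_dh for March is (0.6 * 2 + (-0.3) * 2 / 40) / (2 + 2 / 40), i.e. about 0.58
print(bin_valid_df_by_season(_df_demo))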
def bin_dh_zsc_by_vals(dh, zsc, bins, bins_val):
    """Median and NMAD of dh and zsc, binned by a third variable (bins_val, e.g. slope)."""

    med_dh = []
    nmad_dh = []
    med_zsc = []
    nmad_zsc = []
    mid_bin = []
    for i in range(len(bins) - 1):
        ind = np.logical_and(bins_val >= bins[i], bins_val < bins[i + 1])
        # keep only bins with more than 100 samples
        if np.count_nonzero(ind) > 100:
            med_dh.append(np.nanmedian(dh[ind]))
            nmad_dh.append(nmad(dh[ind]))
            # nmad_dh.append(np.nanstd(dh[ind]))
            med_zsc.append(np.nanmedian(zsc[ind]))
            nmad_zsc.append(nmad(zsc[ind]))
            # nmad_zsc.append(np.nanstd(zsc[ind]))

            mid_bin.append(bins[i] + 0.5 * (bins[i + 1] - bins[i]))

    return [
        np.array(mid_bin),
        np.array(med_dh),
        np.array(nmad_dh),
        np.array(med_zsc),
        np.array(nmad_zsc)
    ]
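
# A minimal usage sketch (synthetic arrays, hypothetical values): bin elevation differences
# and z-scores by terrain slope in 5-degree classes, mirroring the call made on df.slp
# further below.
_rng = np.random.default_rng(0)
_slp = _rng.uniform(0, 60, 5000)
_dh = _rng.normal(0, 1 + _slp / 30)       # noise growing with slope
_zsc = _dh / (1 + _slp / 30)
_mid, _med_dh, _nmad_dh, _med_zsc, _nmad_zsc = bin_dh_zsc_by_vals(
    _dh, _zsc, np.arange(0, 60, 5), _slp)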
def bin_valid_df_by_vals(df,
                         bins,
                         bins_val,
                         list_var=['dh', 'zsc'],
                         ls_dvardt=True,
                         weight_ib=1. / 40,
                         return_ls=False):
    """Bin a validation DataFrame by bins_val, returning the ICS/IB-weighted median and
    NMAD of each variable in list_var per bin and, if ls_dvardt is True, a weighted
    least-squares temporal trend (d var/dt) per bin with its slope uncertainty."""

    mid_bin, med, std, dvardt, dvardt_2std, ns_ics, ns_ib = ([]
                                                             for i in range(7))
    for i in range(len(bins) - 1):
        ind = np.logical_and(bins_val >= bins[i], bins_val < bins[i + 1])
        df_ind = df[ind]
        nics = np.count_nonzero(df_ind.sensor == 'ICS')
        nib = np.count_nonzero(df_ind.sensor == 'IB')
        ns_ics.append(nics)
        ns_ib.append(nib)
        mid_bin.append(bins[i] + 0.5 * (bins[i + 1] - bins[i]))

        sub_med = []
        sub_std = []
        sub_dvardt = []
        sub_dvardt_2std = []
        sub_mu = []
        sub_w = []
        sub_t = []
        for var in list_var:
            if weight_ib is not None:
                if nics != 0 or nib != 0:
                    sub_med.append(
                        np.nansum(
                            (np.nanmedian(
                                df_ind[df_ind.sensor == 'ICS'][var]) * nics,
                             np.nanmedian(df_ind[df_ind.sensor == 'IB'][var]) *
                             nib * weight_ib)) / (nics + nib * weight_ib))
                    sub_std.append(
                        np.nansum(
                            (nmad(df_ind[df_ind.sensor == 'ICS'][var]) * nics,
                             nmad(df_ind[df_ind.sensor == 'IB'][var]) * nib *
                             weight_ib)) / (nics + nib * weight_ib))
                else:
                    sub_med.append(np.nan)
                    sub_std.append(np.nan)
            else:
                sub_med.append(np.nanmedian(df_ind[var]))
                sub_std.append(nmad(df_ind[var].values))

            if ls_dvardt:
                list_t = sorted(list(set(list(df_ind.t.values))))
                ftime_delta = np.array([
                    (np.datetime64(t) -
                     np.datetime64('{}-01-01'.format(int(2000)))).astype(int) /
                    365.2422 for t in list_t
                ])
                mu = []
                w = []
                for val_t in list_t:
                    ind_t = df_ind.t.values == val_t
                    df_ind_t = df_ind[ind_t]
                    nics_t = np.count_nonzero(df_ind_t.sensor == 'ICS')
                    nib_t = np.count_nonzero(df_ind_t.sensor == 'IB')
                    if np.count_nonzero(ind_t) > 20:
                        med_t = np.nansum(
                            (np.nanmedian(
                                df_ind_t[df_ind_t.sensor == 'ICS'][var]) *
                             nics_t,
                             np.nanmedian(
                                 df_ind_t[df_ind_t.sensor == 'IB'][var]) *
                             nib_t * weight_ib)) / (nics_t + nib_t * weight_ib)
                        mu.append(med_t)
                        std_t = np.nansum(
                            (nmad(df_ind_t[df_ind_t.sensor == 'ICS'][var]) *
                             nics_t,
                             nmad(df_ind_t[df_ind_t.sensor == 'IB'][var]) *
                             nib_t * weight_ib)) / (nics_t + nib_t * weight_ib)
                        w.append(std_t / np.sqrt(nics_t + nib_t * weight_ib))
                    else:
                        mu.append(np.nan)
                        w.append(np.nan)
                mu = np.array(mu)
                w = np.array(w)
                if np.count_nonzero(~np.isnan(mu)) > 5:
                    # reg = LinearRegression().fit(ftime_delta[~np.isnan(mu)].reshape(-1, 1),
                    #                              mu[~np.isnan(mu)].reshape(-1, 1))

                    beta1, _, incert_slope, _, _ = ft.wls_matrix(
                        ftime_delta[~np.isnan(mu)],
                        mu[~np.isnan(mu)],
                        1. / w[~np.isnan(mu)]**2,
                        conf_slope=0.95)
                    # fig = plt.figure()
                    # plt.scatter(ftime_delta,mu_dh,color='red')
                    # plt.plot(np.arange(0,10,0.1),reg.predict(np.arange(0,10,0.1).reshape(-1,1)),color='black',label=reg)
                    # plt.ylim([-20,20])
                    # plt.text(5,0,str(reg.coef_[0]))
                    # plt.legend()
                    # coef = reg.coef_[0][0]
                    coef = beta1
                    sub_dvardt.append(coef)
                    sub_dvardt_2std.append(incert_slope)
                else:
                    sub_dvardt.append(np.nan)
                    sub_dvardt_2std.append(np.nan)

                sub_mu.append(mu)
                sub_w.append(w)
                sub_t.append(ftime_delta)
        med.append(sub_med)
        std.append(sub_std)
        dvardt.append(sub_dvardt)
        dvardt_2std.append(sub_dvardt_2std)

    df_out = pd.DataFrame()
    df_out = df_out.assign(mid_bin=mid_bin, ns_ics=ns_ics, ns_ib=ns_ib)
    for var in list_var:
        df_out['med_' + var] = list(zip(*med))[list_var.index(var)]
        df_out['nmad_' + var] = list(zip(*std))[list_var.index(var)]
        if ls_dvardt:
            df_out['d' + var + '_dt'] = list(zip(*dvardt))[list_var.index(var)]
            df_out['d' + var + '_dt_2std'] = list(
                zip(*dvardt_2std))[list_var.index(var)]

    if return_ls and ls_dvardt:
        df_ls = pd.DataFrame()
        for var in list_var:
            # note: sub_mu, sub_w and sub_t are reset for every bin above, so df_ls only
            # holds the per-date series of the last bin processed
            df_ls['mu_' + var] = sub_mu[list_var.index(var)]
            df_ls['w_' + var] = sub_w[list_var.index(var)]
            df_ls['t_' + var] = sub_t[list_var.index(var)]
        return df_out, df_ls
    else:
        return df_out
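
# A minimal usage sketch (synthetic data, hypothetical values), with the least-squares
# trend disabled so that the example does not depend on ft.wls_matrix:
_rng = np.random.default_rng(1)
_n = 500
_df_demo = pd.DataFrame({
    't': pd.to_datetime(['2003-06-01'] * _n),
    'dh': _rng.normal(0, 2, _n),
    'zsc': _rng.normal(0, 1, _n),
    'slp': _rng.uniform(0, 60, _n),
    'sensor': _rng.choice(['ICS', 'IB'], _n),
})
_out = bin_valid_df_by_vals(_df_demo, np.arange(0, 60, 10), _df_demo.slp.values,
                            list_var=['dh', 'zsc'], ls_dvardt=False)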


fig = plt.figure(figsize=(9, 12))

plt.subplots_adjust(hspace=0.3)

ax = fig.add_subplot(3, 1, 1)

df = df_tot[df_tot.sensor == 'ICS']
nmad_gla = nmad(df[df.pos == 2].zsc)
nmad_stable = nmad(df[df.pos == 1].zsc)

ax.text(0.025,
        0.965,
        'a',
        transform=ax.transAxes,
        fontsize=14,
        fontweight='bold',
        va='top',
        ha='left')
ax.hist(df[df.pos == 1].zsc,
        np.arange(-5, 5, 0.1),
        label='Stable',
        alpha=0.5,
        color='tab:red')
for reg in list(set(list(df.reg))):
    df_reg = df[df.reg == reg]
    t0_n = bin_dh_zsc_by_season(df_reg.dh, df_reg.dh_ref, df_reg.t)
    coefs1, _ = scipy.optimize.curve_fit(
        lambda t, a, b, c: a**2 * np.sin(t * 2 * np.pi / 12 + c) + b,
        t0_n[0][:-1], t0_n[3][:-1])

    season_month_bins = np.arange(1, 13, 1)
    mon = pd.DatetimeIndex(df.t).month.values
    for i in range(len(season_month_bins)):
        ind = np.logical_and(mon == season_month_bins[i], df.reg == reg)
        # remove the fitted seasonal signal from dh; .loc avoids chained-indexing assignment
        df.loc[ind, 'dh'] -= coefs1[0]**2 * np.sin(season_month_bins[i] * 2 * np.pi /
                                                   12 + coefs1[2]) + coefs1[1]

sl_s = bin_dh_zsc_by_vals(df.zsc, df.dh, np.arange(0, 60, 5), df.slp)
nmad_zsc = nmad(df.zsc)
# nmad_dh = nmad(df.dh)
df_corr = df.copy()
vec_slope = np.arange(0, 60, 5)
for i in np.arange(len(vec_slope) - 1):
    ind = np.logical_and(df_corr.slp >= vec_slope[i],
                         df_corr.slp < vec_slope[i + 1])
    # normalize the z-scores by their slope-binned NMAD; .loc is required so the
    # assignment modifies df_corr rather than a temporary copy
    df_corr.loc[ind, 'zsc'] = df_corr.loc[ind, 'zsc'] / sl_s[2][i]
    # df_corr.dh[ind] = df_corr.dh[ind]/sl_s[4][i] * nmad_dh

df = df[np.abs(df_corr.zsc) < 3 * nmad_zsc]

nmad_dh = nmad(df.dh)
df = df[np.abs(df.dh) < 3 * nmad_dh]

nproc = 64
Example #7
def ddem_hypso(ddem,
               dem,
               mask,
               gsd,
               bin_type='fixed',
               bin_val=100.,
               filt='5NMAD'):
    """Hypsometric (elevation-binned) integration of a dDEM over a glacier mask: returns
    the area-weighted mean elevation change, its error, the fraction of area measured
    and the total area."""

    final_mask = np.logical_and(
        np.logical_and(np.isfinite(ddem), np.isfinite(dem)), mask)

    std_stable = nmad(ddem[~mask])

    area_tot = np.count_nonzero(mask) * gsd**2

    if np.count_nonzero(final_mask) == 0:
        return np.nan, np.nan, 0, area_tot

    dem_on_mask = dem[final_mask]
    ddem_on_mask = ddem[final_mask]

    min_elev = np.min(dem_on_mask) - (np.min(dem_on_mask) % bin_val)
    max_elev = np.max(dem_on_mask) + 1

    if bin_type == 'fixed':
        bin_final = bin_val
    elif bin_type == 'percentage':
        bin_final = np.ceil(bin_val / 100. * (max_elev - min_elev))
    else:
        sys.exit('Bin type not recognized.')

    bins_on_mask = np.arange(min_elev, max_elev, bin_final)
    nb_bin = len(bins_on_mask)

    elev_bin, nmad_bin, med_bin, mean_bin, std_bin, area_tot_bin, area_meas_bin = (
        np.zeros(nb_bin) * np.nan for i in range(7))

    for i in np.arange(nb_bin):

        idx_bin = np.array(dem_on_mask >= bins_on_mask[i]) & np.array(
            dem_on_mask < (bins_on_mask[i] + bin_final))
        idx_orig = np.array(dem >= bins_on_mask[i]) & np.array(
            dem < (bins_on_mask[i] + bin_final)) & mask
        area_tot_bin[i] = np.count_nonzero(idx_orig) * gsd**2
        area_meas_bin[i] = np.count_nonzero(idx_bin) * gsd**2
        elev_bin[i] = bins_on_mask[i] + bin_final / 2.
        dh_bin = ddem_on_mask[idx_bin]

        nvalid = len(dh_bin[~np.isnan(dh_bin)])
        if nvalid > 0:

            std_bin[i] = np.nanstd(dh_bin)
            med_bin[i] = np.nanmedian(dh_bin)
            if filt and nvalid > 10:
                median_temp = np.nanmedian(dh_bin)
                MAD_temp = np.nanmedian(np.absolute(dh_bin - median_temp))
                NMAD_temp = 1.4826 * MAD_temp
                nmad_bin[i] = NMAD_temp
                dh_bin[np.absolute(dh_bin - median_temp) > 5 *
                       NMAD_temp] = np.nan
            mean_bin[i] = np.nanmean(dh_bin)

    area_meas = np.nansum(area_meas_bin)
    perc_area_meas = area_meas / area_tot
    idx_nonvoid = area_meas_bin > 0

    final_mean = np.nansum(mean_bin * area_tot_bin) / area_tot

    list_vgm = [(gsd * 5, 'Sph', std_stable**2)]

    # list_vgm = [(corr_ranges[0],'Sph',final_num_err_corr[i,3]**2),(corr_ranges[1],'Sph',final_num_err_corr[i,2]**2),
    #             (corr_ranges[2],'Sph',final_num_err_corr[i,1]**2),(500000,'Sph',final_num_err_corr[i,0]**2)]
    neff_num_tot = ot.neff_circ(area_meas, list_vgm)
    final_err = ot.std_err(std_stable, neff_num_tot)

    print(final_mean)

    return final_mean, final_err, perc_area_meas, area_tot
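
# The hypsometric integration above reduces to an area-weighted mean of the per-bin mean
# elevation change divided by the total glacier area; void bins (NaN mean) drop out of the
# numerator but keep their area in the denominator, i.e. they are implicitly treated as
# zero change. A worked sketch with hypothetical numbers:
_mean_bin = np.array([-3.0, -1.5, np.nan, 0.2])          # m per elevation bin
_area_tot_bin = np.array([2.0e6, 5.0e6, 1.0e6, 3.0e6])   # m2 per elevation bin
_area_tot = np.sum(_area_tot_bin)
_final_mean = np.nansum(_mean_bin * _area_tot_bin) / _area_tot
# (-3.0*2e6 - 1.5*5e6 + 0.2*3e6) / 11e6, i.e. about -1.17 m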
Example #8
    # (excerpt starts mid-loop; the missing branch above presumably assigns sensor = 'ICS'
    #  for ICESat files)
    else:
        sensor = 'IB'
    tmp_df = tmp_df.assign(reg=reg,sensor=sensor)
    df = df.append(tmp_df)

#we want time series minus validation, easier to conceptualize
df.zsc = -df.zsc
df.dh = -df.dh
df.dh_ref = -df.dh_ref

#glacier only
df = df[np.abs(df.dh_ref)<300]
df = df[df.pos<=1]

#remove very large outliers
nmad_gla = nmad(df.zsc)
df=df[np.abs(df.zsc-np.nanmedian(df.zsc))<10*nmad_gla]

# rasterize_mean_shp_point(coords,vals,fn_rast_out,fn_hr_out,500,100,fn_ref_raster=fn_ref_raster)

# latlon_coords_to_buff_point_shp(list(zip(df.lon.values,df.lat.values)),df.zsc.values,fn_shp_out,200.)

#FINAL APPROACH:

fn_ref_raster = '/home/atom/ongoing/work_worldwide/dh_maps/01_02_rgi60_fig/fig3.vrt'

#get extent of interest
fn_icesat = '/home/atom/ongoing/work_worldwide/validation/old/icesat/valid_ICESat_01_0_rgi50_Alaska.csv'
df = pd.read_csv(fn_icesat, index_col=False)
ind = np.logical_and.reduce(
    (df.lon > -145, df.lon < -137, df.lat > 59, df.lat < 62))
df = df[ind]
df = df[df.pos == 2]
nmad_zsc = nmad(df.zsc)
ind = np.abs(df.zsc) < 10 * nmad_zsc
print('Removing with NMAD: ' + str(np.count_nonzero(~ind)) + ' out of ' +
      str(len(ind)))
df = df[ind]
df = df.drop(columns=['dh', 'dh_ref', 'curv', 'slp', 'dt', 't', 'pos', 'h'])
df.to_csv('/home/atom/ongoing/work_worldwide/fig3_icesat_df.csv')

#same spatial extent and outlier filtering for the IceBridge data
fn_ib = '/home/atom/ongoing/work_worldwide/validation/icebridge/valid_ILAKS1B_01_02_rgi60_50_pt.csv'
df = pd.read_csv(fn_ib, index_col=False)
ind = np.logical_and.reduce(
    (df.lon > -145, df.lon < -137, df.lat > 59, df.lat < 62))
df = df[ind]
df = df[df.pos == 2]
nmad_zsc = nmad(df.zsc)
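
# (excerpt truncated: the keyword arguments below continue a per-glacier plotting call,
#  likely ax.errorbar, comparing GP and high-resolution DEM elevation-change rates; its
#  opening lines, with the x/y values, are missing from this excerpt)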
                marker='o',
                mec='k',
                ms=kval * (df_hr_rgiid.area.values[0] / 1000000)**0.5,
                mew=0.25,
                elinewidth=0.5,
                ecolor=col,
                mfc=col,
                alpha=0.9)
    #,ecolor=colors[sites.index(data['Site'][value])]mfc=colors[sites.index(data['Site'][value])],alpha=0.5)
    diff2.append(df_hr_rgiid.dhdt.values[0] - df_gp_rgiid.dhdt.values[0])
    list_area2.append(df_hr_rgiid.area.values[0])

ax.text(-1.5,
        0,
        'Mean bias:\n' + str(np.round(np.nanmean(diff2), 2)) + '$\pm$' +
        str(np.round(2 * nmad(diff2) / np.sqrt(len(diff2)), 2)) +
        ' m yr$^{-1}$',
        ha='center',
        va='center',
        bbox=dict(boxstyle='round', facecolor='white', alpha=0.7))

print(np.nanmean(diff2))
print(
    np.nansum(np.array(diff2) * np.array(list_area2)) /
    np.nansum(np.array(list_area2)))

ax.set_ylabel('GP mean elevation change (m yr$^{-1}$)')
ax.set_xlabel('High-resolution DEMs mean elevation change (m yr$^{-1}$)')

#plt.legend(loc='upper left')
ax.set_xlim([-2.75, 0.5])