Code example #1
def calc_syn_SICEXT_TS(run_type, ref_start, ref_end, monthly=True):
    out_name = "./" + run_type + "_sicext_ts.nc"
    dates = numpy.array([1899 + float(x)/12 for x in range(0, 2424)], 'f')
    lats = numpy.array([90-x for x in range(0,180)], 'f')
    d_lon = 1.0
    mv = -1e30
    if not os.path.exists(out_name):
        ptiles = numpy.array([10,25,50,75,90], 'f')
        samples = numpy.array([0,4,9,14,19], 'f')
        samples_2 = numpy.arange(0, len(ptiles)*len(samples), dtype='i')
        print len(samples_2), samples_2.dtype
        ens_ts = numpy.zeros([2, len(ptiles)*len(samples), dates.shape[0]], 'f')
        # get the synthetic SIC filename
        #
        c = 0
        for ptile in ptiles:
            for a in samples:
                syn_sst_fname = get_syn_sst_filename(run_type,ref_start,ref_end,neofs,eof_year,int(ptile),ivm,monthly)
                syn_sst_fname = syn_sst_fname[:-3] + "_s" + str(int(a)) + ".nc"
                syn_sic_fname = syn_sst_fname.replace("ssts", "sic").replace("sst", "sic")
                sic_data = load_sst_data(syn_sic_fname, "sic")
                sic_ext_nh, sic_ext_sh = calc_sea_ice_extent(sic_data, lats, d_lon, mv)
                print sic_ext_nh.shape, ens_ts.shape
                ens_ts[0,c] = sic_ext_nh
                ens_ts[1,c] = sic_ext_sh
                c += 1
        # save the timeseries
        save_ens_ts_file(out_name, ens_ts, samples_2, dates, "sic")
    else:
        ens_ts = load_data(out_name, "sic")
    
    return ens_ts, dates
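
`calc_sea_ice_extent` is external to these listings (as are the module-level globals `neofs`, `eof_year` and `ivm` used above). A minimal sketch of the extent calculation, assuming SIC is a fraction in [0, 1], the conventional 15% concentration threshold, and a regular 1-degree grid:

import numpy

def calc_sea_ice_extent(sic_data, lats, d_lon, mv, thresh=0.15):
    # grid-cell area on a regular grid: R^2 * cos(lat) * dlat * dlon
    R = 6371.0                                  # Earth radius in km
    d_lat = abs(lats[0] - lats[1])
    cell_area = (R * R * numpy.cos(numpy.radians(lats)) *
                 numpy.radians(d_lat) * numpy.radians(d_lon))
    nh = lats >= 0.0
    ext_nh = numpy.zeros(sic_data.shape[0], 'f')
    ext_sh = numpy.zeros(sic_data.shape[0], 'f')
    for t in range(sic_data.shape[0]):
        ice = (sic_data[t] != mv) & (sic_data[t] >= thresh)
        # total area of the ice-covered cells in each hemisphere
        ext_nh[t] = (ice[nh] * cell_area[nh][:, None]).sum()
        ext_sh[t] = (ice[~nh] * cell_area[~nh][:, None]).sum()
    return ext_nh, ext_sh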
Code example #2
def calc_syn_GMSST_TS(run_type, ref_start, ref_end, monthly=True):
    out_name = "./" + run_type + "_gmsst_ts.nc"
    dates = numpy.array([1899 + float(x)/12 for x in range(0, 2424)], 'f')
    if not os.path.exists(out_name):
        # get the synthetic SST filename
        #
        ptiles = numpy.array([10,25,50,75,90], 'f')
        samples = numpy.array([0,4,9,14,19], 'f')
        samples_2 = numpy.arange(0, len(ptiles)*len(samples), dtype='i')
        print len(samples_2), samples_2.dtype
        ens_ts = numpy.zeros([2, len(ptiles)*len(samples), dates.shape[0]], 'f')
        c = 0
        for ptile in ptiles:
            for a in samples:
                syn_sst_fname = get_syn_sst_filename(run_type,ref_start,ref_end,neofs,eof_year,int(ptile),ivm,monthly)
                syn_sst_fname = syn_sst_fname[:-3] + "_s" + str(int(a)) + ".nc"
                sst_data = load_sst_data(syn_sst_fname, "sst")
                gmsst_nh = calc_GMSST(sst_data[:,:90,:],1)
                gmsst_sh = calc_GMSST(sst_data[:,90:,:],2)
                ens_ts[0,c] = gmsst_nh
                ens_ts[1,c] = gmsst_sh
                c += 1
        # save the timeseries
        save_ens_ts_file(out_name, ens_ts, samples_2, dates, "gmsst")
    else:
        ens_ts = load_data(out_name, "gmsst")
    
    return ens_ts, dates
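
`calc_GMSST` is likewise external. A plausible sketch of an area-weighted (cos-latitude) mean over a [time, lat, lon] field, where the second argument is assumed to select which half of the 1-degree grid was passed in:

import numpy

def calc_GMSST(sst, hemi=0):
    # hemi=1: northern 90 rows (89.5..0.5); hemi=2: southern 90 rows
    # (-0.5..-89.5); anything else: the full 180-row grid
    if hemi == 1:
        lats = numpy.arange(89.5, 0.0, -1.0)
    elif hemi == 2:
        lats = numpy.arange(-0.5, -90.0, -1.0)
    else:
        lats = numpy.arange(89.5, -90.0, -1.0)
    w = numpy.cos(numpy.radians(lats))
    # cos(lat)-weighted mean over lat and lon for every timestep
    return (sst * w[None, :, None]).sum(axis=(1, 2)) / (w.sum() * sst.shape[2])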
Code example #3
def create_cmip5_long_term_mean_timeseries(run_type,
                                           ref_start,
                                           ref_end,
                                           monthly=True,
                                           run_n=400):
    # create the cmip5 timeseries of the cmip5 ensemble mean
    # This consists of the ens mean of the cmip5 anomalies from the reference
    # plus the HadISST reference

    # check which run type we should actually load.
    # Likely is rcp45 + adjustment
    if run_type == "likely":
        load_run_type = "rcp45"
    else:
        load_run_type = run_type

    # get the dates
    histo_sy, histo_ey, rcp_sy, rcp_ey = get_start_end_periods()
    hadisst_ey = 2010

    # load the ensemble mean of the anomalies
    cmip5_ens_mean_anoms_fname = get_concat_anom_sst_ens_mean_smooth_fname(
        load_run_type, ref_start, ref_end, monthly=monthly)
    cmip5_ens_mean_anoms = load_sst_data(cmip5_ens_mean_anoms_fname, "sst")

    # if we have the likely scenario then fit the period between 2016 and 2035 to the likely
    # scenario from AR5 Ch11
    if run_type == "likely":
        cmip5_ens_mean_anoms = fit_mean_to_likely(cmip5_ens_mean_anoms,
                                                  monthly)

    # add it onto the ensemble mean anomalies
    cmip5_ens_mean = cmip5_ens_mean_anoms

    return cmip5_ens_mean
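
Note that, despite its final comment, the function returns the anomalies unchanged; the HadISST pieces are added by its callers. A hypothetical driver combining it with correct_cmip5_long_term_mean_timeseries (example #5) and create_hadisst_monthly_reference (example #9); the run type and reference years are illustrative only:

cmip5_ts = create_cmip5_long_term_mean_timeseries("rcp45", 1986, 2005, monthly=True)
cmip5_ts = correct_cmip5_long_term_mean_timeseries(cmip5_ts, monthly=True)
n_years = cmip5_ts.shape[0] / 12        # integer division under Python 2
hadisst_ac = create_hadisst_monthly_reference("rcp45", 1986, 2005, n_years)
full_sst = cmip5_ts + hadisst_ac        # absolute SSTs: trend plus annual cycle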
Code example #4
def calc_HadISST_GMSST_TS():
    in_fname = get_HadISST_input_filename(400)
    hadisst = load_sst_data(in_fname, "sst")
    dates = numpy.array([1850 + float(x)/12 for x in range(0, hadisst.shape[0])], 'f')
    gmsst_hadisst_nh = calc_GMSST(hadisst[:,:90,:],1)
    gmsst_hadisst_sh = calc_GMSST(hadisst[:,90:,:],2)
    return gmsst_hadisst_nh, gmsst_hadisst_sh, dates
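
A hypothetical usage, plotting the two hemispheric series in the same style as the plot code of example #11:

import matplotlib.pyplot as plt

gmsst_nh, gmsst_sh, dates = calc_HadISST_GMSST_TS()
sp = plt.subplot(111)
sp.plot(dates, gmsst_nh, 'r')           # northern hemisphere
sp.plot(dates, gmsst_sh, 'b')           # southern hemisphere
plt.savefig("hadisst_gmsst_ts.pdf")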
Code example #5
def correct_cmip5_long_term_mean_timeseries(cmip5_ts, monthly=True, run_n=400):
    # correct the ensemble mean of the CMIP5 ensemble, this is achieved by subtracting the
    # difference between the 5 year mean of HadISST and the 5 year mean of the CMIP5 ensemble 
    # mean for 2006->2010 (the overlap period) from the CMIP5 ensemble mean
    # need to load in the hadisst values to enable the correction
    # get the dates
    histo_sy, histo_ey, rcp_sy, rcp_ey = get_start_end_periods()
    hadisst_ey = 2010
    
    if monthly:
        hadisst_smoothed_fname = get_HadISST_month_smooth_filename(histo_sy, hadisst_ey, run_n)
    else:
        hadisst_smoothed_fname = get_HadISST_smooth_fname(histo_sy, hadisst_ey, run_n)
    hadisst_sst = load_sst_data(hadisst_smoothed_fname, "sst")

    # if monthly calculate the overlap indices in terms of months by multiplying by 12
    if monthly:
        # correct each month individually
        ovl_idx = (hadisst_ey - histo_sy) * 12      # start offset
        mon_correct = numpy.zeros([12, cmip5_ts.shape[1], cmip5_ts.shape[2]], 'f')
        for m in range(0, 12):
            cmip5_ens_mean_monmean = numpy.mean(cmip5_ts[ovl_idx-5+m:ovl_idx+m:12], axis=0)
            hadisst_monmean = numpy.mean(hadisst_sst[ovl_idx-5+m::12], axis=0)
            mon_correct[m] = hadisst_monmean - cmip5_ens_mean_monmean
        # tile and subtract from cmip5 timeseries
        n_repeats = cmip5_ts.shape[0] / 12
        cmip5_ens_mean_correction = numpy.tile(mon_correct, [n_repeats,1,1])
    else:
        ovl_idx = hadisst_ey - histo_sy    
        cmip5_ens_mean_timmean = numpy.mean(cmip5_ts[ovl_idx-5:ovl_idx], axis=0)
        hadisst_timmean = numpy.mean(hadisst_sst[ovl_idx-5:], axis=0)
        cmip5_ens_mean_correction = hadisst_timmean - cmip5_ens_mean_timmean
    return cmip5_ts + cmip5_ens_mean_correction
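
A toy check of the yearly branch, assuming histo_sy = 1899 (consistent with the date arrays elsewhere in these listings): whatever constant offset separates the two series over the overlap years becomes the correction.

import numpy

histo_sy, hadisst_ey = 1899, 2010
ovl_idx = hadisst_ey - histo_sy                 # 111
cmip5 = numpy.zeros(203, 'f')                   # stand-in yearly ensemble mean
hadisst = numpy.zeros(112, 'f') + 0.3           # stand-in obs, offset by +0.3
correction = numpy.mean(hadisst[ovl_idx-5:]) - numpy.mean(cmip5[ovl_idx-5:ovl_idx])
# correction == 0.3, so cmip5 + correction lines up with hadisst over the overlap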
Code example #6
def create_syn_SST_PCs(run_type, ref_start, ref_end, eof_year, neofs, nsamps, model_mean=False, monthly=False):
    # load the PCs, EOFs for this year
    pcs_fname = get_cmip5_PC_filename(run_type, ref_start, ref_end, eof_year, model_mean, monthly)
    pcs = load_data(pcs_fname, "sst")
    eof_fname = get_cmip5_EOF_filename(run_type, ref_start, ref_end, eof_year, model_mean, monthly)
    eofs = load_data(eof_fname, "sst")
    
    # load the smoothed ensemble mean
    ens_mean_fname = get_concat_anom_sst_ens_mean_smooth_fname(run_type, ref_start, ref_end, monthly)
    ens_mean = load_sst_data(ens_mean_fname, "sst")
    # we only need one ensemble mean - calculate decadal mean
    histo_sy, histo_ey, rcp_sy, rcp_ey = get_start_end_periods()
    ens_mean = ens_mean[eof_year-histo_sy]

    # transform pc data to R compatible format
    pcs = pcs.byteswap().newbyteorder()
    # create the return storage
    select_PCs = numpy.zeros([pcs.shape[0], nsamps, neofs], 'f')
    # percentile ranges
    ptiles = [0.10, 0.25, 0.50, 0.75, 0.90]
    select_PCs = numpy.random.random(select_PCs.shape)

    # now loop through each month pcs - if yearly mean then there will only be one
    for m in range(0, pcs.shape[0]):
        # fit a copula to the principal components
        pc_mvdc = fit_mvdc(pcs[m], neofs)

        # generate a large sample of GMSSTs and their corresponding PCs
        sst_means_and_PCs = generate_large_sample_of_SSTs(pc_mvdc, eofs[m], ens_mean, neofs)

        # now sample the distribution to get nsamps number of PCs which
        # represent the distribution of GMSSTs
        select_PCs[m] = sample_SSTs(sst_means_and_PCs, neofs, nsamps, ptiles)
    
    # sort the pcs based on the first pc for each of the percentiles
    sorted_select_PCs = numpy.zeros([pcs.shape[0], nsamps, neofs], 'f')
    pts_per_pc = int(nsamps/len(ptiles))
    for m in range(0, pcs.shape[0]):
        for p in range(0, len(ptiles)):
            s = p*pts_per_pc
            e = (p+1)*pts_per_pc
            # get the first pc for this ptile
            pc0 = select_PCs[m,s:e,0]
            # sort it and get the indices
            pc0_sort = numpy.argsort(pc0)
            # sort all the pcs so that the corresponding pc0 is ascending
            for f in range(0, neofs):
                pc1 = select_PCs[m,s:e,f]
                sorted_select_PCs[m,s:e,f] = pc1[pc0_sort]
    
    # save
    out_fname = get_syn_SST_PCs_filename(run_type, ref_start, ref_end, eof_year, monthly)
    out_fname = out_fname[:-3] + "_new.nc"
    # fix the missing value meta data
    out_attrs = {"missing_value" : 2e20}
    # save the selected PCAs
    save_pcs(out_fname, sorted_select_PCs, out_attrs)
    print out_fname
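
The inner pair of loops reorders each percentile block so that its first PC ascends, carrying the other EOF coefficients along. In numpy the same row sort can be written directly:

import numpy

block = numpy.random.random([20, 6])    # one percentile block: samples x EOFs
order = numpy.argsort(block[:, 0])      # indices that sort the first PC
sorted_block = block[order]             # all columns reordered together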
Code example #7
def calc_HadISST_GMSST_SICEXT():
    in_fname = get_HadISST_input_filename(400)
    hadisst = load_sst_data(in_fname, "sic")
    dates = numpy.array([1850 + float(x)/12 for x in range(0, hadisst.shape[0])], 'f')
    lats = numpy.array([90-x for x in range(0,180)], 'f')
    d_lon = 1.0
    mv = -1.0e30
    sicext_hadisst_nh, sicext_hadisst_sh = calc_sea_ice_extent(hadisst, lats, d_lon, mv)
    return sicext_hadisst_nh, sicext_hadisst_sh, dates
Code example #8
def create_Ma_syn_SST_PCs(run_type, ref_start, ref_end, eof_year, neofs, ptile, model_mean=False, monthly=False):
    # load the PCs, EOFs for this year
    pcs_fname = get_cmip5_PC_filename(run_type, ref_start, ref_end, eof_year, model_mean, monthly)
    pcs = load_data(pcs_fname, "sst")
    eof_fname = get_cmip5_EOF_filename(run_type, ref_start, ref_end, eof_year, model_mean, monthly)
    eofs = load_data(eof_fname, "sst")
    
    # load the smoothed ensemble mean
    ens_mean_fname = get_concat_anom_sst_ens_mean_smooth_fname(run_type, ref_start, ref_end, monthly)
    ens_mean = load_sst_data(ens_mean_fname, "sst")
    # we only need one ensemble mean - calculate decadal mean
    histo_sy, histo_ey, rcp_sy, rcp_ey = get_start_end_periods()
    ens_mean = ens_mean[eof_year-histo_sy]

    # transform pc data to R compatible format
    pcs = pcs.byteswap().newbyteorder()
    nsamps = 100
    nmons = pcs.shape[0]
    # create the return storage
    select_PCs = numpy.zeros([pcs.shape[0], nsamps, neofs+2], 'f')

    # now loop through each month pcs - if yearly mean then there will only be one
    for m in range(0, nmons):
        # fit a copula to the principal components
        pc_mvdc = fit_mvdc(pcs[m], neofs)

        # generate a large sample of GMSSTs and their corresponding PCs
        sst_means_and_PCs = generate_Ma_large_sample_of_SSTs(pc_mvdc, eofs[m], ens_mean, neofs)

        # now sample the distribution to get nsamps number of PCs which
        # represent the distribution of GMSSTs
        select_PCs[m] = sample_Ma_SSTs(sst_means_and_PCs, neofs, nsamps, ptile)
    
    # sort the pcs based on the first pc for each of the percentiles
    sorted_select_PCs = numpy.zeros([nmons, 2, neofs], 'f')
    for m in range(0, nmons):
        # get the NA indices for this month
        na_idxs = select_PCs[m,:,1]
        # sort it and get the indices
        na_idxs_sort = numpy.argsort(na_idxs)
        # get the first and last in the list sorted by NA indices
        # - i.e. where the North Atlantic index is the most different
        # we just want the PCs now
        for e in range(0, neofs):
            sorted_select_PCs[m,0,e] = select_PCs[m,:,2+e][na_idxs_sort[0]]
            sorted_select_PCs[m,1,e] = select_PCs[m,:,2+e][na_idxs_sort[-1]]

    # we now have two sets of PCs - one at each end of the distribution of NA SST gradient for the desired percentile
    # save
    out_fname = get_Ma_syn_SST_PCs_filename(run_type, ref_start, ref_end, eof_year, ptile, monthly)
    # fix the missing value meta data
    out_attrs = {"missing_value" : 2e20}
    # save the selected PCAs
    save_pcs(out_fname, sorted_select_PCs, out_attrs)
    print out_fname
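
Per month, the saved array holds the PCs at the two extremes of the sorted NA index (patterns 0 and 1), which create_Ma_syn_SSTs (example #13) later reads as syn_pc[month, pattern, eof]. Assuming the sample columns are [GMSST, NA index, PC0, PC1, ...], the selection reduces to:

import numpy

neofs = 6
samp = numpy.random.random([100, neofs + 2])    # stand-in sample rows
order = numpy.argsort(samp[:, 1])               # sort rows by the NA index column
pattern_0 = samp[order[0], 2:]                  # PCs at the smallest NA index
pattern_1 = samp[order[-1], 2:]                 # PCs at the largest NA index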
Code example #9
def create_hadisst_monthly_reference(run_type, ref_start, ref_end, n_repeats, run_n=400):
    # create the annual cycle from hadisst, repeating it a number of times
    # so as to add it onto the CMIP5 timeseries
    
    # get the dates
    histo_sy, histo_ey, rcp_sy, rcp_ey = get_start_end_periods()
    hadisst_ey = 2010

    # load in the monthly smoothed reference
    mon_smooth_name = get_HadISST_monthly_reference_fname(histo_sy, hadisst_ey, ref_start, ref_end, run_n)
    mon_smooth_ref = load_sst_data(mon_smooth_name, "sst")
    mon_smooth_ref_tile = numpy.tile(mon_smooth_ref, [n_repeats,1,1])
    return mon_smooth_ref_tile
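
numpy.tile repeats the 12-month reference along the time axis only; a quick shape check:

import numpy

ac = numpy.zeros([12, 180, 360], 'f')   # one annual cycle on a 1-degree grid
tiled = numpy.tile(ac, [30, 1, 1])
print(tiled.shape)                      # (360, 180, 360): 30 repeated years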
Code example #10
def create_hadisst_long_term_timeseries(monthly=True, run_n=400):
    # get the dates
    histo_sy, histo_ey, rcp_sy, rcp_ey = get_start_end_periods()
    hadisst_ey = 2010
    
    # create the long term trend timeseries from the monthly smoothed hadisst data
    if monthly:
        hadisst_smoothed_fname = get_HadISST_month_smooth_filename(histo_sy, hadisst_ey, run_n)
    else:
        hadisst_smoothed_fname = get_HadISST_smooth_fname(histo_sy, hadisst_ey, run_n)
    
    hadisst_sst = load_sst_data(hadisst_smoothed_fname, "sst")
    
    return hadisst_sst
Code example #11
def plot_test_residuals(histo_sy, histo_ey, ref_start, ref_end, run_n):
    # load the yearly eofs and pcs
    yr_eof_fname = get_HadISST_residual_EOFs_fname(histo_sy, histo_ey, run_n)
    yr_eofs = load_sst_data(yr_eof_fname, "sst")
    yr_pcs_fname = get_HadISST_residual_PCs_fname(histo_sy, histo_ey, run_n)
    yr_pcs = load_data(yr_pcs_fname)
    
    # load the monthly eofs and pcs
    mn_eof_fname = get_HadISST_monthly_residual_EOFs_fname(histo_sy, histo_ey, run_n)
    mn_eofs = load_sst_data(mn_eof_fname, "sst")
    mn_pcs_fname = get_HadISST_monthly_residual_PCs_fname(histo_sy, histo_ey, run_n)
    mn_pcs = load_data(mn_pcs_fname)
    
    # load the smoothed hadisst data - needed for the comparison line plotted below
    smooth_fname = get_HadISST_smooth_fname(histo_sy, histo_ey, run_n)
    smooth_hadisst = load_sst_data(smooth_fname, "sst")
    smooth_gmsst = calc_GMSST(smooth_hadisst)
    smooth_gmsst = smooth_gmsst - numpy.mean(smooth_gmsst[1986-1899:2006-1899])
    
    # reconstruct the fields
    yr_resids = reconstruct_field(yr_pcs, yr_eofs, 20)
    mn_resids = reconstruct_field(mn_pcs, mn_eofs, 20)
        
    # calculate the gmsst
    yr_gmsst = calc_GMSST(yr_resids)
    mn_gmsst = calc_GMSST(mn_resids)
    
    # plot them
    yr_t = numpy.arange(1899,2011,1)
    mn_t = numpy.arange(1899,2011,1.0/12)
    
    sp = plt.subplot(111)
    sp.plot(yr_t, yr_gmsst, 'r', zorder=1)
    sp.plot(mn_t, mn_gmsst, 'k', zorder=0)
    sp.plot(yr_t, smooth_gmsst[:-1], 'b', lw=2.0)
    
    plt.savefig("hadisst_resids.pdf")
Code example #12
def create_cmip5_rcp_anomalies(run_type, ref_start, ref_end, eof_year, percentile, monthly=True):
    # create the time series of anomalies from the mean of the various 
    # samples in the CMIP5 ensemble
    # This spans the uncertainty of the GMT response to GHG forcing in CMIP5 

    if run_type == "likely":
        load_run_type = "rcp45"
    else:
        load_run_type = run_type

    # load the eof patterns in the eof_year
    eof_fname = get_cmip5_EOF_filename(load_run_type, ref_start, ref_end, eof_year, monthly=monthly)
    eofs = load_sst_data(eof_fname, "sst")
    
    # load the principal components for the eof_year
    syn_pc_fname  = get_syn_SST_PCs_filename(load_run_type, ref_start, ref_end, eof_year, monthly=monthly)
    syn_pc_fname_new = syn_pc_fname[:-3] + "_new.nc"
    syn_pc = load_data(syn_pc_fname_new, "sst")
    
    # load the timeseries of scalings and offsets to the pcs over the CMIP5 period
    proj_pc_scale_fname = get_cmip5_proj_PC_scale_filename(load_run_type, ref_start, ref_end, eof_year, monthly=monthly)
    proj_pc_scale  = load_data(proj_pc_scale_fname, "sst_scale")
    proj_pc_offset = load_data(proj_pc_scale_fname, "sst_offset")
    
    # corresponding weights that we supplied to the EOF function
    coslat = numpy.cos(numpy.deg2rad(numpy.arange(89.5, -90.5,-1.0))).clip(0., 1.)
    wgts = numpy.sqrt(coslat)[..., numpy.newaxis]

    # create the timeseries of reconstructed SSTs for just this sample
    # recreate the field - month by month if necessary
    if monthly:
        syn_sst_rcp = numpy.ma.zeros([proj_pc_scale.shape[0], eofs.shape[2], eofs.shape[3]], 'f')
        for m in range(0, 12):
            pc_ts = syn_pc[m,percentile,:neofs] * proj_pc_scale[m::12,:neofs] + proj_pc_offset[m::12,:neofs]
            syn_sst_rcp[m::12] = reconstruct_field(pc_ts, eofs[m], neofs, wgts)
    else:
        pc_ts = syn_pc[0,percentile,:neofs] * proj_pc_scale[:,:neofs] + proj_pc_offset[:,:neofs]
        syn_sst_rcp = reconstruct_field(pc_ts, eofs[0], neofs, wgts)
    return syn_sst_rcp
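
reconstruct_field is external (and neofs is again a module-level global here). Given that the EOFs were computed from sqrt(cos(lat))-weighted anomalies, a plausible sketch projects the leading PCs back onto the patterns and removes the weighting:

import numpy

def reconstruct_field(pcs, eofs, neofs, wgts=None):
    # pcs: [time, n_pcs]; eofs: [n_modes, nlat, nlon]
    field = numpy.einsum('tk,kij->tij', pcs[:, :neofs], eofs[:neofs])
    if wgts is not None:
        field = field / wgts    # undo the sqrt(cos(lat)) weighting; wgts is [nlat, 1]
    return field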
Code example #13
def create_Ma_syn_SSTs(run_type, ref_start, ref_end, sy, ey, eof_year, neofs,
                       ptile, monthly):

    # determine which hadisst ensemble member to use
    hadisst_ens_members = [
        1059, 115, 1169, 1194, 1346, 137, 1466, 396, 400, 69
    ]
    run_n = hadisst_ens_members[numpy.random.randint(0,
                                                     len(hadisst_ens_members))]

    # load the CMIP5 ensemble mean timeseries
    # load the ensemble mean of the anomalies
    cmip5_ens_mean_anoms_fname = get_concat_anom_sst_ens_mean_smooth_fname(
        run_type, ref_start, ref_end, monthly)
    cmip5_ens_mean_anoms = load_sst_data(cmip5_ens_mean_anoms_fname, "sst")

    # load the eof patterns in the eof_year
    eof_fname = get_cmip5_EOF_filename(run_type,
                                       ref_start,
                                       ref_end,
                                       eof_year,
                                       monthly=True)
    eofs = load_sst_data(eof_fname, "sst")

    # load the principal components for the eof_year
    syn_pc_fname = get_Ma_syn_SST_PCs_filename(run_type,
                                               ref_start,
                                               ref_end,
                                               eof_year,
                                               ptile,
                                               monthly=True)
    syn_pc = load_data(syn_pc_fname, "sst")

    # load the timeseries of scalings and offsets to the pcs over the CMIP5 period
    proj_pc_scale_fname = get_cmip5_proj_PC_scale_filename(run_type,
                                                           ref_start,
                                                           ref_end,
                                                           eof_year,
                                                           monthly=True)
    proj_pc_scale = load_data(proj_pc_scale_fname, "sst_scale")
    proj_pc_offset = load_data(proj_pc_scale_fname, "sst_offset")

    # corresponding weights that we supplied to the EOF function
    coslat = numpy.cos(numpy.deg2rad(numpy.arange(89.5, -90.5,
                                                  -1.0))).clip(0., 1.)
    wgts = numpy.sqrt(coslat)[..., numpy.newaxis]

    # create the timeseries of reconstructed SSTs for just this sample
    # recreate the field - month by month
    # pattern number
    pn = 0

    nmons = 12
    # sub set the mean anomalies and the proj_pc_scale and offset
    cmip5_sy = 1899
    si = (sy - cmip5_sy) * 12
    ei = (ey - cmip5_sy) * 12
    cmip5_ens_mean_anoms = cmip5_ens_mean_anoms[si:ei]

    if ey == 2101:
        # create 2101
        S = cmip5_ens_mean_anoms.shape
        cmip5_ens_mean_anoms2 = numpy.zeros([S[0] + 12, S[1], S[2]], 'f')
        cmip5_ens_mean_anoms2[:S[0]] = cmip5_ens_mean_anoms
        cmip5_ens_mean_anoms2[-12:] = cmip5_ens_mean_anoms[-12:]
        cmip5_ens_mean_anoms = cmip5_ens_mean_anoms2

    proj_pc_scale = proj_pc_scale[si - 12:ei]
    proj_pc_offset = proj_pc_offset[si - 12:ei]
    syn_sst_rcp = numpy.ma.zeros(
        [proj_pc_scale.shape[0], eofs.shape[2], eofs.shape[3]], 'f')
    #
    for pn in range(0, 2):  # two patterns per percentile
        for m in range(0, nmons):
            pc_ts = (syn_pc[m, pn, :neofs] * proj_pc_scale[m::12, :neofs] +
                     proj_pc_offset[m::12, :neofs])
            syn_sst_rcp[m::12] = reconstruct_field(pc_ts, eofs[m], neofs, wgts)

        # load the hadisst reference
        n_repeats = cmip5_ens_mean_anoms.shape[0] / 12  # number of repeats = number of years
        hadisst_ac = create_hadisst_monthly_reference(run_type, ref_start,
                                                      ref_end, n_repeats,
                                                      run_n)
        # load the internal variability - we are only interested in the 30 year observed ones
        resid_fname = get_HadISST_monthly_residuals_fname(1899, 2010, 400)
        intvar = load_data(resid_fname, "sst")
        intvar = intvar[(1973 - 1899) * 12:(2007 - 1899) * 12]
        print "cmip5_ens_mean_anoms ", cmip5_ens_mean_anoms.shape
        print "syn_sst_rcp ", syn_sst_rcp.shape
        print "hadisst_ac ", hadisst_ac.shape
        print "intvar ", intvar.shape
        out_data = cmip5_ens_mean_anoms + syn_sst_rcp + hadisst_ac + intvar
        # save the synthetic ssts
        save_Ma_syn_SSTs(out_data, run_type, ref_start, ref_end, sy, ey, ptile,
                         pn)
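
The m::12 strided writes interleave twelve month-wise reconstructions back into a single monthly series; a toy illustration:

import numpy

out = numpy.zeros(36, 'f')              # three years, monthly
for m in range(12):
    out[m::12] = m                      # month m written into every year at once
print(out[:12])                         # [0. 1. ... 11.]: calendar order restored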