Example #1
 def __init__(self, dataset):
     f = cdms.open("DATA/OBS/PROCESSED/" + dataset + ".nc")
     self.data = {}
     obs_w = f("pr_W")
     self.data["west"] = obs_w
     stop_time = cmip5.stop_time(obs_w)
     if stop_time.month != 12:
         stop_time = cdtime.comptime(stop_time.year - 1, 12, 31)
     start_time = cmip5.start_time(obs_w)
     if start_time.month != 1:
         start_time = cdtime.comptime(start_time.year + 1, 1, 1)
     obs_w = obs_w(time=(start_time, stop_time))
     obs_w = fp.by_month(obs_w)
     obs_e = f("pr_CE")
     self.data["east"] = obs_e
     stop_time = cmip5.stop_time(obs_e)
     if stop_time.month != 12:
         stop_time = cdtime.comptime(stop_time.year - 1, 12, 31)
     start_time = cmip5.start_time(obs_e)
     if start_time.month != 1:
         start_time = cdtime.comptime(start_time.year + 1, 1, 1)
     obs_e = obs_e(time=(start_time, stop_time))
     obs_e = fp.by_month(obs_e)
     self.reshaped = {}
     self.reshaped["east"] = obs_e - MV.average(obs_e, axis=0)
     self.reshaped["west"] = obs_w - MV.average(obs_w, axis=0)
     self.reshaped["multi"] = [self.reshaped["west"], self.reshaped["east"]]
     self.dataset = dataset
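The start/stop adjustment above just trims partial calendar years so the series reshapes cleanly into months; a minimal standalone sketch of the same rule with plain datetime (hypothetical helper, no cdtime dependency):

import datetime

def full_year_bounds(start, stop):
    # Trim a monthly series to whole calendar years: push a mid-year start
    # forward to Jan 1 of the next year, pull a mid-year stop back to Dec 31
    if start.month != 1:
        start = datetime.date(start.year + 1, 1, 1)
    if stop.month != 12:
        stop = datetime.date(stop.year - 1, 12, 31)
    return start, stop

print(full_year_bounds(datetime.date(1900, 3, 15), datetime.date(2000, 6, 30)))
# -> (datetime.date(1901, 1, 1), datetime.date(1999, 12, 31))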
Example #2
def standardize_soilmoisture(dataset):
    f = cdms.open("../DROUGHT_ATLAS/PROCESSED/ALL_ANZDA.nc")
    obs = f("pdsi")
    obs = MV.masked_where(np.isnan(obs), obs)
    obs = MV.masked_where(np.abs(obs) > 90, obs)
    obs = obs(time=('1921-1-1', '2000-12-31'))
    pdsi = mask_data(
        obs, obs.mask[0]
    )  #Make all the obs have the same mask as the first datapoint
    f.close()
    mu_p = MV.average(pdsi, axis=0)
    sig_p = genutil.statistics.std(pdsi, axis=0)

    f = cdms.open("../DROUGHT_ATLAS/OBSERVATIONS/" + dataset +
                  "_soilmoisture_summerseason.nc")
    gleam30cm = f("smsurf")
    mask = pdsi[0].mask
    gleam30cmmask = b.mask_data(gleam30cm, mask)
    mu_s = MV.average(gleam30cmmask, axis=0)
    sig_s = genutil.statistics.std(gleam30cmmask, axis=0)

    surf = (gleam30cmmask - mu_s + mu_p) * (sig_p / sig_s)

    gleam2m = f("smroot")
    mask = pdsi[0].mask
    gleam2mmask = b.mask_data(gleam2m, mask)
    mu_s2 = MV.average(gleam2mmask, axis=0)
    sig_s2 = genutil.statistics.std(gleam2mmask, axis=0)
    root = (gleam2mmask - mu_s2 + mu_p) * (sig_p / sig_s2)  # use the root-zone sigma (sig_s2), not the surface one
    return surf, root
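A quick numpy illustration of the shift-and-scale used above, on hypothetical 1-D stand-ins for the gridded fields (note the fix above: the root-zone line must use its own sigma, sig_s2):

import numpy as np

pdsi = np.array([-1.0, 0.5, 2.0, -0.5, 1.0])   # toy PDSI series
sm = np.array([0.20, 0.25, 0.30, 0.22, 0.27])  # toy soil-moisture series

mu_p, sig_p = pdsi.mean(axis=0), pdsi.std(axis=0)
mu_s, sig_s = sm.mean(axis=0), sm.std(axis=0)

# Same transform as standardize_soilmoisture: shift toward the PDSI mean,
# then rescale so the spread matches the PDSI standard deviation
surf = (sm - mu_s + mu_p) * (sig_p / sig_s)
print(surf.std(), sig_p)  # both print the same value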
Example #3
def scratch(good):
    # TODO: mask OWDA, NADA, MADA regions

    f = cdms.open("../DROUGHT_ATLAS/PROCESSED/OWDA.nc")
    owda = f("pdsi")
    f.close()
    f = cdms.open("../DROUGHT_ATLAS/CMIP5/pdsi.ensemble.hist.rcp85.nc")
    h85 = f("pdsi")
    f.close()
    good = get_rid_of_bad(h85)  # overwrites the argument, as in the original

    owda_region = cdutil.region.domain(
        latitude=(np.min(owda.getLatitude()[:]), np.max(owda.getLatitude()[:])),
        longitude=(np.min(owda.getLongitude()[:]), np.max(owda.getLongitude()[:])))

    ow = MV.average(good(owda_region), axis=0)

    # Regrid OWDA onto the model grid before using its mask in the solver
    # (the original referenced owda_regrid before defining it)
    owda_regrid = owda.regrid(ow.getGrid(), regridTool='regrid2')
    owsolver = Eof(
        MV.average(sc.mask_data(good(owda_region), owda_regrid[-1].mask), axis=0))
    ow_fingerprint = owsolver.eofs()[0]
    owda_regrid_mask = sc.mask_data(owda_regrid, ow_fingerprint.mask)

    # NADA was never loaded in the original scratch code; assuming the same
    # layout as OWDA (hypothetical path)
    f = cdms.open("../DROUGHT_ATLAS/PROCESSED/NADA.nc")
    nada = f("pdsi")
    f.close()
    nada_region = cdutil.region.domain(
        latitude=(np.min(nada.getLatitude()[:]), np.max(nada.getLatitude()[:])),
        longitude=(np.min(nada.getLongitude()[:]), np.max(nada.getLongitude()[:])))

    nasolver = Eof(MV.average(good(nada_region), axis=0))
    na_fingerprint = nasolver.eofs()[0]

    nada_regrid = nada.regrid(na_fingerprint.getGrid(), regridTool='regrid2')
    nada_regrid_mask = sc.mask_data(nada_regrid, na_fingerprint.mask)
Example #4
def average_engine(x, wts):
    """
    The work horse that does the averaging! This is specific to averaging.
    We will always be dealing with the first dimension.....and x is already in
    the order we will average.
    1-d wts are always numpy arrays or 'equal'; multi-dimensional arrays are
    always MV2's.
    """
    #
    __DEBUG__ = 0
    #
    if x is None: return None
    if wts is None: return None
    #
    shx = numpy.ma.shape(x)
    if __DEBUG__: print('\tInside average_engine.')
    if __DEBUG__: print('\tIncoming data of shape ', shx)
    #
    if MV2.isMaskedVariable(wts) or isinstance(wts, numpy.ndarray):
        y, return_wts = MV2.average(x, weights=wts, returned=1, axis=0)
        return y, return_wts
    elif wts in ['equal', 'unweighted']:
        y, return_wts = MV2.average(x, returned=1, axis=0)
        return y, return_wts
    else:
        raise AveragerError('wts is an unknown type in average_engine')
Example #6
def plot_eastwest(X):
    if len(X.reshaped["west"].shape) > 2:
        data = [
            MV.average(cmip5.ensemble2multimodel(X.reshaped["west"]), axis=0),
            MV.average(cmip5.ensemble2multimodel(X.reshaped["east"]), axis=0)
        ]
    else:
        data = [X.reshaped["west"], X.reshaped["east"]]
    solver = MultivariateEof(data)
    weofs, eeofs = solver.eofs()
    westsolver = weofs[0]
    eastsolver = eeofs[0]
    fac = da.get_orientation(solver)

    plt.subplot(211)
    months = [
        "JAN", "FEB", "MAR", "APR", "MAY", "JUN", "JUL", "AUG", "SEP", "OCT",
        "NOV", "DEC"
    ]
    plt.plot(fac * westsolver.asma(), label="WEST")
    plt.plot(eastsolver.asma() * fac, label="EAST")
    plt.xticks(np.arange(12), months)
    plt.legend()
    plt.subplot(212)
    time_plot(fac * solver.pcs()[:, 0], label="WEST")
Example #7
    def __init__(self,data,proj="moll",typ="clim",fix_colorbar = True,**kwargs):
       # matplotlib.rcParams["backend"]="TkAgg"

        if data.id.find("pr")==0:
            lab = "mm/day/decade"
        else:
            lab = "K/decade"
        if len(data.shape) == 4:
            self.avg = MV.average(data,axis=0)
            self.data = data
        else:
            self.avg = data
            self.data = MV.array(data.asma()[np.newaxis])
            for i in range(3):
                self.data.setAxis(i+1,data.getAxis(i))
        
    
        if typ == "slopes":
            self.plotdata= genutil.statistics.linearregression(self.avg,nointercept=1)*3650.
        elif typ == "clim":
            self.plotdata = MV.average(self.avg,axis=0)
        elif typ == "eof":
            eofdata = cdms_clone(self.avg.anom(axis=0),self.avg)
            solver = Eof(eofdata)
            fac = get_orientation(solver)
            self.plotdata = solver.eofs()[0]*fac
        
        if fix_colorbar:
            a = max([np.abs(np.min(self.plotdata)),np.abs(np.max(self.plotdata))])
            vmin = -a
            vmax = a
        else:
            vmin=None
            vmax = None
       
        self.m = bmap(self.plotdata,alpha=1,projection=proj,vmin=vmin,vmax=vmax)
        self.m.drawcoastlines()
        plt.set_cmap(cm.RdBu_r)
        cbar=plt.colorbar(orientation="horizontal")
        cbar.set_label(lab)
        self.fig = plt.gcf()
        self.ax = plt.gca()
        self.lat = data.getLatitude()
        self.latbounds = data.getLatitude().getBounds()
        self.lon = data.getLongitude()
        self.cid = self.fig.canvas.mpl_connect('button_press_event',self.onclick)
        self.cid2 = self.fig.canvas.mpl_connect("key_press_event",self.onpress)
        self.key = "o"
        self.stars = []
        self.figs=[]
        self.xlim = self.ax.get_xlim()
        self.ylim = self.ax.get_ylim()
Example #8
    def temporal_mean(self):
        """ Temporal means for the modeled and observed fields """
        import MV2, cdutil
        import numpy as np

        # TODO: consider having these functions return values instead of setting attributes
        if np.ndim(self.model) < 3:
            self.model.temp_mean = self.model
            self.obs.temp_mean = self.obs
        else:
            # Compute the temporal mean ... result => map
            self.model.temp_mean = MV2.average(self.model, axis=0)
            self.obs.temp_mean = MV2.average(self.obs, axis=0)
Example #9
 def testAverage(self):
     xav = MV2.average(self.ones, axis=1)
     self.assertTrue(MV2.allequal(xav, 1))
     xav2 = MV2.average(self.u_file)
     xav3 = MV2.average(self.u_transient)
     xav4, wav4 = MV2.average(
         self.u_transient, weights=MV2.ones(
             self.u_transient.shape, numpy.float), returned=1)
     a = MV2.arange(5)
     b = 2 ** a
     av, wav = MV2.average(b, weights=a, returned=1)
     self.assertEqual(av, 9.8)
     self.assertEqual(wav, 10)
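The expected values in the weighted case follow directly from the definition sum(b*wts)/sum(wts); a numpy check of the same arithmetic, independent of MV2:

import numpy as np

a = np.arange(5)               # weights: [0, 1, 2, 3, 4]
b = 2 ** a                     # data:    [1, 2, 4, 8, 16]
av = np.average(b, weights=a)  # (0 + 2 + 8 + 24 + 64) / 10 = 9.8
print(av, a.sum())             # 9.8 10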
Example #11
    def testCrosSectionRegrid(self):
        fmod = self.getDataFile(
            "20160520.A_WCYCL1850.ne30_oEC.edison.alpha6_01_ANN_climo_Q.nc")
        fobs = self.getDataFile("MERRA_ANN_climo_SHUM.nc")
        var1 = fmod('Q')
        var2 = fobs('SHUM')

        mv1 = MV2.average(var1, axis=-1)
        mv2 = MV2.average(var2, axis=-1)
        mv1_reg = mv1
        lev_out = mv1.getLevel()
        lat_out = mv1.getLatitude()
        mv2_reg = mv2.crossSectionRegrid(lev_out, lat_out)
        self.assertTrue(numpy.ma.is_masked(mv2_reg[:, :, -1].all()))
Example #12
def sum_engine(x, wts):
    """
    The work horse that does the summing! This is specific to summing.
    We will always be dealing with the first dimension.....and x is already in
    the order we will sum.
    1-d wts are always numpy arrays or 'equal'; multi-dimensional arrays are
    always MV2's.

    Inputs:
        x   : the input array (can be MV2 or MA)
        wts : the input weight array (numpy array or MV2 or 'equal')

    Returned:
        y          : The weighted sum (summed over the first dimension)
        return_wts : The sum of weights (summed over the first dimension)
    """
    #
    __DEBUG__ = 0
    #
    if x is None: return None
    if wts is None: return None
    #
    shx = numpy.ma.shape(x)
    #
    if __DEBUG__: print('\tInside sum_engine.')
    if __DEBUG__: print('\tIncoming data of shape ', shx)
    #
    if MV2.isMaskedVariable(wts) or isinstance(wts, numpy.ndarray):
        #
        # wts is an MV2 or numpy array
        #
        if __DEBUG__:
            print('\t********** Weight is an MV2 or numpy array! **********')
        #
        # The weighted sum is the weighted average times the summed weights
        xavg, return_wts = MV2.average(x, weights=wts, returned=1, axis=0)
        y = xavg * return_wts
        return y, return_wts
    elif wts in ['equal', 'unweighted']:
        #
        # Equal weights
        #
        if __DEBUG__: print('\t********** Weight is Equal! **********')
        xavg, return_wts = MV2.average(x, returned=1, axis=0)
        y = xavg * return_wts
        return y, return_wts
    else:
        raise AveragerError('wts is an unknown type in sum_engine')
Example #13
def plot_model_projections(fingerprint, h85):
    Pwest = model_projections(fingerprint, h85, "west")
    time_plot(MV.average(Pwest, axis=0),
              color=get_colors("west"),
              label="WEST")

    Peast = model_projections(fingerprint, h85, "east")
    time_plot(MV.average(Peast, axis=0),
              color=get_colors("east"),
              label="EAST")

    Pmulti = model_projections(fingerprint, h85, "multi")
    time_plot(MV.average(Pmulti, axis=0),
              color=get_colors("multi"),
              label="MULTI")
Example #15
def get_ohc(forcing,
            average=True,
            prefix="/Users/kmarvel/Google Drive/ECS/OHC_DATA/"):
    """ read in the ocean heat content"""
    direc = prefix + forcing + "/"
    files = sorted(glob.glob(direc + "*.nc"))
    L = len(files)
    f = cdms.open(files[0])
    ohc_raw = f("ohc")
    tax = ohc_raw.getTime()
    ohc_sum = MV.sum(ohc_raw, axis=1)
    ohc_final = (ohc_sum) * 1.e-22  #Convert to 1e22 Joules
    nt = len(ohc_final)
    MMA = MV.zeros((L, nt))
    MMA[0] = ohc_final
    f.close()

    #Loop over all files in directory (ensemble members)
    for i in range(1, L):
        f = cdms.open(files[i])
        ohc_raw = f("ohc")
        ohc_sum = MV.sum(ohc_raw, axis=1)
        ohc_final = (ohc_sum) * 1.e-22
        MMA[i] = ohc_final
        f.close()

    #Return multimodel average, or not
    if average:
        MMA = MV.average(MMA, axis=0)
        MMA.setAxis(0, tax)
    else:
        MMA.setAxis(1, tax)
    return MMA
Example #16
def get_tas(forcing, average=True):
    """ Read in pre-computed annual-average, global-average surface air temperatures (NOT anomalies)"""
    if forcing == 'GHG':
        forcing = 'historicalGHG'
    model = "GISS-E2-R"
    p = "*p1*"
    direc = "/Users/kmarvel/Google Drive/HistoricalMisc/GLOBAL_MEAN/" + forcing + "/"
    files = get_ensemble(direc, model, search_string="*p1*YEAR*")
    f = cdms.open(files[0])
    c = 0
    start = f["tas"].getTime().asComponentTime()[0]
    stop = f["tas"].getTime().asComponentTime()[-1]
    tax = f("tas").getTime()
    L = len(f("tas", time=(start, stop)))
    tas = MV.zeros((len(files), L))
    f.close()
    for fil in files:
        f = cdms.open(fil)
        tas[c] = f("tas", time=(start, stop))

        c += 1
    if average:
        temperatures = MV.average(tas, axis=0)

        temperatures.setAxis(0, tax)
    else:
        temperatures = tas
        temperatures.setAxis(1, tax)
    return temperatures
Example #17
    def onclick(self, event):
        if not event.inaxes:
            return
        #if True:
        x = event.xdata
        y = event.ydata

        lon, lat = self.m(x, y, inverse=True)
        if self.m.lonmin > -100.:
            if lon < 0:
                lon = 360 + lon

        xy = (lat, lon)
        t = get_plottable_time(self.data)
        i, j = find_ij(self.data, xy)

        X, Y = self.m(self.lon[j], self.lat[i])
        self.stars += [self.m.plot(X, Y, "y*", markersize=15)[0]]
        f2 = plt.figure()
        self.figs += [f2]

        ax2 = f2.add_subplot(111)

        plt.draw()
        for mod in range(self.data.shape[0]):

            ax2.plot(t, self.data[mod, :, i, j].asma(), color=cm.gray(.5))
        ax2.plot(t, MV.average(self.data, axis=0)[:, i, j].asma(), color="k")
        ax2.set_title("(" + str(self.lat[i]) + "," + str(self.lon[j]) + ")")
        plt.draw()
Example #18
def get_crossing_time(region, variable, scenario, month):
    vcert = stats.norm.interval(.99)[1]
    mfile = get_file(region, variable, scenario, month)
    if mfile is None:
        crossing_time = None

    else:
        f = cdms.open(mfile)
        data = f(variable + "_SN")
        avg = MV.average(data, axis=0)
        threshexceed = np.where(np.abs(avg) > vcert)[0]
        #If it never exceeds the threshold, return None
        if len(threshexceed) == 0:
            return (None)
        #If it hasn't exceeded the threshold by the last time step, return None
        if len(avg) - 1 not in threshexceed:
            return (None)
        if len(np.where(np.diff(threshexceed) > 1)[0]) > 0:
            isnot1 = np.max(np.where(np.diff(threshexceed) > 1)[0]) + 1
        else:
            isnot1 = 0
        staysabove = int(threshexceed[isnot1])
        crossing_time = int(cmip5.get_plottable_time(data)[staysabove])
        f.close()
    return (crossing_time)
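The index arithmetic above picks the start of the final run in which the average stays above the threshold through the last time step; a small self-contained numpy sketch of that logic on toy data:

import numpy as np

avg = np.array([0.1, 3.0, 0.2, 0.5, 3.1, 3.2, 3.3])  # toy signal-to-noise series
vcert = 2.576  # roughly stats.norm.interval(.99)[1]

threshexceed = np.where(np.abs(avg) > vcert)[0]  # -> [1, 4, 5, 6]
gaps = np.where(np.diff(threshexceed) > 1)[0]
isnot1 = gaps.max() + 1 if len(gaps) else 0  # start of the last contiguous run
staysabove = int(threshexceed[isnot1])
print(staysabove)  # -> 4: the step after which |avg| never drops back below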
Example #19
def dictionary_ensemble_average(d, grid=None):
    if grid is None:
        # find the model with the coarsest (smallest) grid
        gridsize_min = 1.e20
        for m in d.keys():
            gridsize = d[m].shape[-1] * d[m].shape[-2]
            if gridsize < gridsize_min:
                gridsize_min = gridsize
                themodel = m
        coarsest_grid = d[themodel].getGrid()
        allstop = str(np.min([cmip5.stop_time(d[m]).year
                              for m in d.keys()])) + "-12-31"
        allstart = str(np.max([cmip5.start_time(d[m]).year
                               for m in d.keys()])) + "-1-1"
        standardize = lambda data: data(time=(allstart, allstop)).regrid(
            coarsest_grid, regridTool='regrid2')
        counter = 0
        goodmodels = list(d)
        L = len(goodmodels)

        for m in d.keys():
            modeldata = standardize(MV.average(
                d[m], axis=0))  # average over individual ensemble members
            if counter == 0:
                MME = MV.zeros((L, ) + modeldata.shape)
            MME[counter] = modeldata
            counter += 1
    modax = cmip5.make_model_axis(list(d))
    axlist = [modax] + modeldata.getAxisList()

    MME.setAxisList(axlist)
    cdutil.setTimeBoundsMonthly(MME)
    #MME.id=variable
    return MME
Example #20
def truncated_solver(D,the_start=None,the_stop=None,include_cru=False,include_dai=False):
   
    thedata = D.ALL.model(time=(the_start,the_stop))
    the_mma=MV.average(cmip5.ensemble2multimodel(thedata),axis=0)
    if include_cru:
        f = cdms.open("../DROUGHT_ATLAS/OBSERVATIONS/CRU_selfcalibrated.nc")
        cru_jja=f("pdsi")
        f.close()
        cru_jja_mask = mask_data(cru_jja,D.ALL.obs[0].mask)(time=(the_start,'2018-12-31'))
        newmask = np.prod(~cru_jja_mask.mask,axis=0)
        cru_jja_mask = mask_data(cru_jja_mask,newmask==0)
        
        thesolver = Eof(mask_data(D.ALL.mma(time=(the_start,the_stop)),newmask==0),weights='area')
    elif include_dai:
        f = cdms.open("../DROUGHT_ATLAS/OBSERVATIONS/DAI_selfcalibrated.nc")
        dai_jja=f("pdsi")
        f.close()
        dai_jja_mask = mask_data(dai_jja,D.ALL.obs[0].mask)(time=(the_start,'2018-12-31'))
        newmask = np.prod(~dai_jja_mask.mask,axis=0)
        dai_jja_mask = mask_data(dai_jja_mask,newmask==0)
        
        thesolver = Eof(mask_data(D.ALL.mma(time=(the_start,the_stop)),newmask==0),weights='area')
    else:
        thesolver = Eof(the_mma,weights="area")
    return thesolver
Example #21
def soilmoisture_fingerprints(mask,name=None):
    depths = ["30cm","2m","pdsi"]
    letters = ["(a): ","(b): ","(c): "]
    pcs = []
    pclabels = []
    for depth in depths:
        i=depths.index(depth)
        plt.subplot(2,2,i+1)
        sm = soilmoisture(depth,mask=mask)
        solver = Eof(MV.average(sm,axis=0))
        fac = da.get_orientation(solver)
        if name is None:
            m=landplot(fac*solver.eofs()[0],vmin=-.1,vmax=.1)
            plt.colorbar(orientation='horizontal',label='EOF loading')
        else:
            m=plot_regional(fac*solver.eofs()[0],name,vmin=-.1,vmax=.1)
            m.drawcountries()
        m.drawcoastlines(color='gray')
        
        
        plt.title(letters[i]+depth+" fingerprint")
        pcs+=[fac*solver.pcs()[:,0]]

    
    plt.subplot(2,2,4)
    for i in range(3):
        time_plot(pcs[i],label=depths[i])
    plt.legend(loc=0)
    plt.title("(d): Principal Components")
    plt.xlabel("Time")
    plt.ylabel("Temporal amplitude")
Example #22
def compare_pre_post_1100_noise(X,L=31,latbounds=None):
    time1=('1100-1-1','1399-12-31')
    c1=cm.Purples(.8)
    time2=('1400-1-1','2005-12-31')
    if latbounds is not None:
        obs=X.obs(latitude=latbounds)
        mma = MV.average(X.model(latitude=latbounds),axis=0)
        mma = mask_data(mma,obs[0].mask)
        solver = Eof(mma)
        obs = mask_data(obs,solver.eofs()[0].mask)
        truncnoise=solver.projectField(obs)[:,0]*da.get_orientation(solver)
        noisy1=truncnoise(time=time1)
        noisy2=truncnoise(time=time2)
    else:
        noisy1=X.noise(time=time1)
        noisy2=X.noise(time=time2)
    c2=cm.viridis(.1)
    plt.subplot(121)
    Plotting.Plotting.time_plot(noisy1,c=c1)
    Plotting.Plotting.time_plot(noisy2,c=c2)
    plt.ylabel("Projection")
    plt.title("(a): Noise time series")
    plt.subplot(122)
   
    plt.hist(b.bootstrap_slopes(noisy1,L),color=c1,normed=True,alpha=.5)
    da.fit_normals_to_data(b.bootstrap_slopes(noisy1,L),c=c1,label="1100-1400")
    plt.hist(b.bootstrap_slopes(noisy2,L),color=c2,normed=True,alpha=.5)
    da.fit_normals_to_data(b.bootstrap_slopes(noisy2,L),c=c2,label="1400-2005")
    plt.legend()
    plt.title("(b): 31-year trend distributions")
    return np.std(b.bootstrap_slopes(noisy1,L)),np.std(b.bootstrap_slopes(noisy2,L))
Example #23
def pad_by_10(X, year1, year2):
    """
    Pad an array at the beginning with an artificial 10 year spinup, in which each value is set to the climatology
    """
    if isinstance(year1, str):
        year, month, day = year1.split("-")
        year1 = cdtime.comptime(int(year), int(month), int(day))
    if isinstance(year2, str):
        year, month, day = year2.split("-")
        year2 = cdtime.comptime(int(year), int(month), int(day))
    tax = X.getTime()
    lastten = [year1.sub(x, cdtime.Months) for x in range(1, 121)][::-1]
    dayax = np.array([x.torel(tax.units).value for x in lastten])
    tax_new = np.append(dayax, tax)
    new_time_axis = cdms.createAxis(tax_new)
    new_time_axis.designateTime()
    new_time_axis.id = "time"
    new_time_axis.units = tax.units
    Xnew = MV.zeros((len(tax_new), ) + X.shape[1:])
    Xnew[:120] = np.repeat(MV.average(X(time=(year1, year2)),
                                      axis=0).asma()[np.newaxis, :, :],
                           120,
                           axis=0)
    Xnew[120:] = X
    for k in X.attributes.keys():
        setattr(Xnew, k, X.attributes[k])
    Xnew.setAxisList([new_time_axis] + X.getAxisList()[1:])
    return Xnew
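The padding step boils down to tiling the climatology 120 months (10 years) in front of the data; a compact numpy illustration under the same assumptions:

import numpy as np

X = np.arange(24, dtype=float).reshape(24, 1, 1)  # two years of toy monthly data
clim = X.mean(axis=0)                             # time-mean "climatology"

padded = np.concatenate([np.repeat(clim[np.newaxis], 120, axis=0), X], axis=0)
print(padded.shape)  # (144, 1, 1): 120 spin-up months + 24 data months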
Example #24
    def plot_seasonal_trends(self, ax=None, scenario="ssp585"):
        if ax is None:
            fig = plt.figure()
            ax = plt.subplot(111)
        if scenario.find("+") > 0:
            ssp = scenario.split("+")[-1]
            test = self.splice_historical(ssp)
        else:
            test = self.ensemble_average(scenario)
        #test=self.ensemble_average("ssp585")
        mma = MV.average(test, axis=0)
        nyears = int(len(mma) / 12)
        tst = mma.reshape((nyears, 12))

        tmp = [
            ax.plot(tst.asma()[i], c=cm.magma(i / float(nyears)))
            for i in range(nyears)
        ]
        months = [
            "JAN", "FEB", "MAR", "APR", "MAY", "JUN", "JUL", "AUG", "SEP",
            "OCT", "NOV", "DEC"
        ]
        ax.set_xticks(np.arange(12))
        ax.set_xticklabels(months)
        ax.set_ylabel(self.variable)
        ax.set_title(self.region)
Example #25
 def testReshapeMaskedAverage(self):
     a = MV2.arange(100)
     a = MV2.reshape(a, (10, 10))
     self.assertEqual(a.shape, (10, 10))
     self.assertEqual(len(a.getAxisList()), 2)
     a = MV2.masked_greater(a, 23)
     b = MV2.average(a, axis=0)
     c = a - b
Example #26
 def plot_model_trends(self, i, start=None, stop=None):
     if i != "avg":
         west = self.h85.reshaped["west"][i]
         east = self.h85.reshaped["east"][i]
     else:
         west = MV.average(self.h85.reshaped["west"], axis=0)
         east = MV.average(self.h85.reshaped["east"], axis=0)
     if start is not None:
         west = west(time=(start, stop))
         east = east(time=(start, stop))
     plt.plot(cmip5.get_linear_trends(west).asma(), label="WEST")
     plt.plot(cmip5.get_linear_trends(east).asma(), label="EAST")
     months = [
         "JAN", "FEB", "MAR", "APR", "MAY", "JUN", "JUL", "AUG", "SEP",
         "OCT", "NOV", "DEC"
     ]
     plt.xticks(np.arange(12), months)
Example #27
def rcp_solver(D,early_start=None,early_stop=None):
    if early_start is None:
        early_start = cdtime.comptime(2006,1,1)
    if early_stop is None:
        early_stop=cdtime.comptime(2099,12,31)
    earlydata = D.ALL.model(time=(early_start,early_stop))
    early_mma=MV.average(cmip5.ensemble2multimodel(earlydata),axis=0)
    earlysolver = Eof(early_mma,weights="area")
    return earlysolver
Example #28
def main():
    # mip = 'cmip5'
    mip = 'cmip6'
    exp = 'historical'
    version = 'v20200625'
    period = '1985-2004'
    datadir = '/p/user_pub/pmp/pmp_results/pmp_v1.1.2/diagnostic_results/mjo/' + mip + '/historical/' + version
    imgdir = '/p/user_pub/pmp/pmp_results/pmp_v1.1.2/graphics/mjo/' + mip + '/historical/' + version

    ncfile_list = glob.glob(os.path.join(datadir, '*.nc'))

    # get list of models
    models_list = sorted(
        [r.split('/')[-1].split('.')[0].split('_')[1] for r in ncfile_list])
    # remove repeat
    models_list = list(dict.fromkeys(models_list))
    # remove obs
    models_list.remove('obs')

    # models_list = models_list[0:1]
    print(models_list)

    for model in models_list:
        ncfile_list_model = glob.glob(
            os.path.join(datadir, '*' + model + '*.nc'))
        runs_list = sorted([
            r.split('/')[-1].split('.')[0].split('_')[3]
            for r in ncfile_list_model
        ])
        print(model, runs_list)
        d_runs = []
        for run in runs_list:
            try:
                ncfile = '_'.join(
                    [mip, model, exp, run, 'mjo', period, 'cmmGrid']) + '.nc'
                f = cdms2.open(os.path.join(datadir, ncfile))
                d = f('power')
                d_runs.append(d)
                f.close()
            except Exception as err:
                print(model, run, 'cannot load:', err)
                pass
            if run == runs_list[-1]:
                num_runs = len(d_runs)
                # ensemble mean
                d_avg = MV2.average(d_runs, axis=0)
                d_avg.setAxisList(d.getAxisList())
                title = (mip.upper() + ': ' + model + ' (' + str(num_runs) +
                         ' runs mean) \n Pr, NDJFMA, ' + period +
                         ', common grid (2.5x2.5deg)')
                # E/W ratio
                ewr, eastPower, westPower = calculate_ewr(d_avg)
                # plot prepare
                pngfilename = ncfile.split('.nc')[0].replace(run, 'average')
                fout = os.path.join(imgdir, pngfilename)
                # plot
                plot_power(d_avg, title, fout, ewr)
Example #29
def aerosol_solver(D,aerosol_start=None,aerosol_stop=None,include_cru=False):
    if aerosol_start is None:
        aerosol_start = cdtime.comptime(1950,1,1)
    if aerosol_stop is None:
        aerosol_stop=cdtime.comptime(1975,1,1)
    aerosoldata = D.ALL.model(time=(aerosol_start,aerosol_stop))
    aerosol_mma=MV.average(cmip5.ensemble2multimodel(aerosoldata),axis=0)
    aerosolsolver = Eof(aerosol_mma,weights="area")
    return aerosolsolver
Example #30
 def plot_future(self,season,single_member=True,plot_historical=True):
     if single_member:
         func=self.single_member_ensemble
     else:
         func=self.ensemble_average
     hist=func("historical")
     
     start_time=cdtime.comptime(1951,1,1)
     end_time=cdtime.comptime(1980,12,31)
     clim = getattr(cdutil,season).climatology(hist(time= (start_time, end_time, 'co')))
     if plot_historical:
         timedata=getattr(cdutil,season).departures(hist,ref=clim)
         Plotting.time_plot(MV.average(timedata,axis=0),color=get_color("historical"),label=get_label("historical"))
     for ssp in ["ssp126","ssp245","ssp370","ssp585"]:
         rawdata=func(ssp)
         timedata=getattr(cdutil,season).departures(rawdata,ref=clim)
         Plotting.time_plot(MV.average(timedata,axis=0),color=get_color(ssp),label=get_label(ssp))
     plt.legend()
Example #31
def compute(dm, do):
    """ Computes bias"""
    if dm is None and do is None:  # just want the doc
        return {
            "Name": "Bias",
            "Abstract": "Compute Full Average of Model - Observation",
            "Contact": "Peter Gleckler <*****@*****.**>",
        }
    return MV2.float(MV2.average(MV2.subtract(dm, do)))
Example #32
def jja_pdsi(X):
    """Average CMIP5 PDSI over boreal summer months """
    jja = MV.average(X[:, 5:8], axis=1)  # indices 5:8 = June, July, August
    tax = jja.getAxis(0)
    tax.units = 'years since 0000-7-1'
    tax.id = "time"
    jja.setAxis(0, tax)
    jja.id = 'pdsi'
    return jja
Example #33
def multi_model_ensemble(variable,
                         experiment,
                         model_ensemble_average=False,
                         models=None,
                         grid=None):
    rawdir = get_rawdir(variable)
    if experiment.find("hist") >= 0:
        time_bounds = ('1850-1-1', '2014-12-31')
    elif experiment.find("ssp") >= 0:
        time_bounds = ('2015-1-1', '2100-12-31')

    tax = get_tax(experiment)
    L = int(len(tax) / 12)

    if models is None:
        models = cmip6_models_with_all_variables([variable], experiment)
    if grid is None:
        model = models[0]

        rip = get_rips(model, variable, experiment)[0]
        allfiles = sorted(
            glob.glob(rawdir + model + "/*." + experiment + ".*." + rip +
                      ".*"))

        f = cdms.open(allfiles[0])
        data = f(variable)
        #for model in get_ok_models("SW"):

        grid = data.getGrid()

        f.close()
    nmod = len(models)
    MME = MV.zeros((nmod, L * 12) + grid.shape) + 1.e20
    for i in range(nmod):
        print(models[i])
        model = models[i]
        if model_ensemble_average:
            ens = get_ensemble(model, variable, experiment)

            data = MV.average(ens, axis=0)
        else:
            data = get_firstmember(model, variable, experiment)

        data_regrid = data.regrid(grid, regridTool='regrid2')(time=time_bounds)
        if data_regrid.shape == MME[i].shape:
            MME[i] = data_regrid
        else:
            print("problem with", model)
    MME = MV.masked_where(MME > 1.e10, MME)
    modax = cmip5.make_model_axis(models)
    lat = grid.getLatitude()
    lon = grid.getLongitude()
    axlist = [modax, tax, lat, lon]
    MME.setAxisList(axlist)
    MME.id = variable
    return MME
Example #34
def compute(dm, do):
    """ Computes Mean Absolute Error"""
    if dm is None and do is None:  # just want the doc
        return {
            "Name": "Mean Absolute Error",
            "Abstract": "Compute Full Average of " +
            "Absolute Difference Between Model And Observation",
            "Contact": "Peter Gleckler <*****@*****.**>",
        }
    mae = MV.average(MV.absolute(MV.subtract(dm, do)))
    return float(mae)
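For intuition, the same metric in plain numpy on toy arrays: mean absolute error is just the average of |model - obs|:

import numpy as np

dm = np.array([1.0, 2.0, 3.0])  # toy "model" values
do = np.array([1.5, 1.5, 3.5])  # toy "observation" values

mae = np.mean(np.abs(dm - do))  # (0.5 + 0.5 + 0.5) / 3 = 0.5
print(mae)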
Example #35
def describe(obj, stats=None, format=pprint.pformat):
    '''Return the object description depending on its type.

    Useful with numpy and cdms variables/axes

    :Params:
        - **obj**: The object to describe.
        - **stats**: If True, include numerical information like min, max, mean, count, ...

    :Return:
        - The object's summary string
    '''
    try:
        import cdms2, MV2, numpy
        from cdms2.avariable import AbstractVariable
        from cdms2.axis import AbstractAxis
        if isinstance(obj, configobj.ConfigObj):
            obj = obj.dict()
        if not isinstance(obj, (cdms2.avariable.AbstractVariable, cdms2.axis.AbstractAxis, numpy.ndarray)):
            return format(obj)
        otype = obj.__class__.__name__ # type(obj)
        sh, sz = MV2.shape(obj), MV2.size(obj)
        mi = ma = av = co = None
        if stats and sz:
            try:
                if hasattr(obj, 'typecode') and obj.typecode() not in ('c',):
                    mi, ma, av, co = MV2.min(obj), MV2.max(obj), MV2.average(obj), MV2.count(obj)
            except Exception: logger.exception('Error getting statistics of object %s', type(obj))
        if isinstance(obj, AbstractVariable):
            return '%s: %s, shape: (%s), order: %s%s'%(
                otype, obj.id,
                ','.join('%s=%s'%(a.id, a.shape[0]) for a in obj.getAxisList()),
                obj.getOrder(),
                stats and ', min: %s, max: %s, avg: %s, count: %s'%(mi, ma, av, co) or '')
        elif isinstance(obj, AbstractAxis):
            return '%s: %s, size: %s%s'%(
                otype, obj.id, sz,
                stats and ', min: %s, max: %s, avg: %s, count: %s'%(mi, ma, av, co) or '')
        else:#if isinstance(obj, numpy.ndarray):
            return '%s: shape: %s%s'%(
                otype, sh,
                stats and ', min: %s, max: %s, avg: %s, count: %s'%(mi, ma, av, co) or '')
    except Exception:
        logger.exception('Error getting description of object %s', type(obj))
        return '%s (error getting description)' % (type(obj))
Example #36
    def process(self,data):
##         if self.symetric:
##             data = symetric(data)
        # Make sure we have an even number of time steps
        t=data.getTime()

        # length of time axis
        nt=len(t)
        if nt%2!=0:
            print("Warning: number of time steps wasn't even, removed last time step")
            data=data[:-1]
            t=data.getTime() ## get the new time axis
            nt=len(t)

        if len(t)<self._NTSub:
            raise Exception("Error: your data must have at least %i time steps, adjust frequency (currently: %i/day) or number_of_days (currently: %i processed at once) to reach that limit, or get more data" % (self._NTSub,self.frequency,self.number_of_days))
        ## Computes PP, number of sub-domain
        PP=float(nt-self._NTSub)/self._NShift+1
        PP=int(PP)

        ## Number of longitudes
        lons=data.getLongitude()
        NL=len(lons)
        tt=cdms2.createAxis(numpy.arange(self._NTSub),id='sub_time')
        
        ## Should redo that with just an arange (eventually...)!!!
        ## Frequencies in cycles/day
        ff=numpy.arange(0,self._NTSub+1,1,float)
        for i in range(1,self._NTSub+2):
            ff[i-1]=float(i-1-self._NTSub/2.)*self.frequency/float(self._NTSub)
            
        ## Should redo that with just an arange (eventually...)!!!
        ## Wave numbers
        ss=numpy.arange(0,NL+1,1,float)
        for i in range(1,NL+2):
            ss[i-1]=float(i-1-NL/2.)
##         print 'Frequencies:',ff
##         print 'Wave numbers:',ss
        ## Ok, we now do the real stuff
        ## Creates the array of powers (Number of subtimes,fqcy,wave_numbers,latitudes)
        lats=data.getLatitude()
        Power=numpy.zeros((PP,self._NTSub+1,NL+1,len(lats)),numpy.float)
        
        ## LOOP through time sub domains
        prev=0 # initialize the scrolling bar
        for Pcount in range(PP):
            if PP>1:
                prev=genutil.statusbar(float(Pcount),PP-1,prev=prev,tk=self.tkbar)
            
            ## Get the time subdomain
            EEo=data[Pcount*self._NShift:Pcount*self._NShift+self._NTSub](order='tx...')
            ## First do the symmetric/antisymmetric split if needed
            if self.symetric: EEo=symetrick(EEo)
            
            ## Now detrending
            ##  Step 1- Get the slope and intercept
            slope,intercept=genutil.statistics.linearregression(EEo,nointercept=0)
            ##  Step 2- remove the trend
            ##    Step 2a: Create an array with the time values
            a=EEo.getTime()
            A=MV2.array(a[:],typecode='d')
            A.setAxis(0,a)
            ##    Step 2b: "Grows" it so it has the same shape than data
            A,EEo=genutil.grower(A,EEo)
            ##    Step 2c: Actually remove the trend
            EE=EEo-A*slope-intercept

            ## we don't need A,EEo,slope,intercept anymore
            del(EEo)
            del(slope)
            del(intercept)
            del(A)

            ## Remove the time mean 
            mean=MV2.average(EE,0)
            EE=EE-mean
            del(mean) # could be big in memory

            ## Tapering time...
            tapertozero(EE,1,len(EE)-1,5*self.frequency)
            
            ## OK here Wheeler has some windowing on longitude, but it's commented out
            ## I'll pass it for now

            ## OK, the actual FFT work
            EE=numpy.fft.fft2(EE,axes=(1,0))/NL/self._NTSub
            
            ## OK NOW THE LITTLE MAGIC WITH REORDERING !
            A=numpy.absolute(EE[0:self._NTSub//2+1,1:NL//2+1])**2
            B=numpy.absolute(EE[self._NTSub//2:self._NTSub,1:NL//2+1])**2
            C=numpy.absolute(EE[self._NTSub//2:self._NTSub,0:NL//2+1])**2
            D=numpy.absolute(EE[0:self._NTSub//2+1,0:NL//2+1])**2
            Power[Pcount,self._NTSub//2:,:NL//2]=A[:,::-1]
            Power[Pcount,:self._NTSub//2,:NL//2]=B[:,::-1]
            Power[Pcount,self._NTSub//2+1:,NL//2:]=C[::-1,:]
            Power[Pcount,:self._NTSub//2+1,NL//2:]=D[::-1,:]
        ## End of Pcount loop
        if self.tkbar and PP>1:
            prev[1].destroy()
            prev[0].destroy()
        ## Now generates the decorations

        ## first the time axis (subdomains)
        vals=[]
        bounds=[]
        pp=0
        for i in range(0,len(t)-self._NShift,self._NShift):
            st=t.subAxis(i,i+self._NTSub)
            if len(st[:])==self._NTSub:
                pp+=1
                vals.append((st[0]+st[-1])/2.)
                bds=st.getBounds()
                #print 'Bounds:',bds
                if bds is None:
                    raise ValueError("Data need to have bounds on time dimension")
                else:
                    bounds.append([bds[0][0],bds[-1][1]])
        ## Convert lists to arrays
        vals=numpy.array(vals)
        bounds=numpy.array(bounds)
        ## Creates the time axis
        dumt=cdms2.createAxis(vals,bounds=bounds)
        dumt.id='time'
        dumt.units=t.units
        dumt.designateTime()
        dumt.setCalendar(t.getCalendar())

        ## Create the frequencies axis
        T=cdms2.createAxis(ff)
        T.id='frequency'
        T.units='cycles per day'

        ## Create the wave numbers axis
        S=cdms2.createAxis(ss)
        S.id='planetaryzonalwavenumber'
        S.units='-'

        ## Make it an MV2 with axes and id (id comes from the original data id)
        Power=MV2.array(Power,axes=(dumt,T,S,lats),id=data.id+'_'+'power')
        ## Adds a long name attribute
        Power.longname='Real power spectrum for the many different parts (i.e. over separate time divisions)'
        ## And return the whole thing ordered 'time', 'latitude', 'frequencies','wavenumbers'
        return Power(order='ty...')
Example #37
print(absd)


#*******************************************************************************
#
# Example 2:
#           In this example, we compute the spatial correlation between 2 fields
#           - say the mean surface air temperature for the 1980-1989 period
#           versus each month of the 1980-1989 period.
#
#*******************************************************************************

#
# First compute the average for the 1980-1989 period (designated ncep2 above)
#
mncep2 = MV2.average(ncep2)

#
# And now correlate this pattern with each month of ncep2
#
cor = statistics.correlation(ncep2, mncep2, axis='xy')
print('Correlation:')
print(cor)


#*******************************************************************************
#
# Example 3:
#           We take the correlation example above a step further and specify
#           that the correlation be computed with weights based on latitude
#           area taken into account. Note that the correlation can also be
Example #38
# get the file id from the call to cdat open file through the cdms2 module
fid = cdms2.open(cmbe_file) 

#------------------
# read some variable to get time
p_sfc=fid( 'p_sfc', squeeze = 0, order = '0' )
cdutil.times.setTimeBoundsDaily(p_sfc,24)
# set time and time_bounds
time = p_sfc.getTime()
time_bounds = time.getBounds()  
time_units = time.units

# to check some statistics on the data
vv = fid['p_sfc']
print('var name = ', vv.id)
print('average = ', MV2.average(vv,axis=0).data)
print('min = ', MV2.minimum(vv))
print('max = ', MV2.maximum(vv))


#------------------
# get longitude and latitude 
lon = fid['lon']   # in memory variable - force it to be a cdms variable
lat = fid['lat'] 
# need to change manually degrees_west to degrees_east, which is required by cmor table
lon_units = lon.units 
if lon_units == 'degrees_west':    
  lon = -lon()  # change it to be a scalar value by applying () 
else: 
  lon = lon()
# need to change manually degrees_south to degrees_north which is required by cmor table
Example #39
                                temporary[region] = d_sub_aave(
                                    time=(start_t, end_t))
                                d_sub_aave = MV2.concatenate(
                                    [part1, part2], axis=0)
                                if debug:
                                    print('debug: ', region, year,
                                          d_sub_aave.getTime().asComponentTime())

                        # get pentad time series
                        list_d_sub_aave_chunks = list(
                            divide_chunks_advanced(d_sub_aave, n, debug=debug))
                        pentad_time_series = []
                        for d_sub_aave_chunk in list_d_sub_aave_chunks:
                            # ignore when chunk length is shorter than defined
                            if d_sub_aave_chunk.shape[0] >= n:
                                ave_chunk = MV2.average(
                                    d_sub_aave_chunk, axis=0)
                                pentad_time_series.append(float(ave_chunk))
                        if debug:
                            print('debug: pentad_time_series length: ',
                                  len(pentad_time_series))

                        # Keep pentad time series length in consistent
                        ref_length = int(365/n)
                        if len(pentad_time_series) < ref_length:
                            pentad_time_series = interp1d(
                                pentad_time_series, ref_length, debug=debug)

                        pentad_time_series = MV2.array(pentad_time_series)
                        pentad_time_series.units = d.units
                        pentad_time_series_cumsum = np.cumsum(
                            pentad_time_series)
Example #40
    def plot_ped_mod_on_pro(self, model, profiles, select=None, **kwargs):
        '''Plot potential energy deficit of model data corresponding to profiles position

        :Params: See: :func:`coloc_mod_on_pro`

        '''
        self.verbose('Plotting PED of colocalized %s on %s\nselect: %s\nkwargs: %s',
            model.__class__.__name__, profiles.__class__.__name__, select, kwargs)

        # =====
        # Read the stratification data
        lons_mod, lats_mod, deps_mod, temp_mod, sal_mod, pres_mod, dens_mod = \
            self.coloc_strat_mod_on_pro(model, profiles, select, **kwargs)

        # =====
        # Compute the model PED
        ddeps = meshweights(deps_mod, axis=0)
        # Mean density
        dmean = MV2.average(dens_mod, axis=0, weights=ddeps)
        # Density anomaly
        danom = dens_mod-dmean
        # Available potential energy
        ape = danom * g
        ape *= ddeps
        # Deficit
        ped_mod = MV2.average(ape, axis=0, weights=ddeps)
        ped_mod.units = 'J.m^{-2}'
        ped_mod.long_name = u"Deficit d'energie potentielle"
        ped_mod.setAxisList(lons_mod.getAxisList())
        model.verbose('PED:  %s', ped_mod)

        # =====
        # Compute the profile PED
        ped_pro, lats_pro, lons_pro = profiles.get_ped(select)
        profiles.verbose('PED:  %s', ped_pro)

        # =====
        # Plots
        vmin = ped_pro.min()
        vmax = ped_pro.max()
        if ped_mod.count():
            vmin = min(ped_mod.min(), vmin)
            vmax = max(ped_mod.max(), vmax)
        else:
            self.warning('No model data')
        if 'latitude' in select:
            lat_min, lat_max = select['latitude'][:2]
        else:
            la = model.get_latitude()
            lat_min, lat_max = min(la), max(la)
        if 'longitude' in select:
            lon_min, lon_max = select['longitude'][:2]
        else:
            lo = model.get_longitude()
            lon_min, lon_max = min(lo), max(lo)

        levels = auto_scale((vmin, vmax))
        vmin = levels[0]
        vmax = levels[-1]
        # Create the palette
        cmap = cmap_magic(levels)
        # Plot the model field averaged over the chosen period
        m = map2(lon=(lon_min, lon_max), lat=(lat_min, lat_max), show=False)
        # Plot the model in the background
        msca = None
        try: msca = m.map.scatter(lons_mod, lats_mod, s=100, c=ped_mod, vmin=vmin, vmax=vmax, cmap=cmap, label='model')
        except Exception: self.exception('Failed plotting model data')
        # Plot the observations with smaller dots
        psca = None
        try: psca = m.map.scatter(lons_pro, lats_pro, s=25, c=ped_pro, vmin=m.vmin, vmax=m.vmax, cmap=m.cmap, label='profiles')
        except Exception: self.exception('Failed plotting profiles data')

        # Colorbar
        if msca is not None:
            colorbar(msca)
        elif psca is not None:
            colorbar(psca)
Example #41
print(ac1)

data1=cdutil.ANNUALCYCLE.departures(data1,ref=ac1)
data2=cdutil.ANNUALCYCLE.departures(data2,ref=ac2)

print(data1.shape, data2.shape)

tim = data2.getTime()
lat=data2.getLatitude()
lon=data2.getLongitude()

data1=cdms2.createVariable(data1,axes=[tim,lat,lon],typecode='f',id='tas')

diff=MV2.subtract(data1,data2)
# zonal differences
z_diff=MV2.average(diff,2)
print('Zonal data shape (before): ', z_diff.shape)

z_diff=MV2.transpose(z_diff,(1,0))

# add id to data
z_diff.id='zonal_diff'
print('Zonal data shape (after): ', z_diff.shape)

# global differences
gl_diff=cdutil.averager(diff,axis='xy')

x=vcs.init()
x.setcolormap('default')
fill=x.getisofill('default')
x.plot(z_diff,fill)
Example #42
files = [
    # ... input .nc file names truncated in the original listing ...
          ]

for file in files:
    f=cdms.open(file)
    u=f('u')
    
    if file == files[0]:          # First file
        sh=list(u.shape)          # Create a list with the shape of the data
        sh.insert(0,1)            # Insert value 1 in front of the list
        accumulation = u
        newdim = MV.reshape(u,sh) # Create a new 1D dimension
        
    else:
        # add u at the end of accumulation on dimension 0
        accumulation = MV.concatenate((accumulation,u))
        tmp = MV.reshape(u,sh)                # Create a new 1D dimension
        newdim = MV.concatenate((newdim,tmp)) # Add u to the newdim over the new dimension
        
    f.close()
    
    
print(accumulation.shape)   # All time added over the same dimension
print(newdim.shape)         # Has a new dimension for years

avg = MV.average(accumulation)
std = genutil.statistics.std(newdim)

print(avg.shape)
print(std.shape)

Example #43
import MV2
from markError import clearError,markError,reportError

print('Test 15: reshape and mask and average ...', end='')
a=MV2.arange(100)
try:
    failed = False
    a.shape=(10,10)
except:
    failed = True
    a = MV2.reshape(a,(10,10))
if failed is True: markError('shape should not have worked (protected attribute)')
if len(a.getAxisList())!=2: markError('reshape did not produce 2 axes')
        
a=MV2.masked_greater(a,23)
b=MV2.average(a,axis=0)
c=a-b
Example #44
def compute(dm,do):
    """ Computes Mean Absolute Error"""
    mae = MV.average(MV.absolute(MV.subtract(dm,do)))
    return float(mae) 
Example #45
def compute(dm,do):
    """ Computes bias"""
    return MV.float(MV.average(MV.subtract(dm,do)))
Example #46
def compute(params):
    fileName = params.fileName
    month = params.args.month
    monthname = params.monthname
    varbname = params.varname
    template = populateStringConstructor(args.filename_template, args)
    template.variable = varbname
    # Units on output (*may be converted below from the units of input*)
    outunits = 'mm/d'
    startime = 1.5  # GMT value for starting time-of-day

    reverted = template.reverse(os.path.basename(fileName))
    dataname = reverted["model"]
    if dataname not in args.skip:
        try:
            print('Data source:', dataname)
            print('Opening %s ...' % fileName)
            f = cdms2.open(fileName)

            # Composite-mean and composite-s.d diurnal cycle for month and year(s):
            iYear = 0
            for year in range(args.firstyear, args.lastyear + 1):
                print('Year %s:' % year)
                startTime = cdtime.comptime(year, month, 1, 1, 30)
                # Last possible second to get all tpoints
                finishtime = startTime.add(
                    1, cdtime.Month).add(-1.5, cdtime.Hour).add(.1, cdtime.Second)
                print('Reading %s from %s for time interval %s to %s ...' % (varbname, fileName, startTime, finishtime))
                # Transient variable stores data for current year's month.
                tvarb = f(varbname, time=(startTime, finishtime))
                # *HARD-CODES conversion from kg/m2/sec to mm/day.
                tvarb *= 86400
                print('Shape:', tvarb.shape)
                # The following tasks need to be done only once, extracting
                # metadata from first-year file:
                if year == args.firstyear:
                    tc = tvarb.getTime().asComponentTime()
                    day1 = cdtime.comptime(tc[0].year, tc[0].month)
                    firstday = tvarb(
                        time=(
                            day1,
                            day1.add(
                                1,
                                cdtime.Day),
                            "con"))
                    dimensions = firstday.shape
                    # print '  Shape = ', dimensions
                    # Number of time points in the selected month for one year
                    N = dimensions[0]
                    nlats = dimensions[1]
                    nlons = dimensions[2]
                    deltaH = 24. / N
                    dayspermo = tvarb.shape[0] // N
                    print('  %d timepoints per day, %d hr intervals between timepoints' % (N, deltaH))
                    comptime = firstday.getTime()
                    modellons = tvarb.getLongitude()
                    modellats = tvarb.getLatitude()
                    # Longitude values are needed later to compute Local Solar
                    # Times.
                    lons = modellons[:]
                    print('  Creating temporary storage and output fields ...')
                    # Sorts tvarb into separate GMTs for one year
                    tvslice = MV2.zeros((N, dayspermo, nlats, nlons))
                    # Concatenates tvslice over all years
                    concatenation = MV2.zeros(
                        (N, dayspermo * nYears, nlats, nlons))
                    LSTs = MV2.zeros((N, nlats, nlons))
                    for iGMT in range(N):
                        hour = iGMT * deltaH + startime
                        print('  Computing Local Standard Times for GMT %5.2f ...' % hour)
                        for j in range(nlats):
                            for k in range(nlons):
                                LSTs[iGMT, j, k] = (hour + lons[k] / 15) % 24
                for iGMT in range(N):
                    hour = iGMT * deltaH + startime
                    print('  Choosing timepoints with GMT %5.2f ...' % hour)
                    # Transient-variable slice: every Nth tpoint gets all of
                    # the current GMT's tpoints for current year:
                    tvslice[iGMT] = tvarb[iGMT:tvarb.shape[0]:N]
                    concatenation[iGMT, iYear *
                                  dayspermo: (iYear +
                                              1) *
                                  dayspermo] = tvslice[iGMT]
                iYear += 1
            f.close()

            # For each GMT, take mean and standard deviation over all years for
            # the chosen month:
            avgvalues = MV2.zeros((N, nlats, nlons))
            stdvalues = MV2.zeros((N, nlats, nlons))
            for iGMT in range(N):
                hour = iGMT * deltaH + startime
                print('Computing mean and standard deviation over all GMT %5.2f timepoints ...' % hour)
                # Assumes first dimension of input ("axis#0") is time
                avgvalues[iGMT] = MV2.average(concatenation[iGMT], axis=0)
                stdvalues[iGMT] = genutil.statistics.std(concatenation[iGMT])
            avgvalues.id = 'diurnalmean'
            stdvalues.id = 'diurnalstd'
            LSTs.id = 'LST'
            avgvalues.units = outunits
            # Standard deviation has same units as mean (not so for
            # higher-moment stats).
            stdvalues.units = outunits
            LSTs.units = 'hr'
            LSTs.longname = 'Local Solar Time'
            avgvalues.setAxis(0, comptime)
            avgvalues.setAxis(1, modellats)
            avgvalues.setAxis(2, modellons)
            stdvalues.setAxis(0, comptime)
            stdvalues.setAxis(1, modellats)
            stdvalues.setAxis(2, modellons)
            LSTs.setAxis(0, comptime)
            LSTs.setAxis(1, modellats)
            LSTs.setAxis(2, modellons)
            avgoutfile = ('%s_%s_%s_%s-%s_diurnal_avg.nc') % (varbname,
                                                              dataname, monthname,
                                                              str(args.firstyear), str(args.lastyear))
            stdoutfile = ('%s_%s_%s_%s-%s_diurnal_std.nc') % (varbname,
                                                              dataname, monthname, str(
                                                                  args.firstyear),
                                                              str(args.lastyear))
            LSToutfile = ('%s_%s_LocalSolarTimes.nc' % (varbname, dataname))
            if not os.path.exists(args.results_dir):
                os.makedirs(args.results_dir)
            f = cdms2.open(
                os.path.join(
                    args.results_dir,
                    avgoutfile),
                'w')
            g = cdms2.open(
                os.path.join(
                    args.results_dir,
                    stdoutfile),
                'w')
            h = cdms2.open(
                os.path.join(
                    args.results_dir,
                    LSToutfile),
                'w')
            f.write(avgvalues)
            g.write(stdvalues)
            h.write(LSTs)
            f.close()
            g.close()
            h.close()
        except Exception as err:
            print("Failed for model %s with error: %s" % (dataname, err))
Example #47
## asarray(data, typecode=None)
##   asarray(data, typecode=None) = array(data, typecode=None, copy=0)
##   Returns data unchanged if data is a MaskedArray and typecode is None
##   or matches data's typecode.

## average(a, axis=0, weights=None, returned=0) 
##   average(a, axis=0, weights=None, returned=0)
##   Computes average along indicated axis. Masked elements are ignored.
##   Result may equal masked if average cannot be computed.
##   If weights are given, result is sum(a*weights)/sum(weights), with
##   all elements masked in a or in weights ignored.
##   weights if given must have a's shape. 
##   Denominator is multiplied by 1.0 to prevent truncation for integers.
##   returned governs return of second quantity, the weights.
xav = MV2.average(xones, axis=1)
xav2 = MV2.average(ud)
xav3 = MV2.average(udat)
xav4, wav4 = MV2.average(udat, weights=MV2.ones(udat.shape, numpy.float), returned=1)
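A numpy equivalent (assumed data, no MV2 dependency) showing what returned=1 hands back, the averages together with the summed weights:

import numpy as np

data = np.arange(12, dtype=float).reshape(3, 4)
wts = np.ones(data.shape)

# np.average mirrors MV2.average(..., returned=1): with returned=True it
# gives (sum(a*weights)/sum(weights), sum(weights)) along the chosen axis
avg, wsum = np.average(data, axis=0, weights=wts, returned=True)
print(avg)   # column means: [4. 5. 6. 7.]
print(wsum)  # summed weights per column: [3. 3. 3. 3.]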

## choose(indices, t) 
##   Shaped like indices, values t[i] where at indices[i]
##   If t[j] is masked, special treatment to preserve type.
ct1 = MV2.TransientVariable([1,1,2,0,1])
ctr = MV2.choose(ct1, [numpy.ma.masked, 10,20,30,40])
if not MV2.allclose(ctr, [10, 10, 20, 100, 10]): markError('choose error 1')
ctx = MV2.TransientVariable([1,2,3,150,4])
cty = -MV2.TransientVariable([1,2,3,150,4])
ctr = MV2.choose(MV2.greater(ctx,100), (ctx, 100))
if not MV2.allclose(ctr, [1,2,3,100,4]): markError('choose error 2')
ctr = MV2.choose(MV2.greater(ctx,100), (ctx, cty))
Example #48
def ensemble_average(basedir, grid = None, func = None):
    models = np.unique([x.split(".")[1] for x in glob.glob(basedir+"*")])
    #Deal with extremely annoying GISS physics
    giss = np.where([x.find("GISS")>=0 for x in models])[0]
    oldmodels = models
    models = np.delete(models, giss)
    for gissmo in oldmodels[giss]:
        physics_versions = np.unique([x.split(".")[3][-2:] for x in glob.glob(basedir+"*"+gissmo+"*")])
        for pv in physics_versions:
            models = np.append(models, gissmo+" "+pv)
        
    
    if grid is None: #Get the coarsest grid
        the_file,grid = get_coarsest_grid(basedir)
    
    if "CESM1-WACCM" in models:
        i = np.argwhere(models == "CESM1-WACCM")
        models = np.delete(models,i)
    if "CanCM4" in models:
        i = np.argwhere(models == "CanCM4")
        models = np.delete(models,i)
        

    mo = models[0]
    print(mo)
    ens = get_ensemble(basedir,mo)
    ens0 = ens[0]
    print(ens0)
    f = cdms.open(ens0)
    variable = ens0.split(".")[-4]
    data = f(variable).regrid(grid,regridTool='regrid2')
    cdutil.setTimeBoundsMonthly(data)
    if func is not None:
        data = func(data)
    f.close()
    
    time_and_space = data.shape
    realizations = MV.zeros((len(ens),)+time_and_space)
    realizations[0] = data
    if len(ens)>1:
        for i in range(len(ens))[1:]:
            f = cdms.open(ens[i])
            print(ens[i])
            data = f(variable).regrid(grid,regridTool='regrid2')
            f.close()
            cdutil.setTimeBoundsMonthly(data)
            if func is not None:
                data = func(data)
            realizations[i] = data
    
    model_average = MV.zeros((len(models),)+time_and_space)+1.e20
    j= 0
    model_average[j] = MV.average(realizations,axis=0)
    
    for mo in models[1:]:
        print(mo)
        j+=1
        ens = get_ensemble(basedir,mo)
        realizations = MV.zeros((len(ens),)+time_and_space)
        for i in range(len(ens)):
            f = cdms.open(ens[i])
            #print ens[i]
            data = f(variable).regrid(grid,regridTool='regrid2')
            f.close()
            cdutil.setTimeBoundsMonthly(data)
            if func is not None:
                data = func(data)
            print(data.shape)
            print(time_and_space)
            print(data.shape == time_and_space)
            if data.shape == time_and_space:
                realizations[i] = data
                masked_ma = False
            else:
                masked_ma = True
        
        if not masked_ma:
            model_average[j] = MV.average(realizations,axis=0)
        else:
            print("not the right shape: "+mo)
            model_average[j] = MV.ones(time_and_space)+1.e20

    M2 = MV.masked_where(model_average>1.e10,model_average)
    M = MV.average(M2,axis=0)
    M.setAxisList(data.getAxisList())
    M.id = data.id
    M.name = M.id
    
    return M
Example #49
# Now we can loop through the lines and look at the content
data1=[]
data2=[]

for line in lines:
    # Split the line into a list of strings, separating
    # on spaces, tabs, or newlines
    sp = line.split()

    # Now try to see if the first element is a number, if not skip
    # we are only interested in the 2nd and third column here
    try:
        val1=float(sp[1])   # second column
        val2=float(sp[2])   # third column
        
        data1.append(val1)
        data2.append(val2)
        
    except (IndexError, ValueError):
        pass       # this line didn't start with two floats; skip it
    
    
# Now converts the 2 datasets to MV for use in other CDAT Packages
data1=MV.array(data1,id='dataset1')
data2=MV.array(data2,id='dataset2')

# Just for fun prints the average of data1
print(MV.average(data1))