def main(project_info):
    """Diagnostics and plotting script for Southern Hemisphere radiation."""

    # ESMValProject wraps project_info and exposes convenience accessors
    # for the run-time configuration used by this diagnostic.
    esmval = ESMValProject(project_info)
    cfg_path = esmval.get_configfile()
    out_dir = esmval.get_plot_dir()
    verbosity = esmval.get_verbosity()

    # A-laue_ax+
    # Record provenance/reference tags for this diagnostic script.
    script_name = esmval.get_diag_script_name()
    res = esmval.write_references(script_name,                # diag script name
                                  ["A_maek_ja"],              # authors
                                  ["A_eval_ma", "A_jone_co"], # contributors
                                  [""],                       # diag_references
                                  [""],                       # obs_references
                                  ["P_embrace"],              # proj_references
                                  project_info,
                                  verbosity,
                                  False)
    # A-laue_ax-

    # Parse the model configuration file and make sure the plot
    # directory exists before any output is written.
    modelconfig = ConfigParser.ConfigParser()
    modelconfig.read(cfg_path)
    esmval.ensure_directory(out_dir)

    # Run the scatter plot only when the configuration enables it.
    if modelconfig.getboolean('general', 'plot_scatter'):
        info("Starting scatter plot", verbosity, 2)
        process_scatter(esmval, modelconfig)
예제 #2
0
def main(project_info):
    """Diagnostics and plotting script for Southern Hemisphere radiation.

    Reads the run-time configuration via ESMValProject, writes the
    reference/provenance tags, then dispatches to the cloud, turbulent-flux
    or radiation plotting routines depending on which variables the current
    diagnostic requests.
    """

    # ESMValProject provides some easy methods to access information in
    # project_info but you can also access it directly (as in main.py)
    # First we get some general configurations (ESMValProject)
    # and then we read in the model configuration file

    E = ESMValProject(project_info)
    config_file = E.get_configfile()
    datakeys = E.get_currVars()
    plot_dir = E.get_plot_dir()
    verbosity = E.get_verbosity()

    # A-laue_ax+
    diag_script = E.get_diag_script_name()
    res = E.write_references(diag_script,              # diag script name
                             ["A_maek_ja"],            # authors
                             ["A_eval_ma", "A_jone_co"], # contributors
                             [""],                     # diag_references
                             [""],                     # obs_references
                             ["P_embrace"],            # proj_references
                             project_info,
                             verbosity,
                             False)
    # A-laue_ax-

    modelconfig = ConfigParser.ConfigParser()
    modelconfig.read(config_file)
    E.ensure_directory(plot_dir)

    # Check at which stage of the program we are.  Cloud variables take
    # precedence, then turbulent fluxes; any other variable set is treated
    # as a radiation run (same priority as the original if/elif/else chain).
    clouds = any(key in datakeys for key in ('clt', 'clivi', 'clwvi'))
    fluxes = not clouds and any(key in datakeys for key in ('hfls', 'hfss'))
    radiation = not clouds and not fluxes

    # Check which parts of the code to run.  The flags are plain booleans,
    # so test them directly instead of comparing with "is True".
    if modelconfig.getboolean('general', 'plot_clouds') and clouds:
        info("Starting to plot clouds", verbosity, 2)
        process_clouds(E, modelconfig)

    if modelconfig.getboolean('general', 'plot_fluxes') and fluxes:
        info("Starting to plot turbulent fluxes", verbosity, 2)
        process_fluxes(E, modelconfig)

    if modelconfig.getboolean('general', 'plot_radiation') and radiation:
        info("Starting to plot radiation graphs", verbosity, 2)
        process_radiation(E, modelconfig)
예제 #3
0
def main(project_info):
    """Dummy diagnostic demonstrating the direct python interface."""
    print('Hello, here is the dummy routine from the direct python interface!')

    # create instance of a wrapper that allows easy access to data
    E = ESMValProject(project_info)

    # get filenames of preprocessed climatological mean files
    model_filenames = E.get_clim_model_filenames(variable='ta', monthly=True)
    # Use the function form of print (the original used the Python-2-only
    # statement form here, inconsistent with every other print in this
    # script and a SyntaxError under Python 3).
    print(model_filenames)

    print('Do something here!')
    print('ENDED SUCESSFULLY!!')
    print('')
예제 #4
0
def main(project_info):
    """Diagnostics and plotting script for Tropical Variability.
    We use ts as a proxy for Sea Surface Temperature. """

    # Wrap project_info for convenient access to the run-time settings,
    # then load the model configuration file referenced by it.
    project = ESMValProject(project_info)
    cfg_file = project.get_configfile()
    plots_path = project.get_plot_dir()
    verbosity = project.get_verbosity()

    # A-laue_ax+
    # Write the provenance/reference tags for this diagnostic.
    script_name = project.get_diag_script_name()
    res = project.write_references(script_name,                # diag script name
                                   ["A_maek_ja"],              # authors
                                   ["A_eval_ma", "A_jone_co"], # contributors
                                   ["D_li14jclim"],            # diag_references
                                   [""],                       # obs_references
                                   ["P_embrace"],              # proj_references
                                   project_info,
                                   verbosity,
                                   False)

    # A-laue_ax-

    modelconfig = ConfigParser.ConfigParser()
    modelconfig.read(cfg_file)
    project.ensure_directory(plots_path)

    # Run only the diagnostic parts enabled in the configuration file.
    if modelconfig.getboolean('general', 'plot_zonal_means'):
        info("Starting to plot zonal means", verbosity, 2)
        process_zonal_means(project, modelconfig)

    if modelconfig.getboolean('general', 'plot_scatter'):
        info("Starting scatterplot of temperature and precipitation",
             verbosity, 2)
        process_scatterplot(project, modelconfig)

    if modelconfig.getboolean('general', 'plot_equatorial'):
        info("Starting to gather values for equatorial means", verbosity, 2)
        process_equatorial_means(project, modelconfig)
예제 #5
0
def main(project_info):
    """Williams & Webb (2009) Cloud Regime Error Metric (CREM) diagnostic.

    For every model in the project, gathers the preprocessed ISCCP, radiation
    and snow/sea-ice climatology files, computes the CREM via crem_calc(),
    and saves a horizontal bar chart of the per-model metric values.
    """

    # print(">>>>>>>> entering ww09_ESMValTool.py <<<<<<<<<<<<")

    # create instance of a wrapper that allows easy access to data
    E = ESMValProject(project_info)

    config_file = E.get_configfile()
    plot_dir = E.get_plot_dir()
    verbosity = E.get_verbosity()
    plot_type = E.get_graphic_format()
    diag_script = E.get_diag_script_name()

    res = E.write_references(diag_script,              # diag script name
                             ["A_will_ke"],            # authors
                             [""],                     # contributors
                             ["D_Williams09climdyn"],  # diag_references
                             ["E_isccp_d1"],           # obs_references
                             ["P_cmug"],               # proj_references
                             project_info,
                             verbosity,
                             False)

    modelconfig = ConfigParser.ConfigParser()
    modelconfig.read(config_file)

    # create list of model names (plot labels)
    models = []
    for model in E.project_info['MODELS']:
        models.append(model.split_entries()[1])

    # number of models
    nummod = len(E.project_info['MODELS'])
    crems = np.empty(nummod)

    # get filenames of preprocessed climatological mean files (all models)
    fn_alb = get_climo_filenames(E, variable='albisccp')
    fn_pct = get_climo_filenames(E, variable='pctisccp')
    fn_clt = get_climo_filenames(E, variable='cltisccp')
    fn_su  = get_climo_filenames(E, variable='rsut')
    fn_suc = get_climo_filenames(E, variable='rsutcs')
    fn_lu  = get_climo_filenames(E, variable='rlut')
    fn_luc = get_climo_filenames(E, variable='rlutcs')
    fn_snc = get_climo_filenames(E, variable='snc')
    fn_sic = get_climo_filenames(E, variable='sic')

    # Fall back to snow amount (snw) when snow cover (snc) is unavailable.
    if not fn_snc:
        print("no data for variable snc found, using variable snw instead")
        fn_snw = get_climo_filenames(E, variable='snw')

    # loop over models and calulate CREM

    for i in range(nummod):

        if fn_snc:
            snc = fn_snc[i]
        else:
            snc = ""

        pointers = {'albisccp_nc': fn_alb[i],
                    'pctisccp_nc': fn_pct[i],
                    'cltisccp_nc': fn_clt[i],
                    'rsut_nc'    : fn_su[i],
                    'rsutcs_nc'  : fn_suc[i],
                    'rlut_nc'    : fn_lu[i],
                    'rlutcs_nc'  : fn_luc[i],
                    'snc_nc'     : snc,
                    'sic_nc'     : fn_sic[i]}

        if not fn_snc:
            pointers['snw_nc'] = fn_snw[i]

        # calculate CREM

        (CREMpd, __) = crem_calc(E, pointers)

        crems[i] = CREMpd

    print("------------------------------------")
    print(crems)
    print("------------------------------------")

    # plot results

    fig = plt.figure()
    ypos = np.arange(nummod)
    plt.barh(ypos, crems)
    plt.yticks(ypos + 0.5, models)
    plt.xlabel('Cloud Regime Error Metric')

    # draw observational uncertainties (dashed red line)
    plt.plot([0.96, 0.96], [0, nummod], 'r--')

    # if needed, create directory for plots
    if not os.path.exists(plot_dir):
        os.makedirs(plot_dir)

    # Build the output path with os.path.join so the script works whether
    # or not plot_dir carries a trailing separator (the original raw string
    # concatenation silently produced a wrong path without one).
    figure_path = os.path.join(plot_dir, 'ww09_metric_multimodel.' + plot_type)
    plt.savefig(figure_path)
    print("Wrote " + figure_path)
예제 #6
0
def read_pr_sm_topo(project_info, model):

    """
    ;; Arguments
    ;;    project_info: dictionary
    ;;          all info from namelist
    ;;
    ;; Return
    ;;    pr: iris cube [time, lat, lon]
    ;;          precipitation time series
    ;;    sm: iris cube [time, lat, lon]
    ;;          soil moisture time series
    ;;    topo: array [lat, lon]
    ;;          topography
    ;;    lon: array [lon]
    ;;          longitude
    ;;    lat: array [lat]
    ;;          latitude
    ;;    time: iris cube coords
    ;;          time info of cube
    ;;    time_bnds_1: float
    ;;          first time_bnd of time series
    ;;
    ;;
    ;; Description
    ;;    Read cmip5 input data for computing the diagnostic
    ;;
    """

    import projects
    E = ESMValProject(project_info)
    verbosity = E.get_verbosity()
    #-------------------------
    # Read model info
    #-------------------------

    # Instantiate the project class named by the first entry of the model
    # line (e.g. "CMIP5") via the dynamically imported 'projects' module.
    currProject = getattr(vars()['projects'], model.split_entries()[0])()

    model_info = model.split_entries()

    mip = currProject.get_model_mip(model)
    exp = currProject.get_model_exp(model)
    start_year = currProject.get_model_start_year(model)
    end_year = currProject.get_model_end_year(model)

    years = range(int(start_year), int(end_year) + 1)

    '''
    #-------------------------
    # Read model info
    #-------------------------

    model_name = model_info[1]
    time_step = model_info[2]
    exp_fam = model_info[3]
    model_run = model_info[4]
    year_start = model_info[5]
    year_end = model_info[6]
    filedir = model_info[7]

    years = range(int(year_start), int(year_end)+1)
    '''


    #-------------------------
    # Input data directories
    #-------------------------
    currDiag = project_info['RUNTIME']['currDiag']

    # Look up the field type matching each requested variable by position.
    pr_index = currDiag.get_variables().index('pr')
    pr_field = currDiag.get_field_types()[pr_index]

    sm_index = currDiag.get_variables().index('mrsos')
    sm_field = currDiag.get_field_types()[sm_index]

    indir = currProject.get_cf_outpath(project_info, model)
    in_file = currProject.get_cf_outfile(project_info, model, pr_field, 'pr', mip, exp)
    pr_files = [os.path.join(indir, in_file)]

    in_file = currProject.get_cf_outfile(project_info, model, sm_field, 'mrsos', mip, exp)
    sm_files = [os.path.join(indir, in_file)]

    '''
    #-------------------------
    # Input data directories
    #-------------------------
    pr_files = []
    sm_files = []

    for yy in years:

        Patt = filedir+'pr_'+time_step+'_'+model_name+'_'+exp_fam+'_'+\
               model_run+'_'+str(yy)+'*.nc'
        pr_files.append(glob.glob(Patt))

        Patt = filedir+'mrsos_'+time_step+'_'+model_name+'_'+exp_fam+'_'+\
                model_run+'_'+str(yy)+'*.nc'
        sm_files.append(glob.glob(Patt))

    pr_files = [l[0] for l in pr_files if len(l)>0]
    pr_files = sorted(pr_files)

    sm_files = [l[0] for l in sm_files if len(l)>0]
    sm_files = sorted(sm_files)
    '''

    #----------------------
    # Read in precipitation
    #----------------------

    pr_list = []

    for pr_file in pr_files:

        info('Reading precipitation from ' + pr_file, verbosity, required_verbosity=1)

        pr = iris.load(pr_file)[0]

        # Strip all cube attributes so that per-file metadata differences
        # do not prevent concatenation below.
        for at_k in pr.attributes.keys():
            pr.attributes.pop(at_k)

        pr_list.append(pr)

    pr = iris.cube.CubeList(pr_list)
    pr = pr.concatenate()[0]

    # Convert longitude from 0_360 to -180_180

    pr = coord_change([pr])[0]

    # Add metadata: day, month, year

    add_month(pr, 'time')
    add_day_of_month(pr, 'time', name='dom')
    add_year(pr, 'time')

    # Convert units to kg m-2 hr-1

    pr.convert_units('kg m-2 hr-1')

    #-----------------------
    # Read in soil moisture
    #-----------------------

    sm_list = []

    for sm_file in sm_files:

        info('Reading soil moisture from ' + sm_file, verbosity, required_verbosity=1)

        sm = iris.load(sm_file)[0]

        # Same attribute stripping as for precipitation, for the same reason.
        for at_k in sm.attributes.keys():
            sm.attributes.pop(at_k)

        sm_list.append(sm)

    sm = iris.cube.CubeList(sm_list)
    sm = sm.concatenate()[0]

    # Convert longitude from 0_360 to -180_180

    sm = coord_change([sm])[0]

    # Add metadata: day, month, year

    add_month(sm, 'time')
    add_day_of_month(sm, 'time', name='dom')
    add_year(sm, 'time')

    #----------------------------------------------
    # Constrain pr and sm data to latitude 60S_60N
    #----------------------------------------------

    # 59.0 (not 60.0) keeps cell centres strictly inside the 60S-60N band.
    latconstraint = iris.Constraint(latitude=lambda cell: -59.0 <= cell <= 59.0)

    pr = pr.extract(latconstraint)
    sm = sm.extract(latconstraint)

    #---------------------------------------------------
    # Read in grid info: latitude, longitude, timestamp
    #---------------------------------------------------

    lon = sm.coords('longitude')[0].points
    lat = sm.coords('latitude')[0].points
    time = sm.coords('time')

    # --------------------------------------
    # Convert missing data (if any) to -999.
    # --------------------------------------

    # NOTE(review): bare except deliberately treats "no mask present" as
    # "nothing to convert"; it also hides any other failure here.
    try:
        sm.data.set_fill_value(-999)
        sm.data.data[sm.data.mask] = -999.

    except:
        info('no missing data conversion', verbosity, required_verbosity=1)

    #----------------------
    # Read in topography
    #----------------------

    # Topography map specs:
    # latitude 60S_60N
    # longitude 180W_180E
    # model resolution

    #ftopo = currProject.get_cf_fx_file(project_info, model)

    #dt = '>f4'
    #topo = (np.fromfile(ftopo, dtype=dt)).reshape(len(lat), len(lon))

    topo = get_topo(project_info, lon, lat, model)

    #----------------------
    # Read in time bounds
    #----------------------

    indir, infiles = currProject.get_cf_infile(project_info, model, pr_field, 'pr', mip, exp)
    Patt = os.path.join(indir, infiles)
    pr_files = sorted(glob.glob(Patt))

    # Keep only the fractional (sub-day) part of the first time bound;
    # downstream code uses it to detect the 3-hourly offset convention.
    ncf = nc4.Dataset(pr_files[0])
    time_bnds_1 = ncf.variables['time_bnds'][0][0]
    time_bnds_1 = time_bnds_1 - int(time_bnds_1)
    ncf.close()

    #-----------------------------------------------
    # Return input data to compute sm_pr diagnostic
    #-----------------------------------------------
    return pr, sm, topo, lon, lat, time, time_bnds_1
예제 #7
0
def main(project_info):
    """
    ;; Description
    ;;    Main fuction
    ;;    Call all callable fuctions to
    ;;    read CMIP5 data, compute and plot diagnostic
    """

    E = ESMValProject(project_info)
    plot_dir = E.get_plot_dir()
    work_dir = E.get_work_dir()
    verbosity = E.get_verbosity()
    # All intermediate/diagnostic output goes under the work directory.
    fileout = work_dir

    if not os.path.exists(plot_dir):
        os.makedirs(plot_dir)

    for model in project_info['MODELS']:
        info(model, verbosity, required_verbosity=1)

        # Temporary sub-directories consumed by the fortran routines below;
        # both are removed again at the end of each model iteration.
        if not os.path.exists(work_dir+'/sample_events'):
            os.makedirs(work_dir+'/sample_events')

        if not os.path.exists(work_dir+'/event_output'):
            os.makedirs(work_dir+'/event_output')


        # --------------------------------------
        # Get input data to compute diagnostic
        # --------------------------------------

        # Read cmip5 model data
        pr, sm, topo, lon, lat, time, time_bnds_1 = read_pr_sm_topo(project_info, model)

        # --------------------------------------
        # Get sm monthly climatology
        # at selected local time: 6:00 am
        # --------------------------------------

        smclim = get_smclim(sm, lon, time)

        # -------------------------------
        # Compute diagnostic per month
        # -------------------------------

        # NOTE(review): trailing slash is required — the fortran routines
        # concatenate filenames directly onto this prefix.
        samplefileout = fileout + 'sample_events/'

        for mn in np.arange(1, 13):

            # -------------------------------------------------
            # Create montly arrays required by fortran routines
            # -------------------------------------------------

            prbef, smbef, praft, smaft, \
                monthlypr, monthlysm, days_per_year = get_monthly_input(project_info, mn, time, 
                                                         lon, lat, time_bnds_1, pr, sm, fileout,
                                                         samplefileout, model,
                                                         verbosity)

            # -----------------------
            # Run fortran routines
            # -----------------------

            info('Executing global_rain_sm for month ' + str(mn), verbosity, required_verbosity=1)

            # All arrays are passed Fortran-contiguous for the f2py wrappers.
            grs.global_rain_sm(np.asfortranarray(monthlypr),
                               np.asfortranarray(prbef),
                               np.asfortranarray(praft),
                               np.asfortranarray(monthlysm),
                               np.asfortranarray(smbef),
                               np.asfortranarray(smaft),
                               np.asfortranarray(smclim[mn - 1, :, :]),
                               np.asfortranarray(topo),
                               np.asfortranarray(lon),
                               np.asfortranarray(mn),
                               fileout, days_per_year)

            info('Executing sample_events for month ' + str(mn), verbosity, required_verbosity=1)

            se.sample_events(np.asfortranarray(monthlysm),
                             np.asfortranarray(smbef),
                             np.asfortranarray(smaft),
                             np.asfortranarray(lon),
                             np.asfortranarray(lat),
                             np.asfortranarray(mn),
                             fileout, days_per_year, samplefileout)

        # ---------------------------------------------------
        # Compute p_values (as in Fig. 3, Taylor et al 2012)
        # --------------------------------------------------
        info('Computing diagnostic', verbosity, required_verbosity=1)

        xs, ys, p_vals = get_p_val(samplefileout)

        # --------------------------------------------------
        # Save diagnostic to netCDF file and plot
        # --------------------------------------------------
        write_nc(fileout, xs, ys, p_vals, project_info, model)

        plot_diagnostic(fileout, plot_dir, project_info, model)

        # --------------------------------------------------
        # Remove temporary folders
        # --------------------------------------------------
        shutil.rmtree(str(fileout) + 'event_output')
        shutil.rmtree(str(fileout) + 'sample_events')
예제 #8
0
def plot_diagnostic(fileout, plot_dir, project_info, model):

    """
    ;; Arguments
    ;;    fileout: dir
    ;;          directory to save the plot
    ;;
    ;; Description
    ;;    Plot diagnostic and save .png plot
    ;;
    """
    import projects
    E = ESMValProject(project_info)
    verbosity = E.get_verbosity()

    #-------------------------
    # Read model info
    #-------------------------

    currProject = getattr(vars()['projects'], model.split_entries()[0])()

    start_year = currProject.get_model_start_year(model)
    end_year = currProject.get_model_end_year(model)
    model_info = model.split_entries()


    # Read code output in netCDF format: derive the netCDF name from the
    # plot-output filename by swapping the graphic suffix for "nc"
    # (must mirror the naming used in write_nc()).
    diag_name = 'sm-pr-diag-plot'
    netcdf_file = E.get_plot_output_filename(diag_name=diag_name,
                                             variable='pr-mrsos',
                                             model=model_info[1],
                                             specifier='Taylor2012-diagnostic')
    suffix = E.get_graphic_format() + "$"
    netcdf_file = re.sub(suffix, "nc", netcdf_file)
    ncf = nc4.Dataset(os.path.join(fileout, netcdf_file), 'r')
    p_val =  ncf.variables['T12_diag'][:,:]
    ncf.close()

    # -999 is the fill value written by write_nc(); mask it out for plotting.
    mp_val = np.ma.masked_equal(p_val, -999)

    # Gridding info: Global diagnostic 5x5 deg
    LON = np.arange(-180, 185, 5)
    LAT = np.arange(-60, 65, 5)

    # Define figure

    F, ax = plt.subplots(nrows=1, ncols=1, **dict(figsize=[15, 8]))

    # Cmap: discrete 7-bin blue/white/red colormap with uneven percentile
    # bounds; missing cells are drawn in grey ('GainsBoro').
    cmap = cm.get_cmap(name='bwr_r', lut=7)
    cmaplist = [cmap(i) for i in range(cmap.N)]
    cmap = cmap.from_list('Custom cmap', cmaplist, cmap.N)
    bounds = [0., 1, 5, 10, 90, 95, 99, 100]
    norm = col.BoundaryNorm(bounds, cmap.N)
    cmap.set_bad('GainsBoro')

    # Basemap
    map_ax = Basemap(ax=ax, projection='cyl', resolution='l',
                     llcrnrlat=-60, urcrnrlat=60,
                     llcrnrlon=-180, urcrnrlon=180,
                     fix_aspect=False)
    map_ax.drawcoastlines(color='k', linewidth=.7)

    # Plot p_val on basemap
    I = map_ax.pcolormesh(LON, LAT, mp_val, cmap=cmap, norm=norm)

    # Colorbar
    cax = F.add_axes([0.94, 0.15, 0.02, 0.7])
    cbar = plt.colorbar(I, cax=cax, cmap=cmap)

    plt.suptitle('Preference for afternoon precipitation over soil moisture anomalies, ' + model_info[1] + " (" + start_year + "-" + end_year + ")",
                  fontsize = 14)

    diag_name = 'sm-pr-diag-plot'
    figure_filename = E.get_plot_output_filename(diag_name=diag_name,
                                                 variable='pr-mrsos',
                                                 model=model_info[1])

    # Save figure to fileout
    plt.savefig(os.path.join(plot_dir, figure_filename))
예제 #9
0
def get_topo(project_info, longi, lati, model):

    """ 
    ;; Arguments
    ;;    in_dir: dir
    ;;          directory with input file "topo_var_5x5.gra"
    ;;    longi: array [lon]
    ;;          longitude in degrees east
    ;;    lati: array [lat]
    ;;          latitude
    ;; Return 
    ;;    topo: array [lat, lon]
    ;;          topography ranges in model grid
    ;;
    ;; Description
    ;;    Computes topography information
    ;;
    """
    import projects
    E = ESMValProject(project_info)
    verbosity = E.get_verbosity()

    #-------------------------
    # Read model info
    #-------------------------

    currProject = getattr(vars()['projects'], model.split_entries()[0])()

    model_info = model.split_entries()


    # load topo max-min topography(m)
    # within 1.25x1.25 area

    # Source grid: 0.25-degree cells, 1440 x 480 covering 180W-180E, 60S-60N
    # (see the lon/lat reconstruction in the loop below).
    nx = 1440
    ny = 480

    ftopo = currProject.get_cf_fx_file(project_info, model)

    info('topo file: ' + ftopo , verbosity, required_verbosity=1)

    # Flat binary file of big-endian float32; record 3 (0-based) holds the
    # topography range field used here.
    dt = '>f4'
    topo = (np.fromfile(ftopo, dtype=dt)).reshape(4, ny, nx)
    topo_range = np.transpose(topo[3,:,:])

    # Average onto model grid

    nxo = len(longi)
    nyo = len(lati)
    lon1 = longi[0]
    lat1 = lati[0]
    dlon = np.abs(longi[0]-longi[1])
    dlat = np.abs(lati[0]-lati[1])

    # topo_model accumulates sums; n counts contributing source cells so a
    # mean can be taken per target cell afterwards.
    topo_model = np.zeros((nxo, nyo), dtype = 'f4')
    n = np.zeros((nxo, nyo), dtype = 'f4')

    for j in range(0, ny):
        for i in range(0, nx):

            # Centre coordinates of source cell (i, j) on the 0.25-deg grid.
            lon = i*0.25-179.875
            lat = j*0.25-59.875

            # Nearest target-cell indices; wrap longitude overflow to 0 and
            # clamp the top latitude row into range.
            jj = int(round((lat-lat1)/dlat))
            ii = int(round((lon-lon1)/dlon))

            if(ii==nxo):
                ii = 0
            if(jj==nyo):
                jj = nyo -1
            # -999. marks missing source cells; skip them in the average.
            if(topo_range[i,j]!=-999.):
                topo_model[ii,jj] = topo_model[ii,jj] + topo_range[i,j]
                n[ii,jj] = n[ii,jj] + 1.

    # Sum -> mean where data exist; cells with no contributions are first
    # flagged -999. and then zeroed (so the returned field has no -999s).
    topo_model[n>0] = topo_model[n>0]/n[n>0]
    topo_model[n==0] = -999.
    topo_model[topo_model==-999] = 0

    # Transpose to [lat, lon] to match the cubes used by the caller.
    return np.transpose(topo_model)
예제 #10
0
def get_monthly_input(project_info, mn, time, lon, lat,
                      time_bnds_1, pr, sm, fileout,
                      samplefileout, model,
                      verbosity):

    """
    ;; Arguments
    ;;    mn: int
    ;;          month, values from 1 to 12
    ;;    time: iris cube coords
    ;;          time info of cube
    ;;    lon: array [lon]
    ;;          longitude
    ;;    lat: array [lat]
    ;;          latitude
    ;;    time_bnds_1: float
    ;;          first time_bnd of time series
    ;;    pr: iris cube [time, lat, lon]
    ;;          3-hourly precipitation time series
    ;;    sm: iris cube [time, lat, lon]
    ;;          3-hourly soil moisture time series
    ;;    fileout: dir
    ;;          output directory
    ;;    samplefileout: dir
    ;;          temporary outpout directory used by fortran routines
    ;;
    ;; Return
    ;;    prbef: array [year, day time steps (=8), lat, lon]
    ;;          3-hourly precipitation in the previous day
    ;;    smbef: array [year, day time steps (=8), lat, lon]
    ;;          3-hourly soil moisture in the previous day
    ;;    praf: array [year, day time steps (=8), lat, lon]
    ;;          3-hourly precipitation in the following day
    ;;    smaft: array [year, day time steps (=8), lat, lon]
    ;;          3-hourly soil moisture in the following day
    ;;    monthlypr: array [year, days in month * day time steps, lat, lon]
    ;;          3-hourly precipitation in month mn for the whole analysis period
    ;;    monthlysm: array [year, days in month * day time steps, lat, lon]
    ;;          3-hourly soil moisture in month mn for the whole analysis period
    ;;    days_per_year: int
    ;;          number of days per year 
    ;;
    ;; Description
    ;;    Prepare monthly input data for fortran routines
    ;;
    """

    import projects
    E = ESMValProject(project_info)
    # NOTE(review): this shadows the 'verbosity' parameter with the value
    # re-read from project_info; presumably they agree — confirm.
    verbosity = E.get_verbosity()

    #-------------------------
    # Read model info
    #-------------------------

    # NOTE(review): the 'model' parameter is overwritten here — only the
    # first model in the namelist is ever used for the metadata below.
    model = project_info['MODELS'][0]
    currProject = getattr(vars()['projects'], model.split_entries()[0])()

    model_info = model.split_entries()



    # --------------------------------------
    # Get grid and calendar info
    # --------------------------------------

    utimedate = time[0].units

    # Analysis period from the model line: entries 5 and 6 are the start
    # and end years.
    first_year = int(model_info[5])
    last_year = int(model_info[6])

    nyr = last_year - first_year + 1
    years = np.arange(nyr) + first_year

    months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    calendar = utimedate.calendar

    nx = len(lon)
    ny = len(lat)

    days_permonth_360 = [30 for i in range(0, 12)]
    days_permonth_noleap = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    days_permonth_leap = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]

    # nts[m] = number of 3-hourly time steps in month m (8 steps per day).
    if calendar == '360_day':
        days_per_year = 360
        days_permonth = days_permonth_360
        nts = [8 * dpm for dpm in days_permonth_360]
    elif any([calendar == '365_day', calendar == 'noleap']):
        days_per_year = 365
        days_permonth = days_permonth_noleap
        nts = [8 * dpm for dpm in days_permonth_noleap]
    elif any([calendar == 'gregorian', calendar == 'standard']):
        days_per_year = 365 
        days_permonth = days_permonth_noleap
        nts = [8 * dpm for dpm in days_permonth_noleap]
        # Leap days are considered by get_monthly_input()
    else:
        error('Missing calendar info')

    # --------------------------------------
    # Create pr, sm  before and after arrays
    # --------------------------------------

    # One day (8 x 3-hourly steps) before/after the target month, per year.
    prbef = np.zeros((nyr, 8, ny, nx), dtype='f4')
    praft = np.zeros((nyr, 8, ny, nx), dtype='f4')

    smbef = np.zeros((nyr, 8, ny, nx), dtype='f4')
    smaft = np.zeros((nyr, 8, ny, nx), dtype='f4')

    # Create the per-month working directories, tolerating pre-existing
    # ones (EEXIST) but re-raising any other OS error.
    try:
        os.mkdir(os.path.join(samplefileout, "5x5G_mon" + str(mn).zfill(2)), stat.S_IWUSR | stat.S_IRUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise exc
        pass

    try:
        os.mkdir(os.path.join(fileout, "event_output", "mon" + str(mn).zfill(2)), stat.S_IWUSR | stat.S_IRUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise exc
        pass

    nt = nts[mn - 1]

    monthlypr_list = [np.zeros((nt, ny, nx), dtype='f4') for y in years]
    monthlysm_list = [np.zeros((nt, ny, nx), dtype='f4') for y in years]

    for yr, year in enumerate(years):

        info('month, year: ' + str(mn) + ", " + str(year), verbosity, required_verbosity=1)

        # February on a real-world calendar: always take only days 1-28 so
        # the array shape matches the fixed noleap month length.
        if all([mn == 2, any([calendar == 'gregorian', calendar == 'standard'])]):

            monthlysm_list[yr][:,:,:] = (
                    sm.extract(iris.Constraint(month=months[mn-1],year=year, dom = range(1,29) )).data)

            monthlypr_list[yr][:,:,:] = (
                    pr.extract(iris.Constraint(month=months[mn-1],year=year, dom = range(1,29) )).data)

        else: 
            # NOTE(review): bare excepts used as control flow — the first
            # fallback assumes the month is short by exactly one time step
            # (pads step 0 with -999.); anything else is logged and skipped.
            try:
                monthlypr_list[yr][:,:,:] = (
                      pr.extract(iris.Constraint(month=months[mn-1],year=year)).data)  
            except:
                try:
                     monthlypr_list[yr][0,:,:] = -999.
                     monthlypr_list[yr][1::,:,:] = (
                          pr.extract(iris.Constraint(month=months[mn-1],year=year)).data)    
                except:                              
                     info('omitted pr: ' + str(mn) + ", " + str(year), verbosity, required_verbosity=1)

            try:
                monthlysm_list[yr][:,:,:] = (
                      sm.extract(iris.Constraint(month=months[mn-1],year=year)).data)  
            except:
                try:
                     monthlysm_list[yr][0,:,:] = -999.
                     monthlysm_list[yr][1::,:,:] = (
                          sm.extract(iris.Constraint(month=months[mn-1],year=year)).data)    
                except:                              
                     info('omitted sm: ' + str(mn) + ", " + str(year), verbosity, required_verbosity=1)


        # last day of previous month

        if all([mn == 1, year == first_year]):

            # No data before the first January of the record: fill with -999.
            prbef[yr,:,:,:] = np.zeros( (8, ny, nx) ) - 999.
            smbef[yr,:,:,:] = np.zeros( (8, ny, nx) ) - 999.

        elif (mn == 1):

            # Previous day of a January is 31 December of the year before.
            prbef[yr,:,:,:] = (
                pr.extract(iris.Constraint(year=year-1,month='Dec',dom=days_permonth[-1])).data)

            smbef[yr,:,:,:] = (
                sm.extract(iris.Constraint(year=year-1,month='Dec',dom=days_permonth[-1])).data)

        else:

            if any([calendar == '360_day', calendar == '365_day', calendar == 'noleap']):

                prbef[yr,:,:,:] = (
                     pr.extract(iris.Constraint(year=year,month=months[mn-2],dom=days_permonth[mn-2])).data)

                smbef[yr,:,:,:] = (
                     sm.extract(iris.Constraint(year=year,month=months[mn-2],dom=days_permonth[mn-2])).data)

            elif any([calendar == 'gregorian', calendar == 'standard']): 

                # Leap years: the previous month may end on day 29 (February).
                if cal.isleap(year):

                    prbef[yr,:,:,:] = (
                         pr.extract(iris.Constraint(year=year,month=months[mn-2],dom=days_permonth_leap[mn-2])).data)

                    smbef[yr,:,:,:] = (
                         sm.extract(iris.Constraint(year=year,month=months[mn-2],dom=days_permonth_leap[mn-2])).data)

                else:
                    prbef[yr,:,:,:] = (
                         pr.extract(iris.Constraint(year=year,month=months[mn-2],dom=days_permonth[mn-2])).data)

                    smbef[yr,:,:,:] = (
                         sm.extract(iris.Constraint(year=year,month=months[mn-2],dom=days_permonth[mn-2])).data)

        # first day of following month

        if (mn == 12 and year == last_year):

            # No data after the last December of the record: fill with -999.
            praft[yr,:,:,:] = np.zeros( (8, ny, nx) ) - 999.
            smaft[yr,:,:,:] = np.zeros( (8, ny, nx) ) - 999.

        elif (mn == 12):

            # Following day of a December is 1 January of the next year.
            praft[yr,:,:,:] = (
                pr.extract(iris.Constraint(year=year+1,month='Jan',dom=1)).data)

            smaft[yr,:,:,:] = (
                sm.extract(iris.Constraint(year=year+1,month='Jan',dom=1)).data) 

        else:

            if any([calendar == '360_day', calendar == '365_day', calendar == 'noleap']):

                praft[yr,:,:,:] = (
                    pr.extract(iris.Constraint(year=year,month=months[mn],dom=1)).data)

                smaft[yr,:,:,:] = (
                    sm.extract(iris.Constraint(year=year,month=months[mn],dom=1)).data)

            elif any([calendar == 'gregorian', calendar == 'standard']): 

                # After a leap-year February (processed as days 1-28 above)
                # the "next day" is 29 February, not 1 March.
                if all([cal.isleap(year),mn == 2]):

                    praft[yr,:,:,:] = (
                        pr.extract(iris.Constraint(year=year,month=months[1],dom=29)).data)

                    smaft[yr,:,:,:] = (
                        sm.extract(iris.Constraint(year=year,month=months[1],dom=29)).data)     
              
                else:
                    praft[yr,:,:,:] = (
                        pr.extract(iris.Constraint(year=year,month=months[mn],dom=1)).data)

                    smaft[yr,:,:,:] = (
                        sm.extract(iris.Constraint(year=year,month=months[mn],dom=1)).data)


    # Stack the per-year slabs into [year, step, lat, lon] arrays.
    monthlypr = np.vstack(tuple(monthlypr_list))
    monthlysm = np.vstack(tuple(monthlysm_list))
    monthlypr  = monthlypr.reshape(nyr, nt, ny, nx)
    monthlysm = monthlysm.reshape(nyr, nt, ny, nx)

    # time_bnds_1 == 0.0625 (= 1.5 h / 24 h) marks interval-centred 3-hourly
    # precipitation: shift pr one step earlier and mark the now-undefined
    # final step with -9999. — TODO confirm against the data convention.
    if time_bnds_1 == 0.0625:
        monthlypr[:, 0:-1, :, :]  = monthlypr[:, 1::, :, :]
        monthlypr[:, -1, :, :]  = -9999.
        prbef[:, 0:-1, :, :] = prbef[:, 1::, :, :]
        praft[:, 0:-1, :, :] = praft[:, 1::, :, :]
        prbef[:, -1, :, :] = -9999.
        praft[:, -1, :, :] = -9999.

    return prbef, smbef, praft, smaft, monthlypr, monthlysm, days_per_year
예제 #11
0
def write_nc(fileout, xs, ys, p_vals, project_info, model):

    """
    ;; Arguments
    ;;    fileout: dir
    ;;          directory to save output
    ;;    xs: array [lon]
    ;;          regridding coordinates
    ;;    ys: array [lat]
    ;;          regridding coordinates
    ;;    p_vals: list
    ;;          p_values of 5x5 deg grid-boxes
    ;;
    ;; Description
    ;;    Save netCDF file with diagnostic in a regular 5x5 deg grid
    ;;
    """
    import projects
    E = ESMValProject(project_info)
    verbosity = E.get_verbosity()

    # -------------------------
    # Read model info
    # -------------------------
    # Instantiate the project class named in the first model-line entry.
    # The instance itself is not used below; kept in case the constructor
    # has side effects -- TODO confirm it can be dropped.
    currProject = getattr(projects, model.split_entries()[0])()

    model_info = model.split_entries()

    # ------------------------------
    # Transform p_vals in 2D array
    # ------------------------------
    # Use floor division so the grid dimensions stay integers under
    # Python 3 (plain '/' yields floats, which np.zeros and
    # createDimension reject); result is unchanged under Python 2.
    nlon = 360 // 5
    nlat = 120 // 5
    p_val_2d = np.zeros((nlon, nlat), dtype='f4')
    p_val_2d[:, :] = -999.
    # p_vals is ordered with longitude (xs) as the outer loop and latitude
    # (ys) as the inner loop; xs/ys are 1-based grid-box indices.
    i = 0
    for x in xs:
        for y in ys:
            if p_vals[i] > -999.:
                p_val_2d[x - 1, y - 1] = p_vals[i] * 100  # fraction -> percent
            i = i + 1

    # ------------------------------
    # Write nc file
    # ------------------------------
    diag_name = 'sm-pr-diag-plot'
    netcdf_file = E.get_plot_output_filename(diag_name=diag_name,
                                             variable='pr-mrsos',
                                             model=model_info[1],
                                             specifier='Taylor2012-diagnostic')

    # Swap the graphic-format suffix (e.g. "png") for "nc".
    suffix = E.get_graphic_format() + "$"
    netcdf_file = re.sub(suffix, "nc", netcdf_file)
    root_grp = nc4.Dataset(os.path.join(fileout, netcdf_file), 'w', format='NETCDF4')

    root_grp.description = 'Diagnostic Taylor2012: Precipitation dependance on soil moisture'

    root_grp.fillvalue = -999.0

    root_grp.createDimension('lon', nlon)
    root_grp.createDimension('lat', nlat)

    lat = root_grp.createVariable('latitude', 'f4', ('lat',))
    lon = root_grp.createVariable('longitude', 'f4', ('lon',))
    pval = root_grp.createVariable('T12_diag', 'f4', ('lat', 'lon'))

    # Cell centres of the 5x5 deg grid: 60S-60N, 180W-180E.
    lat[:] = np.arange(-60 + 2.5, 60, 5)
    lon[:] = np.arange(-180 + 2.5, 180, 5)
    pval[:, :] = np.transpose(p_val_2d)

    root_grp.close()
예제 #12
0
def main(project_info):
    """Run the ESACCI SST diagnostic for every 'ts'/'tos' variable.

    Parameters
    ----------
    project_info : dict
        ESMValTool runtime dictionary provided by the launcher.
    """
    print(">>>>>>>> sst_ESACCI.py is running! <<<<<<<<<<<<")

# A_laue_ax+
    E = ESMValProject(project_info)

    verbosity = E.get_verbosity()
    diag_script = E.get_diag_script_name()

    res = E.write_references(diag_script,              # diag script name
                             ["A_muel_bn"],            # authors
                             [""],                     # contributors
                             [""],                     # diag_references
                             ["E_esacci-sst"],         # obs_references
                             ["P_cmug"],               # proj_references
                             project_info,
                             verbosity,
                             False)
# A_laue_ax-

    Diag = None
    # Pre-initialize so the overview check at the bottom of the loop cannot
    # raise a NameError when a variable does not match 'ts'/'tos'.
    model_filelist = {}

    for v in range(len(project_info['RUNTIME']['currDiag'].get_variables())):

        # read variable
        variable = project_info['RUNTIME']['currDiag'].get_variables()[v]

        # check if variable fits to diagnostics
        if variable == 'ts' or variable == 'tos':

            model_filelist = E.get_clim_model_filenames(variable=variable)

            # only models are read
            for inc in range(len(project_info['MODELS'])):

                model = project_info['MODELS'][inc]

                # only for non-reference models
                if not model.model_line.split()[1] == project_info['RUNTIME']['currDiag'].variables[v].ref_model:

                    model_filename = model_filelist[model.model_line.split()[1]]
                    reference_filename = model_filelist[project_info['RUNTIME']['currDiag'].variables[v].ref_model]

                    # copy old data to provide data that is needed again
                    D_old = copy(Diag)

                    # initialize diagnostic
                    Diag = SeaSurfaceTemperatureDiagnostic()
                    # provide project_info to diagnostic
                    Diag.set_info(project_info, model, variable, reference_filename, model_filename, project_info['RUNTIME']['currDiag'].diag_script_cfg)
                    # reuse region info from the previous diagnostic instance
                    if D_old is not None:
                        if "_regions" in D_old.__dict__.keys():
                            Diag._regions = D_old._regions
                    del D_old
                    # load the data
                    Diag.load_data()
                    # run the diagnostics defined by the import
                    Diag.run_diagnostic()
                    # write the results to the specific folder
                    Diag.write_data(project_info['GLOBAL']['write_plots'])

        # write a multi-model overview only when more than two datasets
        # (models + reference) were processed for this variable
        if len(model_filelist) > 2:
            Diag.write_overview(project_info['GLOBAL']['write_plots'])

    print(">>>>>>>> ENDED SUCESSFULLY!! <<<<<<<<<<<<")
    print('')
예제 #13
0
def main(project_info):
    """Run the ESACCI land-cover diagnostic for the supported fraction variables.

    Parameters
    ----------
    project_info : dict
        ESMValTool runtime dictionary provided by the launcher.
    """
    print('>>>>>>>> lc_ESACCI.py is running! <<<<<<<<<<<<')

    # A_laue_ax+
    E = ESMValProject(project_info)

    verbosity = E.get_verbosity()
    diag_script = E.get_diag_script_name()

    res = E.write_references(
        diag_script,  # diag script name
        ["A_muel_bn"],  # authors
        [""],  # contributors
        [""],  # diag_references
        ["E_esacci-landcover"],  # obs_references
        ["P_cmug"],  # proj_references
        project_info,
        verbosity,
        False)
    # A_laue_ax-

    #    f = open(project_info['RUNTIME']['currDiag'].diag_script_cfg)
    #    cfg = imp.load_source('cfg', '', f)
    #    f.close()

    Diag = None

    for v in range(len(project_info['RUNTIME']['currDiag'].get_variables())):

        # read variable
        variable = project_info['RUNTIME']['currDiag'].get_variables()[v]

        if variable in ["baresoilFrac", "grassNcropFrac",
                        "shrubNtreeFrac"]:  #TODO check in cfg

            # reuse the wrapper created above instead of building a new one
            model_filelist = E.get_clim_model_filenames(variable=variable)

            # only models are read
            for inc in range(len(project_info['MODELS'])):

                model = project_info['MODELS'][inc]

                # only for non-reference models
                if not model.model_line.split()[1] == project_info['RUNTIME'][
                        'currDiag'].variables[v].ref_model:

                    model_filename = model_filelist[model.model_line.split()
                                                    [1]]
                    reference_filename = model_filelist[project_info[
                        'RUNTIME']['currDiag'].variables[v].ref_model]

                    # copy old data to provide data that is needed again
                    D_old = copy(Diag)

                    # initialize diagnostic
                    Diag = LandCoverDiagnostic()

                    # provide project_info to diagnostic
                    Diag.set_info(
                        project_info, model, variable, reference_filename,
                        model_filename,
                        project_info['RUNTIME']['currDiag'].diag_script_cfg)
                    # reuse region info from the previous diagnostic instance
                    if D_old is not None:
                        if "_regions" in D_old.__dict__.keys():
                            Diag._regions = D_old._regions
                    del D_old
                    # load the data
                    Diag.load_data()
                    # run the diagnostics defined by the import
                    Diag.run_diagnostic()
                    # write the results to the specific folder
                    Diag.write_data(project_info['GLOBAL']['write_plots'])


#        if len(model_filelist)>2:
#            Diag.write_overview(project_info['GLOBAL']['write_plots'])

    print('>>>>>>>> ENDED SUCESSFULLY!! <<<<<<<<<<<<')
    print('')
예제 #14
0
def main(project_info):
    """
    main interface routine to ESMValTool

    Parameters
    ----------
    project_info : dict
        dictionary that contains all relevant informations
        it is provided by the ESMValTool launcher
    """

    # extract relevant information from project_info using a generic python
    # interface library
    # logging configuration
    verbosity = project_info['GLOBAL']['verbosity']
    if verbosity == 1:
        logger.setLevel(logging.WARNING)
    elif verbosity == 2:
        logger.setLevel(logging.INFO)
    else:
        logger.setLevel(logging.DEBUG)

    # create instance of a wrapper that allows easy access to data
    E = ESMValProject(project_info)

    # get information  for runoff/ET script
    # input file directory, returns a dict
    ifile_dict = E.get_raw_inputfile()

    # A-laue_ax+
    diag_script = E.get_diag_script_name()
    res = E.write_references(diag_script,              # diag script name
                             ["A_somm_ph", "A_hage_st"],      # authors
                             ["A_loew_al"],            # contributors
                             ["D_hagemann13jadvmodelearthsyst"],  # diag_references
                             ["E_duemenil00mpi", "E_weedon14waterresourres"], # obs_references
                             ["P_embrace"],            # proj_references
                             project_info,
                             verbosity,
                             False)
    # A-laue_ax-

    # set up of input files dictionary, rewrite ifile_dict to match for
    # catchment_analysis_tool_val
    # ifiles={<<<MODEL>>>:
    #             {<<<VAR>>>:
    #                 {'file':<file>[, 'unit':<unit>, 'cdo':<cdo>,
    #                                'vname':<vname>, 'reffile':<file>,
    #                                'refvname':<vname>]}}}
    # dictionary containing the model names as keys and a second dictionary as
    # value.
    # This (second) dictionaries contain the variable names (eihter 'runoff' or
    # 'ET') as key and a third dictionary as value.
    # This (third) dictionaries contain the key
    #   - 'file'     for the datafile for the climatological mean
    #   and optionally
    #   - 'unit'     for the unit of the data. If 'unit' is not set, it will be
    #                taken from the nc-file multiplied by 'm^2'
    #   - 'cdo'      for additional cdo-commands (e.g. multiplication for
    #                changing
    #                the units)
    #   - 'vname'    for the name of the variable in the datafile (if not set,
    #                the variable name as used for the key will be used).
    #   - 'reffile'  for the reference file (if not set, defaultreffile will be
    #                used and <<<VAR>>> will be replaced by the variable name
    #                as given in the key
    #   - 'refvname' for the name of the variable in the reference file (if not
    #                set, the variable name as used for the key will be used).
    ifiles = {}
    for model, vlst in ifile_dict.items():
        ifiles[str(model)] = {}
        for var in map(str, vlst.keys()):
            if var == 'evspsbl':
                myvar = 'ET'
                # units are are kg m-2 s-1 and therefore must be multiplied
                # by the amount of seconds in one year
                ifiles[str(model)][myvar] = {'unit': 'mm/a', 'vname': var,
                                             'cdo': '-mulc,86400 -muldpy '}
            elif var == 'mrro':
                myvar = 'runoff'
                # units are are kg m-2 s-1 and therefore must be multiplied
                # by the amount of seconds in one year
                ifiles[str(model)][myvar] = {'unit': 'mm/a', 'vname': var,
                                             'cdo': '-mulc,86400 -muldpy '}
            elif var == 'pr':
                myvar = 'precip'
                # units are are kg m-2 s-1 and therefore must be multiplied
                # by the amount of seconds in one year
                ifiles[str(model)][myvar] = {'unit': 'mm/a', 'vname': var,
                                             'cdo': '-mulc,86400 -muldpy '}
            else:
                # only ET, mrro and precip are supported trough reference
                # values. Therefore raise error
                raise ValueError(
                    "Only mrro (runoff), pr (precipitation) and evspsbl "
                    "(evapotranspration) are supported!")
            # try to get reformated data and if this does not work (because no
            # CMIP5 project, use raw input data
            try:
                ifiles[str(model)][myvar]['file'] = map(str, glob.glob(
                    E.get_clim_model_filenames(var)[model]))
            except ValueError:
                indir = str(ifile_dict[model][var]['directory'])
                if indir[-1] != os.sep:
                    indir += os.sep
                infile = str(ifile_dict[model][var]['files'])

                # store as a one-element list for consistency with the
                # glob-based branch above -- downstream code iterates over
                # the 'file' entry
                ifiles[str(model)][myvar]['file'] = [indir + infile]

            # A-laue_ax+
            for fname in ifiles[str(model)][myvar]['file']:
                E.add_to_filelist(fname)
            # A-laue_ax-

    POUT = str(os.path.join(E.get_plot_dir(), E.get_diag_script_name()))
    POUT = POUT + os.sep
    if not os.path.exists(POUT):
        os.makedirs(POUT)

    # catchment_dir = data_directory + 'cat/'
    catchment_dir = os.path.dirname(os.path.abspath(inspect.getfile(
        inspect.currentframe()))) + '/'
    # set path to nc-file containing the catchment definition
    pcatchment = os.path.join(catchment_dir, "aux/catchment_analysis", "big_catchments.nc")

    # A-laue_ax+
    E.add_to_filelist(pcatchment)
    # A-laue_ax-

    # ---- File input declaration ----
    # input file (needs to contain grid informations). For the
    # analysis the data will be regridded to pcatchments. Data is expected to
    # be in mm/s (if not, see 'cdo' in ifiles dictionary below)

    # ---- switches ----
    # CALCULATION: switch to calculate climatological means (True) or use
    # already existing files
    CALCULATION = True
    # KEEPTIMMEAN: switch to keep files created during calculation. if true,
    # files like 'tmp_<<<MODEL>>>_<<<var>>>_<<<CATCHMENT>>>.<<<fmt>>>' will be
    # stored in the output directory (POUT) containing the timmean data for
    # the catchments (note that the file format <<<fmt>>> is defined by the
    # input file)
    KEEPTIMMEAN = False
    # PLOT: switch to set plotting. If true, one plot for each model named
    # POUT+<<<MODEL>>>_bias-plot.pdf containg all variables will be generated
    PLOT = True
    # SEPPLOTS: Integer to control diagram style. If 2: relative and absolute
    # variables will be plotted in separate files named
    # POUT+<<<MODEL>>>_rel-bias-plot.pdf and
    # POUT+<<<MODEL>>>_abs-bias-plot.pdf.
    # If 3: they will be plotted into one # figure but separated axes in file
    # POUT+<<<MODEL>>>_sep-bias-plot.pdf, if 5: # they will be plotted into one
    # single axes within file POUT+<<<MODEL>>>_bias-plot.pdf. Multiplication
    # (e.g. 6, 15 or 30) is also possible to make more than one option at the
    # same time
    SEPPLOTS = 3
    # ETREFCALCULATION: If True, create reference file
    # ref_ET_catchments.txt from timmean of precfile and runoff data from
    # ref_runoff_catchments.txt.  If None, use default reference values, save
    # them to defaultreffile and delete it afterwards. If False, use existing
    # file defined by defaultreffile or reffile defined in ifiles (see above).
    ETREFCALCULATION = None

    # formatoptions for plot (ax2 modifies diagrams showing relative values)
    # (for keywords see function xyplotformatter in
    # catchment_analysis_tool_val)
    # be aware that the additional setting of a keyword for the plot of the
    # absolute values may also influence the plot of relative value. To prevent
    # from that, define the option for 'ax2' manually.
    # ---
    # set minimal and maximal bounds of y-axis (Syntax: [ymin, ymax]). 'minmax'
    # will cause y-axis limits with minimum and maximum value and makes the
    # plot symmetric around 0 in case of SEPPLOTS%5 == 0. To let the plotting
    # routine (i.e. pyplot) choose the limits, set ylim to None
    # ---
    # Set yticks: integer or list (Default (i.e. without manipulation by the
    # plotting routine of catchment_analysis_tool_val): None). Defines the
    # y-ticks. If integer i, every i-th tick of the automatically

    # other formatoptions as provided by function
    # catchment_analysis_tool_val.xyplotformatter may be included in the
    # dictionary below

    # list(...) keeps the subscript working under both Python 2 and 3
    # (dict.values() is a view, not a list, on Python 3)
    fmt = {
        myvar: {
            'ylim': 'minmax', 'yticks': None,
            'ax2': {'ylim': 'minmax', 'yticks': None}
               }
        for myvar in list(ifiles.values())[0].keys()
          }

    # start computation
    analysecatchments(
        project_info,
        POUT=POUT,
        pcatchment=pcatchment,
        ifiles=ifiles,
        CALCULATION=CALCULATION,
        KEEPTIMMEAN=KEEPTIMMEAN,
        PLOT=PLOT,
        SEPPLOTS=SEPPLOTS,
        ETREFCALCULATION=ETREFCALCULATION,
        fmt=fmt,
        # precfile=precfile,
        # defaultreffile=defaultreffile
        )