def merge_jsons(mip, exp, case_id, pmprdir):
    json_file_dir_template = os.path.join(
        pmprdir,
        '%(output_type)', 'mjo',
        '%(mip)', '%(exp)', '%(case_id)')
    json_file_dir_template = StringConstructor(json_file_dir_template)
    json_file_dir = json_file_dir_template(
        output_type='metrics_results', mip=mip, exp=exp, case_id=case_id)
    print('json_file_dir:', json_file_dir)

    json_file_template = 'mjo_stat_%(mip)_%(exp)_da_atm_%(model)_%(realization)_1985-2004'
    json_file_template = StringConstructor(json_file_template)

    # Search for individual JSONs
    json_files = sorted(glob.glob(
        os.path.join(
            json_file_dir,
            json_file_template(
                mip=mip, exp=exp, case_id=case_id, model='*', realization='*')+'.json')))

    # Remove diveDown JSONs and previously generated merged JSONs if included
    json_files_revised = copy.copy(json_files)
    for json_file in json_files:
        filename_component = json_file.split('/')[-1].split('.')[0].split('_')
        if 'diveDown' in filename_component:
            json_files_revised.remove(json_file)
        elif 'allModels' in filename_component:
            json_files_revised.remove(json_file)
        elif 'allRuns' in filename_component:
            json_files_revised.remove(json_file)

    # Load individual JSONs and merge into one big dictionary
    dict_final = {}
    for j, json_file in enumerate(json_files_revised):
        print(j, json_file)
        with open(json_file) as f:
            dict_tmp = json.load(f)
        if j == 0:
            dict_final = dict_tmp.copy()
        else:
            dict_merge(dict_final, dict_tmp)

    # Dump final dictionary to JSON
    final_json_filename = json_file_template(
        mip=mip, exp=exp, case_id=case_id,
        model='allModels', realization='allRuns')+'.json'
    final_json_file = os.path.join(json_file_dir, final_json_filename)
    print('final_json_filename:', final_json_filename)

    with open(final_json_file, 'w') as fp:
        json.dump(dict_final, fp, sort_keys=True, indent=4)

    print("Done: check ", final_json_file)
def read_json_and_merge_axes(json_dir,
                             json_file,
                             statistics,
                             modes,
                             minimizeText=False):
    model_run_list = []
    model_run_list_label = []
    mode_season_list = []
    a = []
    for mode in modes:
        # open json
        input_file = StringConstructor(json_file)(mode=mode)
        with open(os.path.join(json_dir, input_file)) as f:
            d = json.load(f)
        # Get potential x-axis first
        if mode == modes[0]:
            models_list = sorted(list(d["RESULTS"].keys()))
            for model in models_list:
                #if model in ['GFDL-CM3', 'GFDL-CM4']:
                if model in ['E3SM']:
                    runs_list = sort_human(list(d["RESULTS"][model].keys()))
                    for run in runs_list:
                        model_run_list.append(model + '_' + run)
                        if run == runs_list[0]:
                            model_run_list_label.append(model + '_' + run)
                        else:
                            model_run_list_label.append(run)
            print(model_run_list)
        # season depending on mode
        if mode == 'PDO':
            seasons = ['monthly']
        else:
            seasons = ['DJF', 'MAM', 'JJA', 'SON']
        # season loop
        for season in seasons:
            mode_season_list.append(mode + '_' + season)
            for model_run in model_run_list:
                model = model_run.split('_')[0]
                run = model_run.split('_')[-1]
                try:
                    tmp = d["RESULTS"][model][run]["defaultReference"][mode][
                        season][statistics]
                    if statistics == 'std_pseudo_pcs':
                        tmp = tmp / d["REF"]["obs"]["defaultReference"][mode][
                            season]["pc1_stdv"]
                except Exception:
                    tmp = np.nan
                a.append(tmp)
    # convert to array and decorate axes
    a = np.array(a).reshape(len(mode_season_list), len(model_run_list))
    if minimizeText:
        X = cdms2.createAxis(model_run_list_label)
    else:
        X = cdms2.createAxis(model_run_list)
    Y = cdms2.createAxis(mode_season_list)
    a = MV2.array(a, axes=(Y, X), id=statistics)
    return a
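
# A hedged usage sketch for read_json_and_merge_axes; the directory, file
# name pattern, and statistic below are hypothetical, not from the original:
# arr = read_json_and_merge_axes(
#     json_dir='/path/to/metrics_results',
#     json_file='var_mode_%(mode)_EOF1_stat_cmip6_historical_mo_atm_allModels_allRuns_1900-2005.json',
#     statistics='rms',
#     modes=['NAM', 'SAM', 'PDO'])
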
def merge_json(mode, eof, mip, exp, case_id, obs, syear, eyear, pmprdir):
    json_file_dir_template = 'metrics_results/variability_modes/%(mip)/%(exp)/%(case_id)/%(mode)/%(obs)'
    json_file_dir_template = StringConstructor(json_file_dir_template)
    json_file_dir = os.path.join(
        pmprdir,
        json_file_dir_template(mip=mip, exp=exp, case_id=case_id, mode=mode, obs=obs))

    json_file_template = 'var_mode_%(mode)_%(eof)_stat_%(mip)_%(exp)_mo_atm_%(model)_%(run)_%(syear)-%(eyear).json'
    json_file_template = StringConstructor(json_file_template)

    # Search for individual JSONs
    json_files = sorted(glob.glob(
        os.path.join(
            json_file_dir,
            json_file_template(mode=mode, eof=eof, mip=mip, exp=exp, model='*', run='*', syear='*', eyear='*'))))

    # Remove diveDown JSONs and previously generated merged JSONs if included
    json_files_revised = copy.copy(json_files)
    for json_file in json_files:
        filename_component = json_file.split('/')[-1].split('.')[0].split('_')
        if 'diveDown' in filename_component:
            json_files_revised.remove(json_file)
        elif 'allModels' in filename_component:
            json_files_revised.remove(json_file)
        elif 'allRuns' in filename_component:
            json_files_revised.remove(json_file)

    # Load individual JSONs and merge into one big dictionary
    dict_final = {}
    for j, json_file in enumerate(json_files_revised):
        print(j, json_file)
        with open(json_file) as f:
            dict_tmp = json.load(f)
        if j == 0:
            dict_final = dict_tmp.copy()
        else:
            dict_merge(dict_final, dict_tmp)

    # Dump final dictionary to JSON
    final_json_filename = json_file_template(
        mode=mode, eof=eof, mip=mip, exp=exp, model='allModels', run='allRuns', syear=str(syear), eyear=str(eyear))
    final_json_file = os.path.join(json_file_dir, final_json_filename)

    with open(final_json_file, 'w') as fp:
        json.dump(dict_final, fp, sort_keys=True, indent=4)
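
# Hedged usage sketch for merge_json; all argument values below are
# illustrative assumptions only:
# merge_json(mode='NAM', eof='EOF1', mip='cmip6', exp='historical',
#            case_id='v20230101', obs='NOAA-CIRES_20CR', syear=1900,
#            eyear=2005, pmprdir='/path/to/pmp_results')
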
print('mode:', mode)

# Variables
var = param.varModel

# Check dependency for given season option
seasons = param.seasons
print('seasons:', seasons)

# Observation information
obs_name = param.reference_data_name
obs_path = param.reference_data_path
obs_var = param.varOBS

# Path to model data as string template
modpath = StringConstructor(param.modpath)
if LandMask:
    modpath_lf = StringConstructor(param.modpath_lf)

# Check given model option
models = param.modnames

# Include all models if conditioned
if ('all' in [m.lower() for m in models]) or (models == 'all'):
    models = ([
        p.split('/')[-1].split('.')[1] for p in glob.glob(
            modpath(mip=mip, exp=exp, model='*', realization='*',
                    variable=var))
    ])
    # remove duplicates
    models = sorted(list(dict.fromkeys(models)), key=lambda s: s.lower())
Example #5
               required=False)

args = P.get_parameter()

infile_template = args.infile
outfile_template = args.outfile
outpath_template = args.outpath
outfilename_template = args.outfilename
varlist = args.vars
start = args.start
end = args.end

print('start and end are ', start, ' ', end)
print('variable list: ', varlist)

InFile = StringConstructor(infile_template)
OutFile = StringConstructor(outfile_template)
OutFileName = StringConstructor(outfilename_template)
OutPath = StringConstructor(outpath_template)

for var in varlist:
    # Build filenames
    InFile.variable = var
    OutFile.variable = var
    OutFileName.variable = var
    OutPath.variable = var
    infile = InFile()
    outfile = OutFile()
    outfilename = OutFileName()
    outpath = OutPath()
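
# A minimal sketch of the two StringConstructor fill styles used throughout
# this listing, with a made-up template (assumes genutil is installed):
# from genutil import StringConstructor
# tmpl = StringConstructor('%(mip)/%(exp)/%(variable).nc')
# print(tmpl(mip='cmip6', exp='historical', variable='pr'))  # keyword style
# tmpl.variable = 'tas'                                      # attribute style
# print(tmpl(mip='cmip6', exp='historical'))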
Example #6
    def __get(self):
        nfree = 0
        names = []
        for p in self.parameters_list:
            if p not in self.dummies and p not in self.auto_dummies:
                v = getattr(self, p)
                if v is None or (isinstance(v, (list, tuple)) and len(v) > 1):
                    already = 0
                    for pn in names:
                        if p == pn:
                            already = 1
                        elif isinstance(pn, list):
                            if p in pn:
                                already = 1
                    if already == 0:
                        nfree += 1
                        added = 0
                        for g in self.grouped:
                            if p in g:
                                names.append(g)
                                added = 1
                        if added == 0:
                            names.append(p)

        if nfree != 2:
            raise RuntimeError(
                'Error: MUST end up with 2 multiple values! (we have ' +
                str(nfree) + ': ' + str(names) + ')')
        # Now determines length of each axis
        axes_length = [1, 1]
        # First make sure we have 2 lists of parameters
        for i in range(2):
            if not isinstance(names[i], list):
                names[i] = [names[i]]
            for n in names[i]:
                v = getattr(self, n)
                if v is None:
                    if n == 'component':
                        axes_length[i] *= 28
                    elif n == 'time_domain':
                        axes_length[i] *= 19
                    else:
                        raise RuntimeError(
                            'Error: ' + n + ' is not defined correctly; '
                            'please specify which values you wish to extract')
                else:
                    axes_length[i] *= len(v)
        # Creates the dummy array
        output = MV2.ones((axes_length[0], axes_length[1]))
        # Now mask everywhere
        output = MV2.masked_equal(output, 1)
        # Indices for filling
        i = 0
        j = 0
        # First create the filler object and set all the fixed values
        F = StringConstructor(self.files_structure)
        # Ok let's fill it
        for p in self.parameters_list:
            if p not in self.dummies and p not in self.auto_dummies:
                v = getattr(self, p)
                if isinstance(v, (list, tuple)):
                    if len(v) == 1:
                        v = v[0]
                        if p in self.slaves:
                            vslvs = v[1:]
                            v = v[0]
                        setattr(F, p, v)
                        if p in self.slaves:
                            slvs = self.slaves[p]
                            for js in range(len(slvs)):
                                s = slvs[js]
                                setattr(F, s, vslvs[js])
                    else:
                        setattr(F, p, '*')
                else:
                    if p in self.slaves:
                        vslvs = v[1:]
                        v = v[0]
                    setattr(F, p, v)
                    if p in self.slaves:
                        slvs = self.slaves[p]
                        for js in range(len(slvs)):
                            s = slvs[js]
                            setattr(F, s, vslvs[js])
            else:
                setattr(F, p, '*')

        nms = names[0] + names[1]

        t1, t2, t3 = self.string_construct(nms)
        output = output.ravel()
        sp1 = t1.split()
        n = len(sp1)
        for i in range(len(t2)):
            sp2 = t2[i].split()
            for j in range(n):
                v = sp2[j]
                if sp1[j] == 'time_domain':
                    try:
                        v = int(v)
                    except ValueError:
                        pass
                if v == 'NONE':
                    v = ''
                setattr(F, sp1[j], v)
            if self.verbose:
                print('command line:', F())
            f = glob.glob(F())
            files = []
            for file in f:
                files.append(file)
                for e in self.exclude:
                    if file.find(e) > -1:
                        files.pop(-1)
                        break
            if self.verbose:
                print('files:', files)
            try:
                # now we get the one value needed in this file
                f = cdms2.open(files[0])
                V = f[F.statistic]
                component = F.component
                time_domain = F.time_domain
                if isinstance(component, str):
                    dic = eval(f.components)
                    for k in dic.keys():
                        if dic[k] == F.component:
                            component = k
                if isinstance(F.time_domain, str):
                    dic = eval(f.time_domain)
                    for k in dic.keys():
                        if dic[k] == F.time_domain:
                            time_domain = k
                value = V(time_domain=time_domain,
                          component=component,
                          squeeze=1)
                output[i] = value
                f.close()
            except Exception:
                # In case something goes wrong (like model not processed or
                # nonexistent for this variable, etc.)
                pass
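
# A minimal commented sketch of the mask-then-fill idiom used in __get above:
# start from an all-masked MV2 array and let each successful read unmask one
# cell, so anything never filled stays missing (assumed intent):
# grid = MV2.masked_equal(MV2.ones((2, 3)), 1)  # everything masked
# flat = grid.ravel()
# flat[0] = 0.5  # assigning a value unmasks that cell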
Example #7
    realization = '*'
print('realization: ', realization)

# Metrics Collection
mc_name = param.metricsCollection

# case id
case_id = param.case_id
print('case_id:', case_id)

# Output
outdir_template = param.process_templated_argument("results_dir")
outdir = StringConstructor(
    str(
        outdir_template(output_type='%(output_type)',
                        mip=mip,
                        exp=exp,
                        metricsCollection=mc_name,
                        case_id=case_id)))

# Debug
debug = param.debug
print('debug:', debug)

# =================================================
# Create output directories
# -------------------------------------------------
for output_type in ['graphics', 'diagnostic_results', 'metrics_results']:
    if not os.path.exists(outdir(output_type=output_type)):
        os.makedirs(outdir(output_type=output_type))
    print(outdir(output_type=output_type))
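
# Note the deferred-substitution trick above: passing the literal token
# '%(output_type)' through the outer template and re-wrapping the result in
# StringConstructor leaves that single key unfilled until the loop runs.
# A minimal sketch with a made-up template:
# tmpl = StringConstructor('%(root)/%(output_type)/%(case_id)')
# partial = StringConstructor(str(tmpl(root='/results',
#                                      output_type='%(output_type)',
#                                      case_id='v1')))
# print(partial(output_type='graphics'))  # -> /results/graphics/v1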
Example #8
def merge_jsons(mip, exp, case_id, pmprdir):
    json_file_dir_template = os.path.join(pmprdir, "%(output_type)", "mjo",
                                          "%(mip)", "%(exp)", "%(case_id)")
    json_file_dir_template = StringConstructor(json_file_dir_template)
    json_file_dir = json_file_dir_template(output_type="metrics_results",
                                           mip=mip,
                                           exp=exp,
                                           case_id=case_id)
    print("json_file_dir:", json_file_dir)

    json_file_template = (
        "mjo_stat_%(mip)_%(exp)_da_atm_%(model)_%(realization)_1985-2004")
    json_file_template = StringConstructor(json_file_template)

    # Search for individual JSONs
    json_files = sorted(
        glob.glob(
            os.path.join(
                json_file_dir,
                json_file_template(mip=mip,
                                   exp=exp,
                                   case_id=case_id,
                                   model="*",
                                   realization="*") + ".json",
            )))

    # Remove diveDown JSONs and previously generated merged JSONs if included
    json_files_revised = copy.copy(json_files)
    for json_file in json_files:
        filename_component = json_file.split("/")[-1].split(".")[0].split("_")
        if "diveDown" in filename_component:
            json_files_revised.remove(json_file)
        elif "allModels" in filename_component:
            json_files_revised.remove(json_file)
        elif "allRuns" in filename_component:
            json_files_revised.remove(json_file)

    # Load individual JSONs and merge into one big dictionary
    dict_final = {}
    for j, json_file in enumerate(json_files_revised):
        print(j, json_file)
        with open(json_file) as f:
            dict_tmp = json.load(f)
        if j == 0:
            dict_final = dict_tmp.copy()
        else:
            dict_merge(dict_final, dict_tmp)

    # Dump final dictionary to JSON
    final_json_filename = (json_file_template(mip=mip,
                                              exp=exp,
                                              case_id=case_id,
                                              model="allModels",
                                              realization="allRuns") + ".json")
    final_json_file = os.path.join(json_file_dir, final_json_filename)
    print("final_json_filename:", final_json_filename)

    with open(final_json_file, "w") as fp:
        json.dump(dict_final, fp, sort_keys=True, indent=4)

    print("Done: check ", final_json_file)
Example #9
    description='Runs PCMDI Modes of MJO Computations',
    formatter_class=RawTextHelpFormatter)
P = AddParserArgument(P)
param = P.get_parameter()

# Pre-defined options
mip = param.mip
exp = param.exp
print('mip:', mip)
print('exp:', exp)

# Variables
var = param.varModel

# Path to model data as string template
modpath = StringConstructor(param.modpath)

# Check given model option
models = param.modnames

# Include all models if conditioned
if ('all' in [m.lower() for m in models]) or (models == 'all'):
    model_index_path = param.modpath.split('/')[-1].split('.').index(
        "%(model)")
    models = ([
        p.split('/')[-1].split('.')[model_index_path] for p in glob.glob(
            modpath(mip=mip,
                    exp=exp,
                    realm='atmos',
                    model='*',
                    realization='*',
                    variable=var))
    ])
    # remove duplicates
    models = sorted(list(dict.fromkeys(models)), key=lambda s: s.lower())

print('models:', models)
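
# A minimal sketch of the model-name extraction above, using a hypothetical
# template and file name (not from the original):
# template = 'cmip6.%(model).%(realization).pr.nc'
# idx = template.split('.').index('%(model)')   # position of the model field
# fname = 'cmip6.E3SM.r1i1p1f1.pr.nc'
# print(fname.split('.')[idx])                  # -> E3SM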

# Realizations
realization = param.realization
print('realization: ', realization)

# case id
case_id = param.case_id

# Output
outdir_template = param.process_templated_argument("results_dir")
outdir = StringConstructor(str(outdir_template(
    output_type='%(output_type)',
    mip=mip, exp=exp, case_id=case_id)))

# Create output directory
for output_type in ['graphics', 'diagnostic_results', 'metrics_results']:
    if not os.path.exists(outdir(output_type=output_type)):
        os.makedirs(outdir(output_type=output_type))
    print(outdir(output_type=output_type))

# Debug
debug = param.debug
print('debug: ', debug)

# Year
#  model
msyear = param.msyear
Example #11
fac = param.fac
nperseg = param.nperseg
noverlap = param.noverlap
print(modpath)
print(mod)
print(prd)
print(nperseg, noverlap)

# Get flag for CMEC output
cmec = param.cmec

# Create output directory
case_id = param.case_id
outdir_template = param.process_templated_argument("results_dir")
outdir = StringConstructor(
    str(outdir_template(output_type="%(output_type)", mip=mip,
                        case_id=case_id)))
for output_type in ["graphics", "diagnostic_results", "metrics_results"]:
    if not os.path.exists(outdir(output_type=output_type)):
        try:
            os.makedirs(outdir(output_type=output_type))
        except FileExistsError:
            pass
    print(outdir(output_type=output_type))
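
# Note: on Python 3.2+ the try/except FileExistsError guard above can be
# replaced with a single, behavior-equivalent call:
# os.makedirs(outdir(output_type=output_type), exist_ok=True)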

# Check data in advance
file_list = sorted(glob.glob(os.path.join(modpath, "*" + mod + "*")))
data = []
for file in file_list:
    if mip == "obs":
        model = file.split("/")[-1].split(".")[2]
Example #12
def read_json_and_merge_axes(json_dir, json_file, statistics, modes, AverageRuns=False):
    runs_list_dic = {}
    model_run_list = []
    mode_season_list = []
    a = []
    for mode in modes:
        # open json
        input_file = StringConstructor(json_file)(mode=mode)
        with open(os.path.join(json_dir, input_file)) as f:
            d = json.load(f)
        # Get potential x-axis first
        if mode == modes[0]:
            models_list = sorted(list(d["RESULTS"].keys()))
            for model in models_list:
                runs_list = sort_human(list(d["RESULTS"][model].keys()))
                runs_list_dic[model] = runs_list
                for run in runs_list:
                    model_run_list.append(model+'_'+run)
            print(model_run_list)
        # season depending on mode
        if mode == 'PDO':
            seasons = ['monthly']
        else:
            seasons = ['DJF', 'MAM', 'JJA', 'SON']
        # season loop
        for season in seasons:
            mode_season_list.append(mode+'_'+season)
            if AverageRuns:
                for model in models_list:
                    b = []
                    for run in runs_list_dic[model]:
                        try:
                            tmp = d["RESULTS"][model][run]["defaultReference"][mode][season][statistics]
                            if statistics == 'std_pseudo_pcs':
                                tmp = tmp / d["REF"]["obs"]["defaultReference"][mode][season]["pc1_stdv"]
                            b.append(tmp)
                        except Exception:
                            pass  # skip runs that lack this statistic
                    tmp2 = float(np.mean(np.array(b)))
                    a.append(tmp2)
            else:
                for model_run in model_run_list:
                    model = model_run.split('_')[0]
                    run = model_run.split('_')[-1]
                    try:
                        tmp = d["RESULTS"][model][run]["defaultReference"][mode][season][statistics]
                        if statistics == 'std_pseudo_pcs':
                            tmp = tmp / d["REF"]["obs"]["defaultReference"][mode][season]["pc1_stdv"]
                    except Exception:
                        tmp = np.nan
                    a.append(tmp)
    # convert to array and decorate axes
    if AverageRuns:
        # Label each model with its number of runs, e.g. 'E3SM  (3)'
        xaxis_label = [model+'  ('+str(len(runs_list_dic[model]))+')' for model in models_list]
    else:
        xaxis_label = model_run_list
    print('xaxis_label:', xaxis_label, len(xaxis_label))
    a = np.array(a).reshape(len(mode_season_list), len(xaxis_label))
    X = cdms2.createAxis(xaxis_label)
    Y = cdms2.createAxis(mode_season_list)
    a = MV2.array(a, axes=(Y, X), id=statistics)
    return a
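
# Hedged usage sketch; the file name pattern below is a hypothetical example:
# arr = read_json_and_merge_axes(
#     json_dir='/path/to/metrics_results',
#     json_file='var_mode_%(mode)_EOF1_stat_cmip6_historical_allModels_allRuns.json',
#     statistics='rms', modes=['NAM', 'SAM', 'PDO'], AverageRuns=True)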