def __init__(self, files, parent, mapping=None, datasetids=None, fileids=None):
    self._files = files
    if not isinstance(parent, esgfDataset):
        raise esgfFilesException("parent must be an esgfDataset instance")
    self.parent = parent
    self.EsgfObjectException = esgfFilesException
    if datasetids is None:
        datasetids = parent.datasetids
    if isinstance(datasetids, genutil.StringConstructor):
        self.datasetids = datasetids
    elif isinstance(datasetids, str):
        self.datasetids = genutil.StringConstructor(datasetids)
    else:
        self.datasetids = None
    if fileids is not None:
        if isinstance(fileids, genutil.StringConstructor):
            self.fileids = fileids
        else:
            self.fileids = genutil.StringConstructor(fileids)
        if self.datasetids is not None:
            self.fileids.template = self.fileids.template.replace(
                "%(datasetid)", self.datasetids.template)
    elif self.datasetids is not None:
        self.fileids = genutil.StringConstructor(
            "%s.%%(filename)" % self.datasetids.template)
    else:
        self.fileids = parent.fileids
    if mapping is None:
        mapping = parent.mapping
    self.setMapping(mapping)
    self.remap()
    self.projects_dict = {
        "CMIP5": "%(project).%(product).%(institute).%(model).%(experiment)."
                 "%(time_frequency).%(realm).%(cmor_table).%(ensemble)"
    }
def __init__(self, host, port=80, timeout=15, limit=None, offset=0,
             mapping=None, datasetids=None, fileids=None, restPath=None):
    self.autoApiInfo = AutoAPI.Info(self)
    self.port = port
    url = str(host).replace("://", "^^^---^^^")
    sp = url.split("/")
    host = sp[0].replace("^^^---^^^", "://")
    if restPath is None:
        restPath = "/".join(sp[1:])
        if len(restPath) == 0:
            self.restPath = "/esg-search/search"
        else:
            self.restPath = restPath
    else:
        self.restPath = restPath
    self.host = host
    # self.host = "esg-datanode.jpl.nasa.gov"
    self.defaultSearchType = "Dataset"
    self.EsgfObjectException = esgfConnectionException
    self.validSearchTypes = validSearchTypes
    self.validSearchTypes = ["Dataset", ]
    all = self._search("facets=*", searchType=None)
    # Now figure out the facet fields
    self.serverOrder = []
    for e in all:
        if e.tag == "lst" and "name" in e.keys() and e.get("name") == "responseHeader":
            # ok found the Header
            for s in e:
                if s.get("name") == "params":
                    params = s
                    break
    self.params = {"text": None, "limit": limit, "offset": offset}
    self.searchableKeys = set(["text", "limit", "offset"])
    for p in params:
        if p.get("name") == "facet.field":
            for f in p:
                self.serverOrder.append(f.text)
                self.params[f.text] = None
                self.searchableKeys.add(f.text)
    self.keys = self.params.keys
    self.items = self.params.items
    self.values = self.params.values
    if datasetids is not None:
        self.datasetids = genutil.StringConstructor(datasetids)
    else:
        self.datasetids = None
    if fileids is not None:
        self.fileids = genutil.StringConstructor(fileids)
        if datasetids is not None:
            self.fileids.template = self.fileids.template.replace(
                "%(datasetid)", self.datasetids.template)
    elif self.datasetids is not None:
        self.fileids = genutil.StringConstructor(
            "%s.%%(filename)" % self.datasetids.template)
    else:
        self.fileids = None
    # self.setMapping(mapping)
    self.mapping = mapping
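# Example of the host/restPath split performed above (URLs invented for
# illustration): with host="http://esgf-node.llnl.gov/my-search" the "://" is
# shielded before splitting, sp[0] restores "http://esgf-node.llnl.gov" and
# restPath becomes "my-search". A bare host ("esgf-node.llnl.gov") yields an
# empty restPath, so the default "/esg-search/search" is used instead.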
def __init__(self, host=None, port=80, limit=1000, offset=0, mapping=None,
             datasetids=None, fileids=None, _http=None, restPath=None,
             keys={}, originalKeys={}):
    if host is None:
        raise esgfDatasetException("You need to pass url")
    self.host = host
    # self.host = "esg-datanode.jpl.nasa.gov"
    self.port = port
    self.defaultSearchType = "File"
    if restPath is None:
        self.restPath = "/esg-search/search"
    else:
        self.restPath = restPath
    if datasetids is None:
        if "dataset_id_template_" in keys:
            tmp = keys["dataset_id_template_"]
            if tmp[:5] == "cmip5":
                tmp = tmp.replace("valid_institute", "institute")
                tmp = "%(project)" + tmp[5:]
            self.datasetids = genutil.StringConstructor(tmp.replace(")s", ")"))
        elif "project" in keys and keys["project"] == "cmip5":
            self.datasetids = genutil.StringConstructor(
                "%(project).%(product).%(institute).%(model).%(experiment)."
                "%(time_frequency).%(realm).%(cmor_table).%(ensemble)")
        else:
            self.datasetids = None
    if isinstance(datasetids, genutil.StringConstructor):
        self.datasetids = datasetids
    elif isinstance(datasetids, str):
        self.datasetids = genutil.StringConstructor(datasetids)
    if fileids is not None:
        if isinstance(fileids, genutil.StringConstructor):
            self.fileids = fileids
        else:
            self.fileids = genutil.StringConstructor(fileids)
        if self.datasetids is not None:
            self.fileids.template = self.fileids.template.replace(
                "%(datasetid)", self.datasetids.template)
    elif self.datasetids is not None:
        self.fileids = genutil.StringConstructor(
            "%s.%%(filename)" % self.datasetids.template)
    else:
        self.fileids = None
    self.originalKeys = originalKeys
    self.validSearchTypes = validSearchTypes
    self.validSearchTypes = ["File", ]
    self.EsgfObjectException = esgfDatasetException
    self.params = keys
    self.keys = self.params.keys
    self.items = self.params.items
    self.values = self.params.values
    # self.id = self["id"]
    self.params["limit"] = limit
    self.params["offset"] = offset
    self.mapping = mapping
    # print "SEARCHING DS:", originalKeys
    self.resp = None
    self.cacheTime = None
def process_templated_argument(self, name, default_value="*", extras=None):
    """Applies argparse values to a genutil.StringConstructor template-type argument
    Input:
        name: name of the argument to process
        extras: other object(s) to get keys from, supersedes the argparse object
    Output:
        formatted argument, as a genutil.StringConstructor
    """
    process = getattr(self, name, None)
    if process is None:
        # Not an argument from argparse; maybe a template string
        # or a StringConstructor itself
        if isinstance(name, basestring):
            process = name
        elif isinstance(name, genutil.StringConstructor):
            process = name.template
        else:
            raise RuntimeError(
                "Could not figure out how to process argument {}".format(name))
    if not isinstance(process, basestring):
        raise RuntimeError(
            "Could not figure out how to process argument {}".format(name))
    if extras is None:
        sources = []
    elif not isinstance(extras, (list, tuple)):
        sources = [extras]
    else:
        sources = list(extras)
    sources.insert(0, self)  # will use itself as the default source
    process = genutil.StringConstructor(process)
    for key in process.keys():
        for source in sources:
            setattr(process, key, getattr(source, key, default_value))
    return process
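# A minimal standalone sketch of the genutil.StringConstructor behavior that
# process_templated_argument relies on; the template and values here are
# invented for illustration.
import genutil

con = genutil.StringConstructor("%(model)/%(variable)_%(period).nc")
print(con.keys())        # ['model', 'variable', 'period']
con.model = "ACCESS1-0"  # each %(key) is filled by setting an attribute
con.variable = "tas"
con.period = "1980-2005"
print(con())             # calling the instance renders the template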
def model_output_structure(model_version, variable):
    dir_template = "%(root_modeling_group_clim_directory)/%(test_case)/"
    file_template = "cmip5.%(model_version).historical.r1i1p1.mo.%(table_realm).%(variable).ver-1.%(period).AC.%(ext)"
    ### CONSTRUCT PATH
    D = genutil.StringConstructor(dir_template)
    D.root_modeling_group_clim_directory = mod_data_path
    D.test_case = test_case
    data_location = D()
    ### CONSTRUCT FILENAME
    F = genutil.StringConstructor(file_template)
    F.model_version = model_version
    F.table_realm = 'atm.Amon'
    if variable in ['tos', 'sos', 'zos']:
        F.table_realm = 'ocn.Omon'
    F.variable = variable
    F.ext = 'nc'
    F.period = '1980-2005'
    filename = F()
    return data_location, filename
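# Hypothetical call to model_output_structure, assuming the module-level
# globals mod_data_path and test_case are set elsewhere; names are invented.
#   data_location, filename = model_output_structure("ACCESS1-0", "tos")
#   filename -> "cmip5.ACCESS1-0.historical.r1i1p1.mo.ocn.Omon.tos.ver-1.1980-2005.AC.nc"
# ('tos' is in the ocean list, so table_realm switches to 'ocn.Omon')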
def setMapping(self, mapping):
    if mapping is None:
        self.mapping = ""
        if self.datasetids is not None:
            self.mapping = self.datasetids
        else:
            for k in self.parent.keys():
                self.mapping += "%%(%s)" % k
    else:
        self.mapping = mapping
    # print "Stage 1 mapping:", self.mapping
    if not isinstance(self.mapping, genutil.StringConstructor):
        if self.datasetids is not None:
            self.mapping = self.mapping.replace(
                "%(datasetid)", self.datasetids.template)
        self.mapping = genutil.StringConstructor(self.mapping)
    # print "Stage 2:", self.mapping.template, self.keys()
    vk = self.parent.keys()
    for k in self.mapping.keys():
        ok = False
        if self.datasetids is not None:
            vk += self.datasetids.keys()
            if k in self.datasetids.keys():
                ok = True
        if self.fileids is not None:
            vk += self.fileids.keys()
            if k in self.fileids.keys():
                ok = True
        if k in self.parent.keys():
            ok = True
        # Ok second to last hope... Matching to datasetids
        if isinstance(self.datasetids, genutil.StringConstructor) and ok is False:
            try:
                mapid = self.datasetids.reverse(self.parent.id)
                vk += mapid.keys()
                if k in mapid.keys():
                    ok = True
            except Exception:
                # print "Couldn't map: %s to %s" % (self.parent.id, self.datasetids.template)
                pass
        if ok is False:
            vk = set(vk)
            raise self.EsgfObjectException(
                "Invalid mapping key: %s, valid keys are: %s" % (k, sorted(vk)))
def setMapping(self, mapping):
    if mapping is None:
        self.mapping = ""
        if self.datasetids is not None:
            self.mapping = self.datasetids
        else:
            for k in self.parent.keys():
                if k not in ["limit", "offset", "text"]:
                    self.mapping += "%%(%s)" % k
    else:
        self.mapping = mapping
    # print "Stage 1 mapping:", self.mapping
    if not isinstance(self.mapping, genutil.StringConstructor):
        if self.datasetids is not None:
            self.mapping = self.mapping.replace(
                "%(datasetid)", self.datasetids.template)
        self.mapping = genutil.StringConstructor(self.mapping)
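# Both setMapping variants validate mapping keys against StringConstructor
# templates, ultimately via reverse(). A sketch with an invented dataset id,
# assuming genutil is available:
import genutil

ids = genutil.StringConstructor(
    "%(project).%(product).%(institute).%(model).%(experiment)")
parsed = ids.reverse("cmip5.output1.CSIRO-BOM.ACCESS1-0.historical")
print(parsed["model"])  # 'ACCESS1-0'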
def remap(self, mapping=None, verbose=False):
    if mapping is None:
        thismapping = self.mapping
    else:
        thismapping = mapping
    self.mapped = {}
    savedmapping = thismapping
    for f in self._files:
        mappoint = self.mapped
        nok = 0
        nlevels = len(list(thismapping.keys()))
        if nlevels == 0:
            # ok no mapping, let's try to figure this one out
            if 'dataset_id_template_' in list(f.keys()):
                ds = f['dataset_id_template_'].replace(")s", ")")
                thismapping = genutil.StringConstructor(ds)
        for k in list(thismapping.keys()):
            if k in list(f.keys()):
                nok += 1
                cont = f[k]
                if not isinstance(cont, (str, int, float)):
                    break
                if cont not in list(mappoint.keys()):
                    mappoint[cont] = {}
            elif k in list(self.parent.keys()):
                nok += 1
                cont = self[k]
                if cont not in list(mappoint.keys()):
                    mappoint[cont] = {}
            elif isinstance(self.fileids, genutil.StringConstructor):
                try:
                    mapid = self.fileids.reverse(self.parent.id)
                    if k in list(mapid.keys()):
                        nok += 1
                        cont = mapid[k]
                        if cont not in list(mappoint.keys()):
                            mappoint[cont] = {}
                except BaseException:
                    break
            else:
                break
            mappoint = mappoint[cont]
        tmp = mappoint.get("files", [])
        tmp.append(f)
        mappoint["files"] = tmp
        thismapping = savedmapping
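# For orientation: remap() builds a nested dict, one level per mapping key,
# with the matching file objects collected under a "files" leaf. With a
# mapping of "%(model)%(variable)" the result looks roughly like this
# (model/variable names invented):
#   self.mapped = {
#       "ACCESS1-0": {
#           "tas": {"files": [<file1>, <file2>]},
#           "pr": {"files": [<file3>]},
#       },
#   }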
def populateStringConstructor(template, args):
    template = genutil.StringConstructor(template)
    for k in list(template.keys()):
        if hasattr(args, k):
            setattr(template, k, str(getattr(args, k)))
    return template
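# A sketch of populateStringConstructor with an argparse-style namespace;
# the template and values are invented for illustration.
import argparse

args = argparse.Namespace(model="ACCESS1-0", variable="pr")
tmpl = populateStringConstructor("%(model)_%(variable)_%(period).nc", args)
# 'model' and 'variable' were filled from args; 'period' has no matching
# attribute, so assign it before rendering:
tmpl.period = "1980-2005"
print(tmpl())  # ACCESS1-0_pr_1980-2005.nc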
def monsoon_wang_runner(args):
    # args = P.parse_args(sys.argv[1:])
    modpath = genutil.StringConstructor(args.test_data_path)
    modpath.variable = args.modvar
    outpathdata = args.results_dir
    if isinstance(args.modnames, str):
        mods = eval(args.modnames)
    else:
        mods = args.modnames

    json_filename = args.outnamejson
    if json_filename == "CMIP_MME":
        json_filename = "/MPI_" + args.mip + "_" + args.experiment

    # VAR IS FIXED TO BE PRECIP FOR CALCULATING MONSOON PRECIPITATION INDICES
    var = args.modvar
    thr = args.threshold
    sig_digits = ".3f"

    # Get flag for CMEC output
    cmec = args.cmec

    #########################################
    # PMP monthly default PR obs
    cdms2.axis.longitude_aliases.append("longitude_prclim_mpd")
    cdms2.axis.latitude_aliases.append("latitude_prclim_mpd")
    fobs = cdms2.open(args.reference_data_path)
    dobs_orig = fobs(args.obsvar)
    fobs.close()
    obsgrid = dobs_orig.getGrid()

    ########################################
    # FCN TO COMPUTE GLOBAL ANNUAL RANGE AND MONSOON PRECIP INDEX
    annrange_obs, mpi_obs = mpd(dobs_orig)

    #########################################
    # SETUP WHERE TO OUTPUT RESULTING DATA (netcdf)
    nout = os.path.join(outpathdata,
                        "_".join([args.experiment, args.mip, "wang-monsoon"]))
    try:
        os.makedirs(nout)
    except BaseException:
        pass

    # SETUP WHERE TO OUTPUT RESULTS (json)
    jout = outpathdata
    try:
        os.makedirs(nout)
    except BaseException:
        pass

    gmods = []  # "Got" these MODS
    for i, mod in enumerate(mods):
        modpath.model = mod
        for k in modpath.keys():
            try:
                val = getattr(args, k)
            except Exception:
                continue
            if not isinstance(val, (list, tuple)):
                setattr(modpath, k, val)
            else:
                setattr(modpath, k, val[i])
        l1 = modpath()
        if os.path.isfile(l1) is True:
            gmods.append(mod)

    if len(gmods) == 0:
        raise RuntimeError("No model file found!")

    #########################################
    egg_pth = resources.resource_path()
    globals = {}
    locals = {}
    exec(
        compile(
            open(os.path.join(egg_pth, "default_regions.py")).read(),
            os.path.join(egg_pth, "default_regions.py"),
            "exec",
        ),
        globals,
        locals,
    )
    regions_specs = locals["regions_specs"]
    doms = ["AllMW", "AllM", "NAMM", "SAMM", "NAFM", "SAFM", "ASM", "AUSM"]

    mpi_stats_dic = {}
    for i, mod in enumerate(gmods):
        modpath.model = mod
        for k in modpath.keys():
            try:
                val = getattr(args, k)
            except Exception:
                continue
            if not isinstance(val, (list, tuple)):
                setattr(modpath, k, val)
            else:
                setattr(modpath, k, val[i])
        modelFile = modpath()

        mpi_stats_dic[mod] = {}

        print(
            "******************************************************************************************"
        )
        print(modelFile)
        f = cdms2.open(modelFile)
        d_orig = f(var)

        annrange_mod, mpi_mod = mpd(d_orig)
        annrange_mod = annrange_mod.regrid(obsgrid, regridTool="regrid2",
                                           regridMethod="conserve", mkCyclic=True)
        mpi_mod = mpi_mod.regrid(obsgrid, regridTool="regrid2",
                                 regridMethod="conserve", mkCyclic=True)

        for dom in doms:
            mpi_stats_dic[mod][dom] = {}
            reg_sel = regions_specs[dom]["domain"]

            mpi_obs_reg = mpi_obs(reg_sel)
            mpi_obs_reg_sd = float(statistics.std(mpi_obs_reg, axis="xy"))
            mpi_mod_reg = mpi_mod(reg_sel)

            cor = float(
                statistics.correlation(mpi_mod_reg, mpi_obs_reg, axis="xy"))
            rms = float(statistics.rms(mpi_mod_reg, mpi_obs_reg, axis="xy"))
            rmsn = rms / mpi_obs_reg_sd

            # DOMAIN SELECTED FROM GLOBAL ANNUAL RANGE FOR MODS AND OBS
            annrange_mod_dom = annrange_mod(reg_sel)
            annrange_obs_dom = annrange_obs(reg_sel)

            # SKILL SCORES
            #  HIT/(HIT + MISSED + FALSE ALARMS)
            hit, missed, falarm, score, hitmap, missmap, falarmmap = mpi_skill_scores(
                annrange_mod_dom, annrange_obs_dom, thr)

            # POPULATE DICTIONARY FOR JSON FILES
            mpi_stats_dic[mod][dom] = {}
mpi_stats_dic[mod][dom]["cor"] = format(cor, sig_digits) mpi_stats_dic[mod][dom]["rmsn"] = format(rmsn, sig_digits) mpi_stats_dic[mod][dom]["threat_score"] = format(score, sig_digits) # SAVE ANNRANGE AND HIT MISS AND FALSE ALARM FOR EACH MOD DOM fm = os.path.join(nout, "_".join([mod, dom, "wang-monsoon.nc"])) g = cdms2.open(fm, "w") g.write(annrange_mod_dom) g.write(hitmap, dtype=numpy.int32) g.write(missmap, dtype=numpy.int32) g.write(falarmmap, dtype=numpy.int32) g.close() f.close() # OUTPUT METRICS TO JSON FILE OUT = pcmdi_metrics.io.base.Base(os.path.abspath(jout), json_filename) disclaimer = open(os.path.join(egg_pth, "disclaimer.txt")).read() metrics_dictionary = collections.OrderedDict() metrics_dictionary["DISCLAIMER"] = disclaimer metrics_dictionary["REFERENCE"] = ( "The statistics in this file are based on" + " Wang, B., Kim, HJ., Kikuchi, K. et al. " + "Clim Dyn (2011) 37: 941. doi:10.1007/s00382-010-0877-0") metrics_dictionary["RESULTS"] = mpi_stats_dic # collections.OrderedDict() OUT.var = var OUT.write( metrics_dictionary, json_structure=["model", "domain", "statistic"], indent=4, separators=(",", ": "), ) if cmec: print("Writing cmec file") OUT.write_cmec(indent=4, separators=(",", ": "))
def process_template(tmpl, cnames, cols, voids={}, minmax={}, iadd=-1):
    F = genutil.StringConstructor(tmpl)
    keys = F.keys()
    match = 0
    for c in cnames:
        if c in keys:
            match += 1
            indx = cnames.index(c)
            if indx < len(cols):
                val = cols[indx]
            else:
                val = ""
            if val.strip() == 'time2':
                setattr(F, "climatology", "yes")
                if "climatology" in keys:
                    keys.remove("climatology")
            if c == "long name" and iadd != -1:
                val += " at Surface"
            if val.strip() != "":
                if c in ['units', 'unformatted units'] and val == '1.0':
                    val = '1'
                setattr(F, c, val)
                keys.remove(c)
    if "CMOR dimension" in keys:
        print('Keys:', keys)
        print('cnames:', cnames)
        raise RuntimeError("crap")
    nstd = 3.0
    pmean = 0.2
    ve = getattr(F, "CMOR variable name", "yep not that guy")
    if ve in minmax.keys():
        if 'valid min' in keys:
            # ok let's see if we can figure this one out
            mnmx = minmax[ve]
            val = 1.e20
            std = 0.
            for mlev in mnmx.keys():
                mn = mnmx[mlev]['Min']
                val = min(mn['min'], val)
                std += mn['std']
            std /= len(mnmx.keys())
            if numpy.allclose(std, 0.):
                std = val * pmean
            delta = max(nstd * std, abs(val * .05))
            setattr(F, "valid min", "%.4g" % (val - delta))
            keys.remove("valid min")
        if 'valid max' in keys:
            # ok let's see if we can figure this one out
            mnmx = minmax[ve]
            val = -1.e20
            std = 0.
            for mlev in mnmx.keys():
                mn = mnmx[mlev]['Max']
                val = max(mn['max'], val)
                std += mn['std']
            std /= len(mnmx.keys())
            if numpy.allclose(std, 0.):
                std = val * pmean
            delta = max(nstd * std, abs(val * .05))
            setattr(F, "valid max", "%.4g" % (val + delta))
            keys.remove("valid max")
        if "mean absolute min" in keys:
            mnmx = minmax[ve]
            val = 1.e20
            std = 0.
            for mlev in mnmx.keys():
                aavg = mnmx[mlev]['AAvg']
                val = min(aavg['min'], val)
                std += aavg['std']
            std /= len(mnmx.keys())
            if numpy.allclose(std, 0.):
                std = val * pmean
            delta = max(nstd * std, abs(val * .05))
            setattr(F, "mean absolute min", "%.4g" % (val - delta))
            keys.remove("mean absolute min")
        if "mean absolute max" in keys:
            mnmx = minmax[ve]
            val = -1.e20
            std = 0.
            for mlev in mnmx.keys():
                aavg = mnmx[mlev]['AAvg']
                val = max(aavg['max'], val)
                std += aavg['std']
            std /= len(mnmx.keys())
            if numpy.allclose(std, 0.):
                std = val * pmean
            delta = max(nstd * std, abs(val * .05))
            setattr(F, "mean absolute max", "%.4g" % (val + delta))
            keys.remove("mean absolute max")
    ### Need to add lines for absolute mean min/max
    # Mark any remaining unmatched keys so their lines can be stripped below
    for k in keys:
        setattr(F, k, "!CRAP WE NEED TO REMOVE THAT LINE")
    # Now generates
    out = F()
    sp = out.split("\n")
    lines = []
    for l in sp:
        if l.find("!CRAP WE NEED TO REMOVE THAT LINE") > -1:
            continue
        lines.append(l)
    out = "\n".join(lines)
    # fixes Karl input bug
    out = out.replace("..", ".")
    # Ok now check the void thing
    for kw in voids.keys():
        v = getattr(F, kw, "we keep").strip()
        vals = voids[kw]
        if not isinstance(vals, (list, tuple)):
            vals = [vals, ]
        for V in vals:
            if V == v:
                print('Skipping:', sp)
                out = ""
    return out
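# Worked example of the valid-min heuristic above (numbers invented):
# with per-level minima whose smallest 'min' is 210.0 and mean 'std' 2.5,
#   delta = max(3.0 * 2.5, abs(210.0 * 0.05)) = max(7.5, 10.5) = 10.5
#   "valid min" = "%.4g" % (210.0 - 10.5) -> '199.5'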
        default=None,
        dest=x,
        help="'%s' for this run (will try to get from input file)" % x,
    )

load_parser(parser)
As = parser.get_parameters()

parser = PMPParser(description="Generates Climatologies from files")
load_parser(parser)
for A in As:
    for tmpl in [A.modpath, A.filename_template, A.output_filename_template]:
        con = genutil.StringConstructor(tmpl)
        print("TEMPLATE:", con.template)
        for k in con.keys():
            print("ADDING OPTION:", k)
            parser.add_argument("--{}".format(k))


def getCalendarName(cal):
    # cdtime calendar constants are typically named '<Name>Calendar';
    # strip the 8-character suffix and lowercase the rest
    for att in dir(cdtime):
        if getattr(cdtime, att) == cal:
            return att[:-8].lower()


def dump_cmor(A, data, time, bounds, season):
    inst = checkCMORAttribute("institution")
    src = checkCMORAttribute("source")
def demo(demo_file, title, colorized=True):
    comment("""
    PMP Demo: %s

    This is a demonstration of the PMP.
    It will download some observation and model data for you.
    It will then demonstrate how to set up a parameter file to execute the PMP on these.
    It will run the PMP.
    It will show you where to find the results and how to look at them.""" % title)
    cont = comment(
        """We will now download and untar a small set of data for the demo.
        Data will be untarred in the 'pmp_demo' directory created in the current directory.""",
        "Continue? [Y/n]")
    if cont.strip().lower() not in ["", "y", "yes"]:
        sys.exit()
    # Download data
    demo_pth = os.path.join(os.getcwd(), "pmp_demo")
    if not os.path.exists(demo_pth):
        os.makedirs(demo_pth)
    # http://oceanonly.llnl.gov/gleckler1/pmp-demo-data/pmpv1.1_demodata.tar
    tar_filename = "pmpv1.1_demodata.tar"
    tar_pth = os.path.join(demo_pth, tar_filename)
    good_md5 = "a6ef8f15457378ff36fd46e8fbf5f157"
    attempts = 0
    while attempts < 3:
        md5 = hashlib.md5()
        if os.path.exists(tar_pth):
            with open(tar_pth, "rb") as f:
                md5.update(f.read())
            if md5.hexdigest() == good_md5:
                attempts = 5
                continue
        print("Downloading:", tar_filename)
        r = requests.get(
            "http://oceanonly.llnl.gov/gleckler1/pmp-demo-data/pmpv1.1_demodata.tar",
            stream=True)
        with open(tar_pth, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    md5.update(chunk)
        if md5.hexdigest() == good_md5:
            attempts = 5
        else:
            attempts += 1
    comment("Successfully downloaded demo tarball\nNow untarring it", None)
    tar_process = subprocess.Popen(shlex.split("tar xvf %s" % tar_pth),
                                   cwd=demo_pth)
    tar_process.wait()
    comment("Success! Files are now untarred in %s\nLet's run this demo!\n" % demo_pth,
            None)
    comment("""The PMP package runs off a 'parameter' file which needs to be edited by the user.
    Please kindly take a look at our sample parameter file in: %s""" % demo_file)
    describe(demo_file)
    cmd = "pcmdi_metrics_driver.py -p %s" % demo_file
    comment("We will now run the PMP using this parameter file\n"
            "To do so we are using the following command:\n%s" % cmd)
    pmp = subprocess.Popen(shlex.split(cmd))
    sys.path.insert(0, os.path.dirname(demo_file))
    # exec-based import does not bind a local name in Python 3; import directly
    pmp_param = __import__(os.path.basename(demo_file)[:-3])
    pmp.wait()
    loc = genutil.StringConstructor(os.path.join(pmp_param.metrics_output_path))
    for att in ["case_id", "model_version", "period", "realization"]:
        if hasattr(pmp_param, att):
            setattr(loc, att, getattr(pmp_param, att))
    comment("You can now look at the results in: %s%s%s" %
            (bgcolor.HEADER + bgcolor.BOLD, loc(), bgcolor.ENDC))
def runClim(A):
    print("OK SO START IS:", A.start)
    # season dictionary
    season_function = {
        "djf": cdutil.times.DJF,
        "mam": cdutil.times.MAM,
        "jja": cdutil.times.JJA,
        "son": cdutil.times.SON,
        "ann": cdutil.times.ANNUALCYCLE,
        "year": cdutil.times.YEAR,
    }
    print("BEFORE PROCESSED:", A.results_dir)
    # print("A VAR:", A.variable)
    # print("A REF:", A.reference)
    results_dir = A.process_templated_argument("results_dir")
    print("RESDIR:", results_dir.template)
    A.results_dir = results_dir()
    print("HERE?", os.path.join(A.modpath, A.filename_template))
    print("A.variable", A.variable, A.model)
    filename_in = A.process_templated_argument(
        os.path.join(A.modpath, A.filename_template))
    if A.verbose:
        print("filename in after templating:", filename_in())
    filename = glob.glob(filename_in())[0]
    if not os.path.exists(filename):
        raise RuntimeError("file '{}' does not exist".format(filename))

    filein = cdms2.open(filename)
    fvars = list(filein.variables.keys())
    v = A.variable
    if v not in fvars:
        raise RuntimeError(
            "Variable '%s' is not contained in input file(s)" % v)
    V = filein[v]
    tim = V.getTime().clone()
    # "monthly"
    if A.bounds:
        cdutil.times.setTimeBoundsMonthly(tim)
    # Now make sure we can get the requested period
    if A.start is None:
        i0 = 0
    else:  # Ok user specified a start time
        if A.index == "index":  # index-based slicing
            if int(A.start) >= len(tim):
                raise RuntimeError(
                    "For variable %s you requested start time to be at index: %i but the file only has %i time steps"
                    % (v, int(A.start), len(tim)))
            i0 = int(A.start)
        elif A.index == "value":  # actual value used for slicing
            v0 = float(A.start)
            try:
                i0, tmp = tim.mapInterval((v0, v0), "cob")
            except Exception:
                raise RuntimeError(
                    "Could not find value %s for start time for variable %s"
                    % (A.start, v))
        elif A.index == "date":
            v0 = A.start
            # When too close to the bounds it messes things up; adding a minute seems to help
            v0 = cdtime.s2c(A.start)
            v0 = v0.add(1, cdtime.Minute)
            try:
                i0, tmp = tim.mapInterval((v0, v0), "cob")
            except Exception:
                raise RuntimeError(
                    "Could not find start time %s for variable: %s" % (A.start, v))
    if A.end is None:
        i1 = None
    else:  # Ok user specified an end time
        if A.index == "index":  # index-based slicing
            if int(A.end) >= len(tim):
                raise RuntimeError(
                    "For variable %s you requested end time to be at index: %i but the file only has %i time steps"
                    % (v, int(A.end), len(tim)))
            i1 = int(A.end)
        elif A.index == "value":  # actual value used for slicing
            v0 = float(A.end)
            try:
                tmp, i1 = tim.mapInterval((v0, v0), "cob")
            except Exception:
                raise RuntimeError(
                    "Could not find value %s for end time for variable %s"
                    % (A.end, v))
        elif A.index == "date":
            v0 = A.end
            # When too close to the bounds it messes things up; adding a minute seems to help
            v0 = cdtime.s2c(A.end)
            v0 = v0.add(1, cdtime.Minute)
            try:
                tmp, i1 = tim.mapInterval((v0, v0), "cob")
            except Exception:
                raise RuntimeError(
                    "Could not find end time %s for variable: %s" % (A.end, v))
    # Read in data
    data = V(time=slice(i0, i1))
    if A.verbose:
        print("DATA:", data.shape,
              data.getTime().asComponentTime()[0],
              data.getTime().asComponentTime()[-1])
    if A.bounds:
        cdutil.times.setTimeBoundsMonthly(data)
    # Now we can actually read and compute the climo
    seasons = [s.lower() for s in A.seasons]
    if "all" in seasons:
        seasons = ["djf", "mam", "jja", "son", "year", "ann"]

    for season in seasons:
        s = season_function[season].climatology(
            data, criteriaarg=[A.threshold, None])
        g = season_function[season].get(data, criteriaarg=[A.threshold, None])
        # Ok we know we have monthly data
        # We want to tweak bounds
        T = data.getTime()
        Tg = g.getTime()
        istart = 0
        while numpy.ma.allequal(g[istart].mask, True):
            istart += 1
        iend = -1
        while numpy.ma.allequal(g[iend].mask, True):
            iend -= 1
        if iend == -1:
            iend = None
        else:
            iend += 1
        if iend is None:
            iend = len(Tg)
        Tg = Tg.subAxis(istart, iend)
        cal = T.getCalendar()
        cal_name = getCalendarName(cal)
        Tunits = T.units
        bnds = T.getBounds()
        tc = T.asComponentTime()
        if A.verbose:
            print("TG:", Tg.asComponentTime()[0])
            print("START END THRESHOLD:", istart, iend, A.threshold, len(Tg))
            # print "SEASON:", season, "ORIGINAL:", T.asComponentTime()
        b1 = cdtime.reltime(Tg.getBounds()[0][0], Tg.units)
        b2 = cdtime.reltime(Tg.getBounds()[-1][1], Tg.units)
        # First and last time points
        y1 = cdtime.reltime(Tg[0], T.units)
        y2 = cdtime.reltime(Tg[-1], T.units)
        # Mid year is:
        yr = (y2.value + y1.value) / 2.0
        y = cdtime.reltime(yr, T.units).tocomp(cal).year
        if A.verbose:
            print("We found data from", y1.tocomp(cal), "to", y2.tocomp(cal),
                  "MID YEAR:", y)
            print("bounds:", b1.tocomp(cal), b2.tocomp(cal))

        values = []
        bounds = []
        # Loop through the climatology months and set values and bounds appropriately
        ts = s.getTime().asComponentTime()
        for ii in range(s.shape[0]):
            t = ts[ii]
            t.year = y
            values.append(t.torel(Tunits, cal).value)
            if s.shape[0] > 1:
                B1 = b1.tocomp(cal).add(ii, cdtime.Month)
                B2 = b2.tocomp(cal).add(ii - s.shape[0] + 1, cdtime.Month)
            else:
                B1 = b1
                B2 = b2
            if A.verbose:
                print(B1.tocomp(cal), "<", t, "<", B2.tocomp(cal))
            bounds.append([B1.torel(Tunits, cal).value,
                           B2.torel(Tunits, cal).value])

        fnmout = genutil.StringConstructor(A.output_filename_template)
        if "model_id" in fnmout.keys():
            model_id = checkCMORAttribute("model_id")
        if "experiment_id" in fnmout.keys():
            experiment_id = checkCMORAttribute("experiment_id")
        if "realization" in fnmout.keys():
            realization = checkCMORAttribute("realization")
        if "initialization_method" in fnmout.keys():
            initialization = checkCMORAttribute("initialization_method")
        if "physics_version" in fnmout.keys():
            physics_version = checkCMORAttribute("physics_version")
        if A.cmor and hasCMOR:
            dump_cmor(A, s, values, bounds, season)
        else:
            if A.cmor and not hasCMOR:
                print("Your Python does not have CMOR, using regular cdms to write out files")
            if not os.path.exists(A.results_dir):
                os.makedirs(A.results_dir)
            end_tc = tc[-1].add(1, cdtime.Month)
            # Populate fnmout template with values
            start = "{}{:02d}".format(tc[0].year, tc[0].month)
            end = "{}{:02d}".format(end_tc.year, end_tc.month)
            for k in fnmout.keys():
                try:
                    setattr(fnmout, k, getattr(A, k))
                except Exception:
                    pass
                # overwrite with locals
                try:
                    setattr(fnmout, k, locals()[k])
                except Exception:
                    pass
            nm = os.path.join(A.results_dir, fnmout())
            f = cdms2.open(nm, "w")
            # Global attributes copied
            for att, value in store_globals(filein).items():
                setattr(f, att, value)
            t = cdms2.createAxis(values)
            t.setBounds(numpy.array(bounds))
            t.designateTime()
            t.id = "time"
            s.setAxis(0, t)
            # copy original attributes
            for att, value in store_attributes(V).items():
                try:
                    setattr(s, att, value)
                except Exception:
                    pass
            f.write(s, dtype=data.dtype)
            f.close()
            if A.verbose:
                print("Results out to:", nm)
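# Note on the template fill above: each %(key) in output_filename_template is
# first taken from the argparse object A, then overwritten by a same-named
# local variable when one exists (e.g. the computed start/end date strings),
# so a hypothetical template like "%(variable)_%(start)-%(end)_clim.nc" picks
# up the computed period automatically.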
def make_climatologies(settings, model_dir, wk_dir):
    filename_template = settings["filename_template"]
    modellist = settings["test_data_set"]
    varlist = settings["vars"]
    realization = settings.get("realization", "")
    period = settings.get("period", "")
    tmp = os.path.join(model_dir, filename_template)
    model_file = genutil.StringConstructor(tmp)
    model_file.period = period
    model_file.realization = realization
    out_base = os.path.join(wk_dir, "AC")
    os.mkdir(out_base)
    for model in modellist:
        for var in varlist:
            model_file.model_version = model
            model_file.variable = var
            cmd = [
                "pcmdi_compute_climatologies.py",
                "--infile", model_file(),
                "--outpath", out_base,
                "--var", var,
            ]
            suffix = "pcmdi_compute_climatologies_{0}_{1}.log".format(model, var)
            outfilename = os.path.join(out_base, suffix)
            with open(outfilename, "w") as outfile:
                subprocess.run(cmd, env=os.environ.copy(),
                               stdout=outfile, check=True)

    # Get the date strings from the climo files for the filename template
    settings["test_data_path"] = out_base
    filelist = os.listdir(out_base)
    ext = os.path.basename(filename_template)[-3:]
    trim = 30  # default for '.nc'
    if ext == "xml":
        trim = -29
    try:
        for file in filelist:
            if ".AC." in file:
                suffix = file[trim:]
                break
        settings["filename_template"] = (
            os.path.basename(filename_template)[:-3] + suffix)
        print("Success in generating climatologies\n")
    except TypeError:
        print("Error: Could not find climatologies.")
        sys.exit(1)

    # Link the sftlf file in the AC folder if it exists,
    # since it is the new model folder.
    if settings.get("generate_sftlf", False) is False:
        sftlf = settings.get("sftlf_filename_template",
                             "sftlf_%(model_version).nc")
        for model in modellist:
            s = sftlf.replace("%(model_version)", model)
            sftlf_src = os.path.join(model_dir, s)
            sftlf_dst = os.path.join(out_base, s)
            if os.path.exists(sftlf_src):
                # Make any subdirectories from the sftlf template
                os.makedirs(os.path.dirname(sftlf_dst), exist_ok=True)
                os.symlink(sftlf_src, sftlf_dst)
    return settings
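# Hypothetical invocation of make_climatologies; the settings keys mirror the
# lookups above, but every value here is invented for illustration.
settings = {
    "filename_template": "%(model_version).%(variable).mon.nc",
    "test_data_set": ["ACCESS1-0"],
    "vars": ["pr"],
    "period": "1980-2005",
    "realization": "r1i1p1",
}
settings = make_climatologies(settings, "/path/to/models", "/path/to/workdir")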