def buildOcnAvgList(start_year, stop_year, tavgdir, main_comm, debugMsg):
    """Build the list of ocean averages for the pyAverager to compute.

    An average is only requested when its output file does not already
    exist in tavgdir.

    Arguments:
        start_year (string) - starting year
        stop_year (string) - ending year
        tavgdir (string) - averages directory
        main_comm - communicator; debug output is restricted to the manager
        debugMsg - debug message callable

    Return:
        avgList (list) - list of averages to be passed to the pyaverager
    """
    neededAvgs = []

    # check each average type in turn; request it only when missing
    for avgType in ('mavg', 'tavg'):
        checkPath = '{0}/{1}.{2}-{3}.nc'.format(tavgdir, avgType, start_year, stop_year)
        if main_comm.is_manager():
            debugMsg('{0}File = {1}'.format(avgType, checkPath), header=True, verbosity=2)
        exists, err_msg = cesmEnvLib.checkFile(checkPath, 'read')
        if not exists:
            neededAvgs.append('{0}:{1}:{2}'.format(avgType, start_year, stop_year))

    if main_comm.is_manager():
        debugMsg('exit buildOcnAvgList avgList = {0}'.format(neededAvgs), header=True, verbosity=2)

    return neededAvgs
def create_za(workdir, tavgfile, gridfile, toolpath, env):
    """generate the global zonal average file used for most of the plots

    Runs the 'za' Fortran tool from within workdir to produce
    workdir/za_<tavgfile>.  Does nothing when that file already exists.

    Arguments:
        workdir (string) - directory where the za file is created
        tavgfile (string) - time average file to be zonally averaged
        gridfile (string) - grid info file passed via -grid_file
        toolpath (string) - directory containing the za executable
        env (dictionary) - diagnostics environment (unused here; kept for API)
    """
    # generate the global zonal average file used for most of the plots
    zaFile = '{0}/za_{1}'.format(workdir, tavgfile)
    rc, err_msg = cesmEnvLib.checkFile(zaFile, 'read')
    if not rc:
        # check that the za executable exists
        zaCommand = '{0}/za'.format(toolpath)
        rc, err_msg = cesmEnvLib.checkFile(zaCommand, 'exec')
        if not rc:
            print('ERROR: create_za failed to verify executable za command = {0}'.format(zaCommand))
            print(' {0}'.format(err_msg))
            # BUGFIX: bail out here - the original fell through and invoked
            # the missing executable, raising an unhandled OSError.
            return

        # call the za fortran code from within the workdir
        cwd = os.getcwd()
        os.chdir(workdir)
        testCmd = '{0} -O -time_const -grid_file {1} {2}'.format(zaCommand, gridfile, tavgfile)
        print('Ocean zonal average command = {0}'.format(testCmd))
        try:
            subprocess.check_call([zaCommand, '-O', '-time_const', '-grid_file', gridfile, tavgfile])
        except subprocess.CalledProcessError as e:
            print('ERROR: create_za subprocess call to {0} failed with error:'.format(e.cmd))
            print(' {0} - {1}'.format(e.returncode, e.output))
        print('zonal average created')
        os.chdir(cwd)
def create_za(workdir, tavgfile, gridfile, toolpath, env):
    """generate the global zonal average file used for most of the plots

    Runs the 'za' Fortran tool from within workdir to produce
    workdir/za_<tavgfile>.  Does nothing when that file already exists.

    Arguments:
        workdir (string) - directory where the za file is created
        tavgfile (string) - time average file to be zonally averaged
        gridfile (string) - grid info file passed via -grid_file
        toolpath (string) - directory containing the za executable
        env (dictionary) - diagnostics environment (unused here; kept for API)
    """
    # generate the global zonal average file used for most of the plots
    zaFile = '{0}/za_{1}'.format(workdir, tavgfile)
    rc, err_msg = cesmEnvLib.checkFile(zaFile, 'read')
    if not rc:
        # check that the za executable exists
        zaCommand = '{0}/za'.format(toolpath)
        rc, err_msg = cesmEnvLib.checkFile(zaCommand, 'exec')
        if not rc:
            print('ERROR: create_za failed to verify executable za command = {0}'.format(zaCommand))
            print(' {0}'.format(err_msg))
            # BUGFIX: bail out here - the original fell through and invoked
            # the missing executable, raising an unhandled OSError.
            return

        # call the za fortran code from within the workdir
        cwd = os.getcwd()
        os.chdir(workdir)
        testCmd = '{0} -O -time_const -grid_file {1} {2}'.format(zaCommand, gridfile, tavgfile)
        print('Ocean zonal average command = {0}'.format(testCmd))
        try:
            subprocess.check_call([zaCommand, '-O', '-time_const', '-grid_file', gridfile, tavgfile])
        except subprocess.CalledProcessError as e:
            print('ERROR: create_za subprocess call to {0} failed with error:'.format(e.cmd))
            print(' {0} - {1}'.format(e.returncode, e.output))
        print('zonal average created')
        os.chdir(cwd)
def buildLndAvgList(climo, avg_start_year, avg_stop_year, trends, trends_start_year, trends_stop_year, avgFileBaseName, out_dir, envDict, debugMsg):
    """buildLndAvgList - build the list of averages to be computed
    by the pyAverager. Checks if the file exists or not already.

    Arguments:
        climo (string) - 'True' when climatology averages are requested
        avg_start_year (string) - starting year
        avg_stop_year (string) - ending year
        trends (string) - 'True' when trend averages are requested
        trends_start_year (string) - trends starting year
        trends_stop_year (string) - trends ending year
        avgFileBaseName (string) - avgFileBaseName (out_dir/case.[stream].)
        out_dir (string) - output directory (unused here; kept for call compatibility)
        envDict (dictionary) - diagnostics environment; 'seas' lists the seasons
        debugMsg - debug message callable (unused here)

    Return:
        avgList (list) - list of averages to be passed to the pyaverager
    """
    avgList = []
    if (climo == 'True'):
        # Seasonal Files - one climo and one means file per season
        for seas in envDict['seas']:
            avgFile = '{0}.{1}-{2}.{3}_climo.nc'.format(avgFileBaseName, avg_start_year, avg_stop_year, seas)
            rc, err_msg = cesmEnvLib.checkFile(avgFile, 'read')
            if not rc:
                # 'ann' requests a significance average; all other seasons a dependency average
                if seas == 'ann':
                    avgList.append('ann_sig:{0}:{1}'.format(avg_start_year, avg_stop_year))
                else:
                    avgList.append('dep_{0}:{1}:{2}'.format(seas.lower(), avg_start_year, avg_stop_year))
            # seasonal means
            avgFile = '{0}.{1}-{2}.{3}_means.nc'.format(avgFileBaseName, avg_start_year, avg_stop_year, seas)
            rc, err_msg = cesmEnvLib.checkFile(avgFile, 'read')
            if not rc:
                avgList.append('{0}_mean:{1}:{2}'.format(seas.lower(), avg_start_year, avg_stop_year))
        # Mons File - all monthly climatologies in one file
        avgFile = '{0}.{1}-{2}.MONS_climo.nc'.format(avgFileBaseName, avg_start_year, avg_stop_year)
        rc, err_msg = cesmEnvLib.checkFile(avgFile, 'read')
        if not rc:
            avgList.append('mons:{0}:{1}'.format(avg_start_year, avg_stop_year))
    # Trends - annual averages over the (possibly different) trends period
    if (trends == 'True'):
        avgFile = '{0}.{1}-{2}.ANN_ALL.nc'.format(avgFileBaseName, trends_start_year, trends_stop_year)
        rc, err_msg = cesmEnvLib.checkFile(avgFile, 'read')
        if not rc:
            avgList.append('annall:{0}:{1}'.format(trends_start_year, trends_stop_year))
    return avgList
def _create_html(self, workdir, templatePath, imgFormat):
    """Creates and renders html that is returned to the calling wrapper

    Builds one row per label: the label itself, a link to the plot image,
    and a link to the matching ascii table file.  Entries whose file is
    missing in workdir are rendered as '<name> - Error' instead of a link.

    Arguments:
        workdir (string) - directory containing the generated plots/tables
        templatePath (string) - jinja2 template search path
        imgFormat (string) - image file extension (e.g. png)

    Return:
        self._html (string) - rendered html fragment
    """
    plot_table = []
    num_cols = 3  # NOTE(review): appears unused here - verify against the template
    for i in range(len(self._labels)):
        plot_tuple_list = []
        plot_tuple = (0, 'label', '{0}:'.format(self._labels[i]))
        plot_tuple_list.append(plot_tuple)
        plot_list = self._expectedPlots  # NOTE(review): unused local
        # create the image link
        img_file = '{0}.{1}'.format(self._expectedPlots[i], imgFormat)
        rc, err_msg = cesmEnvLib.checkFile('{0}/{1}'.format(workdir, img_file), 'read')
        if not rc:
            plot_tuple = (i + 1, 'timeseries', '{0} - Error'.format(img_file))
        else:
            plot_tuple = (i + 1, 'timeseries', img_file)
        plot_tuple_list.append(plot_tuple)
        # create the ascii file link
        asc_file = '{0}.{1}'.format(self._expectedPlots[i], 'txt')
        rc, err_msg = cesmEnvLib.checkFile('{0}/{1}'.format(workdir, asc_file), 'read')
        if not rc:
            plot_tuple = (i + 1, 'table', '{0} - Error'.format(asc_file))
        else:
            plot_tuple = (i + 1, 'table', asc_file)
        plot_tuple_list.append(plot_tuple)
        # NOTE(review): leftover debug print - consider removing
        print('DEBUG... plot_tuple_list[{0}] = {1}'.format(i, plot_tuple_list))
        plot_table.append(plot_tuple_list)

    # create a jinja2 template object
    templateLoader = jinja2.FileSystemLoader(searchpath=templatePath)
    templateEnv = jinja2.Environment(loader=templateLoader, keep_trailing_newline=False)
    template = templateEnv.get_template(self._template_file)

    # add the template variables
    templateVars = {'title': self._name,
                    'plot_table': plot_table,
                    'num_rows': len(self._labels),
                    }

    # render the html template using the plot tables
    self._html = template.render(templateVars)

    return self._html
def check_prerequisites(self, env, scomm):
    """ check prerequisites

    Derives the env vars expected by the NCL plotting routines, creates the
    working directory on the manager rank, and links the SSMI and ASPeCt
    observation files into the climo directory.

    Arguments:
        env (dictionary) - diagnostics environment (mutated in place)
        scomm - simple communicator; filesystem work is manager-only

    Return:
        env (dictionary) - the updated environment
    """
    print(" Checking prerequisites for : {0}".format(self.__class__.__name__))
    super(modelVsObs, self).check_prerequisites(env, scomm)
    # Set some new env variables
    # note - some of these env vars are for model vs. model but are expected by the
    # NCL routines web_hem_clim.ncl and web_reg_clim.ncl that is called by both
    # diag classes
    env['DIAG_CODE'] = env['NCLPATH']
    env['DIAG_HOME'] = env['NCLPATH']
    print('DEBUG: model_vs_obs env[DIAG_HOME] = {0}'.format(env['DIAG_HOME']))
    env['DIAG_ROOT'] = '{0}/{1}-{2}/'.format(env['DIAG_ROOT'], env['CASE_TO_CONT'], 'obs')
    env['WKDIR'] = env['DIAG_ROOT']
    env['WORKDIR'] = env['WKDIR']
    # only the manager rank creates the working directory
    if scomm.is_manager():
        if not os.path.exists(env['WKDIR']):
            os.makedirs(env['WKDIR'])
    env['PATH_PLOT'] = env['CLIMO_CONT']
    ## env['seas'] = ['jfm','fm','amj','jas','ond','on','ann']
    # first year of the averaging window, counted back from the control end year
    env['YR_AVG_FRST'] = str((int(env['ENDYR_CONT']) - int(env['YRS_TO_AVG'])) + 1)
    env['YR_AVG_LAST'] = env['ENDYR_CONT']
    env['VAR_NAMES'] = env['VAR_NAME_TYPE_CONT']
    env['YR1'] = env['BEGYR_CONT']
    env['YR2'] = env['ENDYR_CONT']
    env['YR1_DIFF'] = env['BEGYR_DIFF']
    env['YR2_DIFF'] = env['ENDYR_DIFF']
    env['PRE_PROC_ROOT_CONT'] = env['PATH_CLIMO_CONT']
    env['PRE_PROC_ROOT_DIFF'] = env['PATH_CLIMO_DIFF']

    # Link obs files into the climo directory
    if (scomm.is_manager()):
        # SSMI
        new_ssmi_fn = env['PATH_CLIMO_CONT'] + '/' + os.path.basename(env['SSMI_PATH'])
        rc1, err_msg1 = cesmEnvLib.checkFile(new_ssmi_fn, 'read')
        if not rc1:
            os.symlink(env['SSMI_PATH'], new_ssmi_fn)
        # ASPeCt
        # NOTE(review): 'new_ssmi_fn' is reused for the ASPeCt link path
        new_ssmi_fn = env['PATH_CLIMO_CONT'] + '/' + os.path.basename(env['ASPeCt_PATH'])
        rc1, err_msg1 = cesmEnvLib.checkFile(new_ssmi_fn, 'read')
        if not rc1:
            os.symlink(env['ASPeCt_PATH'], new_ssmi_fn)
    scomm.sync()
    return env
def buildAtmAvgList(start_year, stop_year, avgFileBaseName, out_dir, envDict, debugMsg):
    """buildAtmAvgList - build the list of averages to be computed
    by the pyAverager. Checks if the file exists or not already.

    Arguments:
        start_year (string) - starting year
        stop_year (string) - ending year
        avgFileBaseName (string) - avgFileBaseName (out_dir/case.[stream].)
        out_dir (string) - output directory (unused here; kept for call compatibility)
        envDict (dictionary) - diagnostics environment
        debugMsg - debug message callable

    Return:
        avgList (list) - list of averages to be passed to the pyaverager
    """
    avgList = []

    # Seasonal Files - significance runs request sig+mean, otherwise dependency avgs
    if envDict['significance'] == 'True':
        for seas in envDict['seas']:
            avgFile = '{0}.{1}-{2}.{3}_climo.nc'.format(avgFileBaseName, start_year, stop_year, seas)
            rc, err_msg = cesmEnvLib.checkFile(avgFile, 'read')
            if not rc:
                avgList.append('{0}_sig:{1}:{2}'.format(seas.lower(), start_year, stop_year))
            meanAvgFile = '{0}.{1}-{2}.{3}_mean.nc'.format(avgFileBaseName, start_year, stop_year, seas)
            rc, err_msg = cesmEnvLib.checkFile(meanAvgFile, 'read')
            if not rc:
                avgList.append('{0}_mean:{1}:{2}'.format(seas.lower(), start_year, stop_year))
    else:
        for seas in envDict['seas']:
            avgFile = '{0}.{1}-{2}.{3}_climo.nc'.format(avgFileBaseName, start_year, stop_year, seas)
            rc, err_msg = cesmEnvLib.checkFile(avgFile, 'read')
            if not rc:
                avgList.append('dep_{0}:{1}:{2}'.format(seas.lower(), start_year, stop_year))

    # Monthly Files
    m_names = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
    for m in range(1, 13):
        month = str(m).zfill(2)
        avgFile = '{0}.{1}-{2}.{3}_climo.nc'.format(avgFileBaseName, start_year, stop_year, month)
        rc, err_msg = cesmEnvLib.checkFile(avgFile, 'read')
        if not rc:
            avgList.append('{0}:{1}:{2}'.format(m_names[m - 1], start_year, stop_year))

    # add WACCM zonal averages
    if envDict['test_compute_zonalAvg'] == 'True' or envDict['cntl_compute_zonalAvg'] == 'True':
        avgList.append('zonalavg:{0}:{1}'.format(start_year, stop_year))

    # BUGFIX: 'main_comm' is not a parameter of this function, so the original
    # 'if main_comm.is_manager():' guard raised NameError. Call debugMsg directly.
    debugMsg('exit buildAtmAvgList avgList = {0}'.format(avgList))

    return avgList
def buildAtmAvgList(start_year, stop_year, avgFileBaseName, out_dir, envDict, debugMsg):
    """buildAtmAvgList - build the list of averages to be computed
    by the pyAverager. Checks if the file exists or not already.

    Arguments:
        start_year (string) - starting year
        stop_year (string) - ending year
        avgFileBaseName (string) - avgFileBaseName (out_dir/case.[stream].)
        out_dir (string) - output directory (unused here; kept for call compatibility)
        envDict (dictionary) - diagnostics environment
        debugMsg - debug message callable

    Return:
        avgList (list) - list of averages to be passed to the pyaverager
    """
    avgList = []

    # Seasonal Files - significance runs request sig+mean, otherwise dependency avgs
    if envDict['significance'] == 'True':
        for seas in envDict['seas']:
            avgFile = '{0}.{1}-{2}.{3}_climo.nc'.format(avgFileBaseName, start_year, stop_year, seas)
            rc, err_msg = cesmEnvLib.checkFile(avgFile, 'read')
            if not rc:
                avgList.append('{0}_sig:{1}:{2}'.format(seas.lower(), start_year, stop_year))
            meanAvgFile = '{0}.{1}-{2}.{3}_mean.nc'.format(avgFileBaseName, start_year, stop_year, seas)
            rc, err_msg = cesmEnvLib.checkFile(meanAvgFile, 'read')
            if not rc:
                avgList.append('{0}_mean:{1}:{2}'.format(seas.lower(), start_year, stop_year))
    else:
        for seas in envDict['seas']:
            avgFile = '{0}.{1}-{2}.{3}_climo.nc'.format(avgFileBaseName, start_year, stop_year, seas)
            rc, err_msg = cesmEnvLib.checkFile(avgFile, 'read')
            if not rc:
                avgList.append('dep_{0}:{1}:{2}'.format(seas.lower(), start_year, stop_year))

    # Monthly Files
    m_names = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
    for m in range(1, 13):
        month = str(m).zfill(2)
        avgFile = '{0}.{1}-{2}.{3}_climo.nc'.format(avgFileBaseName, start_year, stop_year, month)
        rc, err_msg = cesmEnvLib.checkFile(avgFile, 'read')
        if not rc:
            avgList.append('{0}:{1}:{2}'.format(m_names[m - 1], start_year, stop_year))

    # add WACCM zonal averages
    if envDict['test_compute_zonalAvg'] == 'True' or envDict['cntl_compute_zonalAvg'] == 'True':
        avgList.append('zonalavg:{0}:{1}'.format(start_year, stop_year))

    # BUGFIX: 'main_comm' is not a parameter of this function, so the original
    # 'if main_comm.is_manager():' guard raised NameError. Call debugMsg directly.
    debugMsg('exit buildAtmAvgList avgList = {0}'.format(avgList))

    return avgList
def _create_html(self, workdir, templatePath, imgFormat):
    """Creates and renders html that is returned to the calling wrapper

    Builds one row per label: the label itself, a link to the plot image,
    and a link to the matching ascii table file.  Entries whose file is
    missing in workdir are rendered as '<name> - Error' instead of a link.

    Arguments:
        workdir (string) - directory containing the generated plots/tables
        templatePath (string) - jinja2 template search path
        imgFormat (string) - image file extension (e.g. png)

    Return:
        self._html (string) - rendered html fragment
    """
    plot_table = []
    num_cols = 3  # NOTE(review): appears unused here - verify against the template
    for i in range(len(self._labels)):
        plot_tuple_list = []
        plot_tuple = (0, 'label', '{0}:'.format(self._labels[i]))
        plot_tuple_list.append(plot_tuple)
        plot_list = self._expectedPlots  # NOTE(review): unused local
        # create the image link
        img_file = '{0}.{1}'.format(self._expectedPlots[i], imgFormat)
        rc, err_msg = cesmEnvLib.checkFile('{0}/{1}'.format(workdir, img_file), 'read')
        if not rc:
            plot_tuple = (i + 1, 'timeseries', '{0} - Error'.format(img_file))
        else:
            plot_tuple = (i + 1, 'timeseries', img_file)
        plot_tuple_list.append(plot_tuple)
        # create the ascii file link
        asc_file = '{0}.{1}'.format(self._expectedPlots[i], 'txt')
        rc, err_msg = cesmEnvLib.checkFile('{0}/{1}'.format(workdir, asc_file), 'read')
        if not rc:
            plot_tuple = (i + 1, 'table', '{0} - Error'.format(asc_file))
        else:
            plot_tuple = (i + 1, 'table', asc_file)
        plot_tuple_list.append(plot_tuple)
        # NOTE(review): leftover debug print - consider removing
        print('DEBUG... plot_tuple_list[{0}] = {1}'.format(i, plot_tuple_list))
        plot_table.append(plot_tuple_list)

    # create a jinja2 template object
    templateLoader = jinja2.FileSystemLoader(searchpath=templatePath)
    templateEnv = jinja2.Environment(loader=templateLoader, keep_trailing_newline=False)
    template = templateEnv.get_template(self._template_file)

    # add the template variables
    templateVars = {'title': self._name,
                    'plot_table': plot_table,
                    'num_rows': len(self._labels),
                    }

    # render the html template using the plot tables
    self._html = template.render(templateVars)

    return self._html
def check_prerequisites(self, env):
    """Verify the temperature and salinity observation files are readable.

    Raises:
        OSError - when TOBSFILE or SOBSFILE cannot be read from TSOBSDIR
    """
    super(RegionalArea, self).check_prerequisites(env)
    print(' Checking prerequisites for : {0}'.format(self.__class__.__name__))

    # both observation files must exist and be readable before plotting
    for obsKey in ('TOBSFILE', 'SOBSFILE'):
        obsPath = '{0}/{1}'.format(env['TSOBSDIR'], env[obsKey])
        readable, err_msg = cesmEnvLib.checkFile(obsPath, 'read')
        if not readable:
            raise OSError(err_msg)
def check_prerequisites(self, env, scomm):
    """ check prerequisites

    Derives the env vars expected by the NCL plotting routines, creates the
    working directory on the manager rank, and links the SSMI and ASPeCt
    observation files into the climo directory.

    Arguments:
        env (dictionary) - diagnostics environment (mutated in place)
        scomm - simple communicator; filesystem work is manager-only

    Return:
        env (dictionary) - the updated environment
    """
    print(" Checking prerequisites for : {0}".format(self.__class__.__name__))
    super(modelVsObs, self).check_prerequisites(env, scomm)
    # Set some new env variables
    # note - some of these env vars are for model vs. model but are expected by the
    # NCL routines web_hem_clim.ncl and web_reg_clim.ncl that is called by both
    # diag classes
    env['DIAG_CODE'] = env['NCLPATH']
    env['DIAG_HOME'] = env['NCLPATH']
    print('DEBUG: model_vs_obs env[DIAG_HOME] = {0}'.format(env['DIAG_HOME']))
    env['DIAG_ROOT'] = '{0}/{1}-{2}/'.format(env['DIAG_ROOT'], env['CASE_TO_CONT'], 'obs')
    env['WKDIR'] = env['DIAG_ROOT']
    env['WORKDIR'] = env['WKDIR']
    # only the manager rank creates the working directory
    if scomm.is_manager():
        if not os.path.exists(env['WKDIR']):
            os.makedirs(env['WKDIR'])
    env['PATH_PLOT'] = env['CLIMO_CONT']
    ## env['seas'] = ['jfm','fm','amj','jas','ond','on','ann']
    # first year of the averaging window, counted back from the control end year
    env['YR_AVG_FRST'] = str((int(env['ENDYR_CONT']) - int(env['YRS_TO_AVG'])) + 1)
    env['YR_AVG_LAST'] = env['ENDYR_CONT']
    env['VAR_NAMES'] = env['VAR_NAME_TYPE_CONT']
    env['YR1'] = env['BEGYR_CONT']
    env['YR2'] = env['ENDYR_CONT']
    env['YR1_DIFF'] = env['BEGYR_DIFF']
    env['YR2_DIFF'] = env['ENDYR_DIFF']
    env['PRE_PROC_ROOT_CONT'] = env['PATH_CLIMO_CONT']
    env['PRE_PROC_ROOT_DIFF'] = env['PATH_CLIMO_DIFF']

    # Link obs files into the climo directory
    if (scomm.is_manager()):
        # SSMI
        new_ssmi_fn = env['PATH_CLIMO_CONT'] + '/' + os.path.basename(env['SSMI_PATH'])
        rc1, err_msg1 = cesmEnvLib.checkFile(new_ssmi_fn, 'read')
        if not rc1:
            os.symlink(env['SSMI_PATH'], new_ssmi_fn)
        # ASPeCt
        # NOTE(review): 'new_ssmi_fn' is reused for the ASPeCt link path
        new_ssmi_fn = env['PATH_CLIMO_CONT'] + '/' + os.path.basename(env['ASPeCt_PATH'])
        rc1, err_msg1 = cesmEnvLib.checkFile(new_ssmi_fn, 'read')
        if not rc1:
            os.symlink(env['ASPeCt_PATH'], new_ssmi_fn)
    scomm.sync()
    return env
def generate_plots(self, env):
    """Put commands to generate plot here!

    Copies each registered ncl script (with the TS_CPL value prepended to
    its name) from NCLPATH into WORKDIR and executes it via 'ncl'.
    Failures print a warning and processing continues with the next plot.

    Arguments:
        env (dictionary) - diagnostics environment; also passed as the
                           subprocess environment so NCL sees its settings
    """
    print(' Generating diagnostic plots for : {0}'.format(self.__class__.__name__))
    # chdir into the working directory
    os.chdir(env['WORKDIR'])
    for ncl in self._ncl:
        # prepend the TS_CPL log value to the ncl plot name
        nclPlotFile = 'cpl{0}_{1}'.format(env['TS_CPL'], ncl)
        # copy the NCL command to the workdir
        shutil.copy2('{0}/{1}'.format(env['NCLPATH'], nclPlotFile), '{0}/{1}'.format(env['WORKDIR'], nclPlotFile))
        nclFile = '{0}/{1}'.format(env['WORKDIR'], nclPlotFile)
        rc, err_msg = cesmEnvLib.checkFile(nclFile, 'read')
        if rc:
            try:
                print(' calling NCL plot routine {0}'.format(nclPlotFile))
                subprocess.check_call(['ncl', '{0}'.format(nclFile)], env=env)
            except subprocess.CalledProcessError as e:
                # report the failure but keep generating the remaining plots
                print('WARNING: {0} call to {1} failed with error:'.format(self.name(), nclFile))
                print(' {0}'.format(e.cmd))
                print(' rc = {0}'.format(e.returncode))
        else:
            print('{0}... continuing with additional plots.'.format(err_msg))
def generate_ncl_plots(env, nclPlotFile):
    """generate_plots_call - call a nclPlotFile via subprocess call

    Arguments:
        env (dictionary) - diagnostics system environment; also used as the
                           subprocess environment
        nclPlotFile (string) - ncl plotting file name

    Return:
        0 always; a missing NCL file only prints a warning
    """
    # check if the nclPlotFile exists -
    # don't exit if it does not exists just print a warning.
    nclFile = '{0}/{1}'.format(env['NCLPATH'], nclPlotFile)
    print('Calling NCL routine {0} from {1}'.format(nclFile, env['WORKDIR']))
    rc, err_msg = cesmEnvLib.checkFile(nclFile, 'read')
    if rc:
        try:
            pipe = subprocess.Popen(['ncl {0}'.format(nclFile)], cwd=env['WORKDIR'], env=env, shell=True, stdout=subprocess.PIPE)
            output = pipe.communicate()[0]
            print('NCL routine {0} \n {1}'.format(nclFile, output))
            # BUGFIX: removed the 'while pipe.poll() is None: time.sleep(0.5)'
            # busy-wait that followed - communicate() already blocks until the
            # process terminates, so that loop was dead code.
        except OSError as e:
            print('WARNING', e.errno, e.strerror)
    else:
        print('{0}... continuing with additional NCL calls.'.format(err_msg))

    return 0
def generate_plots(self, env):
    """Put commands to generate plot here!

    For high-resolution grids (tx0.1v2/tx0.1v3) the expected transport plots
    and input ascii files are first replaced with the high-res set.  Each
    ncl script in self._ncl is then copied to WORKDIR and executed via 'ncl';
    failures print a warning and processing continues.

    Arguments:
        env (dictionary) - diagnostics environment; also passed as the
                           subprocess environment so NCL sees its settings
    """
    print(' Generating diagnostic plots for : {0}'.format(self.__class__.__name__))
    # chdir into the working directory
    os.chdir(env['WORKDIR'])
    # update POP transport plots if high-res
    if (env['RESOLUTION'] == 'tx0.1v2' or env['RESOLUTION'] == 'tx0.1v3'):
        self._expectedPlots_transportDiags = [('Drake_Passage', 'diagts_transport.drake'), ('Mozambique_Channel', 'diagts_transport.mozam'), ('Bering_Strait', 'diagts_transport.bering'), ('Indonesian_Throughflow', 'diagts_transport.itf'), ('Windward_Passage', 'diagts_transport.windward'), ('Florida_Strait', 'diagts_transport.florida')]
        self._expectedInFiles = ['diagts_3d.asc', 'diagts_cfc.asc', 'diagts_ecosys.asc', 'diagts_fwflux.asc', 'diagts_hflux.asc', 'diagts_info.asc', 'diagts_precfactor.asc', 'diagts_nino.asc', 'transports.bering.asc', 'transports.drake.asc', 'transports.florida.asc', 'transports.itf.asc', 'transports.mozambique.asc', 'transports.windward.asc']
    for nclPlotFile in self._ncl:
        # copy the NCL command to the workdir
        shutil.copy2('{0}/{1}'.format(env['NCLPATH'], nclPlotFile), '{0}/{1}'.format(env['WORKDIR'], nclPlotFile))
        nclFile = '{0}/{1}'.format(env['WORKDIR'], nclPlotFile)
        rc, err_msg = cesmEnvLib.checkFile(nclFile, 'read')
        if rc:
            try:
                print(' calling NCL plot routine {0}'.format(nclPlotFile))
                subprocess.check_call(['ncl', '{0}'.format(nclFile)], env=env)
            except subprocess.CalledProcessError as e:
                # report the failure but keep generating the remaining plots
                print('WARNING: {0} call to {1} failed with error:'.format(self.name(), nclFile))
                print(' {0}'.format(e.cmd))
                print(' rc = {0}'.format(e.returncode))
        else:
            print('{0}... continuing with additional plots.'.format(err_msg))
def _create_html(self, workdir, templatePath, imgFormat):
    """Creates and renders html that is returned to the calling wrapper

    Builds a label -> plot-file table from self._webPlotsDict; entries whose
    image file is missing in workdir are rendered as '<name> - Error'.

    Arguments:
        workdir (string) - directory containing the generated plot images
        templatePath (string) - jinja2 template search path
        imgFormat (string) - image file extension (e.g. png)

    Return:
        (self._shortname, self._html) - page short name and rendered html
    """
    num_cols = 2
    plot_table = dict()

    # COMPAT FIX: dict.iteritems() does not exist in Python 3; items()
    # iterates identically here under both Python 2 and 3.
    for plot_file, label in self._webPlotsDict.items():
        img_file = '{0}.{1}'.format(plot_file, imgFormat)
        rc, err_msg = cesmEnvLib.checkFile('{0}/{1}'.format(workdir, img_file), 'read')
        if not rc:
            plot_table[label] = '{0} - Error'.format(plot_file)
        else:
            plot_table[label] = plot_file

    # create a jinja2 template object
    templateLoader = jinja2.FileSystemLoader(searchpath=templatePath)
    templateEnv = jinja2.Environment(loader=templateLoader, keep_trailing_newline=False)
    template = templateEnv.get_template(self._template_file)

    # add the template variables
    templateVars = {'title': self._name,
                    'cols': num_cols,
                    'plot_table': plot_table,
                    'imgFormat': imgFormat
                    }

    # render the html template using the plot tables
    self._html = template.render(templateVars)

    return self._shortname, self._html
def generate_plots(self, env):
    """Put commands to generate plot here!

    Copies each registered ncl script from NCLPATH into WORKDIR and executes
    it via 'ncl'.  Failures print a warning and processing continues.

    Arguments:
        env (dictionary) - diagnostics environment; also passed as the
                           subprocess environment so NCL sees its settings
    """
    print(' Generating diagnostic plots for : {0}'.format(self.__class__.__name__))
    # chdir into the working directory
    os.chdir(env['WORKDIR'])
    for nclPlotFile in self._ncl:
        # copy the NCL command to the workdir
        shutil.copy2('{0}/{1}'.format(env['NCLPATH'], nclPlotFile), '{0}/{1}'.format(env['WORKDIR'], nclPlotFile))
        nclFile = '{0}/{1}'.format(env['WORKDIR'], nclPlotFile)
        rc, err_msg = cesmEnvLib.checkFile(nclFile, 'read')
        if rc:
            try:
                print(' calling NCL plot routine {0}'.format(nclPlotFile))
                subprocess.check_call(['ncl', '{0}'.format(nclFile)], env=env)
            except subprocess.CalledProcessError as e:
                # BUGFIX: the original referenced undefined name 'nclfile'
                # here, raising NameError instead of printing the warning.
                print('WARNING: {0} call to {1} failed with error:'.format(self.name(), nclFile))
                print(' {0}'.format(e.cmd))
                print(' rc = {0}'.format(e.returncode))
        else:
            print('{0}... continuing with additional plots.'.format(err_msg))
def _create_html(self, workdir, templatePath, imgFormat):
    """Creates and renders html that is returned to the calling wrapper

    Builds a label -> plot-file table from self._webPlotsDict; entries whose
    image file is missing in workdir are rendered as '<name> - Error'.

    Arguments:
        workdir (string) - directory containing the generated plot images
        templatePath (string) - jinja2 template search path
        imgFormat (string) - image file extension (e.g. png)

    Return:
        self._html (string) - rendered html fragment
    """
    num_cols = 1
    plot_table = dict()

    # COMPAT FIX: dict.iteritems() does not exist in Python 3; items()
    # iterates identically here under both Python 2 and 3.
    for label, plot_file in self._webPlotsDict.items():
        img_file = '{0}.{1}'.format(plot_file, imgFormat)
        rc, err_msg = cesmEnvLib.checkFile('{0}/{1}'.format(workdir, img_file), 'read')
        if not rc:
            plot_table[label] = '{0} - Error'.format(plot_file)
        else:
            plot_table[label] = plot_file

    # create a jinja2 template object
    templateLoader = jinja2.FileSystemLoader(searchpath=templatePath)
    templateEnv = jinja2.Environment(loader=templateLoader, keep_trailing_newline=False)
    template = templateEnv.get_template(self._template_file)

    # add the template variables
    templateVars = {'title': self._name,
                    'cols': num_cols,
                    'plot_table': plot_table,
                    'imgFormat': imgFormat
                    }

    # render the html template using the plot tables
    self._html = template.render(templateVars)

    return self._html
def buildIceAvgList(avg_start_year, avg_stop_year, avgFileBaseName, out_dir, envDict, debugMsg):
    """Build the list of ice seasonal averages for the pyAverager to compute.

    One dependency average is requested per season in envDict['seas'],
    skipped when the corresponding climo file already exists.

    Arguments:
        avg_start_year (string) - starting year
        avg_stop_year (string) - ending year
        avgFileBaseName (string) - avgFileBaseName (out_dir/case.[stream].)
        out_dir (string) - output directory (unused here)
        envDict (dictionary) - diagnostics environment; 'seas' lists the seasons
        debugMsg - debug message callable

    Return:
        avgList (list) - list of averages to be passed to the pyaverager
    """
    neededAvgs = []

    # request a dependency average for every season whose climo file is missing
    for season in envDict['seas']:
        climoPath = '{0}.{1}-{2}.{3}_climo.nc'.format(avgFileBaseName, avg_start_year, avg_stop_year, season)
        exists, err_msg = cesmEnvLib.checkFile(climoPath, 'read')
        if not exists:
            neededAvgs.append('dep_{0}:{1}:{2}'.format(season.lower(), avg_start_year, avg_stop_year))

    debugMsg('exit buildIceAvgList avgList = {0}'.format(neededAvgs))
    return neededAvgs
def _convert_plots(self, workdir, imgFormat, files):
    """ This method converts the postscript plots to imgFormat

    Runs ImageMagick's 'convert' on each <name>.ps in workdir to produce
    <name>.<imgFormat>, first removing any already-existing image so it is
    regenerated.  Only 'png' and 'gif' targets are supported; otherwise a
    warning is printed and nothing is converted.

    Arguments:
        workdir (string) - directory containing the .ps files
        imgFormat (string) - target image format ('png' or 'gif')
        files (list) - plot file basenames without the .ps extension
    """
    splitPath = list()  # NOTE(review): unused local
    psFiles = list()
    psFiles = sorted(files)
    # check if the convert command exists
    rc = cesmEnvLib.which('convert')
    if rc is not None and imgFormat.lower() in ['png', 'gif']:
        for psFile in psFiles:
            sourceFile = '{0}/{1}.ps'.format(workdir, psFile)
            #print('...... convert source file {0}'.format(sourceFile))
            # check if the sourceFile exists
            rc, err_msg = cesmEnvLib.checkFile(sourceFile, 'read')
            # check if the image file already exists and remove it to regen
            imgFile = '{0}/{1}.{2}'.format(workdir, psFile, imgFormat)
            rc1, err_msg1 = cesmEnvLib.checkFile(imgFile, 'write')
            # only remove when the source exists AND the old image is writable
            if rc and rc1:
                #print('...... removing {0} before recreating'.format(imgFile))
                os.remove(imgFile)
            # convert the image from ps to imgFormat
            try:
                #print('before subprocess')
                pipe = subprocess.check_call(['convert', '-trim', '-bordercolor', 'white', '-border', '5x5', '-density', '95', '{0}'.format(sourceFile), '{0}'.format(imgFile)])
                #print('...... created {0} size = {1}'.format(imgFile, os.path.getsize(imgFile)))
            except subprocess.CalledProcessError as e:
                print('...... failed to create {0}'.format(imgFile))
                print('WARNING: convert_plots call to convert failed with error:')
                print(' {0}'.format(e.output))
            else:
                continue
    else:
        print('WARNING: convert_plots unable to find convert command in path.')
        print(' Unable to convert ps formatted plots to {0}'.format(imgFormat))
def buildOcnTseriesAvgList(start_year, stop_year, avgFileBaseName, moc, main_comm, debugMsg):
    """buildOcnTseriesAvgList - build the list of averages to be computed
    by the pyAverager for timeseries. Checks if the file exists or not already.

    Arguments:
        start_year (string) - tseries starting year
        stop_year (string) - tseries ending year
        avgFileBaseName (string) - avgFileBaseName (tavgdir/case.[stream].)
        moc - truthy when the MOC and monthly MOC averages are required
        main_comm - communicator; debug output is restricted to the manager
        debugMsg - debug message callable

    Return:
        avgList (list) - horizontal-mean averages for the pyaverager
        avgListMoc (list) - MOC averages for the pyaverager
    """
    avgList = []
    avgListMoc = []

    # append the horizontal mean concatenation - always requested
    avgList.append('hor.meanConcat:{0}:{1}'.format(start_year, stop_year))

    # the following averages are necessary for model timeseries diagnostics
    # append the MOC and monthly MOC files
    if (moc):
        avgFile = '{0}.{1}-{2}.moc.nc'.format(avgFileBaseName, start_year, stop_year)
        if main_comm.is_manager():
            debugMsg('mocFile = {0}'.format(avgFile), header=True, verbosity=2)
        rc, err_msg = cesmEnvLib.checkFile(avgFile, 'read')
        if not rc:
            avgListMoc.append('moc:{0}:{1}'.format(start_year, stop_year))

        avgFile = '{0}.{1}-{2}.mocm.nc'.format(avgFileBaseName, start_year, stop_year)
        if main_comm.is_manager():
            debugMsg('mocmFile = {0}'.format(avgFile), header=True, verbosity=2)
        rc, err_msg = cesmEnvLib.checkFile(avgFile, 'read')
        if not rc:
            avgListMoc.append('mocm:{0}:{1}'.format(start_year, stop_year))

    if main_comm.is_manager():
        # BUGFIX: the exit message previously named the wrong function
        # ('buildOcnAvgTseriesList'); corrected to match this function's name.
        debugMsg('exit buildOcnTseriesAvgList avgList = {0}'.format(avgList), header=True, verbosity=2)

    return avgList, avgListMoc
def buildLndAvgList(climo, avg_start_year, avg_stop_year, trends, trends_start_year, trends_stop_year, avgFileBaseName, out_dir, envDict, debugMsg):
    """buildLndAvgList - build the list of averages to be computed
    by the pyAverager. Checks if the file exists or not already.

    Arguments:
        climo (string) - 'True' when climatology averages are requested
        avg_start_year (string) - starting year
        avg_stop_year (string) - ending year
        trends (string) - 'True' when trend averages are requested
        trends_start_year (string) - trends starting year
        trends_stop_year (string) - trends ending year
        avgFileBaseName (string) - avgFileBaseName (out_dir/case.[stream].)
        out_dir (string) - output directory (unused here; kept for call compatibility)
        envDict (dictionary) - diagnostics environment; 'seas' lists the seasons
        debugMsg - debug message callable (unused here)

    Return:
        avgList (list) - list of averages to be passed to the pyaverager
    """
    avgList = []
    if (climo == 'True'):
        # Seasonal Files - one climo and one means file per season
        for seas in envDict['seas']:
            avgFile = '{0}.{1}-{2}.{3}_climo.nc'.format(avgFileBaseName, avg_start_year, avg_stop_year, seas)
            rc, err_msg = cesmEnvLib.checkFile(avgFile, 'read')
            if not rc:
                # 'ann' requests a significance average; all other seasons a dependency average
                if seas == 'ann':
                    avgList.append('ann_sig:{0}:{1}'.format(avg_start_year, avg_stop_year))
                else:
                    avgList.append('dep_{0}:{1}:{2}'.format(seas.lower(), avg_start_year, avg_stop_year))
            # seasonal means
            avgFile = '{0}.{1}-{2}.{3}_means.nc'.format(avgFileBaseName, avg_start_year, avg_stop_year, seas)
            rc, err_msg = cesmEnvLib.checkFile(avgFile, 'read')
            if not rc:
                avgList.append('{0}_mean:{1}:{2}'.format(seas.lower(), avg_start_year, avg_stop_year))
        # Mons File - all monthly climatologies in one file
        avgFile = '{0}.{1}-{2}.MONS_climo.nc'.format(avgFileBaseName, avg_start_year, avg_stop_year)
        rc, err_msg = cesmEnvLib.checkFile(avgFile, 'read')
        if not rc:
            avgList.append('mons:{0}:{1}'.format(avg_start_year, avg_stop_year))
    # Trends - annual averages over the (possibly different) trends period
    if (trends == 'True'):
        avgFile = '{0}.{1}-{2}.ANN_ALL.nc'.format(avgFileBaseName, trends_start_year, trends_stop_year)
        rc, err_msg = cesmEnvLib.checkFile(avgFile, 'read')
        if not rc:
            avgList.append('annall:{0}:{1}'.format(trends_start_year, trends_stop_year))
    return avgList
def createSymLink(sourceFile, linkFile):
    """Create a symbolic link at linkFile pointing to sourceFile.

    The link is only created when linkFile does not already exist.

    Raises:
        RuntimeError - when sourceFile is not readable
        OSError - when the symlink cannot be created
    """
    # the source must exist and be readable
    readable, err_msg = cesmEnvLib.checkFile(sourceFile, 'read')
    if not readable:
        raise RuntimeError(err_msg)

    # skip creation when the link target is already present
    linkExists, err_msg = cesmEnvLib.checkFile(linkFile, 'read')
    if linkExists:
        return

    try:
        os.symlink(sourceFile, linkFile)
    except Exception as e:
        print('...createSymLink error = {0}'.format(e))
        raise OSError(e)
    return
def createSymLink(sourceFile, linkFile):
    """Create a symbolic link at linkFile pointing to sourceFile.

    The link is only created when linkFile does not already exist.

    Raises:
        RuntimeError - when sourceFile is not readable
        OSError - when the symlink cannot be created
    """
    # the source must exist and be readable
    readable, err_msg = cesmEnvLib.checkFile(sourceFile, 'read')
    if not readable:
        raise RuntimeError(err_msg)

    # skip creation when the link target is already present
    linkExists, err_msg = cesmEnvLib.checkFile(linkFile, 'read')
    if linkExists:
        return

    try:
        os.symlink(sourceFile, linkFile)
    except Exception as e:
        print('...createSymLink error = {0}'.format(e))
        raise OSError(e)
    return
def check_prerequisites(self, env):
    """Report any expected coupler-log text file missing from the workdir.

    Missing files only produce a warning; plotting continues regardless.
    """
    super(CplLog, self).check_prerequisites(env)
    print(" Checking prerequisites for : {0}".format(self.__class__.__name__))

    for expected in self._expectedPlots:
        textPath = '{0}/{1}.txt'.format(env['WORKDIR'], expected)
        found, err_msg = cesmEnvLib.checkFile(textPath, 'read')
        if found:
            continue
        print('{0}... continuing with additional plots.'.format(err_msg))
def check_prerequisites(self, env):
    """Report any expected PopLog input ascii file missing from the workdir.

    Missing files only produce a warning; plotting continues regardless.
    """
    super(PopLog, self).check_prerequisites(env)
    print(" Checking prerequisites for : {0}".format(self.__class__.__name__))

    for expected in self._expectedInFiles:
        inPath = '{0}/{1}'.format(env['WORKDIR'], expected)
        found, err_msg = cesmEnvLib.checkFile(inPath, 'read')
        if found:
            continue
        print('{0}... continuing with additional plots.'.format(err_msg))
def check_prerequisites(self, env):
    """ check prerequisites

    Cleans stale plot files from the workdir, writes plot.dat, selects the
    zonal-average grid file based on resolution and vertical levels, turns
    off plot modules unsupported at high resolution, and generates the
    global zonal average file.

    Arguments:
        env (dictionary) - diagnostics environment (mutated in place)

    Return:
        env (dictionary) - the updated environment

    Raises:
        ocn_diags_bc.PrerequisitesError - when the grid file is unreadable
    """
    print(" Checking prerequisites for : {0}".format(self.__class__.__name__))
    super(modelVsObs, self).check_prerequisites(env)

    # clean out the old working plot files from the workdir
    if env['CLEANUP_FILES'].upper() in ['T', 'TRUE']:
        # FIX: raw strings for the regex patterns - '\.' in a plain string
        # literal is an invalid escape sequence (SyntaxWarning on newer
        # Python); the patterns themselves are unchanged.
        cesmEnvLib.purge(env['WORKDIR'], r'.*\.pro')
        cesmEnvLib.purge(env['WORKDIR'], r'.*\.gif')
        cesmEnvLib.purge(env['WORKDIR'], r'.*\.dat')
        cesmEnvLib.purge(env['WORKDIR'], r'.*\.ps')
        cesmEnvLib.purge(env['WORKDIR'], r'.*\.png')
        cesmEnvLib.purge(env['WORKDIR'], r'.*\.html')

    # create the plot.dat file in the workdir used by all NCL plotting routines
    diagUtilsLib.create_plot_dat(env['WORKDIR'], env['XYRANGE'], env['DEPTHS'])

    # setup the gridfile based on the resolution and vertical levels
    os.environ['gridfile'] = '{0}/omwg/za_grids/{1}_grid_info.nc'.format(env['DIAGOBSROOT'], env['RESOLUTION'])
    if env['VERTICAL'] == '42':
        os.environ['gridfile'] = '{0}/omwg/za_grids/{1}_42lev_grid_info.nc'.format(env['DIAGOBSROOT'], env['RESOLUTION'])
    if env['VERTICAL'] == '62':
        os.environ['gridfile'] = '{0}/omwg/za_grids/{1}_62lev_grid_info.nc'.format(env['DIAGOBSROOT'], env['RESOLUTION'])

    # check if gridfile exists and is readable
    rc, err_msg = cesmEnvLib.checkFile(os.environ['gridfile'], 'read')
    if not rc:
        print('model_vs_obs: check_prerequisites could not find gridfile = {0}'.format(os.environ['gridfile']))
        raise ocn_diags_bc.PrerequisitesError

    env['GRIDFILE'] = os.environ['gridfile']

    # check the resolution and decide if some plot modules should be turned off
    if (env['RESOLUTION'] == 'tx0.1v2' or env['RESOLUTION'] == 'tx0.1v3'):
        env['MVO_PM_VELISOPZ'] = os.environ['MVO_PM_VELISOPZ'] = 'FALSE'
        env['MVO_PM_KAPPAZ'] = os.environ['MVO_PM_KAPPAZ'] = 'FALSE'

    # create the global zonal average file used by most of the plotting classes
    print(' model vs. obs - calling create_za')
    diagUtilsLib.create_za(env['WORKDIR'], env['TAVGFILE'], env['GRIDFILE'], env['TOOLPATH'], env)

    return env
def check_prerequisites(self, env):
    """Check that the nino time-series input exists and publish FILE_IN.

    If diagts_nino.asc is readable in the workdir, FILE_IN is set in both
    env and os.environ; otherwise the miss is logged and processing
    continues.
    """
    super(EnsoWavelet, self).check_prerequisites(env)
    print(" Checking prerequisites for : {0}".format(self.__class__.__name__))
    # The NCL wavelet routine reads the nino series from the workdir;
    # it may need modification to honor only the years specified.
    nino_file = '{0}/diagts_nino.asc'.format(env['WORKDIR'])
    readable, message = cesmEnvLib.checkFile(nino_file, 'read')
    if readable:
        env['FILE_IN'] = os.environ['FILE_IN'] = nino_file
    else:
        print('{0}... continuing with additional plots.'.format(message))
def check_prerequisites(self, env):
    """Check that diagts_nino.asc exists and set FILE_IN for the NCL script.

    Arguments:
        env (dict) - diagnostics environment; FILE_IN is added when the
            input file is readable.
    """
    super(EnsoWavelet, self).check_prerequisites(env)
    print(" Checking prerequisites for : {0}".format(
        self.__class__.__name__))
    # set the FILE_IN env var to point to the diagts_nino.asc file in the workdir
    # may need to modify the NCL to only look at years specified.
    diagts_nino = '{0}/diagts_nino.asc'.format(env['WORKDIR'])
    rc, err_msg = cesmEnvLib.checkFile(diagts_nino, 'read')
    if not rc:
        # best-effort: a missing file only skips this plot
        print('{0}... continuing with additional plots.'.format(err_msg))
    else:
        # export to os.environ as well so the NCL subprocess can see it
        env['FILE_IN'] = os.environ['FILE_IN'] = diagts_nino
def setup_workdir(self, env, t, scomm): """This method sets up the unique working directory for a given diagnostic type """ # create the working directory first before calling the base class prerequisites endYr = (int(env[t + '_first_yr']) + int(env[t + '_nyrs'])) - 1 subdir = '{0}.{1}-{2}/{3}.{4}_{5}'.format(env[t + '_casename'], env[t + '_first_yr'], endYr, self._name.lower(), env[t + '_first_yr'], endYr) workdir = '{0}/{1}'.format(env[t + '_path_climo'], subdir) if (scomm.is_manager()): try: os.makedirs(workdir) except OSError as exception: if exception.errno != errno.EEXIST: err_msg = 'ERROR: {0} problem accessing the working directory {1}'.format( self.__class__.__name__, workdir) raise OSError(err_msg) # create symbolic links between the old and new workdir and get the real names of the files old_workdir = env[t + '_path_climo'] + env[t + '_casename'] + '.' + str( env[t + '_first_yr']) + '-' + str(endYr) env[t + '_path_climo'] = workdir # Add links to the new wkrdir that use the expected file names (existing climos have dates, the NCL do not like dates) if (scomm.is_manager()): climo_files = glob.glob(old_workdir + '/*.nc') for climo_file in climo_files: name_split = climo_file.split('.') # Split on '.' if ('-' in name_split[-3]): fn = str.join( '.', name_split[:len(name_split) - 3] + name_split[-2:] ) #Piece together w/o the date, but still has old path path_split = fn.split('/') # Remove the path new_fn = workdir + '/' + path_split[ -1] # Take file name and add it to new path rc1, err_msg1 = cesmEnvLib.checkFile(new_fn, 'read') if not rc1: os.symlink(climo_file, new_fn) if (scomm.is_manager()): print("DEBUG atm_diags_bc: workdir = %s" % workdir) print("DEBUG atm_diags_bc: t = %s" % t) print("DEBUG atm_diags_bc: env[t_path_climo] = %s" % env[t + '_path_climo']) return env
def create_plot_dat(workdir, xyrange, depths):
    """Create the plot.dat file in the workdir, if not already present.

    The file carries the XY range, the number of depths, and the depth
    list, one per line, for the NCL plotting routines.

    Arguments:
        workdir (string) - work directory for plots
        xyrange (string) - env['XYRANGE']
        depths (string) - env['DEPTHS'], comma-separated depth values

    Returns:
        0 always (existing file is left untouched).
    """
    rc, err_msg = cesmEnvLib.checkFile('{0}/plot.dat'.format(workdir), 'read')
    if not rc:
        # context manager guarantees the handle is closed even if a write
        # fails; also avoids shadowing the builtin name 'file'
        with open('{0}/plot.dat'.format(workdir), 'w') as plot_file:
            plot_file.write(xyrange + '\n')
            numdepths = len(depths.split(','))
            plot_file.write(str(numdepths) + '\n')
            plot_file.write(depths + '\n')
    return 0
def create_plot_dat(workdir, xyrange, depths):
    """Create the plot.dat file in the workdir, if not already present.

    The file carries the XY range, the number of depths, and the depth
    list, one per line, for the NCL plotting routines.

    Arguments:
        workdir (string) - work directory for plots
        xyrange (string) - env['XYRANGE']
        depths (string) - env['DEPTHS'], comma-separated depth values

    Returns:
        0 always (existing file is left untouched).
    """
    rc, err_msg = cesmEnvLib.checkFile('{0}/plot.dat'.format(workdir), 'read')
    if not rc:
        # context manager guarantees the handle is closed even if a write
        # fails; also avoids shadowing the builtin name 'file'
        with open('{0}/plot.dat'.format(workdir), 'w') as plot_file:
            plot_file.write(xyrange + '\n')
            numdepths = len(depths.split(','))
            plot_file.write(str(numdepths) + '\n')
            plot_file.write(depths + '\n')
    return 0
def _create_html(self, workdir, templatePath, imgFormat):
    """Creates and renders html that is returned to the calling wrapper.

    Builds a table of plot names (one row per label, num_cols-2 plots per
    row), marking unreadable image files with '- Error', then renders the
    class template with jinja2.

    Arguments:
        workdir (string) - directory containing the generated images
        templatePath (string) - search path for the jinja2 template
        imgFormat (string) - image extension (e.g. 'png')

    Returns:
        (shortname, html) tuple for the calling wrapper.
    """
    plot_table = []
    num_cols = 6
    for label in self._labels:
        plot_list = [label]
        # look up the per-label expected plot list by attribute name;
        # getattr replaces the original eval() - same lookup, no code
        # execution from a formatted string
        exp_plot_list = getattr(self, '_expectedPlots_{0}'.format(label))
        for j in range(num_cols - 2):
            plot_file = exp_plot_list[j]
            img_file = '{0}.{1}'.format(plot_file, imgFormat)
            rc, err_msg = cesmEnvLib.checkFile(
                '{0}/{1}'.format(workdir, img_file), 'read')
            if not rc:
                plot_list.append('{0} - Error'.format(plot_file))
            else:
                plot_list.append(plot_file)
        plot_table.append(plot_list)
    # create a jinja2 template object
    templateLoader = jinja2.FileSystemLoader(searchpath=templatePath)
    templateEnv = jinja2.Environment(loader=templateLoader,
                                     keep_trailing_newline=False)
    template = templateEnv.get_template(self._template_file)
    # add the template variables
    templateVars = {
        'title': self._name,
        'cols': num_cols,
        'plot_table': plot_table,
        'label_list': self._labels,
        'imgFormat': imgFormat
    }
    # render the html template using the plot tables
    self._html = template.render(templateVars)
    return self._shortname, self._html
def check_prerequisites(self, env):
    """Check prerequisites for the ocean model-vs-obs diagnostics.

    Cleans stale plot files, writes plot.dat, selects the zonal-average
    grid file for the resolution/vertical levels, and generates the
    global zonal average file.

    Arguments:
        env (dict) - diagnostics environment; mutated and returned.

    Raises:
        ocn_diags_bc.PrerequisitesError - if the gridfile is unreadable.
    """
    print(" Checking prerequisites for : {0}".format(self.__class__.__name__))
    super(modelVsObs, self).check_prerequisites(env)
    # clean out the old working plot files from the workdir
    if env['CLEANUP_FILES'].upper() in ['T','TRUE']:
        cesmEnvLib.purge(env['WORKDIR'], '.*\.pro')
        cesmEnvLib.purge(env['WORKDIR'], '.*\.gif')
        cesmEnvLib.purge(env['WORKDIR'], '.*\.dat')
        cesmEnvLib.purge(env['WORKDIR'], '.*\.ps')
        cesmEnvLib.purge(env['WORKDIR'], '.*\.png')
        cesmEnvLib.purge(env['WORKDIR'], '.*\.html')
    # create the plot.dat file in the workdir used by all NCL plotting routines
    diagUtilsLib.create_plot_dat(env['WORKDIR'], env['XYRANGE'], env['DEPTHS'])
    # setup the gridfile based on the resolution; 42/62-level variants
    # override the default grid_info file when VERTICAL matches
    os.environ['gridfile'] = '{0}/omwg/za_grids/{1}_grid_info.nc'.format(env['DIAGOBSROOT'],env['RESOLUTION'])
    if env['VERTICAL'] == '42':
        os.environ['gridfile'] = '{0}/omwg/za_grids/{1}_42lev_grid_info.nc'.format(env['DIAGOBSROOT'],env['RESOLUTION'])
    if env['VERTICAL'] == '62':
        os.environ['gridfile'] = '{0}/omwg/za_grids/{1}_62lev_grid_info.nc'.format(env['DIAGOBSROOT'],env['RESOLUTION'])
    # check if gridfile exists and is readable
    rc, err_msg = cesmEnvLib.checkFile(os.environ['gridfile'], 'read')
    if not rc:
        print('model_vs_obs: check_prerequisites could not find gridfile = {0}'.format(os.environ['gridfile']))
        raise ocn_diags_bc.PrerequisitesError
    env['GRIDFILE'] = os.environ['gridfile']
    # high-resolution tripole grids: disable plot modules that do not apply
    if (env['RESOLUTION'] == 'tx0.1v2' or env['RESOLUTION'] == 'tx0.1v3'):
        env['MVO_PM_VELISOPZ'] = os.environ['MVO_PM_VELISOPZ'] = 'FALSE'
        env['MVO_PM_KAPPAZ'] = os.environ['MVO_PM_KAPPAZ'] = 'FALSE'
    # create the global zonal average file used by most of the plotting classes
    print('    model vs. obs - calling create_za')
    diagUtilsLib.create_za( env['WORKDIR'], env['TAVGFILE'], env['GRIDFILE'], env['TOOLPATH'], env)
    return env
def check_prerequisites(self, env, scomm):
    """Check prerequisites for the land model1-vs-model2 diagnostics.

    Sets up the workdir and runtime env variables, then (manager rank
    only) runs the create_var_lists.csh helper script to produce the
    variable list files.

    Arguments:
        env (dict) - diagnostics environment; mutated and returned.
        scomm - simple communicator; filesystem/subprocess work is
            manager-only, followed by a sync.
    """
    print(" Checking prerequisites for : {0}".format(
        self.__class__.__name__))
    super(modelVsModel, self).check_prerequisites(env, scomm)
    # Set some new env variables
    env['WKDIR'] = env['DIAG_BASE'] + '/diag/' + env[
        'caseid_1'] + '-' + env['caseid_2'] + '/'
    env['WORKDIR'] = env['WKDIR']
    if scomm.is_manager():
        if not os.path.exists(env['WKDIR']):
            os.makedirs(env['WKDIR'])
    env['PLOTTYPE'] = env['p_type']
    env['OBS_DATA'] = env['OBS_HOME']
    env['INPUT_FILES'] = env['POSTPROCESS_PATH'] + '/lnd_diag/inputFiles/'
    ##        env['DIAG_RESOURCES'] = env['POSTPROCESS_PATH']+'/lnd_diag/resources/'
    env['DIAG_RESOURCES'] = env['DIAGOBSROOT'] + '/resources/'
    env['RUNTYPE'] = 'model1-model2'
    # Create variable files
    if scomm.is_manager():
        script = env['DIAG_SHARED'] + '/create_var_lists.csh'
        rc1, err_msg = cesmEnvLib.checkFile(script, 'read')
        if rc1:
            try:
                pipe = subprocess.Popen([script],
                                        cwd=env['WORKDIR'],
                                        env=env,
                                        shell=True,
                                        stdout=subprocess.PIPE)
                output = pipe.communicate()[0]
                print(output)
                # wait for the script to finish before continuing
                while pipe.poll() is None:
                    time.sleep(0.5)
            except OSError as e:
                print('WARNING', e.errno, e.strerror)
        else:
            # BUGFIX: was 'web_script_1' (undefined -> NameError);
            # report the script path, matching the model-obs variant
            print('{0}... {1} file not found'.format(err_msg, script))
    scomm.sync()
    return env
def _create_html(self, workdir, templatePath, imgFormat):
    """Creates and renders html that is returned to the calling wrapper.

    Builds a table of plot names (one row per label, num_cols-2 plots per
    row), marking unreadable image files with '- Error', then renders the
    class template with jinja2.

    Arguments:
        workdir (string) - directory containing the generated images
        templatePath (string) - search path for the jinja2 template
        imgFormat (string) - image extension (e.g. 'png')

    Returns:
        (shortname, html) tuple for the calling wrapper.
    """
    plot_table = []
    num_cols = 6
    for label in self._labels:
        plot_list = [label]
        # look up the per-label expected plot list by attribute name;
        # getattr replaces the original eval() - same lookup, no code
        # execution from a formatted string
        exp_plot_list = getattr(self, '_expectedPlots_{0}'.format(label))
        for j in range(num_cols - 2):
            plot_file = exp_plot_list[j]
            img_file = '{0}.{1}'.format(plot_file, imgFormat)
            rc, err_msg = cesmEnvLib.checkFile(
                '{0}/{1}'.format(workdir, img_file), 'read')
            if not rc:
                plot_list.append('{0} - Error'.format(plot_file))
            else:
                plot_list.append(plot_file)
        plot_table.append(plot_list)
    # create a jinja2 template object
    templateLoader = jinja2.FileSystemLoader(searchpath=templatePath)
    templateEnv = jinja2.Environment(loader=templateLoader,
                                     keep_trailing_newline=False)
    template = templateEnv.get_template(self._template_file)
    # add the template variables
    templateVars = {
        'title': self._name,
        'cols': num_cols,
        'plot_table': plot_table,
        'label_list': self._labels,
        'imgFormat': imgFormat
    }
    # render the html template using the plot tables
    self._html = template.render(templateVars)
    return self._shortname, self._html
def setup_workdir(self, env, t, scomm): """This method sets up the unique working directory for a given diagnostic type """ # create the working directory first before calling the base class prerequisites endYr = (int(env[t+'_first_yr']) + int(env[t+'_nyrs'])) - 1 subdir = '{0}.{1}-{2}/{3}.{4}_{5}'.format(env[t+'_casename'], env[t+'_first_yr'], endYr,self._name.lower(), env[t+'_first_yr'], endYr) workdir = '{0}/{1}'.format(env[t+'_path_climo'], subdir) if (scomm.is_manager()): try: os.makedirs(workdir) except OSError as exception: if exception.errno != errno.EEXIST: err_msg = 'ERROR: {0} problem accessing the working directory {1}'.format(self.__class__.__name__, workdir) raise OSError(err_msg) # create symbolic links between the old and new workdir and get the real names of the files old_workdir = env[t+'_path_climo']+env[t+'_casename']+'.'+str(env[t+'_first_yr'])+'-'+str(endYr) env[t+'_path_climo'] = workdir # Add links to the new wkrdir that use the expected file names (existing climos have dates, the NCL do not like dates) if (scomm.is_manager()): climo_files = glob.glob(old_workdir+'/*.nc') for climo_file in climo_files: name_split = climo_file.split('.') # Split on '.' if ('-' in name_split[-3]): fn = str.join('.',name_split[:len(name_split)-3] + name_split[-2:]) #Piece together w/o the date, but still has old path path_split = fn.split('/') # Remove the path new_fn = workdir + '/' +path_split[-1] # Take file name and add it to new path rc1, err_msg1 = cesmEnvLib.checkFile(new_fn, 'read') if not rc1: os.symlink(climo_file,new_fn) if (scomm.is_manager()): print("DEBUG atm_diags_bc: workdir = %s"%workdir) print("DEBUG atm_diags_bc: t = %s"%t) print("DEBUG atm_diags_bc: env[t_path_climo] = %s"%env[t+'_path_climo']) return env
def _convert_plots(self, workdir, imgFormat, files):
    """Convert postscript plots in the workdir to imgFormat.

    Requires ImageMagick 'convert' on the PATH and imgFormat of 'png' or
    'gif'; otherwise only a warning is printed. Existing target images
    are removed and regenerated. Conversion failures are warnings, not
    errors.

    Arguments:
        workdir (string) - directory holding the .ps files
        imgFormat (string) - target image format ('png' or 'gif')
        files (list) - base names (no extension) of the .ps plots
    """
    splitPath = list()
    psFiles = list()
    psFiles = sorted(files)
    # check if the convert command exists
    rc = cesmEnvLib.which('convert')
    if rc is not None and imgFormat.lower() in ['png','gif']:
        for psFile in psFiles:
            sourceFile = '{0}/{1}.ps'.format(workdir, psFile)
            ##            print('...... convert source file {0}'.format(sourceFile))
            # check if the image file alreay exists and remove it to regen
            imgFile = '{0}/{1}.{2}'.format(workdir, psFile, imgFormat)
            rc, err_msg = cesmEnvLib.checkFile(imgFile,'write')
            if rc:
                print('...... removing {0} before recreating'.format(imgFile))
                os.remove(imgFile)
            # convert the image from ps to imgFormat
            try:
                # trim whitespace, pad a 5px white border, rasterize at 95 dpi
                pipe = subprocess.check_call( ['convert', '-trim', '-bordercolor', 'white', '-border', '5x5', '-density', '95', '{0}'.format(sourceFile),'{0}'.format(imgFile)])
                ##                print('...... created {0} size = {1}'.format(imgFile, os.path.getsize(imgFile)))
            except subprocess.CalledProcessError as e:
                # best-effort: report and move on to the next plot
                print('...... failed to create {0}'.format(imgFile))
                print('WARNING: convert_plots call to convert failed with error:')
                print('    {0}'.format(e.output))
            else:
                continue
    else:
        # TODO - need to create a script template to convert the plots
        print('WARNING: convert_plots unable to find convert command in path.')
        print('    Unable to convert ps formatted plots to {0}'.format(imgFormat))
        print('Run the following command from a node with convert installed....')
def generate_ncl_plots(env, nclPlotFile): """generate_plots_call - call a nclPlotFile via subprocess call Arguments: env (dictionary) - diagnostics system environment nclPlotFile (string) - ncl plotting file name """ # check if the nclPlotFile exists - # don't exit if it does not exists just print a warning. nclFile = '{0}/{1}'.format(env['NCLPATH'],nclPlotFile) print('Calling NCL routine {0} from {1}'.format(nclFile, env['WORKDIR'])) rc, err_msg = cesmEnvLib.checkFile(nclFile, 'read') if rc: try: pipe = subprocess.Popen(['ncl {0}'.format(nclFile)], cwd=env['WORKDIR'], env=env, shell=True, stdout=subprocess.PIPE) output = pipe.communicate()[0] print('NCL routine {0} \n {1}'.format(nclFile,output)) while pipe.poll() is None: time.sleep(0.5) except OSError as e: print('WARNING',e.errno,e.strerror) else: print('{0}... continuing with additional NCL calls.'.format(err_msg)) return 0
def check_prerequisites(self, env, scomm):
    """Check prerequisites for the land model-vs-obs diagnostics.

    Sets up the workdir and runtime env variables, then (manager rank
    only) runs the create_var_lists.csh helper script to produce the
    variable list files.

    Arguments:
        env (dict) - diagnostics environment; mutated and returned.
        scomm - simple communicator; filesystem/subprocess work is
            manager-only, followed by a sync.
    """
    print(" Checking prerequisites for : {0}".format(self.__class__.__name__))
    super(modelVsObs, self).check_prerequisites(env, scomm)
    # Set some new env variables
    env['WKDIR'] =  env['DIAG_BASE']+'/diag/'+env['caseid_1']+'-obs/'
    env['WORKDIR'] = env['WKDIR']
    if scomm.is_manager():
        if not os.path.exists(env['WKDIR']):
            os.makedirs(env['WKDIR'])
    env['PLOTTYPE'] = env['p_type']
    env['OBS_DATA'] = env['OBS_HOME']
    env['INPUT_FILES'] = env['POSTPROCESS_PATH']+'/lnd_diag/inputFiles/'
    env['DIAG_RESOURCES'] = env['DIAGOBSROOT']+'/resources/'
    env['RUNTYPE'] = 'model-obs'
    # Create variable files
    if scomm.is_manager():
        script = env['DIAG_SHARED']+'/create_var_lists.csh'
        rc1, err_msg = cesmEnvLib.checkFile(script,'read')
        if rc1:
            try:
                pipe = subprocess.Popen([script], cwd=env['WORKDIR'], env=env, shell=True, stdout=subprocess.PIPE)
                output = pipe.communicate()[0]
                print(output)
                # wait for the script to finish before continuing
                while pipe.poll() is None:
                    time.sleep(0.5)
            except OSError as e:
                print('WARNING',e.errno,e.strerror)
        else:
            print('{0}... {1} file not found'.format(err_msg,script))
    scomm.sync()
    return env
def check_prerequisites(self, env):
    """Check prerequisites for the ocean model timeseries diagnostics.

    Cleans the workdir, stages and gunzips coupler/ocean logs and dt
    files into the workdir, runs awk parsers over them to produce the
    .txt/.asc inputs for the plot modules, and disables plot modules
    whose inputs are missing.

    Arguments:
        env (dict) - diagnostics environment; mutated and returned.

    Side effects:
        chdir's into env['WORKDIR']; copies/removes files there; sets
        several PM_* flags in both env and os.environ.
    """
    print(" Checking prerequisites for : {0}".format(self.__class__.__name__))
    super(modelTimeseries, self).check_prerequisites(env)
    # chdir into the working directory
    os.chdir(env['WORKDIR'])
    # clean out the old working plot files from the workdir
    if env['CLEANUP_FILES'].upper() in ['T','TRUE']:
        cesmEnvLib.purge(env['WORKDIR'], '.*\.pro')
        cesmEnvLib.purge(env['WORKDIR'], '.*\.gif')
        cesmEnvLib.purge(env['WORKDIR'], '.*\.dat')
        cesmEnvLib.purge(env['WORKDIR'], '.*\.ps')
        cesmEnvLib.purge(env['WORKDIR'], '.*\.png')
        cesmEnvLib.purge(env['WORKDIR'], '.*\.html')
        cesmEnvLib.purge(env['WORKDIR'], '.*\.log\.*')
        cesmEnvLib.purge(env['WORKDIR'], '.*\.pop\.d.\.*')
    # create the plot.dat file in the workdir used by all NCL plotting routines
    diagUtilsLib.create_plot_dat(env['WORKDIR'], env['XYRANGE'], env['DEPTHS'])
    # set the OBSROOT
    env['OBSROOT'] = env['OBSROOTPATH']
    # high-resolution tripole grids: disable the MOC plot modules
    if (env['RESOLUTION'] == 'tx0.1v2' or env['RESOLUTION'] == 'tx0.1v3') :
        env['MTS_PM_MOCANN'] = os.environ['PM_MOCANN'] = 'FALSE'
        env['MTS_PM_MOCMON'] = os.environ['PM_MOCMON'] = 'FALSE'
    # check if cpl log file path is defined
    if len(env['CPLLOGFILEPATH']) == 0:
        # print a message that the cpl log path isn't defined and turn off CPLLOG plot module
        print('model timeseries - CPLLOGFILEPATH is undefined. Disabling MTS_PM_CPLLOG module')
        env['MTS_PM_CPLLOG'] = os.environ['PM_CPLLOG'] = 'FALSE'
    else:
        # check that cpl log files exist and gunzip them if necessary
        # NOTE(review): 'initcplLogs' (lower-case c) is never used again -
        # the glob below binds 'initCplLogs'; harmless but looks like a typo
        initcplLogs = cplLogs = list()
        initCplLogs = glob.glob('{0}/cpl.log.*'.format(env['CPLLOGFILEPATH']))
        if len(initCplLogs) > 0:
            for cplLog in initCplLogs:
                logFileList = cplLog.split('/')
                cplLogFile = logFileList[-1]
                shutil.copy2(cplLog, '{0}/{1}'.format(env['WORKDIR'],cplLogFile))
                # gunzip the cplLog in the workdir
                if cplLogFile.lower().find('.gz') != -1:
                    # strip the '.gz' suffix for the uncompressed name
                    cplLog_gunzip = cplLogFile[:-3]
                    inFile = gzip.open('{0}/{1}'.format(env['WORKDIR'],cplLogFile), 'rb')
                    outFile = open('{0}/{1}'.format(env['WORKDIR'],cplLog_gunzip), 'wb')
                    outFile.write( inFile.read() )
                    inFile.close()
                    outFile.close()
                    # append the gunzipped cpl log file to the cplLogs list
                    cplLogs.append('{0}/{1}'.format(env['WORKDIR'],cplLog_gunzip))
                    # remove the original .gz file in the workdir
                    os.remove('{0}/{1}'.format(env['WORKDIR'],cplLogFile))
                else:
                    # append the original gunzipped cpl log file to the cplLogs list
                    cplLogs.append('{0}/{1}'.format(env['WORKDIR'],cplLogFile))
            # parse the cpllog depending on the coupler version - default to 7b
            print('model_timeseries: setting up heat and freshwater awk calls with cplLogs = {0}'.format(cplLogs))
            heatFile = 'cplheatbudget'
            freshWaterFile = 'cplfwbudget'
            cplVersion = 'cpl7b'
            # ntailht/ntailfw = number of trailing lines to keep per version
            env['ntailht'] = os.environ['ntailht'] = '22'
            env['ntailfw'] = os.environ['ntailfw'] = '16'
            if '7' == env['TS_CPL'] or '6' == env['TS_CPL']:
                cplVersion = 'cpl{0}'.format(env['TS_CPL'])
                env['ntailht'] = os.environ['ntailht'] = '21'
                env['ntailfw'] = os.environ['ntailfw'] = '16'
            # expand the cpl.log* into a list
            cplLogs.sort()
            cplLogsString = ' '.join(cplLogs)
            # define the awk scripts to parse the cpllog file
            heatPath = '{0}/process_{1}_logfiles_heat.awk'.format(env['TOOLPATH'], cplVersion)
            heatPath = os.path.abspath(heatPath)
            fwPath = '{0}/process_{1}_logfiles_fw.awk'.format(env['TOOLPATH'], cplVersion)
            fwPath = os.path.abspath(fwPath)
            heatCmd = '{0} y0={1} y1={2} {3}'.format(heatPath, env['TSERIES_YEAR0'], env['TSERIES_YEAR1'], cplLogsString).split(' ')
            freshWaterCmd = '{0} y0={1} y1={2} {3}'.format(fwPath, env['TSERIES_YEAR0'], env['TSERIES_YEAR1'], cplLogsString).split(' ')
            # run the awk scripts to generate the .txt files from the cpllogs
            cmdList = [ (heatCmd, heatFile, env['ntailht']), (freshWaterCmd, freshWaterFile, env['ntailfw']) ]
            for cmd in cmdList:
                outFile = '{0}.txt'.format(cmd[1])
                with open (outFile, 'w') as results:
                    try:
                        subprocess.check_call(cmd[0], stdout=results, env=env)
                    except subprocess.CalledProcessError as e:
                        print('WARNING: {0} time series error executing command:'.format(self._name))
                        print(' {0}'.format(e.cmd))
                        print(' rc = {0}'.format(e.returncode))
                rc, err_msg = cesmEnvLib.checkFile(outFile, 'read')
                if rc:
                    # get the tail of the .txt file and redirect to a .asc file for the web
                    ascFile = '{0}.asc'.format(cmd[1])
                    with open (ascFile, 'w') as results:
                        try:
                            # TODO - read the .txt in and write just the lines needed to avoid subprocess call
                            tailCmd = 'tail -{0} {1}.txt'.format(cmd[2], cmd[1]).split(' ')
                            subprocess.check_call(tailCmd, stdout=results, env=env)
                        except subprocess.CalledProcessError as e:
                            print('WARNING: {0} time series error executing command:'.format(self._name))
                            print(' {0}'.format(e.cmd))
                            print(' rc = {0}'.format(e.returncode))
        else:
            print('model timeseries - Coupler logs do not exist. Disabling MTS_PM_CPLLOG module')
            env['MTS_PM_CPLLOG'] = os.environ['PM_CPLLOG'] = 'FALSE'
    # check if ocn log files exist
    if len(env['OCNLOGFILEPATH']) == 0:
        # print a message that the ocn log path isn't defined and turn off POPLOG plot module
        print('model timeseries - OCNLOGFILEPATH is undefined. Disabling MTS_PM_YPOPLOG module')
        env['MTS_PM_YPOPLOG'] = os.environ['PM_YPOPLOG'] = 'FALSE'
    else:
        # check that ocn log files exist and gunzip them if necessary
        initOcnLogs = ocnLogs = list()
        initOcnLogs = glob.glob('{0}/ocn.log.*'.format(env['OCNLOGFILEPATH']))
        if len(initOcnLogs) > 0:
            for ocnLog in initOcnLogs:
                logFileList = ocnLog.split('/')
                ocnLogFile = logFileList[-1]
                shutil.copy2(ocnLog, '{0}/{1}'.format(env['WORKDIR'],ocnLogFile))
                # gunzip the ocnLog in the workdir
                if ocnLogFile.lower().find('.gz') != -1:
                    ocnLog_gunzip = ocnLogFile[:-3]
                    inFile = gzip.open('{0}/{1}'.format(env['WORKDIR'],ocnLogFile), 'rb')
                    outFile = open('{0}/{1}'.format(env['WORKDIR'],ocnLog_gunzip), 'wb')
                    outFile.write( inFile.read() )
                    inFile.close()
                    outFile.close()
                    # append the gunzipped ocn log file to the ocnLogs list
                    ocnLogs.append('{0}/{1}'.format(env['WORKDIR'],ocnLog_gunzip))
                    # remove the original .gz file in the workdir
                    os.remove('{0}/{1}'.format(env['WORKDIR'],ocnLogFile))
                else:
                    # append the original gunzipped ocn log file to the ocnLogs list
                    ocnLogs.append('{0}/{1}'.format(env['WORKDIR'],ocnLogFile))
            # expand the ocn.log* into a list
            ocnLogs.sort()
            ocnLogsString = ' '.join(ocnLogs)
            # define the awk script to parse the ocn log files
            globalDiagAwkPath = '{0}/process_pop2_logfiles.globaldiag.awk'.format(env['TOOLPATH'])
            globalDiagAwkCmd = '{0} {1}'.format(globalDiagAwkPath, ocnLogsString).split(' ')
            print('model_timeseries: globalDiagAwkCmd = {0}'.format(globalDiagAwkCmd))
            # run the awk scripts to generate the .txt files from the ocn logs
            try:
                subprocess.check_call(globalDiagAwkCmd)
            except subprocess.CalledProcessError as e:
                print('WARNING: {0} time series error executing command:'.format(self._name))
                print(' {0}'.format(e.cmd))
                print(' rc = {0}'.format(e.returncode))
        else:
            print('model timeseries - Ocean logs do not exist. Disabling MTS_PM_YPOPLOG and MTS_PM_ENSOWVLT modules')
            env['MTS_PM_YPOPLOG'] = os.environ['PM_YPOPLOG'] = 'FALSE'
            env['MTS_PM_ENSOWVLT'] = os.environ['PM_ENSOWVLT'] = 'FALSE'
    # check if dt files exist
    if len(env['DTFILEPATH']) == 0:
        # print a message that the dt file path isn't defined and turn off POPLOG plot module
        print('model timeseries - DTFILEPATH is undefined. Disabling MTS_PM_YPOPLOG and MTS_PM_ENSOWVLT modules')
        env['MTS_PM_YPOPLOG'] = os.environ['PM_YPOPLOG'] = 'FALSE'
        env['MTS_PM_ENSOWVLT'] = os.environ['PM_ENSOWVLT'] = 'FALSE'
    else:
        # check that dt files exist
        dtFiles = list()
        dtFiles = glob.glob('{0}/{1}.pop.dt.*'.format(env['DTFILEPATH'], env['CASE']))
        print('dtFiles = {0}'.format(dtFiles))
        if len(dtFiles) > 0:
            for dtFile in dtFiles:
                logFileList = dtFile.split('/')
                dtLogFile = logFileList[-1]
                shutil.copy2(dtFile, '{0}/{1}'.format(env['WORKDIR'],dtLogFile))
            # expand the *.dt.* into a list
            dtFiles.sort()
            dtFilesString = ' '.join(dtFiles)
            # define the awk script to parse the dt log files
            dtFilesAwkPath = '{0}/process_pop2_dtfiles.awk'.format(env['TOOLPATH'])
            dtFilesAwkCmd = '{0} {1}'.format(dtFilesAwkPath, dtFilesString).split(' ')
            print('model_timeseries: dtFilesAwkCmd = {0}'.format(dtFilesAwkCmd))
            # run the awk scripts to generate the .txt files from the dt log files
            try:
                subprocess.check_call(dtFilesAwkCmd)
            except subprocess.CalledProcessError as e:
                print('WARNING: {0} time series error executing command:'.format(self._name))
                print(' {0}'.format(e.cmd))
                print(' rc = {0}'.format(e.returncode))
        else:
            print('model_timeseries - ocean dt files do not exist. Disabling MTS_PM_YPOPLOG and MTS_PM_ENSOWVLT modules')
            env['MTS_PM_YPOPLOG'] = os.environ['PM_YPOPLOG'] = 'FALSE'
            env['MTS_PM_ENSOWVLT'] = os.environ['PM_ENSOWVLT'] = 'FALSE'
    return env
def check_prerequisites(self, env, scomm):
    """Check prerequisites for the atmosphere model1-vs-model2 diagnostics.

    Translates the XML-config keys (lower case) into the env variable
    names the NCL scripts expect (upper case), creates the workdir, and
    optionally generates paleo coastline files.

    Arguments:
        env (dict) - diagnostics environment; mutated and returned.
        scomm - simple communicator; filesystem work is manager-only,
            followed by a sync.
    """
    print(" Checking prerequisites for : {0}".format(
        self.__class__.__name__))
    super(modelVsModel, self).check_prerequisites(env, scomm)
    # Set some new env variables
    env['DIAG_CODE'] = env['NCLPATH']
    env['test_path_diag'] = '{0}/{1}-{2}/'.format(env['test_path_diag'],
                                                  env['test_casename'],
                                                  env['cntl_casename'])
    env['WKDIR'] = env['test_path_diag']
    env['WORKDIR'] = env['test_path_diag']
    if scomm.is_manager():
        if not os.path.exists(env['WKDIR']):
            os.makedirs(env['WKDIR'])
    env['COMPARE'] = env['CNTL']
    env['PLOTTYPE'] = env['p_type']
    env['COLORTYPE'] = env['c_type']
    env['MG_MICRO'] = env['microph']
    env['TIMESTAMP'] = env['time_stamp']
    env['TICKMARKS'] = env['tick_marks']
    # custom case names on the plots, or 'null' to use defaults
    if env['custom_names'] == 'True':
        env['CASENAMES'] = 'True'
        env['CASE1'] = env['test_name']
        env['CASE2'] = env['cntl_name']
    else:
        env['CASENAMES'] = 'False'
        env['CASE1'] = 'null'
        env['CASE2'] = 'null'
    env['CNTL_PLOTVARS'] = 'null'
    env['test_in'] = env['test_path_climo'] + env['test_casename']
    env['test_out'] = env['test_path_climo'] + env['test_casename']
    env['cntl_in'] = env['cntl_path_climo'] + env['cntl_casename']
    env['cntl_out'] = env['cntl_path_climo'] + env['cntl_casename']
    # build the list of seasons to plot
    env['seas'] = []
    if env['plot_ANN_climo'] == 'True':
        env['seas'].append('ANN')
    if env['plot_DJF_climo'] == 'True':
        env['seas'].append('DJF')
    if env['plot_MAM_climo'] == 'True':
        env['seas'].append('MAM')
    if env['plot_JJA_climo'] == 'True':
        env['seas'].append('JJA')
    if env['plot_SON_climo'] == 'True':
        env['seas'].append('SON')
    # Significance vars
    if env['significance'] == 'True':
        env['SIG_PLOT'] = 'True'
        env['SIG_LVL'] = env['sig_lvl']
    else:
        env['SIG_PLOT'] = 'False'
        env['SIG_LVL'] = 'null'
    # Set the rgb file name (default amwg palette unless overridden)
    env['RGB_FILE'] = env['DIAG_HOME'] + '/rgb/amwg.rgb'
    if 'default' in env['color_bar']:
        env['RGB_FILE'] = env['DIAG_HOME'] + '/rgb/amwg.rgb'
    elif 'blue_red' in env['color_bar']:
        env['RGB_FILE'] = env['DIAG_HOME'] + '/rgb/bluered.rgb'
    elif 'blue_yellow_red' in env['color_bar']:
        env['RGB_FILE'] = env['DIAG_HOME'] + '/rgb/blueyellowred.rgb'
    # Set Paleo variables
    env['PALEO'] = env['paleo']
    if env['PALEO'] == 'True':
        env['DIFF_PLOTS'] = env['diff_plots']
        # Test coastlines: generate the coastline data file if missing
        env['MODELFILE'] = env['test_path_climo'] + '/' + env[
            'test_casename'] + '_ANN_climo.nc'
        env['LANDMASK'] = env['land_mask1']
        env['PALEODATA'] = env['test_path_climo'] + '/' + env[
            'test_casename']
        if scomm.is_manager():
            rc, err_msg = cesmEnvLib.checkFile(env['PALEODATA'], 'read')
            if not rc:
                diagUtilsLib.generate_ncl_plots(env, 'plot_paleo.ncl')
        env['PALEOCOAST1'] = env['PALEODATA']
        # Cntl coastlines: same treatment for the control case
        env['MODELFILE'] = env['cntl_path_climo'] + '/' + env[
            'cntl_casename'] + '_ANN_climo.nc'
        env['LANDMASK'] = env['land_mask2']
        env['PALEODATA'] = env['cntl_path_climo'] + '/' + env[
            'cntl_casename']
        if scomm.is_manager():
            rc, err_msg = cesmEnvLib.checkFile(env['PALEODATA'], 'read')
            if not rc:
                diagUtilsLib.generate_ncl_plots(env, 'plot_paleo.ncl')
        env['PALEOCOAST2'] = env['PALEODATA']
    else:
        env['PALEOCOAST1'] = 'null'
        env['PALEOCOAST2'] = 'null'
        env['DIFF_PLOTS'] = 'False'
    env['USE_WACCM_LEVS'] = 'False'
    scomm.sync()
    return env
def setup_workdir(self, env, t, scomm):
    """Set up the unique working directory for a given diagnostic type.

    Creates the workdir (plus atm/ and rof/ subdirs, manager rank only),
    then for each model component symlinks existing climo files into it
    under date-free names, because the NCL scripts do not accept dated
    file names.

    Arguments:
        env (dict) - diagnostics environment; env['case'+t+'_path_climo'],
            env['DIAG_BASE'] and env['PTMPDIR_'+t] are updated.
        t (string) - diagnostic case index suffix (e.g. '1' or '2')
        scomm - simple communicator; directory creation is manager-only.

    Raises:
        OSError - if the workdir cannot be created for a reason other
            than already existing.
    """
    # create the working directory first before calling the base class prerequisites
    endYr = (int(env['clim_first_yr_'+t]) + int(env['clim_num_yrs_'+t])) - 1
    subdir = '{0}.{1}-{2}/{3}.{4}_{5}'.format(env['caseid_'+t], env['clim_first_yr_'+t], endYr, self._name.lower(), env['clim_first_yr_'+t], endYr)
    workdir = '{0}/climo/{1}/{2}'.format(env['PTMPDIR_'+t], env['caseid_'+t], subdir)
    if (scomm.is_manager()):
        print('DEBUG lnd_diags_bc.setup_workdir t = {0}'.format(t))
        print('DEBUG lnd_diags_bc.setup_workdir subdir = {0}'.format(subdir))
        print('DEBUG lnd_diags_bc.setup_workdir first workdir = {0}'.format(workdir))
        try:
            os.makedirs(workdir)
            os.makedirs(workdir+'/atm')
            os.makedirs(workdir+'/rof')
        except OSError as exception:
            # EEXIST is fine (reruns); anything else is fatal
            if exception.errno != errno.EEXIST:
                err_msg = 'ERROR: {0} problem accessing the working directory {1}'.format(self.__class__.__name__, workdir)
                raise OSError(err_msg)
    for model in ('lnd', 'atm', 'rtm'):
        # river runoff climos live in 'rof' even though the model is 'rtm'
        if ('rtm' in model):
            m_dir = 'rof'
        else:
            m_dir = model
        # create symbolic links between the old and new workdir and get the real names of the files
        old_workdir = env['PTMPDIR_'+t]+'/climo/'+env['caseid_'+t]+'/'+env['caseid_'+t]+'.'+str(env['clim_first_yr_'+t])+'-'+str(endYr)+'/'+m_dir
        env['case'+t+'_path_climo'] = workdir
        print('DEBUG lnd_diags_bc.setup_workdir old_workdir = {0}'.format(old_workdir))
        print('DEBUG lnd_diags_bc.setup_workdir case_t_path_climo = {0}'.format(env['case'+t+'_path_climo']))
        # land climos go in the workdir root; atm/rof in their subdirs
        if 'lnd' in model:
            workdir_mod = workdir
        else:
            workdir_mod = workdir + '/' + m_dir
        # Add links to the new wkrdir that use the expected file names (existing climos have dates, the NCL do not like dates)
        print('DEBUG lnd_diags_bc.setup_workdir workdir_mod = {0}'.format(workdir_mod))
        climo_files = glob.glob(old_workdir+'/*.nc')
        for climo_file in climo_files:
            name_split = climo_file.split('.')    # Split on '.'
            # a '-' in the third-from-last component marks a dated file name
            if ('-' in name_split[-3]):
                fn = str.join('.',name_split[:len(name_split)-3] + name_split[-2:]) #Piece together w/o the date, but still has old path
                path_split = fn.split('/')   # Remove the path
                new_fn = workdir_mod + '/' +path_split[-1]  # Take file name and add it to new path
                rc1, err_msg1 = cesmEnvLib.checkFile(new_fn, 'read')
                if not rc1:
                    try:
                        os.symlink(climo_file,new_fn)
                    except OSError:
                        # FIX: was a bare 'except:' which also swallowed
                        # KeyboardInterrupt/SystemExit; the expected failure
                        # here is a pre-existing link (FileExistsError)
                        print('INFO lnd_diags_bc.setup_workdir symlink {0} to {1} already exists.'.format(new_fn, climo_file))
    env['DIAG_BASE'] = env['PTMPDIR_1']
    env['PTMPDIR_'+t] = '{0}/climo/{1}/{2}'.format(env['PTMPDIR_'+t], env['caseid_'+t], subdir)
    if (scomm.is_manager()):
        print('DEBUG lnd_diags_bc.setup_workdir DIAG_BASE = {0}'.format(env['DIAG_BASE']))
        print('DEBUG lnd_diags_bc.setup_workdir PTMPDIR_t {0}'.format(env['PTMPDIR_'+t]))
    return env
def check_prerequisites(self, env):
    """Check prerequisites for the ocean model-vs-control diagnostics.

    Performs the model-vs-obs style setup twice: once for the test case
    and once for the control case (linking the control averages into the
    workdir first), producing a zonal-average file for each.

    Arguments:
        env (dict) - diagnostics environment; mutated and returned.

    Raises:
        OSError - if either grid_info file is not readable.
    """
    print(' Checking prerequisites for : {0}'.format(
        self.__class__.__name__))
    # suffix the diagnostic name with the control case for unique output
    self._name = '{0}_{1}'.format(self._name, env['CNTRLCASE'])
    super(modelVsControl, self).check_prerequisites(env)
    # clean out the old working plot files from the workdir
    if env['CLEANUP_FILES'].upper() in ['T', 'TRUE']:
        cesmEnvLib.purge(env['WORKDIR'], '.*\.pro')
        cesmEnvLib.purge(env['WORKDIR'], '.*\.gif')
        cesmEnvLib.purge(env['WORKDIR'], '.*\.dat')
        cesmEnvLib.purge(env['WORKDIR'], '.*\.ps')
        cesmEnvLib.purge(env['WORKDIR'], '.*\.png')
        cesmEnvLib.purge(env['WORKDIR'], '.*\.html')
    # create the plot.dat file in the workdir used by all NCL plotting routines
    diagUtilsLib.create_plot_dat(env['WORKDIR'], env['XYRANGE'],
                                 env['DEPTHS'])
    # setup prerequisites for the model
    # setup the gridfile based on the resolution and levels
    os.environ['gridfile'] = '{0}/omwg/za_grids/{1}_grid_info.nc'.format(
        env['DIAGOBSROOT'], env['RESOLUTION'])
    if env['VERTICAL'] == '42':
        os.environ[
            'gridfile'] = '{0}/omwg/za_grids/{1}_42lev_grid_info.nc'.format(
                env['DIAGOBSROOT'], env['RESOLUTION'])
    if env['VERTICAL'] == '62':
        os.environ[
            'gridfile'] = '{0}/omwg/za_grids/{1}_62lev_grid_info.nc'.format(
                env['DIAGOBSROOT'], env['RESOLUTION'])
    # check if gridfile exists and is readable
    rc, err_msg = cesmEnvLib.checkFile(os.environ['gridfile'], 'read')
    if not rc:
        raise OSError(err_msg)
    env['GRIDFILE'] = os.environ['gridfile']
    # high-resolution tripole grid: disable plot modules that do not apply
    if env['RESOLUTION'] == 'tx0.1v2':
        env['MVC_PM_VELISOPZ'] = os.environ['MVC_PM_VELISOPZ'] = 'FALSE'
        env['MVC_PM_KAPPAZ'] = os.environ['MVC_PM_KAPPAZ'] = 'FALSE'
    # create the global zonal average file used by most of the plotting classes
    print('    model vs. control - calling create_za')
    diagUtilsLib.create_za(env['WORKDIR'], env['TAVGFILE'], env['GRIDFILE'],
                           env['TOOLPATH'], env)
    # setup prerequisites for the model control
    control = True
    env['CNTRL_MAVGFILE'], env[
        'CNTRL_TAVGFILE'] = diagUtilsLib.createLinks(
            env['CNTRLYEAR0'], env['CNTRLYEAR1'], env['CNTRLTAVGDIR'],
            env['WORKDIR'], env['CNTRLCASE'], control)
    env['CNTRLFILE'] = env['CNTRL_TAVGFILE']
    # setup the gridfile based on the resolution and vertical levels
    os.environ[
        'gridfilecntrl'] = '{0}/omwg/za_grids/{1}_grid_info.nc'.format(
            env['DIAGOBSROOT'], env['CNTRLRESOLUTION'])
    if env['VERTICAL'] == '42':
        os.environ[
            'gridfilecntrl'] = '{0}/omwg/za_grids/{1}_42lev_grid_info.nc'.format(
                env['DIAGOBSROOT'], env['CNTRLRESOLUTION'])
    if env['VERTICAL'] == '62':
        os.environ[
            'gridfilecntrl'] = '{0}/omwg/za_grids/{1}_62lev_grid_info.nc'.format(
                env['DIAGOBSROOT'], env['CNTRLRESOLUTION'])
    # check if gridfile exists and is readable
    rc, err_msg = cesmEnvLib.checkFile(os.environ['gridfilecntrl'], 'read')
    if not rc:
        raise OSError(err_msg)
    env['GRIDFILECNTRL'] = os.environ['gridfilecntrl']
    # check the resolution and decide if some plot modules should be turned off
    if env['CNTRLRESOLUTION'] == 'tx0.1v2':
        env['MVC_PM_VELISOPZ'] = os.environ['MVC_PM_VELISOPZ'] = 'FALSE'
        env['MVC_PM_KAPPAZ'] = os.environ['MVC_PM_KAPPAZ'] = 'FALSE'
    # create the control global zonal average file used by most of the plotting classes
    print('    model vs. control - calling create_za for control run')
    diagUtilsLib.create_za(env['WORKDIR'], env['CNTRL_TAVGFILE'],
                           env['GRIDFILECNTRL'], env['TOOLPATH'], env)
    return env
def createLinks(start_year, stop_year, tavgdir, workdir, case, control):
    """createLinks - create symbolic links between tavgdir and workdir.

    Links the mavg and tavg climatology files, plus each annual history
    file in the requested year range, from the averages directory into the
    diagnostics working directory. Each average is linked twice: once with
    zero-padded years and once with the years as given.

    Arguments:
        start_year (string) - starting year
        stop_year (string) - ending year
        tavgdir (string) - output directory for averages
        workdir (string) - working directory for diagnostics
        case (string) - case name
        control (boolean) - indicates if this is a control run or not which
            will change the mavg and tavg filenames

    Returns:
        (mavgFileBase, tavgFileBase) - basenames of the linked mavg and
            tavg files (zero-padded-year form)

    Raises:
        OSError - if the source mavg or tavg file is missing or unreadable
    """
    # zero-fill width for years in the link names
    padding = 4
    avgFileBaseName = '{0}/{1}.pop.h'.format(tavgdir,case)
    case_prefix = '{0}.pop.h'.format(case)

    # prepend the years with 0's for some of the plotting routines
    zstart_year = start_year.zfill(padding)
    zstop_year = stop_year.zfill(padding)

    # check if this is a control run or not; the extra 'cntrl.' token
    # distinguishes control filenames from model filenames
    cntrl = ''
    if control:
        cntrl = 'cntrl.'

    # link to the mavg file for the za and plotting routines
    # (with cntrl == '' the '{2}nc' suffix collapses to plain '.nc')
    mavgFileBase = 'mavg.{0}.{1}.{2}nc'.format(zstart_year, zstop_year, cntrl)
    avgFile = '{0}/mavg.{1}-{2}.nc'.format(tavgdir, zstart_year, zstop_year)
    rc, err_msg = cesmEnvLib.checkFile(avgFile, 'read')
    if rc:
        # create both the zero-padded and as-given year links if absent
        zmavgFile = '{0}/mavg.{1}.{2}.{3}nc'.format(workdir, zstart_year,
                                                    zstop_year, cntrl)
        mavgFile = '{0}/mavg.{1}.{2}.{3}nc'.format(workdir, start_year,
                                                   stop_year, cntrl)
        rc1, err_msg1 = cesmEnvLib.checkFile(zmavgFile, 'read')
        if not rc1:
            os.symlink(avgFile, zmavgFile)
        rc1, err_msg1 = cesmEnvLib.checkFile(mavgFile, 'read')
        if not rc1:
            os.symlink(avgFile, mavgFile)
    else:
        raise OSError(err_msg)

    # link to the tavg file
    tavgFileBase = 'tavg.{0}.{1}.{2}nc'.format(zstart_year, zstop_year, cntrl)
    avgFile = '{0}/tavg.{1}-{2}.nc'.format(tavgdir, zstart_year, zstop_year)
    rc, err_msg = cesmEnvLib.checkFile(avgFile, 'read')
    if rc:
        ztavgFile = '{0}/tavg.{1}.{2}.{3}nc'.format(workdir, zstart_year,
                                                    zstop_year, cntrl)
        tavgFile = '{0}/tavg.{1}.{2}.{3}nc'.format(workdir, start_year,
                                                   stop_year, cntrl)
        rc1, err_msg1 = cesmEnvLib.checkFile(ztavgFile, 'read')
        if not rc1:
            os.symlink(avgFile, ztavgFile)
        rc1, err_msg1 = cesmEnvLib.checkFile(tavgFile, 'read')
        if not rc1:
            os.symlink(avgFile, tavgFile)
    else:
        raise OSError(err_msg)

    # link to all the annual history files
    year = int(start_year)
    while year <= int(stop_year):
        # check if file already exists before appending to the avgList
        syear = str(year)
        zyear = syear.zfill(padding)
        avgFile = '{0}.{1}.nc'.format(avgFileBaseName, zyear)
        rc, err_msg = cesmEnvLib.checkFile(avgFile, 'read')
        if rc:
            workAvgFile = '{0}/{1}.{2}.nc'.format(workdir, case_prefix, zyear)
            rc1, err_msg1 = cesmEnvLib.checkFile(workAvgFile, 'read')
            if not rc1:
                os.symlink(avgFile, workAvgFile)
        year += 1

    return mavgFileBase, tavgFileBase
def check_prerequisites(self, env, scomm):
    """Check and set up prerequisites for the ice model vs. model diagnostics.

    Derives the NCL env variables (DIAG_CODE/DIAG_HOME, case/path aliases,
    averaging-year bounds) from the user settings, creates the working
    directory on the manager rank, and links the SSMI and ASPeCt
    observation files into both climatology directories.

    Arguments:
        env (dictionary) - diagnostics environment dictionary
        scomm (object) - simple communicator; only the manager rank
            touches the filesystem

    Returns:
        env (dictionary) - updated environment dictionary
    """
    print(" Checking prerequisites for : {0}".format(
        self.__class__.__name__))
    super(modelVsModel, self).check_prerequisites(env, scomm)
    # Set some new env variables
    env['DIAG_CODE'] = env['NCLPATH']
    env['DIAG_HOME'] = env['NCLPATH']
    print('DEBUG: model_vs_model env[DIAG_HOME] = {0}'.format(
        env['DIAG_HOME']))
    # unique output root per compared case pair
    env['DIAG_ROOT'] = '{0}/{1}-{2}/'.format(env['DIAG_ROOT'],
                                             env['CASE_TO_CONT'],
                                             env['CASE_TO_DIFF'])
    env['WKDIR'] = env['DIAG_ROOT']
    env['WORKDIR'] = env['WKDIR']
    if scomm.is_manager():
        if not os.path.exists(env['WKDIR']):
            os.makedirs(env['WKDIR'])
    # PREV = the case being diffed against, NEW = the continuing case
    env['CASE_PREV'] = env['CASE_TO_DIFF']
    env['CASE_NEW'] = env['CASE_TO_CONT']
    env['VAR_NAME_PREV'] = env['VAR_NAME_TYPE_DIFF']
    env['VAR_NAME_NEW'] = env['VAR_NAME_TYPE_CONT']
    env['PATH_PREV'] = env['CLIMO_DIFF']
    env['PATH_NEW'] = env['CLIMO_CONT']
    # first averaged year = (end year - years to average) + 1
    env['PREV_YR_AVG_FRST'] = str((int(env['ENDYR_DIFF']) -
                                   int(env['YRS_TO_AVG'])) + 1)
    env['PREV_YR_AVG_LAST'] = env['ENDYR_DIFF']
    env['NEW_YR_AVG_FRST'] = str((int(env['ENDYR_CONT']) -
                                  int(env['YRS_TO_AVG'])) + 1)
    env['NEW_YR_AVG_LAST'] = env['ENDYR_CONT']
    env['YR1'] = env['BEGYR_CONT']
    env['YR2'] = env['ENDYR_CONT']
    env['YR1_DIFF'] = env['BEGYR_DIFF']
    env['YR2_DIFF'] = env['ENDYR_DIFF']
    env['PRE_PROC_ROOT_CONT'] = env['PATH_CLIMO_CONT']
    env['PRE_PROC_ROOT_DIFF'] = env['PATH_CLIMO_DIFF']
    env['PATH_PLOT'] = env['PATH_CLIMO_CONT']

    # Link obs files into the climo directory (manager rank only)
    if (scomm.is_manager()):
        # SSMI
        # CONT CASE
        new_ssmi_fn = env['PATH_CLIMO_CONT'] + '/' + os.path.basename(
            env['SSMI_PATH'])
        rc1, err_msg1 = cesmEnvLib.checkFile(new_ssmi_fn, 'read')
        if not rc1:
            os.symlink(env['SSMI_PATH'], new_ssmi_fn)
        # DIFF CASE
        new_ssmi_fn = env['PATH_CLIMO_DIFF'] + '/' + os.path.basename(
            env['SSMI_PATH'])
        rc1, err_msg1 = cesmEnvLib.checkFile(new_ssmi_fn, 'read')
        if not rc1:
            os.symlink(env['SSMI_PATH'], new_ssmi_fn)
        # ASPeCt
        # CONT CASE
        new_ssmi_fn = env['PATH_CLIMO_CONT'] + '/' + os.path.basename(
            env['ASPeCt_PATH'])
        rc1, err_msg1 = cesmEnvLib.checkFile(new_ssmi_fn, 'read')
        if not rc1:
            os.symlink(env['ASPeCt_PATH'], new_ssmi_fn)
        # DIFF CASE
        new_ssmi_fn = env['PATH_CLIMO_DIFF'] + '/' + os.path.basename(
            env['ASPeCt_PATH'])
        rc1, err_msg1 = cesmEnvLib.checkFile(new_ssmi_fn, 'read')
        if not rc1:
            os.symlink(env['ASPeCt_PATH'], new_ssmi_fn)
    scomm.sync()
    return env
def _create_html(self, workdir, templatePath, imgFormat):
    """Creates and renders html that is returned to the calling wrapper.

    Builds one (header, table, columns) tuple per expected plot group
    (global averages, Nino, transport diagnostics), laying the labelled
    plots out in self._columns[k]-wide rows. A plot whose image file is
    missing is flagged with '<name> - Error'; the last row is padded with
    empty ('','') cells. The tables are then rendered through the jinja2
    template self._template_file.

    Arguments:
        workdir (string) - directory containing the plot image files
        templatePath (string) - jinja2 template search path
        imgFormat (string) - image file extension (e.g. png)

    Returns:
        self._html (string) - rendered html
    """
    plot_tables = []
    plot_set = [ self._expectedPlots_globalAvg, self._expectedPlots_Nino,
                 self._expectedPlots_transportDiags ]

    # build up the plot_tables array
    for k in range(len(plot_set)):
        plot_table = []
        plot_tuple_list = plot_set[k]
        num_plots = len(plot_tuple_list)
        # full rows plus a possibly-partial last row
        num_last_row = num_plots % self._columns[k]
        num_rows = num_plots//self._columns[k]
        index = 0

        for i in range(num_rows):
            ptuple = []
            for j in range(self._columns[k]):
                label, plot_file = plot_tuple_list[index]
                img_file = '{0}.{1}'.format(plot_file, imgFormat)
                rc, err_msg = cesmEnvLib.checkFile(
                    '{0}/{1}'.format(workdir, img_file), 'read' )
                if not rc:
                    # image missing: flag the cell as an error
                    ptuple.append(('{0}'.format(label),
                                   '{0} - Error'.format(plot_file)))
                else:
                    ptuple.append(('{0}'.format(label), plot_file))
                index += 1
            plot_table.append(ptuple)

        # pad out the last row with empty cells
        if num_last_row > 0:
            ptuple = []
            for i in range(num_last_row):
                label, plot_file = plot_tuple_list[index]
                img_file = '{0}.{1}'.format(plot_file, imgFormat)
                rc, err_msg = cesmEnvLib.checkFile(
                    '{0}/{1}'.format(workdir, img_file), 'read' )
                if not rc:
                    ptuple.append(('{0}'.format(label),
                                   '{0} - Error'.format(plot_file)))
                else:
                    ptuple.append(('{0}'.format(label), plot_file))
                index += 1
            for i in range(self._columns[k] - num_last_row):
                ptuple.append(('',''))
            plot_table.append(ptuple)

        plot_tables.append(('{0}'.format(self._expectedPlotHeaders[k]),
                            plot_table, self._columns[k]))

    # create a jinja2 template object
    templateLoader = jinja2.FileSystemLoader( searchpath=templatePath )
    templateEnv = jinja2.Environment( loader=templateLoader,
                                      keep_trailing_newline=False )
    template = templateEnv.get_template( self._template_file )

    # add the template variables
    templateVars = { 'title' : self._name,
                     'plot_tables' : plot_tables,
                     'imgFormat' : imgFormat
                   }

    # render the html template using the plot tables
    self._html = template.render( templateVars )
    return self._html
def test_invalidFile(self):
    """checkFile on a non-existent file must report failure.

    Bug fix: the original code called
    ``self.assertRaises(cesmEnvLib.checkFile("blah", "write"))`` which
    passes the *result* of checkFile to assertRaises instead of an
    exception class plus a callable, so nothing was ever asserted.
    checkFile returns an (rc, err_msg) tuple rather than raising (see
    its other call sites in this file), so assert on the status flag.
    """
    rc, err_msg = cesmEnvLib.checkFile("blah", "write")
    self.assertFalse(rc)
def run_diagnostics(self, env, scomm):
    """Call the necessary plotting routines to generate land model vs. obs
    diagnostics plots.

    Builds the list of requested plot sets from the 'set_*' env flags,
    partitions the plot classes across MPI tasks, runs each class's NCL
    scripts, and (on the manager rank) assembles the web directories,
    runs the perl webpage scripts, and moves the results to the final
    diag path.

    Arguments:
        env (dictionary) - diagnostics environment dictionary
        scomm (object) - simple communicator used to partition work and
            synchronize ranks
    """
    super(modelVsObs, self).run_diagnostics(env, scomm)
    scomm.sync()

    # setup some global variables
    requested_plot_sets = list()
    local_requested_plots = list()
    local_html_list = list()

    # all the plot module XML vars start with 'set_'
    # (iteritems: this file targets Python 2)
    for key, value in env.iteritems():
        if ("set_" in key and value == 'True'):
            requested_plot_sets.append(key)
    scomm.sync()

    if scomm.is_manager():
        print('DEBUG model_vs_obs requested_plot_sets = {0}'.format(requested_plot_sets))

    # partition requested plots to all tasks
    # first, create plotting classes and get the number of plots each will created
    requested_plots = {}
    set_sizes = {}
    #plots_weights = []
    for plot_set in requested_plot_sets:
        requested_plots.update(lnd_diags_plot_factory.LandDiagnosticPlotFactory(plot_set,env))
    #for plot_id,plot_class in requested_plots.iteritems():
    #    if hasattr(plot_class, 'weight'):
    #        factor = plot_class.weight
    #    else:
    #        factor = 1
    #    plots_weights.append((plot_id,len(plot_class.expectedPlots)*factor))
    # partition based on the number of plots each set will create
    #local_plot_list = scomm.partition(plots_weights, func=partition.WeightBalanced(), involved=True)
    local_plot_list = scomm.partition(requested_plots.keys(), func=partition.EqualStride(), involved=True)
    scomm.sync()

    timer = timekeeper.TimeKeeper()
    # loop over local plot lists - set env and then run plotting script
    timer.start(str(scomm.get_rank())+"ncl total time on task")
    for plot_set in local_plot_list:
        timer.start(str(scomm.get_rank())+plot_set)
        plot_class = requested_plots[plot_set]
        print('DEBUG model vs. obs - Checking prerequisite for {0} on rank {1}'.format(plot_class.__class__.__name__, scomm.get_rank()))
        plot_class.check_prerequisites(env)
        # Stringify the env dictionary so NCL receives string values
        for name, value in plot_class.plot_env.iteritems():
            plot_class.plot_env[name] = str(value)
        # call script to create plots
        for script in plot_class.ncl_scripts:
            print('DEBUG model vs. obs - Generating plots for {0} on rank {1} with script {2}'.format(plot_class.__class__.__name__, scomm.get_rank(),script))
            diagUtilsLib.generate_ncl_plots(plot_class.plot_env, script)
        timer.stop(str(scomm.get_rank())+plot_set)
    timer.stop(str(scomm.get_rank())+"ncl total time on task")
    scomm.sync()
    print(timer.get_all_times())
    #w = 0
    #for p in plots_weights:
    #    if p[0] in local_plot_list:
    #        w = w + p[1]
    #print(str(scomm.get_rank())+' weight:'+str(w))

    # set html files (manager rank only)
    if scomm.is_manager():
        # Create web dirs and move images/tables to that web dir
        for n in ('1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12'):
            web_dir = env['WKDIR']
            set_dir = web_dir + '/set' + n
            # Create the plot set web directory
            if not os.path.exists(set_dir):
                os.makedirs(set_dir)
            # Copy plots into the correct web dir
            glob_string = web_dir+'/set'+n+'_*'
            imgs = glob.glob(glob_string)
            if len(imgs) > 0:
                for img in imgs:
                    new_fn = set_dir + '/' + os.path.basename(img)
                    os.rename(img,new_fn)
        env['WEB_DIR'] = web_dir
        shutil.copy2(env['POSTPROCESS_PATH']+'/lnd_diag/inputFiles/'+env['VAR_MASTER'],web_dir+'/variable_master.ncl')
        web_script_1 = env['POSTPROCESS_PATH']+'/lnd_diag/shared/lnd_create_webpage.pl'
        web_script_2 = env['POSTPROCESS_PATH']+'/lnd_diag/shared/lnd_lookupTable.pl'

        print('Creating Web Pages')
        # set the shell environment
        cesmEnvLib.setXmlEnv(env)
        # lnd_create_webpage.pl call
        rc1, err_msg = cesmEnvLib.checkFile(web_script_1,'read')
        if rc1:
            try:
                subprocess.check_call(web_script_1)
            except subprocess.CalledProcessError as e:
                print('WARNING: {0} error executing command:'.format(web_script_1))
                print(' {0}'.format(e.cmd))
                print(' rc = {0}'.format(e.returncode))
        else:
            print('{0}... {1} file not found'.format(err_msg,web_script_1))
        # lnd_lookupTable.pl call
        rc2, err_msg = cesmEnvLib.checkFile(web_script_2,'read')
        if rc2:
            try:
                subprocess.check_call(web_script_2)
            except subprocess.CalledProcessError as e:
                print('WARNING: {0} error executing command:'.format(web_script_2))
                print(' {0}'.format(e.cmd))
                print(' rc = {0}'.format(e.returncode))
        else:
            print('{0}... {1} file not found'.format(err_msg,web_script_2))

        # move all the plots to the diag_path with the years appended to the path
        endYr = (int(env['clim_first_yr_1']) + int(env['clim_num_yrs_1'])) - 1
        diag_path = '{0}/diag/{1}-obs.{2}_{3}'.format(env['OUTPUT_ROOT_PATH'], env['caseid_1'], env['clim_first_yr_1'], str(endYr))
        move_files = True
        try:
            os.makedirs(diag_path)
        except OSError as exception:
            if exception.errno != errno.EEXIST:
                err_msg = 'ERROR: {0} problem accessing directory {1}'.format(self.__class__.__name__, diag_path)
                raise OSError(err_msg)
                # NOTE(review): the assignment below is unreachable after the
                # raise above -- confirm intended behavior
                move_files = False
            elif env['CLEANUP_FILES'].lower() in ['t','true']:
                # delete all the files in the diag_path directory
                for root, dirs, files in os.walk(diag_path):
                    for f in files:
                        os.unlink(os.path.join(root, f))
                    for d in dirs:
                        shutil.rmtree(os.path.join(root, d))
            elif env['CLEANUP_FILES'].lower() in ['f','false']:
                print('WARNING: {0} exists and is not empty and LNDDIAG_CLEANUP_FILES = False. Leaving new diagnostics files in {1}'.format(diag_path, web_dir))
                diag_path = web_dir
                move_files = False

        print('DEBUG: model vs. obs web_dir = {0}'.format(web_dir))
        print('DEBUG: model vs. obs diag_path = {0}'.format(diag_path))

        # move the files to the new diag_path
        if move_files:
            try:
                print('DEBUG: model_vs_obs renaming web files')
                os.rename(web_dir, diag_path)
            except OSError as e:
                # fall back to leaving the files where they are
                print ('WARNING: Error renaming %s to %s: %s' % (web_dir, diag_path, e))
                diag_path = web_dir

        # setup the LNDDIAG_WEBDIR_MODEL_VS_OBS output file
        # NOTE(review): env_file is assigned but not used below -- confirm
        env_file = '{0}/env_diags_lnd.xml'.format(env['PP_CASE_PATH'])
        key = 'LNDDIAG_WEBDIR_{0}'.format(self._name)
        value = diag_path
        web_file = '{0}/web_dirs/{1}.{2}'.format(env['PP_CASE_PATH'], key, datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S'))
        try:
            diagUtilsLib.write_web_file(web_file, 'lnd', key, value)
        # NOTE(review): bare except deliberately makes this best-effort
        except:
            print('WARNING lnd model_vs_obs unable to write {0}={1} to {2}'.format(key, value, web_file))

        print('*******************************************************************************')
        print('Successfully completed generating land diagnostics model vs. observation plots')
        print('*******************************************************************************')
def write_env_file(self, envDict, configFile, tmplFile, envFile, comp, new_entry_id, new_entry_value):
    """create the XML file in the CASEROOT.

    Reads the existing env XML file and the config definition file,
    replaces the entry matching new_entry_id with new_entry_value, appends
    the machine-dependent DIAGOBSROOT entry for non-conform components,
    and re-renders the env file through the jinja2 template.

    Arguments:
        envDict (dictionary) - environment dictionary
        configFile (string) - full path to input config_[definition].xml file
        tmplFile (string) - template file for output [file].xml
        envFile (string) - output [file].xml name
        comp (string) - component name used for the DIAGOBSROOT entry;
            skipped when empty or when it contains 'con'
        new_entry_id (string) - ID of value to be updated
        new_entry_value (string) - updated value

    Raises:
        OSError - if any of the input/output files fail the access checks
    """
    orig_env = dict()
    group_list = list()
    sorted_group_list = list()

    # check all the files are read and/or write
    rc, err_msg = cesmEnvLib.checkFile(configFile, 'read')
    if not rc:
        raise OSError(err_msg)
    rc, err_msg = cesmEnvLib.checkFile('{0}/Templates/{1}'.format(envDict['POSTPROCESS_PATH'], tmplFile), 'read')
    if not rc:
        raise OSError(err_msg)
    rc, err_msg = cesmEnvLib.checkFile(envFile, 'read')
    if not rc:
        raise OSError(err_msg)
    rc, err_msg = cesmEnvLib.checkFile(envFile, 'write')
    if not rc:
        raise OSError(err_msg)

    # read in the original env file
    orig_xml_tree = etree.ElementTree()
    orig_xml_tree.parse(envFile)
    # load the original env file into a dict without expanding the values
    for orig_entry_tag in orig_xml_tree.findall('entry'):
        orig_env[orig_entry_tag.get('id')] = orig_entry_tag.get('value')

    # read in the configFile
    xml_tree = etree.ElementTree()
    xml_tree.parse(configFile)
    for group_tag in xml_tree.findall('./groups/group'):
        xml_list = list()
        group_dict = dict()
        name = group_tag.get('name')
        order = int(group_tag.find('order').text)
        comment = group_tag.find('comment').text
        for entry_tag in group_tag.findall('entry'):
            # substitute the updated value for the matching entry id;
            # all other entries keep their original (unexpanded) value
            if entry_tag.get('id') == new_entry_id:
                xml_list.append(XmlEntry(new_entry_id, new_entry_value,
                                         entry_tag.get('desc')))
            else:
                xml_list.append(XmlEntry(entry_tag.get('id'),
                                         orig_env[entry_tag.get('id')],
                                         entry_tag.get('desc')))
        group_dict = {'order': order, 'name': name, 'comment': comment,
                      'xml_list': xml_list}
        group_list.append(group_dict)

    # preserve the group ordering declared in the config file
    sorted_group_list = sorted(group_list, key=itemgetter('order'))

    # add an additional entry for machine dependent input
    # observation files root path
    xml_list = list()
    if len(comp) > 0 and 'con' not in comp:
        if 'DIAGOBSROOT' in new_entry_id:
            xml_obs = XmlEntry('{0}DIAG_DIAGOBSROOT'.format(comp.upper()),
                               new_entry_value,
                               'Machine dependent diagnostics observation files root path')
        else:
            xml_obs = XmlEntry('{0}DIAG_DIAGOBSROOT'.format(comp.upper()),
                               orig_env['{0}DIAG_DIAGOBSROOT'.format(comp.upper())],
                               'Machine dependent diagnostics observation files root path')
        xml_list.append(xml_obs)

    # the xml_list now contains a list of XmlEntry classes that
    # can be written to the template
    templateLoader = jinja2.FileSystemLoader(
        searchpath='{0}/Templates'.format(envDict['POSTPROCESS_PATH']))
    templateEnv = jinja2.Environment(loader=templateLoader)
    template = templateEnv.get_template(tmplFile)
    templateVars = { 'xml_list' : xml_list,
                     'group_list' : sorted_group_list }

    # render the template
    env_tmpl = template.render(templateVars)

    # write the env_file
    with open(envFile, 'w') as xml:
        xml.write(env_tmpl)
def test_defaultFile(self):
    """checkFile on a known-readable file must report success.

    Bug fix: checkFile returns an (rc, err_msg) tuple (see its other
    call sites in this file); the original code asserted the truthiness
    of the whole tuple, which is always True for a non-empty tuple, so
    the test could never fail. Unpack and assert the status flag.
    """
    rc, err_msg = cesmEnvLib.checkFile("./test_checkXMLvar.py", "read")
    self.assertTrue(rc)
def setup_workdir(self, env, t, scomm):
    """This method sets up the unique working directory for a given
    diagnostic type.

    Computes the averaging start year, creates (or recreates) the working
    directory on the manager rank, then symlinks the existing climatology
    files into it under the date-free names the NCL scripts expect
    (existing climos have dates in their names; the NCL scripts do not
    accept dates).

    Arguments:
        env (dictionary) - diagnostics environment dictionary
        t (string) - diagnostic type suffix used to select env keys,
            e.g. env['ENDYR_'+t] (presumably 'CONT' or 'DIFF' -- confirm
            against callers)
        scomm (object) - simple communicator; only the manager rank
            touches the filesystem

    Returns:
        env (dictionary) - updated environment dictionary

    Raises:
        OSError - if the working directory cannot be created
    """
    # create the working directory first before calling the base class prerequisites
    avg_BEGYR = (int(env['ENDYR_'+t]) - int(env['YRS_TO_AVG'])) + 1
    subdir = '{0}.{1}-{2}/{3}.{4}_{5}'.format(env['CASE_TO_'+t], avg_BEGYR,
                                              env['ENDYR_'+t],
                                              self._name.lower(),
                                              str(avg_BEGYR),
                                              env['ENDYR_'+t])
    workdir = '{0}/{1}'.format(env['PATH_CLIMO_'+t], subdir)
    env['CLIMO_'+t] = workdir

    if (scomm.is_manager()):
        # wipe and recreate the workdir when cleanup is requested
        if env['CLEANUP_FILES'].lower() in ['t','true'] and os.path.exists(workdir):
            shutil.rmtree(workdir)
        try:
            os.makedirs(workdir)
        except OSError as exception:
            # an already-existing directory is acceptable
            if exception.errno != errno.EEXIST:
                err_msg = 'ERROR: {0} problem accessing the working directory {1}'.format(self.__class__.__name__, workdir)
                raise OSError(err_msg)

    # create symbolic links between the old and new workdir and get the real names of the files
    old_workdir = env['PATH_CLIMO_'+t]+'/'+env['CASE_TO_'+t]+'.'+str(avg_BEGYR)+'-'+env['ENDYR_'+t]
    env['PATH_CLIMO_'+t] = workdir

    if (scomm.is_manager()):
        print('calling name = {0}'.format(self._name))
        print('subdir = {0}'.format(subdir))
        print('workdir = {0}'.format(workdir))
        print('old_workdir = {0}'.format(old_workdir))

    # Add links to the new wkdir that use the expected file names
    # (existing climos have dates, the NCL do not like dates)
    if (scomm.is_manager()):
        climo_files = glob.glob(old_workdir+'/*.nc')
        for climo_file in climo_files:
            if ('ice_vol_' in climo_file):
                # ice volume files keep their original basename
                new_fn = workdir + '/' + os.path.basename(climo_file)
                ## if (scomm.is_manager()):
                ##     print('1. ice_diags_bc.py: new_fn = {0}'.format(new_fn))
            else:
                name_split = climo_file.split('.')  # Split on '.'
                if ('-' in name_split[-3]):
                    # third-from-last component holds the date range;
                    # piece together w/o the date, but still has old path
                    fn = str.join('.',name_split[:len(name_split)-3] + name_split[-2:])
                    if (scomm.is_manager()):
                        print('1. fn = {0}'.format(fn))
                    path_split = fn.split('/')  # Remove the path
                    # map the seasonal climo tag to its short name
                    if ('jfm_climo' in path_split[-1]):
                        s = 'jfm'
                    elif ('amj_climo' in path_split[-1]):
                        s = 'amj'
                    elif ('jas_climo' in path_split[-1]):
                        s = 'jas'
                    elif ('ond_climo' in path_split[-1]):
                        s = 'ond'
                    elif ('fm_climo' in path_split[-1]):
                        s = 'fm'
                    elif ('on_climo' in path_split[-1]):
                        s = 'on'
                    elif ('_ANN_climo' in path_split[-1]):
                        s = 'ann'
                    else:
                        s = None
                    if s is not None:
                        # seasonal average link name with zero-padded years
                        new_fn = workdir + '/' + s + '_avg_' + str(avg_BEGYR).zfill(4) + '-' + env['ENDYR_'+t].zfill(4) + '.nc'
                        ## if (scomm.is_manager()):
                        ##     print('2. ice_diags_bc.py s = {0}: new_fn = {1}'.format(s, new_fn))
                    else:
                        new_fn = workdir + '/' +path_split[-1]  # Take file name and add it to new path
                        ## if (scomm.is_manager()):
                        ##     print('3. ice_diags_bc.py: new_fn = {0}'.format(new_fn))
                else:
                    new_fn = workdir + '/' + os.path.basename(climo_file)
                    ## if (scomm.is_manager()):
                    ##     print('4. ice_diags_bc.py: new_fn = {0}'.format(new_fn))
            rc1, err_msg1 = cesmEnvLib.checkFile(new_fn, 'read')
            if not rc1:
                os.symlink(climo_file,new_fn)
            else:
                print('ice_diags_bc.py: unable to create link to file {0}'.format(new_fn))
    return env
# NOTE(review): this block appears to be a duplicated fragment of the interior
# of createLinks() (defined earlier in this file): it references names
# (tavgdir, case, start_year, stop_year, padding, control, workdir) that are
# not defined at this level. Confirm whether it belongs inside an enclosing
# function or should be removed as dead code.
avgFileBaseName = '{0}/{1}.pop.h'.format(tavgdir, case)
case_prefix = '{0}.pop.h'.format(case)

# prepend the years with 0's for some of the plotting routines
zstart_year = start_year.zfill(padding)
zstop_year = stop_year.zfill(padding)

# check if this is a control run or not
cntrl = ''
if control:
    cntrl = 'cntrl.'

# link to the mavg file for the za and plotting routines
mavgFileBase = 'mavg.{0}.{1}.{2}nc'.format(zstart_year, zstop_year, cntrl)
avgFile = '{0}/mavg.{1}-{2}.nc'.format(tavgdir, zstart_year, zstop_year)
rc, err_msg = cesmEnvLib.checkFile(avgFile, 'read')
if rc:
    # create both the zero-padded and as-given year links if absent
    zmavgFile = '{0}/mavg.{1}.{2}.{3}nc'.format(workdir, zstart_year, zstop_year, cntrl)
    mavgFile = '{0}/mavg.{1}.{2}.{3}nc'.format(workdir, start_year, stop_year, cntrl)
    rc1, err_msg1 = cesmEnvLib.checkFile(zmavgFile, 'read')
    if not rc1:
        os.symlink(avgFile, zmavgFile)
    rc1, err_msg1 = cesmEnvLib.checkFile(mavgFile, 'read')
    if not rc1:
        os.symlink(avgFile, mavgFile)
else:
    raise OSError(err_msg)
def test_defaultFile(self):
    """checkFile on a known-readable file must report success.

    Bug fix: checkFile returns an (rc, err_msg) tuple (see its other
    call sites in this file); the original code asserted the truthiness
    of the whole tuple, which is always True for a non-empty tuple, so
    the test could never fail. Unpack and assert the status flag.
    """
    rc, err_msg = cesmEnvLib.checkFile("./test_checkXMLvar.py", "read")
    self.assertTrue(rc)
def check_prerequisites(self, env):
    """Check and set up prerequisites for model vs. control ocean diagnostics.

    Variant that also disables the VELISOPZ/KAPPAZ plot modules for the
    tx0.1v3 resolution. Makes the diagnostic name unique per control case,
    purges stale plot files, resolves the zonal-average grid files for both
    runs, links the control averages, and generates the global zonal
    average files.

    Arguments:
        env (dictionary) - diagnostics environment dictionary

    Returns:
        env (dictionary) - updated environment dictionary

    Raises:
        OSError - if a required grid file is missing or unreadable
    """
    print(' Checking prerequisites for : {0}'.format(self.__class__.__name__))
    # append the control case name so the diagnostic name is unique
    self._name = '{0}_{1}'.format(self._name, env['CNTRLCASE'])
    super(modelVsControl, self).check_prerequisites(env)

    # clean out the old working plot files from the workdir
    if env['CLEANUP_FILES'].upper() in ['T','TRUE']:
        cesmEnvLib.purge(env['WORKDIR'], '.*\.pro')
        cesmEnvLib.purge(env['WORKDIR'], '.*\.gif')
        cesmEnvLib.purge(env['WORKDIR'], '.*\.dat')
        cesmEnvLib.purge(env['WORKDIR'], '.*\.ps')
        cesmEnvLib.purge(env['WORKDIR'], '.*\.png')
        cesmEnvLib.purge(env['WORKDIR'], '.*\.html')

    # create the plot.dat file in the workdir used by all NCL plotting routines
    diagUtilsLib.create_plot_dat(env['WORKDIR'], env['XYRANGE'], env['DEPTHS'])

    # setup prerequisites for the model
    # setup the gridfile based on the resolution and levels;
    # 42/62 vertical-level variants override the default grid file
    os.environ['gridfile'] = '{0}/omwg/za_grids/{1}_grid_info.nc'.format(env['DIAGOBSROOT'],env['RESOLUTION'])
    if env['VERTICAL'] == '42':
        os.environ['gridfile'] = '{0}/omwg/za_grids/{1}_42lev_grid_info.nc'.format(env['DIAGOBSROOT'],env['RESOLUTION'])
    if env['VERTICAL'] == '62':
        os.environ['gridfile'] = '{0}/omwg/za_grids/{1}_62lev_grid_info.nc'.format(env['DIAGOBSROOT'],env['RESOLUTION'])

    # check if gridfile exists and is readable
    rc, err_msg = cesmEnvLib.checkFile(os.environ['gridfile'], 'read')
    if not rc:
        raise OSError(err_msg)
    env['GRIDFILE'] = os.environ['gridfile']

    # check the resolution and decide if some plot modules should be turned off
    if (env['RESOLUTION'] == 'tx0.1v2' or env['RESOLUTION'] == 'tx0.1v3') :
        env['MVC_PM_VELISOPZ'] = os.environ['MVC_PM_VELISOPZ'] = 'FALSE'
        env['MVC_PM_KAPPAZ'] = os.environ['MVC_PM_KAPPAZ'] = 'FALSE'

    # create the global zonal average file used by most of the plotting classes
    print(' model vs. control - calling create_za')
    diagUtilsLib.create_za( env['WORKDIR'], env['TAVGFILE'], env['GRIDFILE'], env['TOOLPATH'], env)

    # setup prerequisites for the model control:
    # link the control mavg/tavg files into the workdir
    control = True
    env['CNTRL_MAVGFILE'], env['CNTRL_TAVGFILE'] = diagUtilsLib.createLinks(env['CNTRLYEAR0'], env['CNTRLYEAR1'], env['CNTRLTAVGDIR'], env['WORKDIR'], env['CNTRLCASE'], control)
    env['CNTRLFILE'] = env['CNTRL_TAVGFILE']

    # setup the gridfile based on the resolution and vertical levels
    os.environ['gridfilecntrl'] = '{0}/omwg/za_grids/{1}_grid_info.nc'.format(env['DIAGOBSROOT'],env['CNTRLRESOLUTION'])
    if env['VERTICAL'] == '42':
        os.environ['gridfilecntrl'] = '{0}/omwg/za_grids/{1}_42lev_grid_info.nc'.format(env['DIAGOBSROOT'],env['CNTRLRESOLUTION'])
    if env['VERTICAL'] == '62':
        os.environ['gridfilecntrl'] = '{0}/omwg/za_grids/{1}_62lev_grid_info.nc'.format(env['DIAGOBSROOT'],env['CNTRLRESOLUTION'])

    # check if gridfile exists and is readable
    rc, err_msg = cesmEnvLib.checkFile(os.environ['gridfilecntrl'], 'read')
    if not rc:
        raise OSError(err_msg)
    env['GRIDFILECNTRL'] = os.environ['gridfilecntrl']

    # check the resolution and decide if some plot modules should be turned off
    if (env['CNTRLRESOLUTION'] == 'tx0.1v2' or env['CNTRLRESOLUTION'] == 'tx0.1v3') :
        env['MVC_PM_VELISOPZ'] = os.environ['MVC_PM_VELISOPZ'] = 'FALSE'
        env['MVC_PM_KAPPAZ'] = os.environ['MVC_PM_KAPPAZ'] = 'FALSE'

    # create the control global zonal average file used by most of the plotting classes
    print(' model vs. control - calling create_za for control run')
    diagUtilsLib.create_za( env['WORKDIR'], env['CNTRL_TAVGFILE'], env['GRIDFILECNTRL'], env['TOOLPATH'], env)
    return env
def test_invalidFile(self):
    """checkFile on a non-existent file must report failure.

    Bug fix: the original code called
    ``self.assertRaises(cesmEnvLib.checkFile("blah", "write"))`` which
    passes the *result* of checkFile to assertRaises instead of an
    exception class plus a callable, so nothing was ever asserted.
    checkFile returns an (rc, err_msg) tuple rather than raising (see
    its other call sites in this file), so assert on the status flag.
    """
    rc, err_msg = cesmEnvLib.checkFile("blah", "write")
    self.assertFalse(rc)
def check_prerequisites(self, env, scomm):
    """Check and set up prerequisites for the atmosphere model vs. model
    diagnostics.

    Maps the user settings into the NCL plotting env variables (case
    names, climo paths, seasons, significance, RGB colour file, paleo
    coastlines), creating the work directory and paleo coastline files on
    the manager rank.

    Arguments:
        env (dictionary) - diagnostics environment dictionary
        scomm (object) - simple communicator; only the manager rank
            touches the filesystem

    Returns:
        env (dictionary) - updated environment dictionary
    """
    print(" Checking prerequisites for : {0}".format(self.__class__.__name__))
    super(modelVsModel, self).check_prerequisites(env, scomm)
    # Set some new env variables
    env['DIAG_CODE'] = env['NCLPATH']
    # unique output root per compared case pair
    env['test_path_diag'] = '{0}/{1}-{2}/'.format(env['test_path_diag'], env['test_casename'], env['cntl_casename'])
    env['WKDIR'] = env['test_path_diag']
    env['WORKDIR'] = env['test_path_diag']
    if scomm.is_manager():
        if not os.path.exists(env['WKDIR']):
            os.makedirs(env['WKDIR'])
    env['COMPARE'] = env['CNTL']
    env['PLOTTYPE'] = env['p_type']
    env['COLORTYPE'] = env['c_type']
    env['MG_MICRO'] = env['microph']
    env['TIMESTAMP'] = env['time_stamp']
    env['TICKMARKS'] = env['tick_marks']
    # optional user-supplied display names for the two cases
    if env['custom_names'] == 'True':
        env['CASENAMES'] = 'True'
        env['CASE1'] = env['test_name']
        env['CASE2'] = env['cntl_name']
    else:
        env['CASENAMES'] = 'False'
        env['CASE1'] = 'null'
        env['CASE2'] = 'null'
    env['CNTL_PLOTVARS'] = 'null'
    env['test_in'] = env['test_path_climo'] + env['test_casename']
    env['test_out'] = env['test_path_climo'] + env['test_casename']
    env['cntl_in'] = env['cntl_path_climo'] + env['cntl_casename']
    env['cntl_out'] = env['cntl_path_climo'] + env['cntl_casename']
    # collect the requested seasons
    env['seas'] = []
    if env['plot_ANN_climo'] == 'True':
        env['seas'].append('ANN')
    if env['plot_DJF_climo'] == 'True':
        env['seas'].append('DJF')
    if env['plot_MAM_climo'] == 'True':
        env['seas'].append('MAM')
    if env['plot_JJA_climo'] == 'True':
        env['seas'].append('JJA')
    if env['plot_SON_climo'] == 'True':
        env['seas'].append('SON')
    # Significance vars
    if env['significance'] == 'True':
        env['SIG_PLOT'] = 'True'
        env['SIG_LVL'] = env['sig_lvl']
    else:
        env['SIG_PLOT'] = 'False'
        env['SIG_LVL'] = 'null'
    # Set the rgb file name based on the requested colour bar
    env['RGB_FILE'] = env['DIAG_HOME']+'/rgb/amwg.rgb'
    if 'default' in env['color_bar']:
        env['RGB_FILE'] = env['DIAG_HOME']+'/rgb/amwg.rgb'
    elif 'blue_red' in env['color_bar']:
        env['RGB_FILE'] = env['DIAG_HOME']+'/rgb/bluered.rgb'
    elif 'blue_yellow_red' in env['color_bar']:
        env['RGB_FILE'] = env['DIAG_HOME']+'/rgb/blueyellowred.rgb'
    # Set Paleo variables
    env['PALEO'] = env['paleo']
    if env['PALEO'] == 'True':
        env['DIFF_PLOTS'] = env['diff_plots']
        # Test coastlines: generate the coastline file if it does not exist
        env['MODELFILE'] = env['test_path_climo']+'/'+env['test_casename']+'_ANN_climo.nc'
        env['LANDMASK'] = env['land_mask1']
        env['PALEODATA'] = env['test_path_climo']+'/'+env['test_casename']
        if scomm.is_manager():
            rc, err_msg = cesmEnvLib.checkFile(env['PALEODATA'],'read')
            if not rc:
                diagUtilsLib.generate_ncl_plots(env,'plot_paleo.ncl')
        env['PALEOCOAST1'] = env['PALEODATA']
        # Cntl coastlines
        env['MODELFILE'] = env['cntl_path_climo']+'/'+env['cntl_casename']+'_ANN_climo.nc'
        env['LANDMASK'] = env['land_mask2']
        env['PALEODATA'] = env['cntl_path_climo']+'/'+env['cntl_casename']
        if scomm.is_manager():
            rc, err_msg = cesmEnvLib.checkFile(env['PALEODATA'],'read')
            if not rc:
                diagUtilsLib.generate_ncl_plots(env,'plot_paleo.ncl')
        env['PALEOCOAST2'] = env['PALEODATA']
    else:
        env['PALEOCOAST1'] = 'null'
        env['PALEOCOAST2'] = 'null'
        env['DIFF_PLOTS'] = 'False'
    env['USE_WACCM_LEVS'] = 'False'
    scomm.sync()
    return env
def check_prerequisites(self, env):
    """List and check specific prerequisites for this plot.

    Ensures the temperature (TOBSFILE) and salinity (SOBSFILE)
    observation files exist and that their global zonal average files
    are present in the workdir, generating them if necessary.

    Bug fix: the exception handlers caught the unqualified name
    `CalledProcessError`, but this file uses `subprocess.CalledProcessError`
    everywhere else (see create_za and run_diagnostics), so the handler
    would raise NameError instead of reporting the failed command. The
    duplicated TEMP/SALT logic is also factored into a private helper.

    Arguments:
        env (dictionary) - diagnostics environment dictionary

    Raises:
        OSError - if an observation file or the za executable is missing
    """
    super(ZonalAverage3dFields, self).check_prerequisites(env)
    print(' Checking prerequisites for : {0}'.format(
        self.__class__.__name__))

    # temperature observation zonal average
    self._create_obs_za(env, env['TOBSFILE'])
    # salinity observation zonal average
    self._create_obs_za(env, env['SOBSFILE'])

def _create_obs_za(self, env, obsFile):
    """Create the zonal average file for one observation file if missing.

    Checks that obsFile exists under env['TSOBSDIR'], and if the
    corresponding za_<obsFile> is absent from env['WORKDIR'], copies the
    obs file to a tmp file, appends the model UAREA variable with ncks,
    runs the za tool, and renames the result into place.

    Arguments:
        env (dictionary) - diagnostics environment dictionary
        obsFile (string) - observation file basename

    Raises:
        OSError - if the obs file or the za executable is missing
    """
    # check that the observation file exists and is readable
    rc, err_msg = cesmEnvLib.checkFile(
        '{0}/{1}'.format(env['TSOBSDIR'], obsFile), 'read')
    if not rc:
        raise OSError(err_msg)

    # check the observation zonal average file
    zaFile = '{0}/za_{1}'.format(env['WORKDIR'], obsFile)
    rc, err_msg = cesmEnvLib.checkFile(zaFile, 'read')
    if not rc:
        # change to the workdir; the za tool writes into the cwd
        cwd = os.getcwd()
        os.chdir(env['WORKDIR'])

        # copy the obs file to a tmp file
        shutil.copy2('{0}/{1}'.format(env['TSOBSDIR'], obsFile),
                     '{0}_tmp'.format(obsFile))

        # call ncks to extract the UAREA variable from the model tavg file
        try:
            subprocess.check_output([
                'ncks', '-A', '-v', 'UAREA', env['TAVGFILE'],
                '{0}_tmp'.format(obsFile)
            ], env=env)
        except subprocess.CalledProcessError as e:
            print('ERROR: {0} call to ncks failed with error:'.format(
                self.name()))
            print(' {0} - {1}'.format(e.cmd, e.output))
            sys.exit(1)

        # call zaCommand
        zaCommand = '{0}/za'.format(env['TOOLPATH'])
        rc, err_msg = cesmEnvLib.checkFile(zaCommand, 'exec')
        if not rc:
            raise OSError(err_msg)
        try:
            subprocess.check_output([
                zaCommand, '-O', '-time_const', '-grid_file',
                env['GRIDFILE'], '{0}_tmp'.format(obsFile)
            ], env=env)
        except subprocess.CalledProcessError as e:
            print('ERROR: {0} call to {1} failed with error:'.format(
                self.name(), zaCommand))
            print(' {0} - {1}'.format(e.cmd, e.output))
            sys.exit(1)

        # rename the tmp file into its final name
        os.renames('za_{0}_tmp'.format(obsFile),
                   'za_{0}'.format(obsFile))
        os.chdir(cwd)
def _create_html(self, workdir, templatePath, imgFormat):
    """Create and render the html that is returned to the calling wrapper.

    Builds two tables of plot links - the main expected plots and the
    global zonal-average 2d flux plots - checking that each image file
    exists in workdir, then renders them through the jinja2 template.

    Arguments:
        workdir (string) - directory containing the plot image files
        templatePath (string) - search path for the jinja2 template
        imgFormat (string) - image file extension (e.g. 'png')

    Return:
        (shortname, html) - tuple of the plot short name and rendered html
    """
    num_cols = 7

    def build_table(plot_files):
        """Lay plot_files out into rows of num_cols cells.

        Missing image files are flagged with ' - Error'; the last,
        partial row is padded with empty strings. (This loop was
        previously duplicated four times inline.)
        """
        table = []
        row = []
        for plot_file in plot_files:
            img_file = '{0}.{1}'.format(plot_file, imgFormat)
            rc, err_msg = cesmEnvLib.checkFile(
                '{0}/{1}'.format(workdir, img_file), 'read')
            if not rc:
                row.append('{0} - Error'.format(plot_file))
            else:
                row.append(plot_file)
            if len(row) == num_cols:
                table.append(row)
                row = []
        if row:
            # pad out the last row to the full column count
            row.extend([''] * (num_cols - len(row)))
            table.append(row)
        return table

    plot_table = build_table(self._expectedPlots)

    # work on the global zonal average 2d flux plots:
    # check which set of plots to link to for SHF and SFWF za totals
    img_file = 'SHF_TOTAL_GLO_za.{0}'.format(imgFormat)
    rc1, err_msg = cesmEnvLib.checkFile(
        '{0}/{1}'.format(workdir, img_file), 'read')
    img_file = 'SFWF_TOTAL_GLO_za.{0}'.format(imgFormat)
    rc2, err_msg = cesmEnvLib.checkFile(
        '{0}/{1}'.format(workdir, img_file), 'read')
    expectedPlots_za = self._expectedPlots_za
    if not rc1 and not rc2:
        # neither TOTAL plot exists - link to the new-style plot names
        expectedPlots_za = self._expectedPlots_za_new
    plot_za_table = build_table(expectedPlots_za)

    # create a jinja2 template object
    templateLoader = jinja2.FileSystemLoader(searchpath=templatePath)
    templateEnv = jinja2.Environment(loader=templateLoader,
                                     keep_trailing_newline=False)
    template = templateEnv.get_template(self._template_file)

    # add the template variables
    templateVars = {'title': self._name,
                    'cols': num_cols,
                    'plot_table': plot_table,
                    'plot_za_table': plot_za_table,
                    'imgFormat': imgFormat}

    # render the html template using the plot tables
    self._html = template.render(templateVars)
    return self._shortname, self._html
def check_prerequisites(self, env):
    """List and check specific prerequisites for this plot.

    Verifies that the TEMP (TOBSFILE) and SALT (SOBSFILE) observation
    files exist and are readable. If the corresponding zonal-average
    files ('za_<obsfile>') are not already present in WORKDIR, they are
    generated: the observation file is copied to a tmp file, the UAREA
    variable is appended from TAVGFILE via ncks (with '-6' when
    netcdf_format is 'netcdfLarge', i.e. 64-bit-offset output), and the
    external 'za' tool is run on the result.

    Arguments:
        env (dict) - diagnostics environment; must provide TSOBSDIR,
            TOBSFILE, SOBSFILE, WORKDIR, TAVGFILE, TOOLPATH, GRIDFILE
            and netcdf_format.

    Raises:
        OSError - if an observation file or the za executable is
            missing or not accessible.
    """
    super(ZonalAverage3dFields, self).check_prerequisites(env)
    print(' Checking prerequisites for : {0}'.format(self.__class__.__name__))

    def build_za_file(obs_file):
        """Create za_<obs_file> in WORKDIR if it does not already exist.

        Shared worker for the TEMP and SALT observation files; the two
        code paths were previously duplicated inline.
        """
        za_file = '{0}/za_{1}'.format(env['WORKDIR'], obs_file)
        rc, err_msg = cesmEnvLib.checkFile(za_file, 'read')
        if rc:
            # zonal-average file already exists - nothing to do
            return

        # the za tool writes its output into the current directory,
        # so work from inside WORKDIR and restore the cwd afterwards
        cwd = os.getcwd()
        os.chdir(env['WORKDIR'])

        # copy the observation file to a tmp file
        tmp_file = '{0}_tmp'.format(obs_file)
        shutil.copy2('{0}/{1}'.format(env['TSOBSDIR'], obs_file), tmp_file)

        # call ncks to extract the UAREA variable from the model average;
        # '-6' selects 64-bit-offset output for the netcdfLarge format
        za_args = ['ncks', '-A', '-v', 'UAREA', env['TAVGFILE'], tmp_file]
        if env['netcdf_format'] in ['netcdfLarge']:
            za_args = ['ncks', '-6', '-A', '-v', 'UAREA',
                       env['TAVGFILE'], tmp_file]
        try:
            subprocess.check_output(za_args, env=env)
        except subprocess.CalledProcessError as e:
            print('ERROR: {0} call to ncks failed with error:'.format(self.name()))
            print('    {0} - {1}'.format(e.cmd, e.output))
            sys.exit(1)

        # call the external zonal-average tool
        zaCommand = '{0}/za'.format(env['TOOLPATH'])
        rc, err_msg = cesmEnvLib.checkFile(zaCommand, 'exec')
        if not rc:
            raise OSError(err_msg)
        try:
            subprocess.check_output(
                [zaCommand, '-O', '-time_const', '-grid_file',
                 env['GRIDFILE'], tmp_file],
                env=env)
        except subprocess.CalledProcessError as e:
            print('ERROR: {0} call to {1} failed with error:'.format(self.name(), zaCommand))
            print('    {0} - {1}'.format(e.cmd, e.output))
            sys.exit(1)

        # rename the za output of the tmp file to the final name
        os.renames('za_{0}'.format(tmp_file), 'za_{0}'.format(obs_file))
        os.chdir(cwd)

    # check that temperature observation TOBSFILE exists and is readable
    rc, err_msg = cesmEnvLib.checkFile(
        '{0}/{1}'.format(env['TSOBSDIR'], env['TOBSFILE']), 'read')
    if not rc:
        raise OSError(err_msg)
    build_za_file(env['TOBSFILE'])

    # check that salinity observation SOBSFILE exists and is readable
    rc, err_msg = cesmEnvLib.checkFile(
        '{0}/{1}'.format(env['TSOBSDIR'], env['SOBSFILE']), 'read')
    if not rc:
        raise OSError(err_msg)
    build_za_file(env['SOBSFILE'])
def _create_html(self, workdir, templatePath, imgFormat):
    """Create and render the html that is returned to the calling wrapper.

    Builds a longitude-depth table and a latitude-depth table of plot
    image links, checking each image file in workdir, then renders them
    through the jinja2 template.

    Arguments:
        workdir (string) - directory containing the plot image files
        templatePath (string) - search path for the jinja2 template
        imgFormat (string) - image file extension (e.g. 'png')

    Return:
        (shortname, html) - tuple of the plot short name and rendered html
    """
    # NOTE(review): the degree symbols below look mojibake-encoded
    # ('°' instead of '°') - confirm against the rendered html before
    # changing; reproduced as-is here.
    labels = ['143°E', '156°E', '165°E', '180°E', '190°E',
              '205°E', '220°E', '235°E', '250°E', '265°E']
    depths = ['143', '156', '165', '180', '190',
              '205', '220', '235', '250', '265']
    num_cols = 5
    long_plot_table = []
    lat_plot_table = []

    # generate the longitude table
    plot_tuple_list = []
    # getattr replaces the original eval() - same attribute lookup,
    # without evaluating a code string
    plot_list = getattr(self, '_expectedPlots_Longitude_Depth')
    for j in range(len(plot_list)):
        img_file = '{0}.{1}'.format(plot_list[j], imgFormat)
        rc, err_msg = cesmEnvLib.checkFile(
            '{0}/{1}'.format(workdir, img_file), 'read')
        if not rc:
            plot_tuple = (j, self._longitude_linkNames[j],
                          '{0} - Error'.format(img_file))
        else:
            plot_tuple = (j, self._longitude_linkNames[j], img_file)
        plot_tuple_list.append(plot_tuple)
    print('DEBUG... plot_tuple_list = {0}'.format(plot_tuple_list))
    long_plot_table.append(plot_tuple_list)

    # generate the latitude table - one row per depth, first cell is the
    # longitude label, remaining num_cols-1 cells are the plots
    for i in range(len(labels)):
        plot_tuple_list = [(0, 'label', '{0}:'.format(labels[i]))]
        plot_list = getattr(
            self, '_expectedPlots_Latitude_Depth_{0}'.format(depths[i]))
        for j in range(num_cols - 1):
            img_file = '{0}.{1}'.format(plot_list[j], imgFormat)
            rc, err_msg = cesmEnvLib.checkFile(
                '{0}/{1}'.format(workdir, img_file), 'read')
            if not rc:
                plot_tuple = (j + 1, self._latitude_linkNames[j],
                              '{0} - Error'.format(img_file))
            else:
                plot_tuple = (j + 1, self._latitude_linkNames[j], img_file)
            plot_tuple_list.append(plot_tuple)
        print('DEBUG... plot_tuple_list[{0}] = {1}'.format(
            i, plot_tuple_list))
        lat_plot_table.append(plot_tuple_list)

    # create a jinja2 template object
    templateLoader = jinja2.FileSystemLoader(searchpath=templatePath)
    templateEnv = jinja2.Environment(loader=templateLoader,
                                     keep_trailing_newline=False)
    template = templateEnv.get_template(self._template_file)

    # add the template variables
    templateVars = {'title': self._name,
                    'long_plot_table': long_plot_table,
                    'lat_plot_table': lat_plot_table,
                    'num_rows': len(labels),
                    'cols': num_cols}

    # render the html template using the plot tables
    self._html = template.render(templateVars)
    return self._shortname, self._html