Example #1
    def run_diagnostics(self, env, scomm):
        """ call the necessary plotting routines to generate diagnostics plots
        """
        super(modelVsModel, self).run_diagnostics(env, scomm)
        scomm.sync()

        # setup some global variables
        requested_plot_sets = list()
        local_requested_plots = list()
        local_html_list = list()

        # all the plot module XML vars start with 'set_'  need to strip that off
        for key, value in env.iteritems():
            if ("wset_" in key
                    and (value == 'True' or env['all_waccm_sets'] == 'True')):
                requested_plot_sets.append(key)
            elif ("cset_" in key
                  and (value == 'True' or env['all_chem_sets'] == 'True')):
                requested_plot_sets.append(key)
            elif ("set_" in key
                  and (value == 'True' or env['all_sets'] == 'True')):
                if ("wset_" not in key and "cset_" not in key):
                    requested_plot_sets.append(key)

        scomm.sync()

        # partition requested plots to all tasks
        # first, create plotting classes and get the number of plots each will create
        requested_plots = {}
        set_sizes = {}
        plots_weights = []
        for plot_set in requested_plot_sets:
            requested_plots.update(
                atm_diags_plot_factory.atmosphereDiagnosticPlotFactory(
                    plot_set, env))
        for plot_id, plot_class in requested_plots.iteritems():
            if hasattr(plot_class, 'weight'):
                factor = plot_class.weight
            else:
                factor = 1
            plots_weights.append(
                (plot_id, len(plot_class.expectedPlots) * factor))
        # partition based on the number of plots each set will create
        local_plot_list = scomm.partition(plots_weights,
                                          func=partition.WeightBalanced(),
                                          involved=True)

        timer = timekeeper.TimeKeeper()
        # loop over local plot lists - set env and then run plotting script
        #for plot_id,plot_class in local_plot_list.interitems():
        timer.start(str(scomm.get_rank()) + "ncl total time on task")
        for plot_set in local_plot_list:
            timer.start(str(scomm.get_rank()) + plot_set)
            plot_class = requested_plots[plot_set]
            # set all env variables (global and particular to this plot call)
            plot_class.check_prerequisites(env)
            # Stringify the env dictionary
            for name, value in plot_class.plot_env.iteritems():
                plot_class.plot_env[name] = str(value)
            # call script to create plots
            for script in plot_class.ncl_scripts:
                diagUtilsLib.generate_ncl_plots(plot_class.plot_env, script)
                plot_class.plot_env['NCDF_MODE'] = 'write'
                plot_class.plot_env['VAR_MODE'] = 'write'
            timer.stop(str(scomm.get_rank()) + plot_set)
        timer.stop(str(scomm.get_rank()) + "ncl total time on task")
        scomm.sync()
        print(timer.get_all_times())
        w = 0
        for p in plots_weights:
            if p[0] in local_plot_list:
                w = w + p[1]
        print(str(scomm.get_rank()) + ' weight:' + str(w))

        # set html files
        if scomm.is_manager():
            env['HTML_HOME'] = env['DIAG_HOME'] + '/html/model1-model2/'
            # Get web dir name and create it if it does not exist
            #web_dir = '{0}/{1}-{2}'.format(env['test_path_diag'], env['test_casename'], env['cntl_casename'])
            web_dir = env['test_path_diag']
            #if not os.path.exists(web_dir):
            #    os.makedirs(web_dir)
            # Copy over some files needed by web pages
            if not os.path.exists(web_dir + '/images'):
                os.mkdir(web_dir + '/images')
            diag_imgs = glob.glob(env['DIAG_HOME'] + '/html/images/*')
            for img in diag_imgs:
                shutil.copy(img, web_dir + '/images/')

            # Create set dirs, copy plots to set dir, and create html file for set
            requested_plot_sets.append(
                'sets')  # Add 'sets' to create top level html files
            for plot_set in requested_plot_sets:
                if 'set_5' == plot_set or 'set_6' == plot_set:
                    glob_set = plot_set.replace('_', '')
                    plot_set = 'set5_6'
                elif 'set_1' == plot_set:
                    glob_set = 'table_'
                    plot_set = plot_set.replace('_', '')
                elif 'sets' == plot_set:
                    set_dir = web_dir + '/'
                else:
                    plot_set = plot_set.replace('_', '')
                    glob_set = plot_set
                if 'sets' not in plot_set:  #'sets' is top level, don't create directory or copy images files
                    set_dir = web_dir + '/' + plot_set
                    # Create the plot set web directory
                    if not os.path.exists(set_dir):
                        os.makedirs(set_dir)
                    # Copy plots into the correct web dir
                    glob_string = env['test_path_diag'] + '/' + glob_set + '*.*'
                    imgs = glob.glob(glob_string)
                    if len(imgs) > 0:
                        for img in imgs:
                            new_fn = set_dir + '/' + os.path.basename(img)
                            os.rename(img, new_fn)
                # Copy/Process html files
                if 'sets' in plot_set:
                    orig_html = env['HTML_HOME'] + '/' + plot_set
                else:
                    orig_html = env[
                        'HTML_HOME'] + '/' + plot_set + '/' + plot_set
                create_atm_html.create_plotset_html(orig_html, set_dir,
                                                    plot_set, env,
                                                    'model_vs_model')

            # Remove any plotvar netcdf files that exists in the diag directory
            if env['save_ncdfs'] == 'False':
                cesmEnvLib.purge(env['test_path_diag'], '.*\.nc')
                cesmEnvLib.purge(env['test_path_diag'], '/station_ids')

            # move all the plots to the diag_path with the years appended to the path
            endYr1 = (int(env['test_first_yr']) + int(env['test_nyrs'])) - 1
            endYr2 = (int(env['cntl_first_yr']) + int(env['cntl_nyrs'])) - 1
            diag_path = '{0}/diag/{1}.{2}_{3}-{4}.{5}_{6}'.format(
                env['OUTPUT_ROOT_PATH'], env['test_casename'],
                env['test_first_yr'], str(endYr1), env['cntl_casename'],
                env['cntl_first_yr'], str(endYr2))
            move_files = True

            try:
                os.makedirs(diag_path)
            except OSError as exception:
                if exception.errno != errno.EEXIST:
                    err_msg = 'ERROR: {0} problem accessing directory {1}'.format(
                        self.__class__.__name__, diag_path)
                    move_files = False
                    raise OSError(err_msg)

                elif env['CLEANUP_FILES'].lower() in ['t', 'true']:
                    # delete all the files in the diag_path directory
                    for root, dirs, files in os.walk(diag_path):
                        for f in files:
                            os.unlink(os.path.join(root, f))
                        for d in dirs:
                            shutil.rmtree(os.path.join(root, d))

                elif env['CLEANUP_FILES'].lower() in ['f', 'false']:
                    print(
                        'WARNING: {0} exists and is not empty and ATMDIAG_CLEANUP_FILES = False. Leaving new diagnostics files in {1}'
                        .format(diag_path, web_dir))
                    diag_path = web_dir
                    move_files = False

            # move the files to the new diag_path
            if move_files:
                try:
                    print('DEBUG: model_vs_model renaming web files')
                    os.rename(web_dir, diag_path)
                except OSError as e:
                    print('WARNING: Error renaming %s to %s: %s' %
                          (web_dir, diag_path, e))
                    diag_path = web_dir

            # setup the unique ATMDIAG_WEBDIR_MODEL_VS_OBS output file
            env_file = '{0}/env_diags_atm.xml'.format(env['PP_CASE_PATH'])
            key = 'ATMDIAG_WEBDIR_{0}'.format(self._name)
            value = diag_path
            ##web_file = '{0}/web_dirs/{1}.{2}-{3}'.format(env['PP_CASE_PATH'], key, scomm.get_size(), scomm.get_rank() )
            web_file = '{0}/web_dirs/{1}.{2}'.format(
                env['PP_CASE_PATH'], key,
                datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S'))
            try:
                diagUtilsLib.write_web_file(web_file, 'atm', key, value)
            except:
                print(
                    'WARNING atm model_vs_model unable to write {0}={1} to {2}'
                    .format(key, value, web_file))

            print(
                '*******************************************************************************'
            )
            print(
                'Successfully completed generating atmosphere diagnostics model vs. model plots'
            )
            print(
                '*******************************************************************************'
            )
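
Note on the partitioning step above: the (plot_id, weight) pairs are handed to scomm.partition with a WeightBalanced partitioner so that expensive plot sets are spread evenly across tasks. A minimal, self-contained sketch of that idea, using a greedy heaviest-first assignment, is shown below; the helper name, the rank count, and the greedy strategy are illustrative assumptions, not the actual asaptools implementation.

import heapq

def weight_balanced_partition(items, nranks):
    # Greedy sketch of a weight-balanced partition: heaviest items first,
    # each placed on the currently lightest rank. Illustrative only.
    ordered = sorted(items, key=lambda pair: pair[1], reverse=True)
    buckets = [(0, rank, []) for rank in range(nranks)]   # (total_weight, rank, plot_ids)
    heapq.heapify(buckets)
    for plot_id, weight in ordered:
        total, rank, assigned = heapq.heappop(buckets)
        assigned.append(plot_id)
        heapq.heappush(buckets, (total + weight, rank, assigned))
    return {rank: assigned for _, rank, assigned in buckets}

# e.g. two tasks splitting three plot sets of unequal cost
print(weight_balanced_partition([('set5_6', 40), ('set2', 12), ('wset1', 8)], 2))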
Example #2
    def check_prerequisites(self, env):
        """ check prerequisites
        """
        print('  Checking prerequisites for : {0}'.format(
            self.__class__.__name__))
        self._name = '{0}_{1}'.format(self._name, env['CNTRLCASE'])
        super(modelVsControl, self).check_prerequisites(env)

        # clean out the old working plot files from the workdir
        if env['CLEANUP_FILES'].upper() in ['T', 'TRUE']:
            cesmEnvLib.purge(env['WORKDIR'], '.*\.pro')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.gif')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.dat')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.ps')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.png')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.html')

        # create the plot.dat file in the workdir used by all NCL plotting routines
        diagUtilsLib.create_plot_dat(env['WORKDIR'], env['XYRANGE'],
                                     env['DEPTHS'])

        # setup prerequisites for the model
        # setup the gridfile based on the resolution and levels
        os.environ['gridfile'] = '{0}/omwg/za_grids/{1}_grid_info.nc'.format(
            env['DIAGOBSROOT'], env['RESOLUTION'])
        if env['VERTICAL'] == '42':
            os.environ[
                'gridfile'] = '{0}/omwg/za_grids/{1}_42lev_grid_info.nc'.format(
                    env['DIAGOBSROOT'], env['RESOLUTION'])

        if env['VERTICAL'] == '62':
            os.environ[
                'gridfile'] = '{0}/omwg/za_grids/{1}_62lev_grid_info.nc'.format(
                    env['DIAGOBSROOT'], env['RESOLUTION'])

        # check if gridfile exists and is readable
        rc, err_msg = cesmEnvLib.checkFile(os.environ['gridfile'], 'read')
        if not rc:
            raise OSError(err_msg)
        env['GRIDFILE'] = os.environ['gridfile']

        # check the resolution and decide if some plot modules should be turned off
        if env['RESOLUTION'] == 'tx0.1v2':
            env['MVC_PM_VELISOPZ'] = os.environ['MVC_PM_VELISOPZ'] = 'FALSE'
            env['MVC_PM_KAPPAZ'] = os.environ['MVC_PM_KAPPAZ'] = 'FALSE'

        # create the global zonal average file used by most of the plotting classes
        print('   model vs. control - calling create_za')
        diagUtilsLib.create_za(env['WORKDIR'], env['TAVGFILE'],
                               env['GRIDFILE'], env['TOOLPATH'], env)

        # setup prerequisites for the model control
        control = True
        env['CNTRL_MAVGFILE'], env[
            'CNTRL_TAVGFILE'] = diagUtilsLib.createLinks(
                env['CNTRLYEAR0'], env['CNTRLYEAR1'], env['CNTRLTAVGDIR'],
                env['WORKDIR'], env['CNTRLCASE'], control)
        env['CNTRLFILE'] = env['CNTRL_TAVGFILE']

        # setup the gridfile based on the resolution and vertical levels
        os.environ[
            'gridfilecntrl'] = '{0}/omwg/za_grids/{1}_grid_info.nc'.format(
                env['DIAGOBSROOT'], env['CNTRLRESOLUTION'])
        if env['VERTICAL'] == '42':
            os.environ[
                'gridfilecntrl'] = '{0}/omwg/za_grids/{1}_42lev_grid_info.nc'.format(
                    env['DIAGOBSROOT'], env['CNTRLRESOLUTION'])

        if env['VERTICAL'] == '62':
            os.environ[
                'gridfilecntrl'] = '{0}/omwg/za_grids/{1}_62lev_grid_info.nc'.format(
                    env['DIAGOBSROOT'], env['CNTRLRESOLUTION'])

        # check if gridfile exists and is readable
        rc, err_msg = cesmEnvLib.checkFile(os.environ['gridfilecntrl'], 'read')
        if not rc:
            raise OSError(err_msg)
        env['GRIDFILECNTRL'] = os.environ['gridfilecntrl']

        # check the resolution and decide if some plot modules should be turned off
        if env['CNTRLRESOLUTION'] == 'tx0.1v2':
            env['MVC_PM_VELISOPZ'] = os.environ['MVC_PM_VELISOPZ'] = 'FALSE'
            env['MVC_PM_KAPPAZ'] = os.environ['MVC_PM_KAPPAZ'] = 'FALSE'

        # create the control global zonal average file used by most of the plotting classes
        print('    model vs. control - calling create_za for control run')
        diagUtilsLib.create_za(env['WORKDIR'], env['CNTRL_TAVGFILE'],
                               env['GRIDFILECNTRL'], env['TOOLPATH'], env)

        return env
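
The model and control branches above both derive the zonal-average grid file path from DIAGOBSROOT, the grid resolution, and the number of vertical levels (plain, 42-level, or 62-level variants). A small hypothetical helper that captures that selection, following the same path convention, could look like this:

def za_gridfile(diag_obs_root, resolution, vertical):
    # Hypothetical helper mirroring the gridfile/gridfilecntrl selection above.
    suffix = ''
    if vertical in ('42', '62'):
        suffix = '_{0}lev'.format(vertical)
    return '{0}/omwg/za_grids/{1}{2}_grid_info.nc'.format(diag_obs_root, resolution, suffix)

# e.g. za_gridfile(env['DIAGOBSROOT'], env['CNTRLRESOLUTION'], env['VERTICAL'])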
Example #3
    def check_prerequisites(self, env):
        """ check prerequisites
        """
        print("  Checking prerequisites for : {0}".format(self.__class__.__name__))
        super(modelTimeseries, self).check_prerequisites(env)
        
        # chdir into the  working directory
        os.chdir(env['WORKDIR'])

        # clean out the old working plot files from the workdir
        if env['CLEANUP_FILES'].upper() in ['T','TRUE']:
            cesmEnvLib.purge(env['WORKDIR'], '.*\.pro')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.gif')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.dat')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.ps')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.png')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.html')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.log\.*')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.pop\.d.\.*')

        # create the plot.dat file in the workdir used by all NCL plotting routines
        diagUtilsLib.create_plot_dat(env['WORKDIR'], env['XYRANGE'], env['DEPTHS'])

        # set the OBSROOT 
        env['OBSROOT'] = env['OBSROOTPATH']

        # check the resolution and decide if some plot modules should be turned off
        if (env['RESOLUTION'] == 'tx0.1v2' or env['RESOLUTION'] == 'tx0.1v3') :
            env['MTS_PM_MOCANN'] = os.environ['PM_MOCANN'] = 'FALSE'
            env['MTS_PM_MOCMON'] = os.environ['PM_MOCMON'] = 'FALSE'

        # check if cpl log file path is defined
        if len(env['CPLLOGFILEPATH']) == 0:
            # print a message that the cpl log path isn't defined and turn off CPLLOG plot module
            print('model timeseries - CPLLOGFILEPATH is undefined. Disabling MTS_PM_CPLLOG module')
            env['MTS_PM_CPLLOG'] = os.environ['PM_CPLLOG'] = 'FALSE'

        else:
            # check that cpl log files exist and gunzip them if necessary
            cplLogs = list()
            initCplLogs = glob.glob('{0}/cpl.log.*'.format(env['CPLLOGFILEPATH']))
            if len(initCplLogs) > 0:
                for cplLog in initCplLogs:
                    logFileList = cplLog.split('/')
                    cplLogFile = logFileList[-1]
                    shutil.copy2(cplLog, '{0}/{1}'.format(env['WORKDIR'],cplLogFile))

                    # gunzip the cplLog in the workdir
                    if cplLogFile.lower().find('.gz') != -1:
                        cplLog_gunzip = cplLogFile[:-3]
                        inFile = gzip.open('{0}/{1}'.format(env['WORKDIR'],cplLogFile), 'rb')
                        outFile = open('{0}/{1}'.format(env['WORKDIR'],cplLog_gunzip), 'wb')
                        outFile.write( inFile.read() )
                        inFile.close()
                        outFile.close()

                        # append the gunzipped cpl log file to the cplLogs list
                        cplLogs.append('{0}/{1}'.format(env['WORKDIR'],cplLog_gunzip))

                        # remove the original .gz file in the workdir
                        os.remove('{0}/{1}'.format(env['WORKDIR'],cplLogFile))
                    else:
                        # append the original gunzipped cpl log file to the cplLogs list
                        cplLogs.append('{0}/{1}'.format(env['WORKDIR'],cplLogFile))

                # parse the cpllog depending on the coupler version - default to 7b
                print('model_timeseries: setting up heat and freshwater awk calls with cplLogs = {0}'.format(cplLogs))
                heatFile = 'cplheatbudget'
                freshWaterFile = 'cplfwbudget'
                cplVersion = 'cpl7b'
                env['ntailht'] = os.environ['ntailht'] = '22'
                env['ntailfw'] = os.environ['ntailfw'] = '16'

                if '7' == env['TS_CPL'] or '6' == env['TS_CPL']:
                    cplVersion = 'cpl{0}'.format(env['TS_CPL'])
                    env['ntailht'] = os.environ['ntailht'] = '21'
                    env['ntailfw'] = os.environ['ntailfw'] = '16'

                # expand the cpl.log* into a list
                cplLogs.sort()
                cplLogsString = ' '.join(cplLogs)

                # define the awk scripts to parse the cpllog file
                heatPath = '{0}/process_{1}_logfiles_heat.awk'.format(env['TOOLPATH'], cplVersion)
                heatPath = os.path.abspath(heatPath)

                fwPath = '{0}/process_{1}_logfiles_fw.awk'.format(env['TOOLPATH'], cplVersion)
                fwPath = os.path.abspath(fwPath)
        
                heatCmd = '{0} y0={1} y1={2} {3}'.format(heatPath, env['TSERIES_YEAR0'], env['TSERIES_YEAR1'], cplLogsString).split(' ')
                freshWaterCmd = '{0} y0={1} y1={2} {3}'.format(fwPath, env['TSERIES_YEAR0'], env['TSERIES_YEAR1'], cplLogsString).split(' ')

                # run the awk scripts to generate the .txt files from the cpllogs
                cmdList = [ (heatCmd, heatFile, env['ntailht']), (freshWaterCmd, freshWaterFile, env['ntailfw']) ]
                for cmd in cmdList:
                    outFile = '{0}.txt'.format(cmd[1])
                    with open (outFile, 'w') as results:
                        try:
                            subprocess.check_call(cmd[0], stdout=results, env=env)
                        except subprocess.CalledProcessError as e:
                            print('WARNING: {0} time series error executing command:'.format(self._name))
                            print('    {0}'.format(e.cmd))
                            print('    rc = {0}'.format(e.returncode))

                        rc, err_msg = cesmEnvLib.checkFile(outFile, 'read')
                        if rc:
                            # get the tail of the .txt file and redirect to a .asc file for the web
                            ascFile = '{0}.asc'.format(cmd[1])
                            with open (ascFile, 'w') as results:
                                try:
                                    # TODO - read the .txt in and write just the lines needed to avoid subprocess call
                                    tailCmd = 'tail -{0} {1}.txt'.format(cmd[2], cmd[1]).split(' ')
                                    subprocess.check_call(tailCmd, stdout=results, env=env)
                                except subprocess.CalledProcessError as e:
                                    print('WARNING: {0} time series error executing command:'.format(self._name))
                                    print('    {0}'.format(e.cmd))
                                    print('    rc = {0}'.format(e.returncode))

            else:
                print('model timeseries - Coupler logs do not exist. Disabling MTS_PM_CPLLOG module')
                env['MTS_PM_CPLLOG'] = os.environ['PM_CPLLOG'] = 'FALSE'

        # check if ocn log files exist
        if len(env['OCNLOGFILEPATH']) == 0:
            # print a message that the ocn log path isn't defined and turn off POPLOG plot module
            print('model timeseries - OCNLOGFILEPATH is undefined. Disabling MTS_PM_YPOPLOG module')
            env['MTS_PM_YPOPLOG'] = os.environ['PM_YPOPLOG'] = 'FALSE'
        
        else:
            # check that ocn log files exist and gunzip them if necessary
            ocnLogs = list()
            initOcnLogs = glob.glob('{0}/ocn.log.*'.format(env['OCNLOGFILEPATH']))
            if len(initOcnLogs) > 0:
                for ocnLog in initOcnLogs:
                    logFileList = ocnLog.split('/')
                    ocnLogFile = logFileList[-1]
                    shutil.copy2(ocnLog, '{0}/{1}'.format(env['WORKDIR'],ocnLogFile))

                    # gunzip the ocnLog in the workdir
                    if ocnLogFile.lower().find('.gz') != -1:
                        ocnLog_gunzip = ocnLogFile[:-3]
                        inFile = gzip.open('{0}/{1}'.format(env['WORKDIR'],ocnLogFile), 'rb')
                        outFile = open('{0}/{1}'.format(env['WORKDIR'],ocnLog_gunzip), 'wb')
                        outFile.write( inFile.read() )
                        inFile.close()
                        outFile.close()

                        # append the gunzipped ocn log file to the ocnLogs list
                        ocnLogs.append('{0}/{1}'.format(env['WORKDIR'],ocnLog_gunzip))

                        # remove the original .gz file in the workdir
                        os.remove('{0}/{1}'.format(env['WORKDIR'],ocnLogFile))

                    else:
                        # append the original gunzipped ocn log file to the ocnLogs list
                        ocnLogs.append('{0}/{1}'.format(env['WORKDIR'],ocnLogFile))

                # expand the ocn.log* into a list
                ocnLogs.sort()
                ocnLogsString = ' '.join(ocnLogs)

                # define the awk script to parse the ocn log files
                globalDiagAwkPath = '{0}/process_pop2_logfiles.globaldiag.awk'.format(env['TOOLPATH'])
                globalDiagAwkCmd = '{0} {1}'.format(globalDiagAwkPath, ocnLogsString).split(' ')
                print('model_timeseries: globalDiagAwkCmd = {0}'.format(globalDiagAwkCmd))

                # run the awk scripts to generate the .txt files from the ocn logs
                try:
                    subprocess.check_call(globalDiagAwkCmd)
                except subprocess.CalledProcessError as e:
                    print('WARNING: {0} time series error executing command:'.format(self._name))
                    print('    {0}'.format(e.cmd))
                    print('    rc = {0}'.format(e.returncode))
            else:
                print('model timeseries - Ocean logs do not exist. Disabling MTS_PM_YPOPLOG and MTS_PM_ENSOWVLT modules')
                env['MTS_PM_YPOPLOG'] = os.environ['PM_YPOPLOG'] = 'FALSE'
                env['MTS_PM_ENSOWVLT'] = os.environ['PM_ENSOWVLT'] = 'FALSE'

        # check if dt files exist
        if len(env['DTFILEPATH']) == 0:
            # print a message that the dt file path isn't defined and turn off POPLOG plot module
            print('model timeseries - DTFILEPATH is undefined. Disabling MTS_PM_YPOPLOG and MTS_PM_ENSOWVLT modules')
            env['MTS_PM_YPOPLOG'] = os.environ['PM_YPOPLOG'] = 'FALSE'
            env['MTS_PM_ENSOWVLT'] = os.environ['PM_ENSOWVLT'] = 'FALSE'
        
        else:
            # check that dt files exist
            dtFiles = list()
            dtFiles = glob.glob('{0}/{1}.pop.dt.*'.format(env['DTFILEPATH'], env['CASE']))
            print('dtFiles = {0}'.format(dtFiles))
            if len(dtFiles) > 0:
                for dtFile in dtFiles:
                    logFileList = dtFile.split('/')
                    dtLogFile = logFileList[-1]
                    shutil.copy2(dtFile, '{0}/{1}'.format(env['WORKDIR'],dtLogFile))

                # expand the *.dt.* into a list
                dtFiles.sort()
                dtFilesString = ' '.join(dtFiles)

                # define the awk script to parse the dt log files
                dtFilesAwkPath = '{0}/process_pop2_dtfiles.awk'.format(env['TOOLPATH'])
                dtFilesAwkCmd = '{0} {1}'.format(dtFilesAwkPath, dtFilesString).split(' ')
                print('model_timeseries: dtFilesAwkCmd = {0}'.format(dtFilesAwkCmd))

                # run the awk scripts to generate the .txt files from the dt log files
                try:
                    subprocess.check_call(dtFilesAwkCmd)
                except subprocess.CalledProcessError as e:
                    print('WARNING: {0} time series error executing command:'.format(self._name))
                    print('    {0}'.format(e.cmd))
                    print('    rc = {0}'.format(e.returncode))
            else:
                print('model_timeseries - ocean dt files do not exist. Disabling MTS_PM_YPOPLOG and MTS_PM_ENSOWVLT modules')
                env['MTS_PM_YPOPLOG'] = os.environ['PM_YPOPLOG'] = 'FALSE'
                env['MTS_PM_ENSOWVLT'] = os.environ['PM_ENSOWVLT'] = 'FALSE'

        return env
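
The coupler and ocean log staging above copies each log into WORKDIR and, for .gz files, inflates it by reading the whole archive into memory. A streaming variant of the same step using only the standard library (gzip, shutil) is sketched below; the helper name is made up for illustration.

import gzip
import os
import shutil

def stage_log(log_path, workdir):
    # Copy a log into workdir; if it is gzipped, stream-decompress it and
    # drop the .gz copy. Returns the path of the staged plain-text file.
    name = os.path.basename(log_path)
    staged = os.path.join(workdir, name)
    shutil.copy2(log_path, staged)
    if name.lower().endswith('.gz'):
        unzipped = staged[:-3]
        with gzip.open(staged, 'rb') as src, open(unzipped, 'wb') as dst:
            shutil.copyfileobj(src, dst)
        os.remove(staged)
        return unzipped
    return staged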
Example #4
    def run_diagnostics(self, env, scomm):
        """ call the necessary plotting routines to generate diagnostics plots
        """
        super(modelVsModel, self).run_diagnostics(env, scomm)
        scomm.sync()

        # setup some global variables
        requested_plot_sets = list()
        local_requested_plots = list()
        local_html_list = list()

        # all the plot module XML vars start with 'set_'  need to strip that off
        for key, value in env.iteritems():
            if   ("wset_"in key and (value == 'True' or env['all_waccm_sets'] == 'True')):
                requested_plot_sets.append(key)
            elif ("cset_"in key and (value == 'True' or env['all_chem_sets'] == 'True')):
                requested_plot_sets.append(key)
            elif ("set_" in key and (value == 'True' or env['all_sets'] == 'True')):
                if ("wset_" not in key and "cset_" not in key):
                    requested_plot_sets.append(key)
        
        scomm.sync()

        # partition requested plots to all tasks
        # first, create plotting classes and get the number of plots each will create
        requested_plots = {}
        set_sizes = {}
        plots_weights = []
        for plot_set in requested_plot_sets:
            requested_plots.update(atm_diags_plot_factory.atmosphereDiagnosticPlotFactory(plot_set,env))
        for plot_id,plot_class in requested_plots.iteritems(): 
            if hasattr(plot_class, 'weight'):
                factor = plot_class.weight
            else:
                factor = 1
            plots_weights.append((plot_id,len(plot_class.expectedPlots)*factor))
        # partition based on the number of plots each set will create
        local_plot_list = scomm.partition(plots_weights, func=partition.WeightBalanced(), involved=True)  

        timer = timekeeper.TimeKeeper()
        # loop over local plot lists - set env and then run plotting script         
        #for plot_id,plot_class in local_plot_list.interitems():
        timer.start(str(scomm.get_rank())+"ncl total time on task")
        for plot_set in local_plot_list:
            timer.start(str(scomm.get_rank())+plot_set)
            plot_class = requested_plots[plot_set]
            # set all env variables (global and particular to this plot call)
            plot_class.check_prerequisites(env)
            # Stringify the env dictionary
            for name,value in plot_class.plot_env.iteritems():
                plot_class.plot_env[name] = str(value)
            # call script to create plots
            for script in plot_class.ncl_scripts:
                diagUtilsLib.generate_ncl_plots(plot_class.plot_env,script)
                plot_class.plot_env['NCDF_MODE'] = 'write'
                plot_class.plot_env['VAR_MODE'] = 'write'
            timer.stop(str(scomm.get_rank())+plot_set) 
        timer.stop(str(scomm.get_rank())+"ncl total time on task")
        scomm.sync() 
        print(timer.get_all_times())
        w = 0
        for p in plots_weights:
            if p[0] in local_plot_list:
                w = w + p[1]
        print(str(scomm.get_rank())+' weight:'+str(w))

        # set html files
        if scomm.is_manager():
            env['HTML_HOME'] = env['DIAG_HOME']+'/html/model1-model2/'
            # Get web dir name and create it if it does not exist
            #web_dir = '{0}/{1}-{2}'.format(env['test_path_diag'], env['test_casename'], env['cntl_casename'])
            web_dir = env['test_path_diag']
            #if not os.path.exists(web_dir):
            #    os.makedirs(web_dir)
            # Copy over some files needed by web pages
            if not os.path.exists(web_dir+'/images'):
                os.mkdir(web_dir+'/images')
            diag_imgs = glob.glob(env['DIAG_HOME']+'/html/images/*')
            for img in diag_imgs:
                shutil.copy(img,web_dir+'/images/')
          
            # Create set dirs, copy plots to set dir, and create html file for set 
            requested_plot_sets.append('sets') # Add 'sets' to create top level html files
            glob_set = list()
            for plot_set in requested_plot_sets:
                 if 'set_5' == plot_set or 'set_6' == plot_set:
                     glob_set.append(plot_set.replace('_',''))
                     plot_set = 'set5_6'
                 elif 'cset_1' == plot_set:
                     glob_set.append('table_soa')
                     glob_set.append('table_chem')
                     plot_set = plot_set.replace('_','')     
                 elif 'set_1' == plot_set:
                     glob_set.append('table_GLBL')
                     glob_set.append('table_NEXT')
                     glob_set.append('table_SEXT')
                     glob_set.append('table_TROP')
                     plot_set = plot_set.replace('_','')
                 elif 'sets' == plot_set:
                     set_dir = web_dir + '/' 
                 else:
                     glob_set.append(plot_set.replace('_',''))
                     plot_set = plot_set.replace('_','')

                 if 'sets' not in plot_set: #'sets' is top level, don't create directory or copy images files
                     set_dir = web_dir + '/' + plot_set
                     # Create the plot set web directory
                     if not os.path.exists(set_dir):
                         os.makedirs(set_dir) 
                     # Copy plots into the correct web dir
                     for gs in glob_set:
                         glob_string = env['test_path_diag']+'/'+gs+'*.*'
                         imgs = glob.glob(glob_string) 
                         if len(imgs) > 0:
                             for img in imgs:
                                 new_fn = set_dir + '/' + os.path.basename(img)
                                 os.rename(img,new_fn)

                 # Copy/Process html files
                 if 'sets' in plot_set:
                     orig_html = env['HTML_HOME']+'/'+plot_set 
                 else:
                     orig_html = env['HTML_HOME']+'/'+plot_set+'/'+plot_set 
                 create_atm_html.create_plotset_html(orig_html,set_dir,plot_set,env,'model_vs_model')

            # Remove any plotvar netcdf files that exists in the diag directory
            if env['save_ncdfs'] == 'False':
                cesmEnvLib.purge(env['test_path_diag'], '.*\.nc')
                cesmEnvLib.purge(env['test_path_diag'], '/station_ids')
            
            # move all the plots to the diag_path with the years appended to the path
            endYr1 = (int(env['test_first_yr']) + int(env['test_nyrs'])) - 1 
            endYr2 = (int(env['cntl_first_yr']) + int(env['cntl_nyrs'])) - 1 
            diag_path = '{0}/diag/{1}.{2}_{3}-{4}.{5}_{6}'.format(env['OUTPUT_ROOT_PATH'], 
                          env['test_casename'], env['test_first_yr'], str(endYr1),
                          env['cntl_casename'], env['cntl_first_yr'], str(endYr2))
            move_files = True

            try:
                os.makedirs(diag_path)
            except OSError as exception:
                if exception.errno != errno.EEXIST:
                    err_msg = 'ERROR: {0} problem accessing directory {1}'.format(self.__class__.__name__, diag_path)
                    move_files = False
                    raise OSError(err_msg)

                elif env['CLEANUP_FILES'].lower() in ['t','true']:
                    # delete all the files in the diag_path directory
                    for root, dirs, files in os.walk(diag_path):
                        for f in files:
                            os.unlink(os.path.join(root, f))
                        for d in dirs:
                            shutil.rmtree(os.path.join(root, d))

                elif env['CLEANUP_FILES'].lower() in ['f','false']:
                    print('WARNING: {0} exists and is not empty and ATMDIAG_CLEANUP_FILES = False. Leaving new diagnostics files in {1}'.format(diag_path, web_dir))
                    diag_path = web_dir
                    move_files = False

            print('DEBUG: model vs. model web_dir = {0}'.format(web_dir))
            print('DEBUG: model vs. model diag_path = {0}'.format(diag_path))

            # move the files to the new diag_path 
            if move_files:
                try:
                    print('DEBUG: model_vs_model renaming web files')
                    os.rename(web_dir, diag_path)
                except OSError as e:
                    print ('WARNING: Error renaming %s to %s: %s' % (web_dir, diag_path, e))
                    diag_path = web_dir

            # setup the unique ATMDIAG_WEBDIR_MODEL_VS_OBS output file
            env_file = '{0}/env_diags_atm.xml'.format(env['PP_CASE_PATH'])
            key = 'ATMDIAG_WEBDIR_{0}'.format(self._name)
            value = diag_path
            web_file = '{0}/web_dirs/{1}.{2}'.format(env['PP_CASE_PATH'], key, datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S'))
            try:
                diagUtilsLib.write_web_file(web_file, 'atm', key, value)
            except:
                print('WARNING atm model_vs_model unable to write {0}={1} to {2}'.format(key, value, web_file))

            print('*******************************************************************************')
            print('Successfully completed generating atmosphere diagnostics model vs. model plots')
            print('*******************************************************************************')
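
Compared with Example #1, the version above maps some plot sets to several image filename prefixes before moving files into per-set web directories (set_1 to the four table_* summaries, cset_1 to the chemistry tables, set_5/set_6 to the shared set5_6 page). That branching can be summarized as a small lookup, sketched here for illustration only:

def set_prefixes(plot_set):
    # Illustrative summary of the glob_set branching above:
    # returns (web directory name, image filename prefixes) for one plot set.
    if plot_set in ('set_5', 'set_6'):
        return 'set5_6', [plot_set.replace('_', '')]
    if plot_set == 'cset_1':
        return 'cset1', ['table_soa', 'table_chem']
    if plot_set == 'set_1':
        return 'set1', ['table_GLBL', 'table_NEXT', 'table_SEXT', 'table_TROP']
    name = plot_set.replace('_', '')
    return name, [name]

# e.g. set_prefixes('set_6') -> ('set5_6', ['set6'])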
Example #5
    def check_prerequisites(self, env):
        """ check prerequisites
        """
        print("  Checking prerequisites for : {0}".format(self.__class__.__name__))
        super(modelVsObs, self).check_prerequisites(env)

        # clean out the old working plot files from the workdir
        if env['CLEANUP_FILES'].upper() in ['T','TRUE']:
            cesmEnvLib.purge(env['WORKDIR'], '.*\.pro')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.gif')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.dat')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.ps')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.png')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.html')

        # create the plot.dat file in the workdir used by all NCL plotting routines
        diagUtilsLib.create_plot_dat(env['WORKDIR'], env['XYRANGE'], env['DEPTHS'])

        # setup the gridfile based on the resolution
##        os.environ['gridfile'] = '{0}/tool_lib/zon_avg/grids/{1}_grid_info.nc'.format(env['DIAGROOTPATH'],env['RESOLUTION'])
        os.environ['gridfile'] = '{0}/omwg/za_grids/{1}_grid_info.nc'.format(env['DIAGOBSROOT'],env['RESOLUTION'])
        if env['VERTICAL'] == '42':
##            os.environ['gridfile'] = '{0}/tool_lib/zon_avg/grids/{1}_42lev_grid_info.nc'.format(env['DIAGROOTPATH'],env['RESOLUTION'])
            ## this file doesn't exist! - not sure if this even works now or not
            os.environ['gridfile'] = '{0}/omwg/za_grids/{1}_42lev_grid_info.nc'.format(env['DIAGOBSROOT'],env['RESOLUTION'])

        # check if gridfile exists and is readable
        rc, err_msg = cesmEnvLib.checkFile(os.environ['gridfile'], 'read')
        if not rc:
            print('model_vs_obs:  check_prerequisites could not find gridfile = {0}'.format(os.environ['gridfile']))
            raise ocn_diags_bc.PrerequisitesError

        env['GRIDFILE'] = os.environ['gridfile']

        # check the resolution and decide if some plot modules should be turned off
        if env['RESOLUTION'] == 'tx0.1v2' :
            env['MVO_PM_VELISOPZ'] = os.environ['MVO_PM_VELISOPZ'] = 'FALSE'
            env['MVO_PM_KAPPAZ'] = os.environ['MVO_PM_KAPPAZ'] = 'FALSE'

        # create the global zonal average file used by most of the plotting classes
        print('    model vs. obs - calling create_za')
        diagUtilsLib.create_za( env['WORKDIR'], env['TAVGFILE'], env['GRIDFILE'], env['TOOLPATH'], env)

        return env
Example #6
    def check_prerequisites(self, env):
        """ check prerequisites
        """
        print("  Checking prerequisites for : {0}".format(self.__class__.__name__))
        super(modelTimeseries, self).check_prerequisites(env)
        
        # chdir into the  working directory
        os.chdir(env['WORKDIR'])

        # clean out the old working plot files from the workdir
        if env['CLEANUP_FILES'].upper() in ['T','TRUE']:
            cesmEnvLib.purge(env['WORKDIR'], '.*\.pro')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.gif')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.dat')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.ps')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.png')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.html')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.log\.*')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.pop\.d.\.*')

        # create the plot.dat file in the workdir used by all NCL plotting routines
        diagUtilsLib.create_plot_dat(env['WORKDIR'], env['XYRANGE'], env['DEPTHS'])

        # set the OBSROOT 
        env['OBSROOT'] = env['OBSROOTPATH']

        # check the resolution and decide if some plot modules should be turned off
        if env['RESOLUTION'] == 'tx0.1v2' :
            env['MTS_PM_MOCANN'] = os.environ['PM_MOCANN'] = 'FALSE'
            env['MTS_PM_MOCMON'] = os.environ['PM_MOCMON'] = 'FALSE'

        # check if cpl log file path is defined
        if len(env['CPLLOGFILEPATH']) == 0:
            # print a message that the cpl log path isn't defined and turn off CPLLOG plot module
            print('model timeseries - CPLLOGFILEPATH is undefined. Disabling MTS_PM_CPLLOG module')
            env['MTS_PM_CPLLOG'] = os.environ['PM_CPLLOG'] = 'FALSE'

        else:
            # check that cpl log files exist and gunzip them if necessary
            cplLogs = list()
            initCplLogs = glob.glob('{0}/cpl.log.*'.format(env['CPLLOGFILEPATH']))
            if len(initCplLogs) > 0:
                for cplLog in initCplLogs:
                    logFileList = cplLog.split('/')
                    cplLogFile = logFileList[-1]
                    shutil.copy2(cplLog, '{0}/{1}'.format(env['WORKDIR'],cplLogFile))

                    # gunzip the cplLog in the workdir
                    if cplLogFile.lower().find('.gz') != -1:
                        cplLog_gunzip = cplLogFile[:-3]
                        inFile = gzip.open('{0}/{1}'.format(env['WORKDIR'],cplLogFile), 'rb')
                        outFile = open('{0}/{1}'.format(env['WORKDIR'],cplLog_gunzip), 'wb')
                        outFile.write( inFile.read() )
                        inFile.close()
                        outFile.close()

                        # append the gunzipped cpl log file to the cplLogs list
                        cplLogs.append('{0}/{1}'.format(env['WORKDIR'],cplLog_gunzip))

                        # remove the original .gz file in the workdir
                        os.remove('{0}/{1}'.format(env['WORKDIR'],cplLogFile))
                    else:
                        # append the original gunzipped cpl log file to the cplLogs list
                        cplLogs.append('{0}/{1}'.format(env['WORKDIR'],cplLogFile))

                # parse the cpllog depending on the coupler version - default to 7b
                print('model_timeseries: setting up heat and freshwater awk calls with cplLogs = {0}'.format(cplLogs))
                heatFile = 'cplheatbudget'
                freshWaterFile = 'cplfwbudget'
                cplVersion = 'cpl7b'
                env['ntailht'] = os.environ['ntailht'] = '22'
                env['ntailfw'] = os.environ['ntailfw'] = '16'

                if '7' == env['TS_CPL'] or '6' == env['TS_CPL']:
                    cplVersion = 'cpl{0}'.format(env['TS_CPL'])
                    env['ntailht'] = os.environ['ntailht'] = '21'
                    env['ntailfw'] = os.environ['ntailfw'] = '16'

                # expand the cpl.log* into a list
                cplLogsString = ' '.join(cplLogs)

                # define the awk scripts to parse the cpllog file
                heatPath = '{0}/process_{1}_logfiles_heat.awk'.format(env['TOOLPATH'], cplVersion)
                heatPath = os.path.abspath(heatPath)

                fwPath = '{0}/process_{1}_logfiles_fw.awk'.format(env['TOOLPATH'], cplVersion)
                fwPath = os.path.abspath(fwPath)
        
                heatCmd = '{0} y0={1} y1={2} {3}'.format(heatPath, env['TSERIES_YEAR0'], env['TSERIES_YEAR1'], cplLogsString).split(' ')
                freshWaterCmd = '{0} y0={1} y1={2} {3}'.format(fwPath, env['TSERIES_YEAR0'], env['TSERIES_YEAR1'], cplLogsString).split(' ')

                # run the awk scripts to generate the .txt files from the cpllogs
                cmdList = [ (heatCmd, heatFile, env['ntailht']), (freshWaterCmd, freshWaterFile, env['ntailfw']) ]
                for cmd in cmdList:
                    outFile = '{0}.txt'.format(cmd[1])
                    with open (outFile, 'w') as results:
                        try:
                            subprocess.check_call(cmd[0], stdout=results, env=env)
                        except subprocess.CalledProcessError as e:
                            print('WARNING: {0} time series error executing command:'.format(self._name))
                            print('    {0}'.format(e.cmd))
                            print('    rc = {0}'.format(e.returncode))

                        rc, err_msg = cesmEnvLib.checkFile(outFile, 'read')
                        if rc:
                            # get the tail of the .txt file and redirect to a .asc file for the web
                            ascFile = '{0}.asc'.format(cmd[1])
                            with open (ascFile, 'w') as results:
                                try:
                                    # TODO - read the .txt in and write just the lines needed to avoid subprocess call
                                    tailCmd = 'tail -{0} {1}.txt'.format(cmd[2], cmd[1]).split(' ')
                                    subprocess.check_call(tailCmd, stdout=results, env=env)
                                except subprocess.CalledProcessError as e:
                                    print('WARNING: {0} time series error executing command:'.format(self._name))
                                    print('    {0}'.format(e.cmd))
                                    print('    rc = {0}'.format(e.returncode))

            else:
                print('model timeseries - Coupler logs do not exist. Disabling MTS_PM_CPLLOG module')
                env['MTS_PM_CPLLOG'] = os.environ['PM_CPLLOG'] = 'FALSE'


        # check if ocn log files exist
        if len(env['OCNLOGFILEPATH']) == 0:
            # print a message that the ocn log path isn't defined and turn off POPLOG plot module
            print('model timeseries - OCNLOGFILEPATH is undefined. Disabling MTS_PM_YPOPLOG module')
            env['MTS_PM_YPOPLOG'] = os.environ['PM_YPOPLOG'] = 'FALSE'
        
        else:
            # check that ocn log files exist and gunzip them if necessary
            ocnLogs = list()
            initOcnLogs = glob.glob('{0}/ocn.log.*'.format(env['OCNLOGFILEPATH']))
            if len(initOcnLogs) > 0:
                for ocnLog in initOcnLogs:
                    logFileList = ocnLog.split('/')
                    ocnLogFile = logFileList[-1]
                    shutil.copy2(ocnLog, '{0}/{1}'.format(env['WORKDIR'],ocnLogFile))

                    # gunzip the ocnLog in the workdir
                    if ocnLogFile.lower().find('.gz') != -1:
                        ocnLog_gunzip = ocnLogFile[:-3]
                        inFile = gzip.open('{0}/{1}'.format(env['WORKDIR'],ocnLogFile), 'rb')
                        outFile = open('{0}/{1}'.format(env['WORKDIR'],ocnLog_gunzip), 'wb')
                        outFile.write( inFile.read() )
                        inFile.close()
                        outFile.close()

                        # append the gunzipped ocn log file to the ocnLogs list
                        ocnLogs.append('{0}/{1}'.format(env['WORKDIR'],ocnLog_gunzip))

                        # remove the original .gz file in the workdir
                        os.remove('{0}/{1}'.format(env['WORKDIR'],ocnLogFile))

                    else:
                        # append the original gunzipped ocn log file to the ocnLogs list
                        ocnLogs.append('{0}/{1}'.format(env['WORKDIR'],ocnLogFile))

                # expand the ocn.log* into a list
                ocnLogs.sort()
                ocnLogsString = ' '.join(ocnLogs)

                # define the awk script to parse the ocn log files
                globalDiagAwkPath = '{0}/process_pop2_logfiles.globaldiag.awk'.format(env['TOOLPATH'])
                globalDiagAwkCmd = '{0} {1}'.format(globalDiagAwkPath, ocnLogsString).split(' ')
                print('model_timeseries: globalDiagAwkCmd = {0}'.format(globalDiagAwkCmd))

                # run the awk scripts to generate the .txt files from the ocn logs
                try:
                    subprocess.check_call(globalDiagAwkCmd)
                except subprocess.CalledProcessError as e:
                    print('WARNING: {0} time series error executing command:'.format(self._name))
                    print('    {0}'.format(e.cmd))
                    print('    rc = {0}'.format(e.returncode))
            else:
                print('model timeseries - Ocean logs do not exist. Disabling MTS_PM_YPOPLOG and MTS_PM_ENSOWVLT modules')
                env['MTS_PM_YPOPLOG'] = os.environ['PM_YPOPLOG'] = 'FALSE'
                env['MTS_PM_ENSOWVLT'] = os.environ['PM_ENSOWVLT'] = 'FALSE'

        # check if dt files exist
        if len(env['DTFILEPATH']) == 0:
            # print a message that the dt file path isn't defined and turn off POPLOG plot module
            print('model timeseries - DTFILEPATH is undefined. Disabling MTS_PM_YPOPLOG and MTS_PM_ENSOWVLT modules')
            env['MTS_PM_YPOPLOG'] = os.environ['PM_YPOPLOG'] = 'FALSE'
            env['MTS_PM_ENSOWVLT'] = os.environ['PM_ENSOWVLT'] = 'FALSE'
        
        else:
            # check that dt files exist
            dtFiles = list()
            dtFiles = glob.glob('{0}/{1}.pop.dt.*'.format(env['DTFILEPATH'], env['CASE']))
            print('dtFiles = {0}'.format(dtFiles))
            if len(dtFiles) > 0:
                for dtFile in dtFiles:
                    logFileList = dtFile.split('/')
                    dtLogFile = logFileList[-1]
                    shutil.copy2(dtFile, '{0}/{1}'.format(env['WORKDIR'],dtLogFile))

                # expand the *.dt.* into a list
                dtFiles.sort()
                dtFilesString = ' '.join(dtFiles)

                # define the awk script to parse the dt log files
                dtFilesAwkPath = '{0}/process_pop2_dtfiles.awk'.format(env['TOOLPATH'])
                dtFilesAwkCmd = '{0} {1}'.format(dtFilesAwkPath, dtFilesString).split(' ')
                print('model_timeseries: dtFilesAwkCmd = {0}'.format(dtFilesAwkCmd))

                # run the awk scripts to generate the .txt files from the dt log files
                try:
                    subprocess.check_call(dtFilesAwkCmd)
                except subprocess.CalledProcessError as e:
                    print('WARNING: {0} time series error executing command:'.format(self._name))
                    print('    {0}'.format(e.cmd))
                    print('    rc = {0}'.format(e.returncode))
            else:
                print('model_timeseries - ocean dt files do not exist. Disabling MTS_PM_YPOPLOG and MTS_PM_ENSOWVLT modules')
                env['MTS_PM_YPOPLOG'] = os.environ['PM_YPOPLOG'] = 'FALSE'
                env['MTS_PM_ENSOWVLT'] = os.environ['PM_ENSOWVLT'] = 'FALSE'

        return env
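
Both copies of the timeseries prerequisite check carry a TODO about replacing the tail subprocess with plain Python when trimming the budget .txt files down to .asc files for the web. A minimal sketch of that replacement, assuming only the last N lines are wanted, is:

from collections import deque

def write_tail(txt_file, asc_file, nlines):
    # Write the last nlines of txt_file to asc_file without shelling out to tail.
    # Sketch for the TODO above; error handling omitted.
    with open(txt_file, 'r') as src:
        last_lines = deque(src, maxlen=int(nlines))
    with open(asc_file, 'w') as dst:
        dst.writelines(last_lines)

# e.g. write_tail('cplheatbudget.txt', 'cplheatbudget.asc', env['ntailht'])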
Example #7
    def check_prerequisites(self, env):
        """ check prerequisites
        """
        print("  Checking prerequisites for : {0}".format(
            self.__class__.__name__))
        super(modelVsObsEcosys, self).check_prerequisites(env)

        # clean out the old working plot files from the workdir
        if env['CLEANUP_FILES'].upper() in ['T', 'TRUE']:
            cesmEnvLib.purge(env['WORKDIR'], '.*\.pro')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.gif')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.dat')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.ps')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.png')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.html')

        # create the plot.dat file in the workdir used by all NCL plotting routines
        diagUtilsLib.create_plot_dat(env['WORKDIR'], env['XYRANGE'],
                                     env['DEPTHS'])

        # read in the ecosys_vars.txt file and create the corresponding link files to the climatology files
        try:
            ecosys_vars_file = env['ECOSYSVARSFILE']
            f = open(ecosys_vars_file)
            ecosys_vars = f.read().split()
            f.close()
        except IOError as e:
            print('ERROR: unable to open {0} error {1} : {2}'.format(
                ecosys_vars_file, e.errno, e.strerror))
        except ValueError:
            print('ERROR: unable to split {0} into separate variable names'.
                  format(ecosys_vars_file))
        except:
            print('ERROR: unexpected error in {0}'.format(self._name))
            raise

        # loop over the ecosys_vars list and create the links to the mavg file
        sourceFile = os.path.join(
            env['WORKDIR'],
            'mavg.{0:04d}.{1:04d}.nc'.format(int(env['YEAR0']),
                                             int(env['YEAR1'])))
        for var in ecosys_vars:
            linkFile = os.path.join(
                env['WORKDIR'], '{0}.{1}.clim.{2:04d}-{3:04d}.nc'.format(
                    env['CASE'], var, int(env['YEAR0']), int(env['YEAR1'])))
            try:
                os.symlink(sourceFile, linkFile)
            except OSError as e:
                print(
                    'ERROR: unable to create symbolic link {0} to {1} error {2} : {3}'
                    .format(linkFile, sourceFile, e.errno, e.strerror))

        # create the POPDIAG and PME environment variables
        env['POPDIAGPY'] = env['POPDIAGPY2'] = env['POPDIAG'] = os.environ[
            'POPDIAG'] = 'TRUE'
        env['PME'] = os.environ['PME'] = '1'
        env['mappdir'] = env['ECODATADIR'] + '/mapping'

        # create the plot_depths.dat file
        fh = open('{0}/plot_depths.dat'.format(env['WORKDIR']), 'w')
        fh.write('{0}\n'.format(env['DEPTHS']))
        fh.close()

        return env
Example #8
    def check_prerequisites(self, env):
        """ check prerequisites
        """
        print('  Checking prerequisites for : {0}'.format(self.__class__.__name__))
        self._name = '{0}_{1}'.format(self._name, env['CNTRLCASE'])
        super(modelVsControl, self).check_prerequisites(env)

        # clean out the old working plot files from the workdir
        if env['CLEANUP_FILES'].upper() in ['T','TRUE']:
            cesmEnvLib.purge(env['WORKDIR'], '.*\.pro')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.gif')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.dat')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.ps')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.png')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.html')

        # create the plot.dat file in the workdir used by all NCL plotting routines
        diagUtilsLib.create_plot_dat(env['WORKDIR'], env['XYRANGE'], env['DEPTHS'])

        # setup prerequisites for the model
        # setup the gridfile based on the resolution and levels
        os.environ['gridfile'] = '{0}/omwg/za_grids/{1}_grid_info.nc'.format(env['DIAGOBSROOT'],env['RESOLUTION'])
        if env['VERTICAL'] == '42':
            os.environ['gridfile'] = '{0}/omwg/za_grids/{1}_42lev_grid_info.nc'.format(env['DIAGOBSROOT'],env['RESOLUTION'])

        if env['VERTICAL'] == '62':
            os.environ['gridfile'] = '{0}/omwg/za_grids/{1}_62lev_grid_info.nc'.format(env['DIAGOBSROOT'],env['RESOLUTION'])

        # check if gridfile exists and is readable
        rc, err_msg = cesmEnvLib.checkFile(os.environ['gridfile'], 'read')
        if not rc:
            raise OSError(err_msg)
        env['GRIDFILE'] = os.environ['gridfile']

        # check the resolution and decide if some plot modules should be turned off
        if (env['RESOLUTION'] == 'tx0.1v2' or env['RESOLUTION'] == 'tx0.1v3') :
            env['MVC_PM_VELISOPZ'] = os.environ['MVC_PM_VELISOPZ'] = 'FALSE'
            env['MVC_PM_KAPPAZ'] = os.environ['MVC_PM_KAPPAZ'] = 'FALSE'

        # create the global zonal average file used by most of the plotting classes
        print('   model vs. control - calling create_za')
        diagUtilsLib.create_za( env['WORKDIR'], env['TAVGFILE'], env['GRIDFILE'], env['TOOLPATH'], env)

        # setup prerequisites for the model control
        control = True
        env['CNTRL_MAVGFILE'], env['CNTRL_TAVGFILE'] = diagUtilsLib.createLinks(env['CNTRLYEAR0'], env['CNTRLYEAR1'], env['CNTRLTAVGDIR'], env['WORKDIR'], env['CNTRLCASE'], control)
        env['CNTRLFILE'] = env['CNTRL_TAVGFILE']

        # setup the gridfile based on the resolution and vertical levels
        os.environ['gridfilecntrl'] = '{0}/omwg/za_grids/{1}_grid_info.nc'.format(env['DIAGOBSROOT'],env['CNTRLRESOLUTION'])
        if env['VERTICAL'] == '42':
            os.environ['gridfilecntrl'] = '{0}/omwg/za_grids/{1}_42lev_grid_info.nc'.format(env['DIAGOBSROOT'],env['CNTRLRESOLUTION'])

        if env['VERTICAL'] == '62':
            os.environ['gridfilecntrl'] = '{0}/omwg/za_grids/{1}_62lev_grid_info.nc'.format(env['DIAGOBSROOT'],env['CNTRLRESOLUTION'])

        # check if gridfile exists and is readable
        rc, err_msg = cesmEnvLib.checkFile(os.environ['gridfilecntrl'], 'read')
        if not rc:
            raise OSError(err_msg)
        env['GRIDFILECNTRL'] = os.environ['gridfilecntrl']

        # check the resolution and decide if some plot modules should be turned off
        if (env['CNTRLRESOLUTION'] == 'tx0.1v2' or env['CNTRLRESOLUTION'] == 'tx0.1v3') :
            env['MVC_PM_VELISOPZ'] = os.environ['MVC_PM_VELISOPZ'] = 'FALSE'
            env['MVC_PM_KAPPAZ'] = os.environ['MVC_PM_KAPPAZ'] = 'FALSE'

        # create the control global zonal average file used by most of the plotting classes
        print('    model vs. control - calling create_za for control run')
        diagUtilsLib.create_za( env['WORKDIR'], env['CNTRL_TAVGFILE'], env['GRIDFILECNTRL'], env['TOOLPATH'], env)

        return env
Example #9
    def check_prerequisites(self, env):
        """ check prerequisites
        """
        print("  Checking prerequisites for : {0}".format(self.__class__.__name__))
        super(modelVsObs, self).check_prerequisites(env)

        # clean out the old working plot files from the workdir
        if env['CLEANUP_FILES'].upper() in ['T','TRUE']:
            cesmEnvLib.purge(env['WORKDIR'], '.*\.pro')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.gif')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.dat')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.ps')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.png')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.html')

        # create the plot.dat file in the workdir used by all NCL plotting routines
        diagUtilsLib.create_plot_dat(env['WORKDIR'], env['XYRANGE'], env['DEPTHS'])

        # setup the gridfile based on the resolution and vertical levels
        os.environ['gridfile'] = '{0}/omwg/za_grids/{1}_grid_info.nc'.format(env['DIAGOBSROOT'],env['RESOLUTION'])
        if env['VERTICAL'] == '42':
            os.environ['gridfile'] = '{0}/omwg/za_grids/{1}_42lev_grid_info.nc'.format(env['DIAGOBSROOT'],env['RESOLUTION'])

        if env['VERTICAL'] == '62':
            os.environ['gridfile'] = '{0}/omwg/za_grids/{1}_62lev_grid_info.nc'.format(env['DIAGOBSROOT'],env['RESOLUTION'])

        # check if gridfile exists and is readable
        rc, err_msg = cesmEnvLib.checkFile(os.environ['gridfile'], 'read')
        if not rc:
            print('model_vs_obs:  check_prerequisites could not find gridfile = {0}'.format(os.environ['gridfile']))
            raise ocn_diags_bc.PrerequisitesError

        env['GRIDFILE'] = os.environ['gridfile']

        # check the resolution and decide if some plot modules should be turned off
        if (env['RESOLUTION'] == 'tx0.1v2' or env['RESOLUTION'] == 'tx0.1v3'):
            env['MVO_PM_VELISOPZ'] = os.environ['MVO_PM_VELISOPZ'] = 'FALSE'
            env['MVO_PM_KAPPAZ'] = os.environ['MVO_PM_KAPPAZ'] = 'FALSE'

        # create the global zonal average file used by most of the plotting classes
        print('    model vs. obs - calling create_za')
        diagUtilsLib.create_za( env['WORKDIR'], env['TAVGFILE'], env['GRIDFILE'], env['TOOLPATH'], env)

        return env
Example #10
    def check_prerequisites(self, env):
        """ check prerequisites
        """
        print("  Checking prerequisites for : {0}".format(self.__class__.__name__))
        super(modelVsObsEcosys, self).check_prerequisites(env)

        # clean out the old working plot files from the workdir
        if env['CLEANUP_FILES'].upper() in ['T','TRUE']:
            cesmEnvLib.purge(env['WORKDIR'], '.*\.pro')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.gif')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.dat')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.ps')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.png')
            cesmEnvLib.purge(env['WORKDIR'], '.*\.html')

        # create the plot.dat file in the workdir used by all NCL plotting routines
        diagUtilsLib.create_plot_dat(env['WORKDIR'], env['XYRANGE'], env['DEPTHS'])

        # read in the ecosys_vars.txt file and create the corresponding link files to the climatology files
        try:
            ecosys_vars_file = env['ECOSYSVARSFILE']
            f = open(ecosys_vars_file)
            ecosys_vars = f.read().split()
            f.close()
        except IOError as e:
            print ('ERROR: unable to open {0} error {1} : {2}'.format(ecosys_vars_file, e.errno, e.strerror))
        except ValueError:
            print ('ERROR: unable to split {0} into separate variable names'.format(ecosys_vars_file))
        except:
            print ('ERROR: unexpected error in {0}'.format(self._name))
            raise
        
        # loop over the ecosys_vars list and create the links to the mavg file
        sourceFile = os.path.join(env['WORKDIR'],'mavg.{0:04d}.{1:04d}.nc'.format(int(env['YEAR0']),int(env['YEAR1'])))
        for var in ecosys_vars:
            linkFile = os.path.join(env['WORKDIR'],'{0}.{1}.clim.{2:04d}-{3:04d}.nc'.format(env['CASE'],var,int(env['YEAR0']),int(env['YEAR1'])))
            try:
                os.symlink(sourceFile, linkFile)
            except OSError as e:
                print ('ERROR: unable to create symbolic link {0} to {1} error {2} : {3}'.format(linkFile, sourceFile, e.errno, e.strerror))

        # create the POPDIAG and PME environment variables
        env['POPDIAGPY'] = env['POPDIAGPY2'] = env['POPDIAG'] = os.environ['POPDIAG'] = 'TRUE'
        env['PME'] = os.environ['PME'] = '1'
        env['mappdir'] = env['ECODATADIR']+'/mapping'

        # create the plot_depths.dat file
        fh = open('{0}/plot_depths.dat'.format(env['WORKDIR']),'w')
        fh.write('{0}\n'.format(env['DEPTHS']))
        fh.close()
        
        return env