Example #1
def do_avg(infile, inpath, variable, nodata, outfile):
    # netCDF4 output with compression (for netCDF3, set all three flags to 0)
    cdms2.setNetcdfShuffleFlag(1)
    cdms2.setNetcdfDeflateFlag(1)
    cdms2.setNetcdfDeflateLevelFlag(3)

    # note that this version will erase data wherever a nodata is found in the series
    avg = None
    nodatamask = None
    for ifile in infile:
        fname = os.path.join(inpath, ifile)
        if not os.path.exists(fname):
            messageOnExit('file {0} not found on path {1}. Exit(100).'.format(ifile, inpath), 100)
        thisfile = cdms2.open(fname, 'r')

        data = numpy.array(thisfile[variable][:])
        if avg is None:
            avg = data
            nodatamask = data >= nodata
        else:
            avg = avg + data
            nodatamask |= data >= nodata  # accumulate the mask over the whole series, as the comment says
        thisfile.close()

    avg = avg/len(infile)
    if nodatamask.any():
        avg[nodatamask] = nodata
        
    if os.path.exists(outfile): os.remove(outfile)
    outfh = cdms2.open(outfile, 'w')
    outvar=cdms2.createVariable(avg, typecode='f', id=variable, fill_value=nodata )
    outfh.write(outvar)
    outfh.close()
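The shuffle/deflate/deflate-level trio above recurs throughout these examples. As a minimal sketch (a helper of my own, not part of any quoted project), switching cdms2 between compressed netCDF4 output and plain netCDF3 output could be centralized like this:

import cdms2

def set_netcdf_flags(netcdf4=True, deflate_level=3):
    # The cdms2 setters take 0/1 flags; the deflate level is an integer from 0 to 9.
    if netcdf4:
        cdms2.setNetcdfShuffleFlag(1)
        cdms2.setNetcdfDeflateFlag(1)
        cdms2.setNetcdfDeflateLevelFlag(deflate_level)
    else:
        cdms2.setNetcdfShuffleFlag(0)
        cdms2.setNetcdfDeflateFlag(0)
        cdms2.setNetcdfDeflateLevelFlag(0)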
Example #2
    def execute(self): 
        import cdms2, vcs
        cdms2.setNetcdfShuffleFlag(0)
        cdms2.setNetcdfDeflateFlag(0)
        cdms2.setNetcdfDeflateLevelFlag(0)
        start_time = time.time()
        dataIn=self.loadData()[0]
        location = self.loadDomain()
        cdms2keyargs = self.domain2cdms(location)
        url = dataIn["url"]
        id = dataIn["id"]
        var_cache_id =  ":".join( [url,id] )
        dataset = self.loadFileFromURL( url )
        logging.debug( " $$$ Data Request: '%s', '%s' ", var_cache_id, str( cdms2keyargs ) )
        variable = dataset[ id ]

        read_start_time = time.time()
        result_variable = variable(**cdms2keyargs)
        result_data = result_variable.squeeze()[...]
        time_axis = result_variable.getTime()
        read_end_time = time.time()

        x = vcs.init()
        bf = x.createboxfill('new')
        x.plot( result_data, bf, 'default', variable=result_variable, bg=1 )
        x.gif(  OutputPath + '/plot.gif' )

        result_obj = {}

        result_obj['url'] = OutputDir + '/plot.gif'
        result_json = json.dumps( result_obj )
        self.result.setValue( result_json )
        final_end_time = time.time()
        logging.debug( " $$$ Execution time: %f (with init: %f) sec", (final_end_time-start_time), (final_end_time-self.init_time) )
Example #3
def do_transform(infile, outfile, template):
    # for netcdf3:
    cdms2.setNetcdfShuffleFlag(0)
    cdms2.setNetcdfDeflateFlag(0)
    cdms2.setNetcdfDeflateLevelFlag(0)

    with open(infile, mode='rb') as f:
        fileContent = f.read()


    (referenceGrid, latAxis, lonAxis, latBounds, lonBounds)=makeGrid()
    if os.path.exists(outfile):os.remove(outfile)
    fout = cdms2.open(outfile, "w")


    #thisData = struct.unpack(template['read_type'] * ((template['read_nl'] * template['read_ns']) // template['read_type_size']) , fileContent[template['skip_byte']:template['skip_byte']+(template['read_nl']*template['read_ns'])*template['read_type_size']] )
    skip = 4 * 8  # header bytes to skip before the data payload

    thisData = numpy.array(struct.unpack('>64800f', fileContent[skip:skip + (180*360)*4]))
    thisVar = cdms2.createVariable(thisData.reshape( (template['read_ns'], template['read_nl']) ), typecode=template['read_type'], id=template['id'], \
                                       fill_value=template['nodata'], grid=referenceGrid, copyaxes=1 )
    fout.write(thisVar)

#    thisData2 = numpy.array(struct.unpack('64800B', fileContent[skip + (180*360)*4 : skip + (180*360)*4 + 180*360]))
#    thisVar2 = cdms2.createVariable(thisData2.reshape( (template['read_ns'], template['read_nl']) ), typecode='B', id='ice', \
#                                       fill_value=0, grid=referenceGrid, copyaxes=1 )
#    fout.write(thisVar2)

    fout.close()
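The unpack literals follow from the implied 1-degree global grid; a quick sanity check of that arithmetic (the grid shape is an assumption read off the constants above):

# '>64800f' means big-endian ('>'), 180*360 = 64800 float32 values ('f')
count = 180 * 360            # one value per 1-degree lat/lon cell
fmt = '>{0}f'.format(count)  # reproduces the '>64800f' literal above
nbytes = count * 4           # 259200 payload bytes read after the skipped header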
Example #4
    def execute(self, test_str, imagefilename, imagethreshold, ncfiles, rtol, atol):
        print test_str
        if imagethreshold is None:  # user didn't specify a value
            imagethreshold = regression.defaultThreshold
        # Silence annoying messages about how to set the NetCDF file type.  Anything will do.
        cdms2.setNetcdfShuffleFlag(0)
        cdms2.setNetcdfDeflateFlag(0)
        cdms2.setNetcdfDeflateLevelFlag(0)

        # nonstandard, suitable for testing:
        proc = subprocess.Popen([self.diagstr], shell=True)
        proc_status = proc.wait()
        if proc_status != 0:
            raise DiagError("diags run failed")

        if self.keep:
            print "save ", imagefilename, ncfiles.keys()
            print "output directory is = ", self.outpath
        else:
            # Test of graphics (png) file match:
            # This just looks at combined plot, aka summary plot, which is a compound of three plots.

            imagefname = os.path.join(self.outpath, imagefilename)
            imagebaselinefname = os.path.join(self.baselinepath, imagefilename)
            # pdb.set_trace()
            print "OK THRESHOLD IS:", imagethreshold
            graphics_result = regression.check_result_image(imagefname, imagebaselinefname, imagethreshold)
            print "Graphics file", imagefname, "match difference:", graphics_result

            # initialize to successful graphics check
            GR_CLOSE = graphics_result == 0
            assert GR_CLOSE, "graphic images are not close"

            # Test of NetCDF data (nc) file match:
            NC_CLOSE = True
            for ncfilename, ncvars in ncfiles.items():
                for var in ncvars:
                    # print ncfilename, var
                    try:
                        # print ">>>>>>>>>>>>>", var, ncfilename
                        close = self.closeness(var, ncfilename, rtol, atol)
                        if not close:
                            print var, " in ", os.path.join(
                                self.outpath, ncfilename
                            ), " is not close from the one in:", os.path.join(self.baselinepath, ncfilename)
                    except Exception:
                        print "NetCDF comparison failed for ", var, " in file: ", os.path.join(
                            self.outpath, ncfilename
                        ), "vs", os.path.join(self.baselinepath, ncfilename)
                        close = False
                    NC_CLOSE = NC_CLOSE and close
            assert NC_CLOSE, "NetCDF files are not close"

            # cleanup the temp files
            if GR_CLOSE and NC_CLOSE:
                shutil.rmtree(self.outpath)
Example #5
def compute_and_write_climatologies_keepvars( varkeys, reduced_variables, season, case='', variant='', path='' ):
    """Computes climatologies and writes them to a file.
    Inputs: varkeys, names of variables whose climatologies are to be computed
            reduced_variables, dict (key:rv) where key is a variable name and rv an instance
               of the class reduced_variable
            season: the season on which the climatologies will be computed
            variant: a string to be inserted in the filename"""
    # Compute the value of every variable we need.
    varvals = {}
    # First compute all the reduced variables
    # Probably this loop consumes most of the running time.  It's what has to read in all the data.
    for key in varkeys:
        if key in reduced_variables:
            varvals[key] = reduced_variables[key].reduce()

    for key in varkeys:
        if key in reduced_variables:
            var = reduced_variables[key]
            if varvals[key] is not None:
                if 'case' in var._file_attributes.keys():
                    case = var._file_attributes['case']+'_'
                    break

    logger.info("writing climatology file for %s %s %s ",case,variant,season)
    if variant!='':
        variant = variant+'_'
    logger.info('case: %s',case)
    logger.info('variant: %s', variant)
    logger.info('season: %s', season)
    filename = case + variant + season + "_climo.nc"
    # ...actually we want to write this to a full directory structure like
    #    root/institute/model/realm/run_name/season/
    value=0
    cdms2.setNetcdfShuffleFlag(value) ## where value is either 0 or 1
    cdms2.setNetcdfDeflateFlag(value) ## where value is either 0 or 1
    cdms2.setNetcdfDeflateLevelFlag(value) ## where value is an integer between 0 and 9 inclusive

    g = cdms2.open( os.path.join(path,filename), 'w' )    # later, choose a better name and a path!
    store_provenance(g)
    for key in varkeys:
        if key in reduced_variables:
            var = reduced_variables[key]
            if varvals[key] is not None:
                varvals[key].id = var.variableid
                varvals[key].reduced_variable=varvals[key].id
                if hasattr(var,'units'):
                    varvals[key].units = var.units+'*'+var.units
                g.write(varvals[key])
                for attr,val in var._file_attributes.items():
                    if not hasattr( g, attr ):
                        setattr( g, attr, val )
    g.season = season
    g.close()
    return varvals,case
Example #6
def quickSave(data, name, path):
    # netCDF3 output; for compressed netCDF4, set the flags to 1, 1, 3 instead
    cdms2.setNetcdfShuffleFlag(0)
    cdms2.setNetcdfDeflateFlag(0)
    cdms2.setNetcdfDeflateLevelFlag(0)

    outname=os.path.join(path, name)
    if os.path.exists(outname): os.remove(outname)
    fh = cdms2.open(outname, 'w')
    variable = cdms2.createVariable(data, id='data')
    fh.write(variable)
    fh.close()
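A hypothetical call, with placeholder array and paths (os, cdms2 and numpy are assumed imported at module level):

import numpy
quickSave(numpy.zeros((10, 10), dtype='f'), 'debug_dump.nc', '/tmp')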
Example #7
    def setUp(self):
        """
        Move to a temporary directory before executing the test module.
        """
        self._tmpdir = tempfile.mkdtemp('.tmp', 'test_cdms')
        os.chdir(self._tmpdir)

        # Enter NetCDF4 mode for these tests
        #!TODO: magically deactivate test if compiled with NetCDF3
        cdms2.setNetcdfShuffleFlag(1)
        cdms2.setNetcdfDeflateFlag(1)
        cdms2.setNetcdfDeflateLevelFlag(0)
Example #8
def saveData(outfilename, data, typecode, id, fill_value, grid, copyaxes, attribute1, attribute2, latAxis, lonAxis):
    
    # netCDF4 output with compression (for netCDF3, set all three flags to 0)
    cdms2.setNetcdfShuffleFlag(1)
    cdms2.setNetcdfDeflateFlag(1)
    cdms2.setNetcdfDeflateLevelFlag(3)

    if os.path.exists(outfilename): os.remove(outfilename)
    outfile = cdms2.open( outfilename, 'w')
    var = cdms2.createVariable(data, typecode=typecode, id=id, fill_value=fill_value, grid=grid, copyaxes=copyaxes, attributes=dict(long_name=attribute1, units=attribute2) )
    var.setAxisList((latAxis, lonAxis))
    outfile.write(var)
    outfile.close()
Example #9
    def initCustomize(self, customPath, styles):
        if customPath is None:
            customPath = os.path.join(os.environ["HOME"], ".uvcdat", "customizeUVCDAT.py")

        if os.path.exists(customPath):
            execfile(customPath, customizeUVCDAT.__dict__, customizeUVCDAT.__dict__)

        if styles is None:
            styles = customizeUVCDAT.appStyles

        icon = QtGui.QIcon(customizeUVCDAT.appIcon)
        self.setWindowIcon(icon)

        ## cdms2 setup section
        cdms2.axis.time_aliases += customizeUVCDAT.timeAliases
        cdms2.axis.level_aliases += customizeUVCDAT.levelAliases
        cdms2.axis.latitude_aliases += customizeUVCDAT.latitudeAliases
        cdms2.axis.longitude_aliases += customizeUVCDAT.longitudeAliases
        cdms2.setNetcdfShuffleFlag(customizeUVCDAT.ncShuffle)
        cdms2.setNetcdfDeflateFlag(customizeUVCDAT.ncDeflate)
        cdms2.setNetcdfDeflateLevelFlag(customizeUVCDAT.ncDeflateLevel)

        ## StylesSheet
        st = ""
        if isinstance(styles, str):
            st = styles
        elif isinstance(styles, dict):
            for k, val in styles.items():
                if isinstance(val, QtGui.QColor):
                    val = str(val.name())
                st += "%s:%s; " % (k, val)
        if len(st) > 0:
            self.setStyleSheet(st)

        ###########################################################
        ###########################################################
        ## Prettiness
        ###########################################################
        ###########################################################
        # self.setGeometry(0,0, 1100,800)
        self.setWindowTitle("The Ultrascale Visualization Climate Data Analysis Tools - (UV-CDAT)")
        ## self.resize(1100,800)
        # self.setMinimumWidth(1100)
        self.main_window_placement()
Example #10
 def nc(self):
     if self.netCDF3.isChecked():
         cdms2.useNetcdf3()
         self.ncShuffle.setEnabled(False)
         self.ncDeflate.setEnabled(False)
         self.ncDeflateLevel.setEnabled(False)
     else:
         self.ncShuffle.setEnabled(True)
         self.ncDeflate.setEnabled(True)
         self.ncDeflateLevel.setEnabled(True)
         if self.ncShuffle.isChecked():
             cdms2.setNetcdfShuffleFlag(1)
         else:
             cdms2.setNetcdfShuffleFlag(0)
         if self.ncDeflate.isChecked():
             cdms2.setNetcdfDeflateFlag(1)
         else:
             cdms2.setNetcdfDeflateFlag(0)
         cdms2.setNetcdfDeflateLevelFlag(self.ncDeflateLevel.value())
Example #11
File: diags_test.py  Project: NESII/uvcdat
 def execute(self, test_str, imagefilename, imagethreshold, ncfiles, rtol, atol):
     print test_str
     # Silence annoying messages about how to set the NetCDF file type.  Anything will do.
     cdms2.setNetcdfShuffleFlag(0)
     cdms2.setNetcdfDeflateFlag(0)
     cdms2.setNetcdfDeflateLevelFlag(0)
     
     # nonstandard, suitable for testing:
     proc = subprocess.Popen([self.diagstr], shell=True)
     proc_status = proc.wait()
     if proc_status!=0: 
         raise DiagError("diags run failed")
 
     if self.keep:
         print "save ", imagefilename, ncfiles.keys()
         print "output directory is = ", self.outpath
     else:    
         # Test of graphics (png) file match:
         # This just looks at combined plot, aka summary plot, which is a compound of three plots.
         
         imagefname = os.path.join( self.outpath, imagefilename )
         imagebaselinefname = os.path.join( self.baselinepath, imagefilename )
         graphics_result = checkimage.check_result_image( imagefname, imagebaselinefname, imagethreshold )
         print "Graphics file", imagefname, "match difference:", graphics_result
         
         # Test of NetCDF data (nc) file match:
         CLOSE = True
         for ncfilename, ncvars in ncfiles.items():
             for var in ncvars:
                 #print ncfilename, var
                 try:
                     close = self.closeness( var, ncfilename, rtol, atol )
                     if not close:
                         print var, ' in ', ncfilename, ' is not close.'
                 except Exception:
                     print 'comparison failed for ', var, ' in file: ', ncfilename
                     close = False
                 CLOSE = CLOSE and close
                 
         #cleanup the temp files
         shutil.rmtree(self.outpath)
         assert(CLOSE), 'data are not close'
Example #12
def do_regrid(infileName, variable, outfileName, netcdfType=4):

    nodata = 1.e20

    if netcdfType==4:
        cdms2.setNetcdfShuffleFlag(1)
        cdms2.setNetcdfDeflateFlag(1)
        cdms2.setNetcdfDeflateLevelFlag(3)
    elif netcdfType==3:
        cdms2.setNetcdfShuffleFlag(0)
        cdms2.setNetcdfDeflateFlag(0)
        cdms2.setNetcdfDeflateLevelFlag(0)
    else:
        exitWM('Unknown netcdf type {0}. Exit 2.'.format(netcdfType),2)

    infile = cdms2.open(infileName)
    unitsVar = infile[variable].units
    (referenceGrid, latAxis, lonAxis, latBounds, lonBounds, lvl_bounds, lvl) = makeGrid()
    regridded = infile[variable][:].regrid(referenceGrid)

    outvar = cdms2.createVariable(regridded, typecode='f',
                                  id=variable, fill_value=nodata,
                                  grid=referenceGrid, copyaxes=1,
                                  attributes=dict(long_name='regridded {0}'.format(variable), units=unitsVar))
    #final = do_hyperInterp(regridded, infile[variable].getLevel()[:], lvl, nodata)
    #outvar = cdms2.createVariable(final, typecode='f', id=variable, fill_value=nodata, attributes=dict(long_name='regridded {0}'.format(variable), units=unitsVar) )


    #gridBis = regridded.subSlice(longitude=0).crossSectionRegrid(lvl, latAxis, method="linear")

    #zregrid = tmpvar.crossSectionRegrid(lvl)

    #outvar.setAxisList((latAxis, lonAxis))
    if os.path.exists(outfileName): os.remove(outfileName)
    outfile=cdms2.open(outfileName, 'w')
    outfile.write(outvar)
    outfile.history='Created with '+__file__.encode('utf8')
    outfile.close()
    infile.close()
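A sketch of a call with placeholder file and variable names; makeGrid and exitWM must already be defined in the surrounding module:

do_regrid('tas_input.nc', 'tas', 'tas_regridded.nc', netcdfType=4)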
Example #13
def doRegrid(infile, varname, outfile, lon, lat, lon_bnds, lat_bnds):

    cdms2.setNetcdfShuffleFlag(1)
    cdms2.setNetcdfDeflateFlag(1)
    cdms2.setNetcdfDeflateLevelFlag(2)

    fh = cdms2.open(infile)
    if fh is None:  # defensive check; cdms2.open usually raises on failure rather than returning None
        exitMessage("Could not open file {0}. Exit 2.".format(infile), 2)

    if varname not in fh.variables.keys():
        exitMessage('variable named '+varname+' could not be found. Exit 4.', 4)

    yVar = fh(varname)

    latAxis = cdms2.createAxis(lat, lat_bnds)
    latAxis.designateLatitude(True)
    latAxis.units = 'degree_north'
    latAxis.long_name = 'Latitude'

    lonAxis = cdms2.createAxis(lon, lon_bnds)
    lonAxis.designateLongitude(True, 360.0)
    lonAxis.units = 'degree_east'
    lonAxis.long_name='Longitude'

    listAxisOrg = yVar.getAxisList()
    timeAxis = listAxisOrg[0]
    

    grid = cdms2.createGenericGrid(latAxis, lonAxis, lat_bnds, lon_bnds)
    regridded = yVar.regrid(grid)

    g=cdms2.open(outfile, 'w')
    
    #g.write(regridded, None, None, None, varname, None, 1.e20, None, cdms2.CdFloat)
    temp1 = cdms2.createVariable(regridded, typecode='f', id=varname, fill_value=1.e20, axes=[timeAxis, latAxis, lonAxis], copyaxes=0, attributes=dict(long_name=yVar.long_name, units=yVar.units) )
    g.write(temp1)
    g.close()
Example #14
#!/usr/local/cdat5.2/bin/python
# v2 adds seasonal and quarters calculations for ptot, r02, sdii function
# For sdii it's necessary to run everything again

"""Module for computing precipitation extreme stats mostly using CDO utilities"""

from sys import exit
from os import path, system, mkdir
from cdms2 import setNetcdfShuffleFlag, setNetcdfDeflateLevelFlag, setNetcdfDeflateFlag
from string import split
from datetime import datetime
from daily_stats_cdms_utils import MosaicFiles

setNetcdfShuffleFlag(0)
setNetcdfDeflateFlag(0)
setNetcdfDeflateLevelFlag(0)


RootDir = '/mnt/BCSD'

OUTROOT = '/mnt/data_climatewizard/AR5_Global_Daily_25k/out_stats'
if not path.isdir(OUTROOT):
    mkdir(OUTROOT)

OUTTEMP = '/mnt/workspace_cluster_12/ClimateWizard/AR5_Global_Daily_25k'#'/mnt/data_climatewizard/AR5_Global_Daily_25k'
if not path.isdir(OUTTEMP):
    mkdir(OUTTEMP)

# added as global institution attribute to output files
txtinst = "Santa Clara U.,Climate Central,The Nature Conservancy,International Center for Tropical Agriculture"
Example #15
#      https://github.com/PCMDI/cmor/blob/master/Test/<filename>.json
#      to the 'Test/' directory.

import cmor
import numpy
import unittest
import sys
import os
import tempfile
import time

try:
    import cdms2
    cdms2.setNetcdfShuffleFlag(0)
    cdms2.setNetcdfDeflateFlag(0)
    cdms2.setNetcdfDeflateLevelFlag(0)
except BaseException:
    print("This test code needs a recent cdms2 interface for i/0")
    sys.exit()


class TestCase(unittest.TestCase):
    def testCMIP6_defaultmissinginteger(self):
        # -------------------------------------------
        # Try to call cmor with a bad institution_ID
        # -------------------------------------------
        try:
            cmor.setup(inpath='TestTables',
                       netcdf_file_action=cmor.CMOR_REPLACE)
            cmor.dataset_json("Test/CMOR_input_TestTables.json")
            # ------------------------------------------
Example #16
import os
from pathlib import Path

import cdms2

value = 0
cdms2.setNetcdfShuffleFlag(value)  # where value is either 0 or 1
cdms2.setNetcdfDeflateFlag(value)  # where value is either 0 or 1
cdms2.setNetcdfDeflateLevelFlag(
    value)  # where value is an integer between 0 and 9 inclusive

# A script to convert high frequency single point E3SM output to per-variable per-site netcdf files as input for ARM diagnostics.
# In this example 3 hourly output at ARM sites are saved on h1 tape using namelist as follows:
# fincl2 = 'PS', 'Q', 'T', 'Z3', 'CLOUD', 'CONCLD', 'CLDICE', 'CLDLIQ', 'LS_FLXPRC', 'LS_FLXSNW', 'ZMFLXPRC', 'ZMFLXSNW', 'FREQR', 'REI', 'REL', 'CV_REFFICE', 'CV_REFFLIQ', 'LS_REFFRAIN', 'LS_REFFSNOW', 'PRECT', 'TMQ', 'PRECC', 'TREFHT', 'QREFHT', 'OMEGA','CLDTOT', 'LHFLX', 'SHFLX', 'FLDS', 'FSDS', 'FLNS', 'FSNS', 'FLNSC', 'FSDSC', 'FSNSC', 'AODVIS', 'AODABS'
# fincl2lonlat = '262.5e_36.6n','204.6e_71.3n','147.4e_2.0s','166.9e_0.5s','130.9e_12.4s','331.97e_39.09n'
data_path = "/Users/zhang40/Documents/ACME/e3sm_arm_diags/data/20200922.F2010SC5.ne30pg2_r05.armsites/"
out_path = "/Users/zhang40/Documents/ACME/e3sm_arm_diags/data/post-processed/20200922.F2010SC5.ne30pg2_r05.armsites/"
p = Path(data_path)
cmd = "ncrcat -h " + data_path + "*h1*nc " + data_path + "armsites_all_time.nc"
os.popen(cmd).readlines()

filename = data_path + "armsites_all_time.nc"
fin = cdms2.open(filename)
variables = [
    "CLOUD",
    "CONCLD",
    "CLDICE",
    "CLDLIQ",
    "PRECT",
    "TMQ",
    "TREFHT",
Example #17
def test_driver( path1, path2=None, filt2=None ):
    """ Test driver for setting up data for plots"""

    # First, find and index the data files.
    datafiles1 = dirtree_datafiles( path1 )
    logger.info("jfp datafiles1=%s",datafiles1)
    datafiles2 = dirtree_datafiles( path2, filt2 )
    logger.info("jfp datafiles2=%s",datafiles2)
    filetable1 = basic_filetable( datafiles1 )
    filetable2 = basic_filetable( datafiles2 )

    # Next we'll compute reduced variables.  They have generally been reduced by averaging in time,
    # and often over other axes as well.  Reducing the data first is the fastest way to compute, important
    # if we need to be interactive.  And it is correct if whatever we plot is linear in the
    # variables, as is almost always the case.  But if we want to plot a highly nonlinear function
    # of the data variables, the averaging will have to wait until later.

    # The reduced_variables dict names and contains all the reduced variables which we have defined.
    # They will be used in defining instances of plotspec.
    reduced_variables = {
        'hyam_1': reduced_variable(
            variableid='hyam', filetable=filetable1,
            reduction_function=(lambda x,vid=None: x) ),
        'hybm_1': reduced_variable(
            variableid='hybm', filetable=filetable1,
            reduction_function=(lambda x,vid=None: x) ),
        'PS_ANN_1': reduced_variable(
            variableid='PS', filetable=filetable1,
            reduction_function=reduce2lat ),
        'T_CAM_ANN_1': reduced_variable(
            variableid='T', filetable=filetable1,
            reduction_function=reduce2levlat ),
        'T_CAM_ANN_2': reduced_variable(
            variableid='T', filetable=filetable2,
            reduction_function=reduce2levlat ),
        'TREFHT_ANN_latlon_Npole_1': reduced_variable(
            variableid='TREFHT', filetable=filetable1,
            reduction_function=(lambda x,vid=None: restrict_lat(reduce2latlon(x,vid=vid),50,90)) ),
        'TREFHT_ANN_latlon_Npole_2': reduced_variable(
            variableid='TREFHT', filetable=filetable2,
            reduction_function=(lambda x,vid=None: restrict_lat(reduce2latlon(x,vid=vid),50,90)) ),
        'TREFHT_ANN_lat_1': reduced_variable(
            variableid='TREFHT', filetable=filetable1,
            reduction_function=reduce2lat ),
        'TREFHT_DJF_lat_1': reduced_variable(
            variableid='TREFHT',
            filetable=filetable1,
            reduction_function=(lambda x,vid=None: reduce2lat_seasonal(x,seasonsDJF,vid=vid)) ),
        'TREFHT_DJF_lat_2': reduced_variable(
            variableid='TREFHT',
            filetable=filetable2,
            reduction_function=(lambda x,vid=None: reduce2lat_seasonal(x,seasonsDJF,vid=vid)) ),
        'TREFHT_DJF_latlon_1': reduced_variable(
            variableid='TREFHT',
            filetable=filetable1,
            reduction_function=(lambda x,vid=None: reduce2latlon_seasonal(x,seasonsDJF,vid=vid)) ),
        'TREFHT_DJF_latlon_2': reduced_variable(
            variableid='TREFHT',
            filetable=filetable2,
            reduction_function=(lambda x,vid=None: reduce2latlon_seasonal(x,seasonsDJF,vid=vid)) ),
        'TREFHT_JJA': reduced_variable(
            variableid='TREFHT',
            filetable=filetable1,
            reduction_function=(lambda x,vid=None: reduce2lat_seasonal(x,seasonsJJA,vid=vid)) ),
        'PRECT_JJA_lat_1': reduced_variable(
            variableid='PRECT',
            filetable=filetable1,
            reduction_function=(lambda x,vid=None: reduce2lat_seasonal(x,seasonsJJA,vid=vid)) ),
        'PRECT_JJA_lat_2': reduced_variable(
            variableid='PRECT',
            filetable=filetable2,
            reduction_function=(lambda x,vid=None: reduce2lat_seasonal(x,seasonsJJA,vid=vid)) ),


        # CAM variables needed for heat transport:
            # FSNS, FLNS, FLUT, FSNTOA, FLNT, FSNT, SHFLX, LHFLX,
        'FSNS_1': reduced_variable(
            variableid='FSNS',filetable=filetable1,reduction_function=(lambda x,vid:x) ),
        'FSNS_ANN_latlon_1': reduced_variable(
            variableid='FSNS',
            filetable=filetable1,
            reduction_function=reduce2latlon ),
        'FLNS_1': reduced_variable(
            variableid='FLNS',filetable=filetable1,reduction_function=(lambda x,vid:x) ),
        'FLNS_ANN_latlon_1': reduced_variable(
            variableid='FLNS',
            filetable=filetable1,
            reduction_function=reduce2latlon ),
        'FLUT_ANN_latlon_1': reduced_variable(
            variableid='FLUT',
            filetable=filetable1,
            reduction_function=reduce2latlon ),
        'FSNTOA_ANN_latlon_1': reduced_variable(
            variableid='FSNTOA',
            filetable=filetable1,
            reduction_function=reduce2latlon ),
        'FLNT_1': reduced_variable(
            variableid='FLNT',filetable=filetable1,reduction_function=(lambda x,vid:x) ),
        'FLNT_ANN_latlon_1': reduced_variable(
            variableid='FLNT',
            filetable=filetable1,
            reduction_function=reduce2latlon ),
        'FSNT_1': reduced_variable(
            variableid='FSNT',filetable=filetable1,reduction_function=(lambda x,vid:x) ),
        'FSNT_ANN_latlon_1': reduced_variable(
            variableid='FSNT',
            filetable=filetable1,
            reduction_function=reduce2latlon ),
        'QFLX_1': reduced_variable(
            variableid='QFLX',filetable=filetable1,reduction_function=(lambda x,vid:x) ),
        'SHFLX_1': reduced_variable(
            variableid='SHFLX',filetable=filetable1,reduction_function=(lambda x,vid:x) ),
        'SHFLX_ANN_latlon_1': reduced_variable(
            variableid='SHFLX',
            filetable=filetable1,
            reduction_function=reduce2latlon ),
        'LHFLX_ANN_latlon_1': reduced_variable(
            variableid='LHFLX',
            filetable=filetable1,
            reduction_function=reduce2latlon ),
        'ORO_ANN_latlon_1': reduced_variable(
            variableid='ORO',
            filetable=filetable1,
            reduction_function=reduce2latlon ),
        'OCNFRAC_ANN_latlon_1': reduced_variable(
            variableid='OCNFRAC',
            filetable=filetable1,
            reduction_function=reduce2latlon ),


        'ts_lat_old': reduced_variable(
            variableid='surface_temperature', # normally a CF standard_name, even for non-CF data.
            filetable=filetable1,
            reduction_function=reduce2lat_old ),
        'ts_lat_new': reduced_variable(
            variableid='surface_temperature', # normally a CF standard_name, even for non-CF data.
            filetable=filetable1,
            reduction_function=reduce2lat 
            # The reduction function will take just one argument, a variable (MV).  But it might
            # be expressed here as a lambda wrapping a more general function.
            # Often there will be ranges in time, space, etc. specified here.  No range means
            # everything.
            ),
        'ts_scalar_tropical_o': reduced_variable(
            variableid = 'surface_temperature',
            filetable=filetable1,
            reduction_function=(lambda mv,vid=None: reduce2scalar_zonal_old(mv,-20,20,vid=vid))
            ),
        'ts_scalar_tropical_n': reduced_variable(
            variableid = 'surface_temperature',
            filetable=filetable1,
            reduction_function=(lambda mv,vid=None: reduce2scalar_zonal(mv,-20,20,vid=vid))
            )
        }

    # Derived variables have to be treated separately from reduced variables
    # because derived variables generally depend on reduced variables.
    # But N.B.: the dicts reduced_variables and derived_variables
    # must never use the same key!
    derived_variables = {
        'CAM_HEAT_TRANSPORT_ALL_1': derived_var(
            vid='CAM_HEAT_TRANSPORT_ALL_1',
            inputs=['FSNS_ANN_latlon_1', 'FLNS_ANN_latlon_1', 'FLUT_ANN_latlon_1',
                    'FSNTOA_ANN_latlon_1', 'FLNT_ANN_latlon_1', 'FSNT_ANN_latlon_1',
                    'SHFLX_ANN_latlon_1', 'LHFLX_ANN_latlon_1', 'OCNFRAC_ANN_latlon_1' ],
            outputs=['atlantic_heat_transport','pacific_heat_transport',
                     'indian_heat_transport', 'global_heat_transport' ],
            func=oceanic_heat_transport ),
        'NCEP_OBS_HEAT_TRANSPORT_ALL_2': derived_var(
            vid='NCEP_OBS_HEAT_TRANSPORT_ALL_2',
            inputs=[],
            outputs=('latitude', ['atlantic_heat_transport','pacific_heat_transport',
                     'indian_heat_transport', 'global_heat_transport' ]),
            func=(lambda: ncep_ocean_heat_transport(path2) ) ),
        'T_ANN_1': derived_var(
            vid='T_ANN_1', inputs=['T_CAM_ANN_1', 'hyam_1', 'hybm_1', 'PS_ANN_1', 'T_CAM_ANN_2'],
            outputs=('temperature'),
            func=verticalize )
        }

    plotvars = dict( reduced_variables.items() + derived_variables.items() )

    # The plotvspecs dict names and contains all plotspec objects which we have defined.
    # The plotspeckeys variable, below, names the ones for which we will generate output.
    # A dict value can be a plotspec object, or a list of such objects.  A list of
    # plotspec instances specifies a page containing multiple plots in separate panes.
    plotspecs = {
        'TREFHT_ANN_Npole_ALL':
            ['TREFHT_ANN_Npole_1', 'TREFHT_ANN_Npole_2', 'TREFHT_ANN_Npole_diff'],
        'TREFHT_ANN_Npole_1': plotspec(
            vid='TREFHT_ANN_Npole_1',
            xvars=['TREFHT_ANN_latlon_Npole_1'], xfunc = lonvar,
            yvars=['TREFHT_ANN_latlon_Npole_1'], yfunc = latvar,
            zvars=['TREFHT_ANN_latlon_Npole_1'], zfunc = (lambda z: z),
            plottype='polar contour plot' ),
        'TREFHT_ANN_Npole_2': plotspec(
            vid='TREFHT_ANN_Npole_2',
            xvars=['TREFHT_ANN_latlon_Npole_2'], xfunc = lonvar,
            yvars=['TREFHT_ANN_latlon_Npole_2'], yfunc = latvar,
            zvars=['TREFHT_ANN_latlon_Npole_2'], zfunc = (lambda z: z),
            plottype='polar contour plot' ),
        'TREFHT_ANN_Npole_diff': plotspec(
            vid='TREFHT_ANN_Npole_diff',
            xvars=['TREFHT_ANN_latlon_Npole_1','TREFHT_ANN_latlon_Npole_2'], xfunc = lonvar_min,
            yvars=['TREFHT_ANN_latlon_Npole_1','TREFHT_ANN_latlon_Npole_2'], yfunc = latvar_min,
            zvars=['TREFHT_ANN_latlon_Npole_1','TREFHT_ANN_latlon_Npole_2'], zfunc = aminusb_2ax,
            plottype='polar contour plot' ),
        'TREFHT_DJF_laton_ALL':
            ['TREFHT_DJF_latlon_1', 'TREFHT_DJF_latlon_2', 'TREFHT_DJF_latlon_diff'],
        'TREFHT_DJF_latlon_1': plotspec(
            vid='TREFHT_DJF_latlon_1',
            xvars=['TREFHT_DJF_latlon_1'], xfunc = lonvar,
            yvars=['TREFHT_DJF_latlon_1'], yfunc = latvar,
            zvars=['TREFHT_DJF_latlon_1'], zfunc = (lambda z: z),
            plottype='contour plot' ),
        'TREFHT_DJF_latlon_2': plotspec(
            vid='TREFHT_DJF_latlon_2',
            xvars=['TREFHT_DJF_latlon_2'], xfunc = lonvar,
            yvars=['TREFHT_DJF_latlon_2'], yfunc = latvar,
            zvars=['TREFHT_DJF_latlon_2'], zfunc = (lambda z: z),
            plottype='contour plot' ),
        'TREFHT_DJF_latlon_diff': plotspec(
            vid='TREFHT_DJF_latlon_diff',
            xvars=['TREFHT_DJF_latlon_1','TREFHT_DJF_latlon_2'], xfunc=lonvar_min,
            yvars=['TREFHT_DJF_latlon_1','TREFHT_DJF_latlon_2'], yfunc=latvar_min,
            zvars=['TREFHT_DJF_latlon_1','TREFHT_DJF_latlon_2'], zfunc= aminusb_2ax,
            plottype='contour plot'
            ),
        'T_ANN_VERT_CAM_OBS_ALL':
            ['T_VERT_ANN_1', 'T_VERT_ANN_2', 'T_VERT_difference' ],
        'T_VERT_difference': plotspec(
            vid='T_VERT_difference', xvars=['T_ANN_1','T_CAM_ANN_2'], xfunc = latvar_min,
            yvars=['T_ANN_1','T_CAM_ANN_2'], yfunc = levvar_min,
            ya1vars=['T_ANN_1','T_CAM_ANN_2'], ya1func = (lambda y1,y2: heightvar(levvar_min(y1,y2))),
            zvars=['T_ANN_1','T_CAM_ANN_2'], zfunc=aminusb_ax2, plottype="contour plot" ),
        'T_VERT_ANN_2': plotspec(
            vid='T_ANN_2', xvars=['T_CAM_ANN_2'], xfunc=latvar,
            yvars=['T_CAM_ANN_2'], yfunc=levvar, ya1vars=['T_CAM_ANN_2'], ya1func=heightvar,
            zvars=['T_CAM_ANN_2'], plottype='contour plot',
            zrangevars=['T_ANN_1','T_CAM_ANN_2'], zrangefunc=minmin_maxmax ),
        'T_VERT_ANN_1': plotspec(
            vid='T_ANN_1', xvars=['T_ANN_1'], xfunc=latvar,
            yvars=['T_ANN_1'], yfunc=levvar, ya1vars=['T_ANN_1'], ya1func=heightvar,
            zvars=['T_ANN_1'], plottype='contour plot',
            zrangevars=['T_ANN_1','T_CAM_ANN_2'], zrangefunc=minmin_maxmax ),
        'NCEP_OBS_HEAT_TRANSPORT_GLOBAL_2': plotspec(
            vid='NCEP_OBS_HEAT_TRANSPORT_GLOBAL_2',
            xvars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'], xfunc=(lambda x: x[0]),
            yvars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2' ],
            yfunc=(lambda y: y[1][3]), plottype='line plot'),
        'NCEP_OBS_HEAT_TRANSPORT_PACIFIC_2': plotspec(
            vid='NCEP_OBS_HEAT_TRANSPORT_PACIFIC_2',
            xvars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'], xfunc=(lambda x: x[0]),
            yvars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2' ],
            yfunc=(lambda y: y[1][0]), plottype='line plot'),
        'NCEP_OBS_HEAT_TRANSPORT_ATLANTIC_2': plotspec(
            vid='NCEP_OBS_HEAT_TRANSPORT_ATLANTIC_2',
            xvars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'], xfunc=(lambda x: x[0]),
            yvars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2' ],
            yfunc=(lambda y: y[1][1]), plottype='line plot'),
        'NCEP_OBS_HEAT_TRANSPORT_INDIAN_2': plotspec(
            vid='NCEP_OBS_HEAT_TRANSPORT_INDIAN_2',
            xvars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'], xfunc=(lambda x: x[0]),
            yvars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2' ],
            yfunc=(lambda y: y[1][2]), plottype='line plot'),
        'CAM_HEAT_TRANSPORT_GLOBAL_1': plotspec(
            vid='CAM_HEAT_TRANSPORT_GLOBAL_1',
            xvars=['FSNS_ANN_latlon_1'], xfunc=latvar,
            yvars=['CAM_HEAT_TRANSPORT_ALL_1' ],
            yfunc=(lambda y: y[3]), plottype='line plot'),
        'CAM_HEAT_TRANSPORT_PACIFIC_1': plotspec(
            vid='CAM_HEAT_TRANSPORT_PACIFIC_1',
            xvars=['FSNS_ANN_latlon_1'], xfunc=latvar,
            yvars=['CAM_HEAT_TRANSPORT_ALL_1' ],
            yfunc=(lambda y: y[0]), plottype='line plot'),
        'CAM_HEAT_TRANSPORT_ATLANTIC_1': plotspec(
            vid='CAM_HEAT_TRANSPORT_ATLANTIC_1',
            xvars=['FSNS_ANN_latlon_1'], xfunc=latvar,
            yvars=['CAM_HEAT_TRANSPORT_ALL_1' ],
            yfunc=(lambda y: y[1]), plottype='line plot'),
        'CAM_HEAT_TRANSPORT_INDIAN_1': plotspec(
            vid='CAM_HEAT_TRANSPORT_INDIAN_1',
            xvars=['FSNS_ANN_latlon_1'], xfunc=latvar,
            yvars=['CAM_HEAT_TRANSPORT_ALL_1' ],
            yfunc=(lambda y: y[2]), plottype='line plot'),
        'CAM_HEAT_TRANSPORT_ALL_1':
            ['CAM_HEAT_TRANSPORT_GLOBAL_1','CAM_HEAT_TRANSPORT_PACIFIC_1',
             'CAM_HEAT_TRANSPORT_ATLANTIC_1','CAM_HEAT_TRANSPORT_INDIAN_1'],
        'CAM_NCEP_HEAT_TRANSPORT_GLOBAL': plotspec(
            vid='CAM_NCEP_HEAT_TRANSPORT_GLOBAL',
            x1vars=['FSNS_ANN_latlon_1'], x1func=latvar,
            y1vars=['CAM_HEAT_TRANSPORT_ALL_1' ],
            y1func=(lambda y: y[3]),
            x2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'], x2func=(lambda x: x[0]),
            y2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2' ],
            y2func=(lambda y: y[1][3]),
            plottype='2 line plot'  ),
        'CAM_NCEP_HEAT_TRANSPORT_PACIFIC': plotspec(
            vid='CAM_NCEP_HEAT_TRANSPORT_PACIFIC',
            x1vars=['FSNS_ANN_latlon_1'], x1func=latvar,
            y1vars=['CAM_HEAT_TRANSPORT_ALL_1' ],
            y1func=(lambda y: y[0]),
            x2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'], x2func=(lambda x: x[0]),
            y2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2' ],
            y2func=(lambda y: y[1][0]),
            plottype='2 line plot'  ),
        'CAM_NCEP_HEAT_TRANSPORT_ATLANTIC': plotspec(
            vid='CAM_NCEP_HEAT_TRANSPORT_ATLANTIC',
            x1vars=['FSNS_ANN_latlon_1'], x1func=latvar,
            y1vars=['CAM_HEAT_TRANSPORT_ALL_1' ],
            y1func=(lambda y: y[1]),
            x2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'], x2func=(lambda x: x[0]),
            y2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2' ],
            y2func=(lambda y: y[1][1]),
            plottype='2 line plot'  ),
        'CAM_NCEP_HEAT_TRANSPORT_INDIAN': plotspec(
            vid='CAM_NCEP_HEAT_TRANSPORT_INDIAN',
            x1vars=['FSNS_ANN_latlon_1'], x1func=latvar,
            y1vars=['CAM_HEAT_TRANSPORT_ALL_1' ],
            y1func=(lambda y: y[2]),
            x2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'], x2func=(lambda x: x[0]),
            y2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2' ],
            y2func=(lambda y: y[1][2]),
            plottype='2 line plot'  ),
        'CAM_NCEP_HEAT_TRANSPORT_ALL':
            ['CAM_NCEP_HEAT_TRANSPORT_GLOBAL','CAM_NCEP_HEAT_TRANSPORT_PACIFIC',
             'CAM_NCEP_HEAT_TRANSPORT_ATLANTIC','CAM_NCEP_HEAT_TRANSPORT_INDIAN'],
        'past_CAM_HEAT_TRANSPORT_GLOBAL_1': plotspec(
            vid='CAM_HEAT_TRANSPORT_GLOBAL_1',
            xvars=['FSNS_ANN_latlon_1'], xfunc=latvar,
            yvars=['FSNS_ANN_latlon_1', 'FLNS_ANN_latlon_1', 'FLUT_ANN_latlon_1',
                    'FSNTOA_ANN_latlon_1', 'FLNT_ANN_latlon_1', 'FSNT_ANN_latlon_1',
                    'SHFLX_ANN_latlon_1', 'LHFLX_ANN_latlon_1', 'OCNFRAC_ANN_latlon_1' ],
            yfunc=oceanic_heat_transport, plottype='line plot'),
        'PRECT_JJA': ['PRECT_JJA_2line','PRECT_JJA_diff'],
        'PRECT_JJA_2line': plotspec(
            vid='PRECT_JJA_2line',
            x1vars=['PRECT_JJA_lat_1'], x1func = latvar,
            x2vars=['PRECT_JJA_lat_2'], x2func = latvar,
            y1vars=['PRECT_JJA_lat_1'], y1func=(lambda y: y),
            y2vars=['PRECT_JJA_lat_2'], y2func=(lambda y: y),
            plottype='2-line plot'),
        'PRECT_JJA_diff': plotspec(
            vid='PRECT_JJA_difference',
            xvars=['PRECT_JJA_lat_1','PRECT_JJA_lat_2'], xfunc = latvar_min,
            yvars=['PRECT_JJA_lat_1','PRECT_JJA_lat_2'],
            yfunc=aminusb_1ax,   # aminusb_1ax(y1,y2)=y1-y2; each y has 1 axis, use min axis
            plottype='line plot'),
        'TREFHT_ANN': plotspec(
            vid='TREFHT_ANN',xvars=['TREFHT_ANN_lat_1'], xfunc = latvar,
            yvars=['TREFHT_ANN_lat_1'], yfunc=(lambda y: y), plottype='line plot'),
        'TREFHT_DJF': ['TREFHT_DJF_2line','TREFHT_DJF_diff'],
        'TREFHT_DJF_2line': plotspec(
            vid='TREFHT_DJF_2line',
            x1vars=['TREFHT_DJF_lat_1'], x1func = latvar,
            x2vars=['TREFHT_DJF_lat_2'], x2func = latvar,
            y1vars=['TREFHT_DJF_lat_1'], y1func=(lambda y: y),
            y2vars=['TREFHT_DJF_lat_2'], y2func=(lambda y: y),
            plottype='2-line plot'),
        'TREFHT_DJF_diff': plotspec(
            vid='TREFHT_DJF_diff',
            xvars=['TREFHT_DJF_lat_1','TREFHT_DJF_lat_2'], xfunc = latvar_min,
            yvars=['TREFHT_DJF_lat_1','TREFHT_DJF_lat_2'],
            yfunc=aminusb_1ax,   # aminusb_1ax(y1,y2)=y1-y2; each y has 1 axis, use min axis
            plottype='line plot'),
        'TREFHT_DJF_line': plotspec(
            vid='TREFHT_DJF_line',
            xvars=['TREFHT_DJF_lat_1'], xfunc = latvar,
            yvars=['TREFHT_DJF_lat_1'], plottype='line plot'),
        'TREFHT_DJF_contour': plotspec(
            vid='TREFHT_DJF_contour',
            xvars=['TREFHT_DJF_latlon_1'], xfunc = (lambda x: x),
            plottype='line plot'),
        #plotspec( vid='TREFHT_JJA',xvars=['TREFHT_JJA'], xfiletable=filetable1, xfunc = latvar,
        #          yvars=['TREFHT_JJA'], yfunc=(lambda y: y), plottype='line plot'),
        #plotspec(
        #    vid='ts_by_lat_old',   # suitable for filenames
        #    xfiletable=filetable1,
        #    xfunc = latvar, # function to return x axis values
        #    xvars = ['ts_lat_old'],    # names of variables or axes, args of xfunc
        #    yfiletable=filetable1, # can differ from xfiletable, e.g. comparing 2 runs
        #    yfunc = (lambda y: y), # function to return y axis values
        #    yvars = ['ts_lat_old'], # names of variables or axes, args of xfunc
        #    zfiletable=filetable1,
        #    zfunc = (lambda: None),
        #    zvars = [],         # would be needed for contour or 3D plot
        #    # ... the ?vars variable will be converted (using the filetable and
        #    # plotvars) to actual variables which become the arguments for a call
        #    # of ?func, which returns the data we write out for plotting use.
        #    plottype='line plot' ),
        'ts_by_lat': plotspec(
            vid='ts_by_lat',   # suitable for filenames
            xfunc = latvar, # function to return x axis values
            xvars = ['ts_lat_new'],    # names of variables or axes, args of xfunc
            yfunc = (lambda y: y), # function to return y axis values
            yvars = ['ts_lat_new'], # names of variables or axes, args of xfunc
            zfunc = (lambda: None),
            zvars = [],         # would be needed for contour or 3D plot
            # ... the ?vars variable will be converted (using the filetable and
            # plotvars) to actual variables which become the arguments for a call
            # of ?func, which returns the data we write out for plotting use.
            plottype='line plot' ),
        #plotspec( vid="ts_global_old",xvars=['ts_scalar_tropical_o'], xfiletable=filetable1 ),
        'ts_global': plotspec( vid="ts_global",xvars=['ts_scalar_tropical_n'] ),
        }

    # Plotspeckeys specifies what plot data we will compute and write out.
    # In the future we may add a command line option, or provide other ways to
    # define plotspeckeys.
    #plotspeckeys = [['TREFHT_DJF_2line','TREFHT_DJF_difference']]
    #plotspeckeys = ['TREFHT_DJF_2line']
    #plotspeckeys = ['NCEP_OBS_HEAT_TRANSPORT_GLOBAL_2','CAM_HEAT_TRANSPORT_ALL_1']
    #plotspeckeys = ['CAM_NCEP_HEAT_TRANSPORT_GLOBAL']
    #plotspeckeys = ['CAM_NCEP_HEAT_TRANSPORT_ALL']
    #plotspeckeys = ['T_ANN_VERT_CAM_OBS_ALL']
    #plotspeckeys = ['TREFHT_DJF_laton_ALL']
    #plotspeckeys = ['TREFHT_ANN_Npole_ALL']
    plotspeckeys = ['TREFHT_DJF']
    #plotspeckeys = ['GLOBAL_AVERAGES']

    # Find the variable names required by the plotspecs.
    varkeys = []
    for psk in plotspeckeys:
        if type(psk) is str and type(plotspecs[psk]) is list:
            psk = plotspecs[psk]
        if type(psk) is str:
            write_xml = False
            psl = [ plotspecs[psk] ]
        else:
            write_xml = True
            psl = [ plotspecs[k] for k in psk ]
            xml_name = '_'.join( [ ps._strid for ps in psl ] ) +'.xml'
            h = open( xml_name, 'w' )
            h.write("<plotdata>\n")
        for ps in psl:
            varkeys = varkeys+ps.xvars+ps.x1vars+ps.x2vars+ps.x3vars
            varkeys = varkeys+ps.yvars+ps.y1vars+ps.y2vars+ps.y3vars
            varkeys = varkeys + ps.zvars + ps.zrangevars
    for key in varkeys:
        if key in derived_variables.keys():
            varkeys = varkeys + derived_variables[key]._inputs
    varkeys = list( set(varkeys) )

    # Compute the value of every variable we need.
    varvals = {}
    # First compute all the reduced variables
    for key in varkeys:
        if key in reduced_variables.keys():
            varvals[key] = reduced_variables[key].reduce()
    # Then use the reduced variables to compute the derived variables
    #   Note that the derive() method is allowed to return a tuple.  This way
    #   we can use one function to compute what's really several variables.
    for key in varkeys:
        if key in derived_variables.keys():
            varvals[key] = derived_variables[key].derive(varvals)

    # Now use the reduced and derived variables to compute the plot data.
    for psk in plotspeckeys:
        if type(psk) is str and type(plotspecs[psk]) is list:
            psk = plotspecs[psk]
        if type(psk) is str:
            write_xml = False
            psl = [ plotspecs[psk] ]
        else:
            write_xml = True
            psl = [ plotspecs[k] for k in psk ]
            xml_name = '_'.join( [ ps._strid for ps in psl ] ) +'.xml'
            h = open( xml_name, 'w' )
            h.write("<plotdata>\n")
        varkeys = []
        for ps in psl:
            logger.info("jfp preparing data for %s", ps._strid)
            xrv = [ varvals[k] for k in ps.xvars ]
            x1rv = [ varvals[k] for k in ps.x1vars ]
            x2rv = [ varvals[k] for k in ps.x2vars ]
            x3rv = [ varvals[k] for k in ps.x3vars ]
            yrv = [ varvals[k] for k in ps.yvars ]
            y1rv = [ varvals[k] for k in ps.y1vars ]
            y2rv = [ varvals[k] for k in ps.y2vars ]
            y3rv = [ varvals[k] for k in ps.y3vars ]
            yarv = [ varvals[k] for k in ps.yavars ]
            ya1rv = [ varvals[k] for k in ps.ya1vars ]
            zrv = [ varvals[k] for k in ps.zvars ]
            zrrv = [ varvals[k] for k in ps.zrangevars ]
            xax = apply( ps.xfunc, xrv )
            x1ax = apply( ps.x1func, x1rv )
            x2ax = apply( ps.x2func, x2rv )
            x3ax = apply( ps.x3func, x3rv )
            yax = apply( ps.yfunc, yrv )
            y1ax = apply( ps.y1func, y1rv )
            y2ax = apply( ps.y2func, y2rv )
            y3ax = apply( ps.y3func, y3rv )
            yaax = apply( ps.yafunc, yarv )
            ya1ax = apply( ps.ya1func, ya1rv )
            zax = apply( ps.zfunc, zrv )
            zr = apply( ps.zrangefunc, zrrv )
            if      (xax is None or len(xrv)==0) and (x1ax is None or len(x1rv)==0)\
                and (x2ax is None or len(x2rv)==0) and (x3ax is None or len(x3rv)==0)\
                and (yax is None or len(yrv)==0) and (y1ax is None or len(y1rv)==0)\
                and (y2ax is None or len(y2rv)==0) and (y3ax is None or len(y3rv)==0)\
                and (zax is None or len(zrv)==0):
                continue
            filename = ps._strid+"_test.nc"
            value=0
            cdms2.setNetcdfShuffleFlag(value) ## where value is either 0 or 1
            cdms2.setNetcdfDeflateFlag(value) ## where value is either 0 or 1
            cdms2.setNetcdfDeflateLevelFlag(value) ## where value is an integer between 0 and 9 inclusive

            g = cdms2.open( filename, 'w' )    # later, choose a better name and a path!
            store_provenance(g)
            # Much more belongs in g, e.g. axis and graph names.
            if xax is not None and len(xrv)>0:
                xax.id = 'X'
                g.write(xax)
            if x1ax is not None and len(x1rv)>0:
                x1ax.id = 'X1'
                g.write(x1ax)
            if x2ax is not None and len(x2rv)>0:
                x2ax.id = 'X2'
                g.write(x2ax)
            if x3ax is not None and len(x3rv)>0:
                x3ax.id = 'X3'
                g.write(x3ax)
            if yax is not None and len(yrv)>0:
                yax.id = 'Y'
                g.write(yax)
            if y1ax is not None and len(y1rv)>0:
                y1ax.id = 'Y1'
                g.write(y1ax)
            if y2ax is not None and len(y2rv)>0:
                y2ax.id = 'Y2'
                g.write(y2ax)
            if y3ax is not None and len(y3rv)>0:
                y3ax.id = 'Y3'
                g.write(y3ax)
            if yaax is not None and len(yarv)>0:
                yaax.id = 'YA'
                g.write(yaax)
            if ya1ax is not None and len(ya1rv)>0:
                ya1ax.id = 'YA1'
                g.write(ya1ax)
            if zax is not None and len(zrv)>0:
                zax.id = 'Z'
                g.write(zax)
            if zr is not None:
                zr.id = 'Zrange'
                g.write(zr)
            g.presentation = ps.plottype
            # Note: For table output, it would be convenient to use a string-valued variable X
            # to specify string parts of the table.  But cdms2 doesn't support them usefully.
            # Instead we'll manage with a convention that a table row plotspec's id is the name of
            # the row, thus available to be printed in, e.g., the first column.
            if ps.plottype=="table row":
                g.rowid = ps._strid
            g.close()
            if write_xml:
                h.write( "<ncfile>"+filename+"</ncfile>\n" )

        if write_xml:
            h.write( "</plotdata>\n" )
            h.close()
Example #18
                    - Need to confirm PR unit conversion, m/s -> kg m-2 s-1 requires additional inputs

@author: durack1
"""

# Python module imports
import os,shutil,subprocess,sys
import cdms2 as cdm
# Add durolib to path
sys.path.insert(1,'/glade/u/home/durack1')
from durolib import globalAttWrite


# Set cdms preferences - maximum deflate compression, no shuffling, no complaining
cdm.setNetcdfDeflateFlag(1)
cdm.setNetcdfDeflateLevelFlag(9) ; # 1-9, min to max - Comes at heavy IO (read/write time cost)
cdm.setNetcdfShuffleFlag(0)
cdm.setCompressionWarnings(0) ; # Turn off nag messages
# Set bounds automagically
#cdm.setAutoBounds(1) ; # Use with caution

# Set build info once
buildDate = '141104'

# Create input variable lists 
uvcdatInstall = ''.join(['/glade/u/home/durack1/',buildDate,'_metrics/PCMDI_METRICS/bin/'])
data =  [
        ['atmos','NCAR-CAM5_1deg','f.e12.FAMIPC5.ne30_g16.amip.002_','/glade/p/cgd/amp/people/hannay/amwg/climo/f.e12.FAMIPC5.ne30_g16.amip.002/0.9x1.25/'],
        ['atmos','NCAR-CAM5_0p25deg','FAMIPC5_ne120_79to05_03_omp2_','/glade/p/cgd/amp/people/hannay/amwg/climo/FAMIPC5_ne120_79to05_03_omp2/0.23x0.31/'],
        ['atmos','NCAR-CAM5_0p25deg_interp_1deg','FAMIPC5_ne120_79to05_03_omp2_','/glade/p/cgd/amp/people/hannay/amwg/climo/FAMIPC5_ne120_79to05_03_omp2/0.9x1.25/']
]
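Each row of data packs a realm, a model label, a filename prefix and a climatology directory; downstream code would presumably unpack the rows along these lines (illustrative only, reusing the os import above):

for realm, model, filePrefix, climoPath in data:
    # e.g. locate that model's climatology files
    pattern = os.path.join(climoPath, filePrefix + '*.nc')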
Example #19
   def processCmdLine(self):
      parser = argparse.ArgumentParser(
         description='UV-CDAT Climate Modeling Diagnostics', 
         usage='%(prog)s --path [options]')

      parser.add_argument('--path', '-p', action='append', nargs=1, 
         help="Path(s) to dataset(s). This is required.  If two paths need different filters, set one here and one in path2.")
      parser.add_argument('--path2', '-q', action='append', nargs=1, 
         help="Path to a second dataset.")
      parser.add_argument('--obspath', action='append', nargs=1,
                          help="Path to an observational dataset")
      parser.add_argument('--cachepath', nargs=1,
         help="Path for temporary and cachced files. Defaults to /tmp")
#      parser.add_argument('--realm', '-r', nargs=1, choices=self.realm_types,
#         help="The realm type. Current valid options are 'land' and 'atmosphere'")
      parser.add_argument('--filter', '-f', nargs=1, 
         help="A filespec filter. This will be applied to the dataset path(s) (--path option) to narrow down file choices.")
      parser.add_argument('--filter2', '-g', nargs=1, 
         help="A filespec filter. This will be applied to the second dataset path (--path2 option) to narrow down file choices.")
      parser.add_argument('--new_filter', '-F', action='append', nargs=1, 
         help="A filespec filter. This will be applied to the corresponding dataset path to narrow down file choices.")
      parser.add_argument('--packages', '--package', '-k', nargs='+', 
         help="The diagnostic packages to run against the dataset(s). Multiple packages can be specified.")
      parser.add_argument('--sets', '--set', '-s', nargs='+', 
         help="The sets within a diagnostic package to run. Multiple sets can be specified. If multiple packages were specified, the sets specified will be searched for in each package") 
      parser.add_argument('--vars', '--var', '-v', nargs='+', 
         help="Specify variables of interest to process. The default is all variables which can also be specified with the keyword ALL") 
      parser.add_argument('--list', '-l', nargs=1, choices=['sets', 'vars', 'variables', 'packages', 'seasons', 'regions', 'translations', 'options'], 
         help="Determine which packages, sets, regions, variables, and variable options are available")
         # maybe eventually add compression level too....
      parser.add_argument('--compress', nargs=1, choices=['no', 'yes'],
         help="Turn off netCDF compression. This can be required for other utilities to be able to process the output files (e.g. parallel netCDF based tools") #no compression, add self state

      parser.add_argument('--outputpre', nargs=1,
         help="Specify an output filename prefix to be prepended to all file names created internally. For example --outputpre myout might generate myout-JAN.nc, etc")
      parser.add_argument('--outputpost', nargs=1,
         help="Specify an output filename postfix to be appended to all file names created internally. For example --outputpost _OBS might generate set1-JAN_OBS.nc, etc")
      parser.add_argument('--outputdir', '-O', nargs=1,
         help="Directory in which output files will be written." )

      parser.add_argument('--seasons', nargs='+', choices=all_seasons,
         help="Specify which seasons to generate climatoogies for")
      parser.add_argument('--years', nargs='+',
         help="Specify which years to include when generating climatologies") 
      parser.add_argument('--months', nargs='+', choices=all_months,
         help="Specify which months to generate climatologies for")
      parser.add_argument('--climatologies', '-c', nargs=1, choices=['no','yes'],
         help="Specifies whether or not climatologies should be generated")
      parser.add_argument('--plots', '-t', nargs=1, choices=['no','yes'],
         help="Specifies whether or not plots should be generated")
      parser.add_argument('--plottype', nargs=1)
      parser.add_argument('--precomputed', nargs=1, choices=['no','yes'], 
         help="Specifies whether standard climatologies are stored with the dataset (*-JAN.nc, *-FEB.nc, ... *-DJF.nc, *-year0.nc, etc")
      parser.add_argument('--json', '-j', nargs=1, choices=['no', 'yes'],
         help="Produce JSON output files as part of climatology/diags generation") # same
      parser.add_argument('--netcdf', '-n', nargs=1, choices=['no', 'yes'],
         help="Produce NetCDF output files as part of climatology/diags generation") # same
      parser.add_argument('--xml', '-x', nargs=1, choices=['no', 'yes'],
         help="Produce XML output files as part of climatology/diags generation")
      parser.add_argument('--seasonally', action='store_true',
         help="Produce climatologies for all of the defined seasons. To get a list of seasons, run --list seasons")
      parser.add_argument('--monthly', action='store_true',
         help="Produce climatologies for all predefined months")
      parser.add_argument('--yearly', action='store_true',
         help="Produce annual climatogolies for all years in the dataset")
      parser.add_argument('--timestart', nargs=1,
         help="Specify the starting time for the dataset, such as 'months since Jan 2000'")
      parser.add_argument('--timebounds', nargs=1, choices=['daily', 'monthly', 'yearly'],
         help="Specify the time bounds for the dataset")
      parser.add_argument('--verbose', '-V', action='count',
         help="Increase the verbosity level. Each -V increases the verbosity further.") # count
      parser.add_argument('--name', action='append', nargs=1,
         help="Specify option names for the datasets for plot titles, etc") #optional name for the set
      # This will be the standard list of region names NCAR has
      parser.add_argument('--regions', '--region', nargs='+', choices=all_regions.keys(),
         help="Specify a geographical region of interest. Note: Multi-word regions must be quoted, e.g. 'Central Canada'")
      parser.add_argument('--starttime', nargs=1,
         help="Specify a start time in the dataset")
      parser.add_argument('--endtime', nargs=1, 
         help="Specify an end time in the dataset")
      parser.add_argument('--translate', nargs='?', default='y',
         help="Enable translation for obs sets to datasets. Optionally provide a colon-separated input-to-output list, e.g. DSVAR1:OBSVAR1")
      parser.add_argument('--varopts', nargs='+',
         help="Variable auxiliary options")



      args = parser.parse_args()

      if(args.list != None):
         if args.list[0] == 'translations':
            print "Default variable translations: "
            self.listTranslations()
            quit()
         if args.list[0] == 'regions':
            print "Available geographical regions: ", all_regions.keys()
            quit()

         if args.list[0] == 'seasons':
            print "Available seasons: ", all_seasons
            quit()

         if args.list[0] == 'packages':
            print "Listing available packages:"
            print self.all_packages.keys()
            quit()

         
         if args.list[0] == 'sets':
            if args.packages == None:
               print "Please specify package before requesting available diags sets"
               quit()
            for p in args.packages:
               print 'Available sets for package ', p, ':'
               sets = self.listSets(p)
               keys = sets.keys()
               for k in keys:
                  print 'Set',k, ' - ', sets[k]
            quit()
               
         if args.list[0] == 'variables' or args.list[0] == 'vars':
            if args.path != None:
               for i in args.path:
                  self._opts['path'].append(i[0])
            else:
               print 'Must provide a dataset when requesting a variable listing'
               quit()
            self.listVariables(args.packages, args.sets)
            quit()
         if args.list[0] == 'options':
            if args.path!= None:
               for i in args.path:
                  self._opts['path'].append(i[0])
            else:
               print 'Must provide a dataset when requesting a variable options listing'
               quit()
            self.listVarOptions(args.packages, args.sets, args.vars)
            quit()

      # Generally if we've gotten this far, it means no --list was specified. If we don't have
      # at least a path, we should exit.
      if(args.path != None):
         for i in args.path:
            self._opts['path'].append(i[0])
      else:
         print 'Must specify a path or the --list option at a minimum.'
         print 'For help, type "diags --help".'
         quit()
      if(args.path2 != None):
         for i in args.path2:
            self._opts['path2'].append(i[0])

      if(args.obspath != None):
         for i in args.obspath:
            self._opts['obspath'].append(i[0])

      # TODO: Should some pre-defined filters be "nameable" here?
      if(args.filter != None): # Only supports one filter argument, see filter2.
         self._opts['filter'] = args.filter[0]
         self._opts['user_filter'] = True
#         for i in args.filter:
#            self._opts['filter'].append(i[0])
      if(args.filter2 != None): # This is a second filter argument.
         self._opts['filter2'] = args.filter2[0]
         self._opts['user_filter'] = True
      if(args.new_filter != None):  # like filter but with multiple arguments
         for i in args.new_filter:
            self._opts['new_filter'].append(i[0])

      if(args.cachepath != None):
         self._opts['cachepath'] = args.cachepath[0]

      self._opts['seasonally'] = args.seasonally
      self._opts['monthly'] = args.monthly

      if(args.varopts != None):
         self._opts['varopts'] = args.varopts

      if(args.starttime != None):
         self._opts['start'] = args.starttime[0]

      if(args.endtime != None):
         self._opts['end'] = args.endtime[0]

      # I checked; these are global and it doesn't seem to matter if you import cdms2 multiple times;
      # they are still set after you set them once in the python process.
      if(args.compress != None):
         if(args.compress[0] == 'no'):
            self._opts['compress'] = False
         else:
            self._opts['compress'] = True


      if self._opts['compress'] == True:
         print 'Enabling compression for output netCDF files'
         cdms2.setNetcdfShuffleFlag(1)
         cdms2.setNetcdfDeflateFlag(1)
         cdms2.setNetcdfDeflateLevelFlag(9)
      else:
         print 'Disabling compression for output netCDF files'
         cdms2.setNetcdfShuffleFlag(0)
         cdms2.setNetcdfDeflateFlag(0)
         cdms2.setNetcdfDeflateLevelFlag(0)
         

      if(args.json != None):
         if(args.json[0] == 'no'):
            self._opts['json'] = False
         else:
            self._opts['json'] = True
      if(args.xml != None):
         if(args.xml[0] == 'no'):
            self._opts['xml'] = False
         else:
            self._opts['xml'] = True

      if(args.netcdf != None):
         if(args.netcdf[0] == 'no'):
            self._opts['netcdf'] = False
         else:
            self._opts['netcdf'] = True

      if(args.plots != None):
         if(args.plots[0].lower() == 'no' or args.plots[0] == 0):
            self._opts['plots'] = False
         else:
            self._opts['plots'] = True

      if(args.climatologies != None):
         if(args.climatologies[0] == 'no'):
            self._opts['climatologies'] = False
         else:
            self._opts['climatologies'] = True

      self._opts['verbose'] = args.verbose

      if(args.name != None):
         for i in args.name:
            self._opts['dsnames'].append(i[0])

      # Help create output file names
      if(args.outputpre != None):
         self._opts['outputpre'] = args.outputpre[0]
      if(args.outputpost != None):
         self._opts['outputpost'] = args.outputpost[0]

      # Output directory
      if(args.outputdir != None):
         if not os.path.isdir(args.outputdir[0]):
            print "ERROR, output directory",args.outputdir[0],"does not exist!"
            quit()
         self._opts['outputdir'] = args.outputdir[0]

      if(args.translate != 'y'):
         print args.translate
         print self._opts['translate']
         quit()
      # Timestart assumes a string like "months since 2000". I can't find documentation on
      # toRelativeTime() so I have no idea how to check for valid input
      # This is required for some of the land model sets I've seen
      if(args.timestart != None):
         self._opts['reltime'] = args.timestart
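      # Hedged sketch of how one might sanity-check the string with cdtime
      # (assumes cdtime.reltime semantics; not part of the original code):
      #   import cdtime
      #   try:
      #       cdtime.reltime(0, args.timestart[0]).tocomp()
      #   except Exception:
      #       print 'Unparseable relative-time string:', args.timestart[0]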
         
      # cdutil.setTimeBounds{bounds}(variable)
      if(args.timebounds != None):
         self._opts['bounds'] = args.timebounds

      # Check if a user specified package actually exists
      # Note: This is case sensitive.....
      if(args.packages != None):
         plist = []
         for x in args.packages:
            if x.upper() in self.all_packages.keys():
               plist.append(x)
            elif x in self.all_packages.keys():
               plist.append(x.lower())

         if plist == []:
            print 'Package name(s) ', args.packages, ' not valid'
            print 'Valid package names: ', self.all_packages.keys()
            quit()
         else:
            self._opts['packages'] = plist


      # TODO: Requires exact case; probably make this more user friendly and look for mixed case
      if(args.regions != None):
         rlist = []
         for x in args.regions:
            if x in all_regions.keys():
               rlist.append(x)
         print 'REGIONS: ', rlist
         self._opts['regions'] = rlist

      # Given user-selected packages, check for user specified sets
      # Note: If multiple packages have the same set names, then they are all added to the list.
      # This might be bad since there is no differentiation of lwmg['id==set'] and lmwg2['id==set']
      if(self._opts['packages'] == None and args.sets != None):
         print 'No package specified'
         self._opts['sets'] = args.sets

      if(args.sets != None and self._opts['packages'] != None):
         # unfortunately, we have to go through all of this....
         # there should be a non-init class method to list sets/packages/etc,
         # i.e., a dictionary perhaps?
         sets = []
         import metrics.fileio.filetable as ft
         import metrics.fileio.findfiles as fi
         import metrics.packages.diagnostic_groups 
         package = self._opts['packages']
         if package[0].lower() == 'lmwg':
            import metrics.packages.lmwg.lmwg
         elif package[0].lower()=='amwg':
            import metrics.packages.amwg.amwg
         dtree = fi.dirtree_datafiles(self, pathid=0)
         filetable = ft.basic_filetable(dtree, self)
         dm = metrics.packages.diagnostic_groups.diagnostics_menu()

         pclass = dm[package[0].upper()]()

         slist = pclass.list_diagnostic_sets()
         keys = slist.keys()
         keys.sort()
         for k in keys:
            fields = k.split()
            for user in args.sets:
               if user == fields[0]:
                  sets.append(user)
         self._opts['sets'] = sets
         if sets != args.sets:
            print 'sets requested ', args.sets
            print 'sets available: ', slist
            exit(1)

      # TODO: Check against an actual list of variables from the set
      if args.vars != None:
         self._opts['vars'] = args.vars

      # If --yearly is set, then we will add 'ANN' to the list of climatologies
      if(args.yearly == True):
         self._opts['yearly'] = True
         self._opts['times'].append('ANN')

      # If --monthly is set, we add all months to the list of climatologies
      if(args.monthly == True):
         self._opts['monthly'] = True
         self._opts['times'].extend(all_months)

      # If --seasonally is set, we add all 4 seasons to the list of climatologies
      if(args.seasonally == True):
         self._opts['seasonally'] = True
         self._opts['times'].extend(all_seasons)

      # This allows specific individual months to be added to the list of climatologies
      if(args.months != None):
         if(args.monthly == True):
            print "Please specify just one of --monthly or --months"
            quit()
         else:
            mlist = [x for x in all_months if x in args.months]
            self._opts['times'] = self._opts['times']+mlist
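      # Worked example (comment only, not original code): '--seasonally --months JAN FEB'
      # leaves self._opts['times'] == list(all_seasons) + ['JAN', 'FEB'] once both blocks run.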

      # This allows specific individual years to be added to the list of climatologies.
      # Note: Checking for valid input is impossible until we look at the dataset
      # This has to be special cased since typically someone will be saying
      # "Generate climatologies for seasons for years X, Y, and Z of my dataset"
      if(args.years != None):
         if(args.yearly == True):
            print "Please specify just one of --yearly or --years"
            quit()
         else:
            self._opts['years'] = args.years

      if(args.seasons != None):
         if(args.seasonally == True):
            print "Please specify just one of --seasonally or --seasons"
            quit()
         else:
            slist = [x for x in all_seasons if x in args.seasons]
            self._opts['times'] = self._opts['times']+slist
def monthlyAvg(variable, indir, outdir, minYear=2006, maxYear=2059):
    nodata=1.e20
    minVar=273.15 - 40
    maxVar=273.15 + 100
    unitsAvg=None
    # assume data are aligned
    pattern=re.compile('.*_BNU-ESM_.*') # problem on this grid (use shiftGrid to create a new version, discard this one).
    # maskpattern = re.compile('.*EC-EARTH.*') # nodate was set to 273.15
    
    # netCDF compression settings (set all three flags to 0 for netCDF3 output)
    cdms2.setNetcdfShuffleFlag(1)
    cdms2.setNetcdfDeflateFlag(1)
    cdms2.setNetcdfDeflateLevelFlag(3)

    print minYear, maxYear

    dateList=[]
    for iyear in range(minYear, maxYear+1):
        for imonth in range(1,13):
            dateList.append('{0}{1:02}'.format(iyear,imonth))

    for idate in dateList:
        print 'processing date ',idate
        # get list of files for this iyear, excluding one file:
        # NOTE: 'select' is a module-level filename filter assumed to be defined
        # elsewhere in the original script (it is not defined in this excerpt).
        lstFiles = [f for f in glob.glob(indir+'/{0}_*{2}*_{1}.nc'.format(variable, idate, select)) if not pattern.match(f) ]
        print indir+'/{0}_*{2}*_{1}.nc'.format(variable, idate, select)

        if len(lstFiles) > 1:
            print 'Model ensemble mean for date {0} with {1} files'.format(idate, len(lstFiles))
            # accumulate data
            accumVar=None
            accumN=None

            for iFile in lstFiles:
                print 'processing file ', iFile
                thisFile = cdms2.open(iFile)
                dimVar = numpy.squeeze(thisFile[variable][:]).shape # drop the singleton time dimension if present
                thisVar = numpy.ravel(thisFile[variable][:])
 
                if accumVar is None:
                    accumVar  = numpy.zeros( dimVar[0]*dimVar[1] ) + nodata
                    accumN    = numpy.zeros( dimVar[0]*dimVar[1] )
                    unitsAvg  = thisFile[variable].units
                    oneMatrix = numpy.ones(dimVar[0]*dimVar[1])
                    maxEnsemble = thisVar.copy()
                    minEnsemble = thisVar.copy()

                # add to accumVar if accumVar is not no-data, and incoming data are in the range
                wtadd = (thisVar >= minVar ) * (thisVar <= maxVar) * (accumVar < nodata)
                # if the value in accumVar is no-data, replace it.
                wtreplace = (thisVar >= minVar ) * (thisVar <= maxVar) * ( accumVar >= nodata)
                # min, max
                wmax = (thisVar >= maxEnsemble) * (thisVar < nodata) * (thisVar >= minVar) * (thisVar <= maxVar)
                wmaxReplace = (maxEnsemble >= nodata) * (thisVar < nodata) * (thisVar >= minVar)
                wmin = (thisVar <= minEnsemble) * (thisVar >= minVar) * (thisVar <= maxVar) * (maxEnsemble < nodata)
                wminReplace = (minEnsemble >= nodata) * (thisVar < nodata) * (thisVar >= minVar)
                if wtadd.any():
                    accumVar[wtadd] = accumVar[wtadd] + thisVar[wtadd]
                    accumN[wtadd] = accumN[wtadd] + oneMatrix[wtadd]
                if wtreplace.any():
                    accumVar[wtreplace] = thisVar[wtreplace]
                    accumN[wtreplace] = oneMatrix[wtreplace]
                if wmax.any():
                    maxEnsemble[wmax] = thisVar[wmax]
                if wmin.any():
                    minEnsemble[wmin] = thisVar[wmin]
                if wmaxReplace.any():
                    maxEnsemble[wmaxReplace] = thisVar[wmaxReplace]
                if wminReplace.any():
                    minEnsemble[wminReplace] = thisVar[wminReplace]

                thisFile.close()

            # now compute the average, where accumN is not 0
            wnz = accumN > 0
            average = numpy.zeros(dimVar[0] * dimVar[1]) + nodata
            if wnz.any():
                average[wnz] = accumVar[wnz] / accumN[wnz]

            # and let's compute the std
            std = numpy.zeros(dimVar[0] * dimVar[1])   # accumulates squared deviations
            stdN = numpy.zeros(dimVar[0] * dimVar[1])
            for iFile in lstFiles:
                thisFile = cdms2.open(iFile)
                thisVar = numpy.ravel(thisFile[variable][:])
                wtadd = (thisVar < nodata ) * (average < nodata ) * (thisVar >= minVar) * (thisVar <= maxVar) # average should be clean, no need to implement a 'replace'
                if wtadd.any():
                    # bug fix: accumulate the squared deviations; the original assignment
                    # overwrote them for each file, keeping only the last file's contribution
                    std[wtadd] = std[wtadd] + (average[wtadd] - thisVar[wtadd]) * (average[wtadd] - thisVar[wtadd])
                    stdN[wtadd] = stdN[wtadd] + 1.0
                thisFile.close()

            wtstd = stdN > 0
            std[wtstd] = numpy.sqrt( std[wtstd]/stdN[wtstd] )
            std[~wtstd] = nodata   # cells with no valid contributions keep the no-data value

            # save to disk
            outfilename='{0}/modelmean_{1}_{2}.nc'.format(outdir, variable, idate)
            (referenceGrid, latAxis, lonAxis, latBounds, lonBounds) = makeGrid()
            avgOut = cdms2.createVariable(numpy.reshape(average,dimVar), typecode='f', id=variable, fill_value=1.e20, grid=referenceGrid, copyaxes=1, attributes=dict(long_name='model average for {0} at date {1}'.format(variable, idate), units=unitsAvg))
            avgOut.setAxisList((latAxis, lonAxis))

            accumOut = cdms2.createVariable(numpy.reshape(accumN,dimVar), typecode='i', id='count_{0}'.format(variable), fill_value=1.e20, grid=referenceGrid, copyaxes=1, attributes=dict(long_name='count of valid for {0} at date {1}'.format(variable, idate), units=None))
            accumOut.setAxisList((latAxis, lonAxis))

            # underscore in the id: a space is not a valid netCDF variable name
            maxEns = cdms2.createVariable(numpy.reshape(maxEnsemble,dimVar), typecode='f', id='max_{0}'.format(variable), fill_value=1.e20, grid=referenceGrid, copyaxes=1, attributes=dict(long_name='max ensemble for {0} at date {1}'.format(variable, idate), units=unitsAvg))
            maxEns.setAxisList((latAxis, lonAxis))

            minEns = cdms2.createVariable(numpy.reshape(minEnsemble,dimVar), typecode='f', id='min_{0}'.format(variable), fill_value=1.e20, grid=referenceGrid, copyaxes=1, attributes=dict(long_name='min ensemble for {0} at date {1}'.format(variable, idate), units=unitsAvg))
            minEns.setAxisList((latAxis, lonAxis))

            stdVar = cdms2.createVariable(numpy.reshape(std,dimVar), typecode='f', id='std_{0}'.format(variable), fill_value=1.e20, grid=referenceGrid, copyaxes=1, attributes=dict(long_name='model std for {0} at date {1}'.format(variable, idate), units=unitsAvg))
            stdVar.setAxisList((latAxis, lonAxis))

            if os.path.exists(outfilename): os.remove(outfilename)
            print 'saving to file ', outfilename
            outfile = cdms2.open(outfilename, 'w')
            outfile.write(avgOut)
            outfile.write(accumOut)
            outfile.write(minEns)
            outfile.write(maxEns)
            outfile.write(stdVar)
            outfile.history='Created with '+__file__.encode('utf8')
            outfile.close()
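A minimal usage sketch for monthlyAvg above, assuming the module-level names it relies on (the re/glob/os/numpy/cdms2 imports, the 'select' filename filter, and a makeGrid() helper) are defined as in the original script; the paths, filter value, and variable name here are illustrative only:

import re, glob, os
import numpy, cdms2
select = 'rcp85'   # assumed module-level filename filter used in the glob pattern
# makeGrid() must also be defined, as in the original module
monthlyAvg('tas', '/data/cmip5/monthly', '/data/cmip5/ensmean', minYear=2006, maxYear=2007)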
Example #21
0
@author: durack1
"""

import datetime,gc,os,sys
import cdms2 as cdm
import cdutil as cdu
import numpy as np
import MV2 as mv
sys.path.append('/export/durack1/git/durolib/durolib')
from durolib import globalAttWrite

# netCDF compression (use 0 for netCDF3)
cdm.setNetcdfShuffleFlag(1)
cdm.setNetcdfDeflateFlag(1)
cdm.setNetcdfDeflateLevelFlag(9) ; # 9(shuf=1) 466.6KB; 9(shuf=0) 504.1KB; 4(shuf=0) 822.93KB;
cdm.setAutoBounds(1)

#%% Change directory
os.chdir('/work/durack1/Shared/obs_data/WOD18/')
sourceDir = '190312'

#%%
#del(asc,count,depths,e,lat_ind,latitude,line,lon_ind,longitude,pi,tmp)
#del(asc,count,depths,lat_ind,latitude,line,lon_ind,longitude,tmp)

#%% Declare two grids
grids = {'1deg':'01','0p25deg':'04'}

for count,grid in enumerate(grids):
    gridId = grid
Example #22
0
File: base.py Project: PCMDI/pcmdi_metrics
    def setup_cdms2(self):
        cdms2.setNetcdfShuffleFlag(0)  # Argument is either 0 or 1
        cdms2.setNetcdfDeflateFlag(0)  # Argument is either 0 or 1
        cdms2.setNetcdfDeflateLevelFlag(0)  # Argument is int between 0 and 9
Example #23
0
    def processCmdLine(self):
        parser = argparse.ArgumentParser(
            description="UV-CDAT Climate Modeling Diagnostics", usage="%(prog)s --path1 [options]"
        )

        parser.add_argument(
            "--path",
            "-p",
            action="append",
            nargs=1,
            help="Path(s) to dataset(s). This is required.  If two paths need different filters, set one here and one in path2.",
        )
        parser.add_argument("--path2", "-q", action="append", nargs=1, help="Path to a second dataset.")
        parser.add_argument("--obspath", action="append", nargs=1, help="Path to an observational dataset")
        parser.add_argument("--cachepath", nargs=1, help="Path for temporary and cachced files. Defaults to /tmp")
        #      parser.add_argument('--realm', '-r', nargs=1, choices=self.realm_types,
        #         help="The realm type. Current valid options are 'land' and 'atmosphere'")
        parser.add_argument(
            "--filter",
            "-f",
            nargs=1,
            help="A filespec filter. This will be applied to the dataset path(s) (--path option) to narrow down file choices.",
        )
        parser.add_argument(
            "--filter2",
            "-g",
            nargs=1,
            help="A filespec filter. This will be applied to the second dataset path (--path2 option) to narrow down file choices.",
        )
        parser.add_argument(
            "--new_filter",
            "-F",
            action="append",
            nargs=1,
            help="A filespec filter. This will be applied to the corresponding dataset path to narrow down file choices.",
        )
        parser.add_argument(
            "--packages",
            "--package",
            "-k",
            nargs="+",
            help="The diagnostic packages to run against the dataset(s). Multiple packages can be specified.",
        )
        parser.add_argument(
            "--sets",
            "--set",
            "-s",
            nargs="+",
            help="The sets within a diagnostic package to run. Multiple sets can be specified. If multiple packages were specified, the sets specified will be searched for in each package",
        )
        parser.add_argument(
            "--vars",
            "--var",
            "-v",
            nargs="+",
            help="Specify variables of interest to process. The default is all variables which can also be specified with the keyword ALL",
        )
        parser.add_argument(
            "--list",
            "-l",
            nargs=1,
            choices=["sets", "vars", "variables", "packages", "seasons", "regions", "translations", "options"],
            help="Determine which packages, sets, regions, variables, and variable options are available",
        )
        # maybe eventually add compression level too....
        parser.add_argument(
            "--compress",
            nargs=1,
            choices=["no", "yes"],
            help="Turn off netCDF compression. This can be required for other utilities to be able to process the output files (e.g. parallel netCDF based tools",
        )  # no compression, add self state

        parser.add_argument(
            "--outputpre",
            nargs=1,
            help="Specify an output filename prefix to be prepended to all file names created internally. For example --outputpre myout might generate myout-JAN.nc, etc",
        )
        parser.add_argument(
            "--outputpost",
            nargs=1,
            help="Specify an output filename postfix to be appended to all file names created internally. For example --outputpost _OBS might generate set1-JAN_OBS.nc, etc",
        )
        parser.add_argument("--outputdir", "-O", nargs=1, help="Directory in which output files will be written.")

        parser.add_argument(
            "--seasons", nargs="+", choices=all_seasons, help="Specify which seasons to generate climatoogies for"
        )
        parser.add_argument("--years", nargs="+", help="Specify which years to include when generating climatologies")
        parser.add_argument(
            "--months", nargs="+", choices=all_months, help="Specify which months to generate climatologies for"
        )
        parser.add_argument(
            "--climatologies",
            "-c",
            nargs=1,
            choices=["no", "yes"],
            help="Specifies whether or not climatologies should be generated",
        )
        parser.add_argument(
            "--plots", "-t", nargs=1, choices=["no", "yes"], help="Specifies whether or not plots should be generated"
        )
        parser.add_argument("--plottype", nargs=1)
        parser.add_argument(
            "--precomputed",
            nargs=1,
            choices=["no", "yes"],
            help="Specifies whether standard climatologies are stored with the dataset (*-JAN.nc, *-FEB.nc, ... *-DJF.nc, *-year0.nc, etc",
        )
        parser.add_argument(
            "--json",
            "-j",
            nargs=1,
            choices=["no", "yes"],
            help="Produce JSON output files as part of climatology/diags generation",
        )  # same
        parser.add_argument(
            "--netcdf",
            "-n",
            nargs=1,
            choices=["no", "yes"],
            help="Produce NetCDF output files as part of climatology/diags generation",
        )  # same
        parser.add_argument(
            "--xml",
            "-x",
            nargs=1,
            choices=["no", "yes"],
            help="Produce XML output files as part of climatology/diags generation",
        )
        parser.add_argument(
            "--seasonally",
            action="store_true",
            help="Produce climatologies for all of the defined seasons. To get a list of seasons, run --list seasons",
        )
        parser.add_argument("--monthly", action="store_true", help="Produce climatologies for all predefined months")
        parser.add_argument(
            "--yearly", action="store_true", help="Produce annual climatogolies for all years in the dataset"
        )
        parser.add_argument(
            "--timestart", nargs=1, help="Specify the starting time for the dataset, such as 'months since Jan 2000'"
        )
        parser.add_argument(
            "--timebounds",
            nargs=1,
            choices=["daily", "monthly", "yearly"],
            help="Specify the time boudns for the dataset",
        )
        parser.add_argument(
            "--verbose",
            "-V",
            action="count",
            help="Increase the verbosity level. Each -v option increases the verbosity more.",
        )  # count
        parser.add_argument(
            "--name", action="append", nargs=1, help="Specify option names for the datasets for plot titles, etc"
        )  # optional name for the set
        # This will be the standard list of region names NCAR has
        parser.add_argument(
            "--regions",
            "--region",
            nargs="+",
            choices=all_regions.keys(),
            help="Specify a geographical region of interest. Note: Multi-word regions need quoted, e.g. 'Central Canada'",
        )
        parser.add_argument("--starttime", nargs=1, help="Specify a start time in the dataset")
        parser.add_argument("--endtime", nargs=1, help="Specify an end time in the dataset")
        parser.add_argument(
            "--translate",
            nargs="?",
            default="y",
            help="Enable translation for obs sets to datasets. Optional provide a colon separated input to output list e.g. DSVAR1:OBSVAR1",
        )
        parser.add_argument("--varopts", nargs="+", help="Variable auxillary options")

        args = parser.parse_args()

        if args.list != None:
            if args.list[0] == "translations":
                print "Default variable translations: "
                self.listTranslations()
                quit()
            if args.list[0] == "regions":
                print "Available geographical regions: ", all_regions.keys()
                quit()

            if args.list[0] == "seasons":
                print "Available seasons: ", all_seasons
                quit()

            if args.list[0] == "packages":
                print "Listing available packages:"
                print self.all_packages.keys()
                quit()

            if args.list[0] == "sets":
                if args.packages == None:
                    print "Please specify package before requesting available diags sets"
                    quit()
                for p in args.packages:
                    print "Avaialble sets for package ", p, ":"
                    sets = self.listSets(p)
                    keys = sets.keys()
                    for k in keys:
                        print "Set", k, " - ", sets[k]
                quit()

            if args.list[0] == "variables" or args.list[0] == "vars":
                if args.path != None:
                    for i in args.path:
                        self._opts["path"].append(i[0])
                else:
                    print "Must provide a dataset when requesting a variable listing"
                    quit()
                self.listVariables(args.packages, args.sets)
                quit()
            if args.list[0] == "options":
                if args.path != None:
                    for i in args.path:
                        self._opts["path"].append(i[0])
                else:
                    print "Must provide a dataset when requesting a variable listing"
                    quit()
                self.listVarOptions(args.packages, args.sets, args.vars)
                quit()

        # Generally if we've gotten this far, it means no --list was specified. If we don't have
        # at least a path, we should exit.
        if args.path != None:
            for i in args.path:
                self._opts["path"].append(i[0])
        else:
            print "Must specify a path or the --list option at a minimum."
            print 'For help, type "diags --help".'
            quit()
        if args.path2 != None:
            for i in args.path2:
                self._opts["path2"].append(i[0])

        if args.obspath != None:
            for i in args.obspath:
                self._opts["obspath"].append(i[0])

        # TODO: Should some pre-defined filters be "nameable" here?
        if args.filter != None:  # Only supports one filter argument, see filter2.
            self._opts["filter"] = args.filter[0]
            self._opts["user_filter"] = True
        #         for i in args.filter:
        #            self._opts['filter'].append(i[0])
        if args.filter2 != None:  # This is a second filter argument.
            self._opts["filter2"] = args.filter2[0]
            self._opts["user_filter"] = True
        if args.new_filter != None:  # like filter but with multiple arguments
            for i in args.new_filter:
                self._opts["new_filter"].append(i[0])

        if args.cachepath != None:
            self._opts["cachepath"] = args.cachepath[0]

        self._opts["seasonally"] = args.seasonally
        self._opts["monthly"] = args.monthly

        if args.starttime != None:
            self._opts["start"] = args.starttime[0]

        if args.endtime != None:
            self._opts["end"] = args.endtime[0]

        # I checked; these are global and it doesn't seem to matter if you import cdms2 multiple times;
        # they are still set after you set them once in the python process.
        if args.compress != None:
            if args.compress[0] == "no":
                self._opts["compress"] = False
            else:
                self._opts["compress"] = True

        if self._opts["compress"] == True:
            print "Enabling compression for output netCDF files"
            cdms2.setNetcdfShuffleFlag(1)
            cdms2.setNetcdfDeflateFlag(1)
            cdms2.setNetcdfDeflateLevelFlag(9)
        else:
            print "Disabling compression for output netCDF files"
            cdms2.setNetcdfShuffleFlag(0)
            cdms2.setNetcdfDeflateFlag(0)
            cdms2.setNetcdfDeflateLevelFlag(0)

        if args.json != None:
            if args.json[0] == "no":
                self._opts["json"] = False
            else:
                self._opts["json"] = True
        if args.xml != None:
            if args.xml[0] == "no":
                self._opts["xml"] = False
            else:
                self._opts["xml"] = True

        if args.netcdf != None:
            if args.netcdf[0] == "no":
                self._opts["netcdf"] = False
            else:
                self._opts["netcdf"] = True

        if args.plots != None:
            if args.plots[0].lower() == "no" or args.plots[0] == 0:
                self._opts["plots"] = False
            else:
                self._opts["plots"] = True

        if args.climatologies != None:
            if args.climatologies[0] == "no":
                self._opts["climatologies"] = False
            else:
                self._opts["climatologies"] = True

        self._opts["verbose"] = args.verbose

        if args.name != None:
            for i in args.name:
                self._opts["dsnames"].append(i[0])

        # Help create output file names
        if args.outputpre != None:
            self._opts["outputpre"] = args.outputpre[0]
        if args.outputpost != None:
            self._opts["outputpost"] = args.outputpost[0]

        # Output directory
        if args.outputdir != None:
            if not os.path.isdir(args.outputdir[0]):
                print "ERROR, output directory", args.outputdir[0], "does not exist!"
                quit()
            self._opts["outputdir"] = args.outputdir[0]

        if args.translate != "y":
            print args.translate
            print self._opts["translate"]
            quit()
        # Timestart assumes a string like "months since 2000". I can't find documentation on
        # toRelativeTime() so I have no idea how to check for valid input
        # This is required for some of the land model sets I've seen
        if args.timestart != None:
            self._opts["reltime"] = args.timestart

        # cdutil.setTimeBounds{bounds}(variable)
        if args.timebounds != None:
            self._opts["bounds"] = args.timebounds

        # Check if a user specified package actually exists
        # Note: This is case sensitive.....
        if args.packages != None:
            plist = []
            for x in args.packages:
                if x.upper() in self.all_packages.keys():
                    plist.append(x)
                elif x in self.all_packages.keys():
                    plist.append(x.lower())

            if plist == []:
                print "Package name(s) ", args.packages, " not valid"
                print "Valid package names: ", self.all_packages.keys()
                quit()
            else:
                self._opts["packages"] = plist

        # TODO: Requires exact case; probably make this more user friendly and look for mixed case
        if args.regions != None:
            rlist = []
            for x in args.regions:
                if x in all_regions.keys():
                    rlist.append(x)
            print "REGIONS: ", rlist
            self._opts["regions"] = rlist

        # Given user-selected packages, check for user specified sets
        # Note: If multiple packages have the same set names, then they are all added to the list.
        # This might be bad since there is no differentiation of lwmg['id==set'] and lmwg2['id==set']
        if self._opts["packages"] == None and args.sets != None:
            print "No package specified"
            self._opts["sets"] = args.sets

        if args.sets != None and self._opts["packages"] != None:
            # unfortunately, we have to go through all of this....
            # there should be a non-init class method to list sets/packages/etc,
            # i.e., a dictionary perhaps?
            sets = []
            import metrics.fileio.filetable as ft
            import metrics.fileio.findfiles as fi
            import metrics.packages.diagnostic_groups

            package = self._opts["packages"]
            if package[0].lower() == "lmwg":
                import metrics.packages.lmwg.lmwg
            elif package[0].lower() == "amwg":
                import metrics.packages.amwg.amwg
            dtree = fi.dirtree_datafiles(self, pathid=0)
            filetable = ft.basic_filetable(dtree, self)
            dm = metrics.packages.diagnostic_groups.diagnostics_menu()

            pclass = dm[package[0].upper()]()

            slist = pclass.list_diagnostic_sets()
            keys = slist.keys()
            keys.sort()
            for k in keys:
                fields = k.split()
                for user in args.sets:
                    if user == fields[0]:
                        sets.append(user)
            self._opts["sets"] = sets
            if sets != args.sets:
                print "sets requested ", args.sets
                print "sets available: ", slist
                exit(1)

        # check for some varopts first.
        if args.varopts != None:
            self._opts["varopts"] = args.varopts
        # Add some hackery here to convert pressure level vars to var+varopts
        if args.vars != None:
            self._opts["vars"] = args.vars

            vpl = ["Z3_300", "Z3_500", "U_200", "T_200", "T_850"]
            vl = list(set(args.vars) - set(vpl))
            if vl == args.vars:  # no pressure level vars made it this far.
                print "No pressure level vars found in input vars list."
            else:  # more complicated....
                print "Pressure level vars found in input vars list.... Processing...."
                vopts = []
                if (
                    self._opts["varopts"] != [] and self._opts["varopts"] != None
                ):  # hopefully the user didn't also specify varopts....
                    print "User passed in varopts but there are pressure-level variables in the vars list."
                    print "This will append the pressure levels found to the varopts array"
                    # see which pressure level vars were passed. this will be the super set of pressure levels.
                if "Z3_300" in self._opts["vars"]:
                    vopts.append("300")
                    self._opts["vars"] = [x.replace("Z3_300", "Z3") for x in self._opts["vars"]]
                if "Z3_500" in self._opts["vars"]:
                    vopts.append("500")
                    self._opts["vars"] = [x.replace("Z3_500", "Z3") for x in self._opts["vars"]]
                if "T_200" in self._opts["vars"]:
                    vopts.append("200")
                    self._opts["vars"] = [x.replace("T_200", "T") for x in self._opts["vars"]]
                if "T_850" in self._opts["vars"]:
                    vopts.append("850")
                    self._opts["vars"] = [x.replace("T_850", "T") for x in self._opts["vars"]]
                if "U_200" in self._opts["vars"]:
                    vopts.append("200")
                    self._opts["vars"] = [x.replace("U_200", "U") for x in self._opts["vars"]]
                vopts = list(set(vopts))
                if self._opts["varopts"] == [] or self._opts["varopts"] == None:
                    self._opts["varopts"] = vopts
                else:
                    self._opts["varopts"].extend(vopts)
                    self._opts["varopts"] = list(set(self._opts["varopts"]))
                print "Updated vars list: ", self._opts["vars"]

        # If --yearly is set, then we will add 'ANN' to the list of climatologies
        if args.yearly == True:
            self._opts["yearly"] = True
            self._opts["times"].append("ANN")

        # If --monthly is set, we add all months to the list of climatologies
        if args.monthly == True:
            self._opts["monthly"] = True
            self._opts["times"].extend(all_months)

        # If --seasonally is set, we add all 4 seasons to the list of climatologies
        if args.seasonally == True:
            self._opts["seasonally"] = True
            self._opts["times"].extend(all_seasons)

        # This allows specific individual months to be added to the list of climatologies
        if args.months != None:
            if args.monthly == True:
                print "Please specify just one of --monthly or --months"
                quit()
            else:
                mlist = [x for x in all_months if x in args.months]
                self._opts["times"] = self._opts["times"] + mlist

        # This allows specific individual years to be added to the list of climatologies.
        # Note: Checking for valid input is impossible until we look at the dataset
        # This has to be special cased since typically someone will be saying
        # "Generate climatologies for seasons for years X, Y, and Z of my dataset"
        if args.years != None:
            if args.yearly == True:
                print "Please specify just one of --yearly or --years"
                quit()
            else:
                self._opts["years"] = args.years

        if args.seasons != None:
            if args.seasonally == True:
                print "Please specify just one of --seasonally or --seasons"
                quit()
            else:
                slist = [x for x in all_seasons if x in args.seasons]
                self._opts["times"] = self._opts["times"] + slist
# XMLS PRODUCED BY THIS CODE COMBINE THESE SO THAT THE MONTHLY CLIMATOLOGICAL ANNUAL CYCLE CAN BE READ FROM A SINGLE (XML) FILE
# THE VARIABLES "filenamea" AND "filenameb" BELOW ARE NOT LIKELY TO WORK FOR THE GENERAL CASE.  THE LOGIC TO TRAP months "01", "02"... and "10", "11" and "12" MAY NEED TO BE MODIFIED DEPENDING ON THE FILES AVAILABLE.

# LAST UPDATE 6/29/16 PJG

####

import cdms2 as cdms
import os, string
import time
import sys
import argparse

# Set cdms2 preferences - deflated output, no shuffling, no complaining
cdms.setNetcdfDeflateFlag(1)
cdms.setNetcdfDeflateLevelFlag(9)  # 1-9, min to max - comes at a heavy IO (read/write time) cost
cdms.setNetcdfShuffleFlag(0)
cdms.setCompressionWarnings(0)  # Turn off nag messages
# Set bounds automagically
#cdms.setAutoBounds(1) ; # Use with caution

parser = argparse.ArgumentParser(
    description=
    'Given a list of directories with simulation clims, use cdscan to produce xmls for annual cycle climatologies '
)
parser.add_argument(
    "-b",
    "--basedir",
    help=
Example #25
0
import argparse
import os  # needed below (os.getcwd); missing from this excerpt
import string
import numpy as npy
import numpy.ma as ma
import cdutil as cdu
from genutil import statistics
import support_density as sd
import time as timc
import timeit
#import matplotlib.pyplot as plt
import cdms2 as cdm  # needed below; missing from this excerpt
#
# netCDF compression (use 0 for netCDF3)
comp = 0
cdm.setNetcdfShuffleFlag(comp)
cdm.setNetcdfDeflateFlag(comp)
cdm.setNetcdfDeflateLevelFlag(comp)
cdm.setAutoBounds('on')
#
# == Arguments
#
# 
# == Inits
#
home   = os.getcwd()
outdir = os.getcwd()
hist_file_dir=home
#
# == Arguments
#
# == get command line options
parser = argparse.ArgumentParser(description='Script to perform density binning analysis')
Example #26
0
import cdms2

###################################### 
value = 0 
cdms2.setNetcdfShuffleFlag(value)
cdms2.setNetcdfDeflateFlag(value)
cdms2.setNetcdfDeflateLevelFlag(value)
######################################

### EXECUTE MODULE WITH VARIOUS FUNCTIONS
execfile('modules_and_functions/misc_module.py')
execfile('modules_and_functions/getOurModelData.py')

### OPTIONS FOR REGRIDDING: METHOD AND TARGET GRID
exp = 'cmip5'
rgridMeth = 'regrid2'
targetGrid = '4x5'
targetGrid = '2.5x2.5'  # the later assignment wins; the 4x5 line above is a leftover alternative

### OUTPUT DIRECTORY
outdir = '/work/metricspackage/130522/data/inhouse_model_clims/samplerun/atm/mo/ac/'
## SEE END OF THIS CODE FOR OUTPUT FILENAMES

### VARIABLES TO LOOP OVER (NAMES ASSUMED TO BE CONSISTENT WITH CMIP5)
vars = ['rlut','pr']

######################################

############# GET OBS TARGET GRID
obsg = get_target_grid(targetGrid)
############# 
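# Hedged sketch (not part of the original excerpt): the target grid obtained
# above is typically consumed with cdms2's regridder, e.g.
#   regridded = dataVar.regrid(obsg, regridTool=rgridMeth)  # dataVar is a placeholder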
Example #27
0
    def write_plot_data( self, format="", where="" ):
        """Writes the plot's data in the specified file format and to the location given."""
        if format=="" or format=="NetCDF" or format=="NetCDF file":
            format = "NetCDF file"
        elif format=="JSON string":
            pass
        elif format=="JSON file":
            pass
        else:
            logger.warning("write_plot_data cannot recognize format name %s",format)
            logger.warning("will write a NetCDF file.")
            format = "NetCDF file"

        filename = self.outfile( format, where )

        if format=="NetCDF file":
            value=0
            cdms2.setNetcdfShuffleFlag(value) ## where value is either 0 or 1
            cdms2.setNetcdfDeflateFlag(value) ## where value is either 0 or 1
            cdms2.setNetcdfDeflateLevelFlag(value) ## where value is an integer between 0 and 9 inclusive

            writer = cdms2.open( filename, 'w' )    # later, choose a better name and a path!
            store_provenance(writer)
        elif format=="JSON file":
            logger.error("JSON file not implemented yet")
        elif format=="JSON string":
            return json.dumps(self,cls=DiagsEncoder)

        writer.source = "UV-CDAT Diagnostics"
        writer.presentation = self.ptype
        plot_these = []
        for zax in self.vars:
            try:
                if not hasattr(zax,'filetableid'):
                    zax.filetableid = zax.filetable.id()
                del zax.filetable  # we'll write var soon, and can't write a filetable
                if hasattr(zax,'filetable2'):
                    zax.filetable2id = zax.filetable2.id()
                    del zax.filetable2 # we'll write var soon, and can't write a filetable
            except:
                pass
            try:
                zax._filetableid= zax.filetableid  # and the named tuple ids aren't writeable as such
                zax.filetableid= str(zax.filetableid)  # and the named tuple ids aren't writeable as such
            except:
                pass
            try:
                zax._filetable2id= zax.filetable2id  # and the named tuple ids aren't writeable as such
                zax.filetable2id= str(zax.filetable2id)  # and the named tuple ids aren't writeable as such
            except:
                pass
            for ax in zax.getAxisList():
                try:
                    del ax.filetable
                except:
                    pass
            writer.write( zax )
            plot_these.append( str(seqgetattr(zax,'id','')) )
        writer.plot_these = ' '.join(plot_these)
        # Once the finalized method guarantees that varmax,varmin are numbers...
        #if self.finalized==True:
        #    writer.varmax = self.varmax
        #    writer.varmin = self.varmin

        writer.close()
        return [filename]
Example #28
0
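# Assumed imports for this excerpt (not shown in the scrape): numpy as npy,
# cdms2 as cdm, time as timc.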
def surfTransf(fileFx, fileTos, fileSos, fileHef, fileWfo, varNames, outFile, debug=True, timeint='all',noInterp=False, domain='global'):
    '''
    The surfTransf() function takes files and variable arguments and creates
    density bined surface transformation fields which are written to a specified outfile
    Author:    Eric Guilyardi : [email protected]
    Co-author: Paul J. Durack : [email protected] : @durack1.
    
    Created on Wed Oct  8 09:15:59 CEST 2014

    Inputs:
    ------
    - fileTos(time,lat,lon)     - 3D SST array
    - fileSos(time,lat,lon)     - 3D SSS array
    - fileHef(time,lat,lon)     - 3D net surface heat flux array
    - fileWfo(time,lat,lon)     - 3D fresh water flux array
    - fileFx(lat,lon)           - 2D array containing the cell area values
    - varNames[4]               - 1D array containing the names of the variables
    - outFile(str)              - output file with full path specified.
    - debug <optional>          - boolean value
    - timeint <optional>        - specify temporal step for binning <init_idx>,<ncount>
    - noInterp <optional>       - if true no interpolation to target grid
    - domain <optional>         - specify domain for averaging when interpolated to WOA grid ('global','north',
                                  'north40', 'south' for now)

    Outputs:
    --------
    - netCDF file with monthly surface rhon, density fluxes, transformation (global and per basin)
    - use cdo yearmean to compute annual mean

    Usage:
    ------
    '>>> from binDensity import surfTransf
    '>>> surfTransf(file_fx, file_tos, file_sos, file_hef, file_wfo, [var1,var2,var3,var4], './output.nc', debug=True, timeint='all')

    Notes:
    -----
    - EG   8 Oct 2014   - Initial function write and tests ok
    - PJD 22 Nov 2014   - Code cleanup
    - EG   4 Oct 2017   - code on ciclad, more cleanup and options
    - EG  12 Sep 2018   - Add North vs. South calculation

    '''
    # Keep track of time (CPU and elapsed)
    cpu0 = timc.clock()
    #
    # netCDF compression (use 0 for netCDF3)
    comp = 1
    cdm.setNetcdfShuffleFlag(comp)
    cdm.setNetcdfDeflateFlag(comp)
    cdm.setNetcdfDeflateLevelFlag(comp)
    cdm.setAutoBounds('on')
    # 
    # == Inits
    #
    npy.set_printoptions(precision = 2)
    # Determine file name from inputs
    modeln = fileTos.split('/')[-1].split('.')[1]
    #
    if debug:
        print ' Debug - File names:'
        print '    ', fileTos
        print '    ', fileSos
        debugp = True
    else:
        debugp = False
    #
    # Open files
    ftos  = cdm.open(fileTos)
    fsos  = cdm.open(fileSos)
    fhef  = cdm.open(fileHef)
    fwfo  = cdm.open(fileWfo)
    #timeax = ftos.getAxis('time')
    timeax = ftos.getAxis('time_counter')
    #print 'timeax'
    #print timeax
    #
    # Dates to read
    if timeint == 'all':
        tmin = 0
        tmax = timeax.shape[0]
        timeaxis = timeax
    else:
        tmin = int(timeint.split(',')[0]) - 1
        tmax = tmin + int(timeint.split(',')[1])
        # update time axis
        timeaxis   = cdm.createAxis(timeax[tmin:tmax])
        timeaxis.id       = 'time'
        timeaxis.units    = timeax.units
        timeaxis.designateTime()
        #print timeaxis

    if debugp:
        print; print ' Debug mode'
 
    # Read file attributes to carry on to output files
    list_file   = ftos.attributes.keys()
    file_dic    = {}
    for i in range(0,len(list_file)):
        file_dic[i]=list_file[i],ftos.attributes[list_file[i] ]
    #
    # Read data
        
    # varnames
    tos_name = varNames[0]
    sos_name = varNames[1]
    hef_name = varNames[2]
    wfo_name = varNames[3]

    if debugp:
        print ' Read ', tos_name, sos_name, tmin, tmax
    tos = ftos(tos_name , time = slice(tmin,tmax))
    sos = fsos(sos_name , time = slice(tmin,tmax))
    if debugp:
        print ' Read ', hef_name, wfo_name
    qnet = fhef(hef_name, time = slice(tmin,tmax))
    try:
        emp  = fwfo(wfo_name , time = slice(tmin,tmax))
        empsw = 0
    except Exception,err:
        emp  = fwfo('wfos' , time = slice(tmin,tmax))
        print ' Reading concentration dilution fresh water flux'
        empsw = 0
Example #29
0
#!/usr/local/cdat5.2/bin/python
"""Module for computing precipitation extreme stats mostly using CDO utilities"""

from sys import exit
from os import path, system, mkdir
from cdms2 import setNetcdfShuffleFlag, setNetcdfDeflateLevelFlag, setNetcdfDeflateFlag
from string import split
from datetime import datetime
from daily_stats_cdms_utils import MosaicFiles

setNetcdfShuffleFlag(0)
setNetcdfDeflateFlag(0)
setNetcdfDeflateLevelFlag(0)

RootDir = '/mnt/BCSD'

OUTROOT = '/mnt/out_stats'
if not path.isdir(OUTROOT):
    mkdir(OUTROOT)

OUTTEMP = '/home/edarague'
if not path.isdir(OUTTEMP):
    mkdir(OUTTEMP)

# added as global institution attribute to output files
txtinst = "Santa Clara U.,Climate Central,The Nature Conservancy,International Center for Tropical Agriculture"

# input files are on 0->360 longitude convention. To switch to a -180->180 grid:
# cdo sellonlatbox,-180,180,-90,90 ifile ofile
# which works for global domains only.For smaller domains:
# cdo griddes ifile > mygrid ; then edit mygrid and set xfirst to the new value
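# Hedged sketch of the smaller-domain recipe above (assumed cdo usage):
#   cdo griddes ifile > mygrid      # dump the grid description
#   (edit mygrid and set xfirst to the new west edge, e.g. -180)
#   cdo setgrid,mygrid ifile ofile  # write a copy with the edited grid attached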
Example #30
0
from pywps.Process import WPSProcess
import os,numpy,sys
import warnings  # used in the wps.cfg fallback below; missing from this excerpt
import logging, json
import cdms2
import random
from pywps import config
import ConfigParser
# Path where output will be stored/cached

cdms2.setNetcdfShuffleFlag(0) ## where value is either 0 or 1
cdms2.setNetcdfDeflateFlag(0) ## where value is either 0 or 1
cdms2.setNetcdfDeflateLevelFlag(0) ## where value is an integer between 0 and 9 inclusive

wps_config = ConfigParser.ConfigParser()
wps_config.read(os.path.join(os.path.dirname(__file__),"..","wps.cfg"))
try:
    DAP_DATA = wps_config.get("dapserver","dap_data")
except:
    warnings.warn("Could not READ DAP_DATA from wps.cfg will store files in /tmp")
    DAP_DATA = "/tmp"
try:
    DAP_INI = wps_config.get("dapserver","dap_ini")
except:
    DAP_INI = None
try:
    DAP_HOST = wps_config.get("dapserver","dap_host")
except:
    DAP_HOST = None
try:
    DAP_PORT = wps_config.get("dapserver","dap_port")
except:
Example #31
0
@author: durack1
"""
import gc,glob,os,socket,sys
sys.path.append('/export/durack1/git/durolib/durolib')
from durolib import globalAttWrite
from string import replace
import cdms2 as cdm
import genutil as gnu
#import MV2 as mv
import numpy as np

# Set nc classic as outputs
cdm.setNetcdfShuffleFlag(0)
cdm.setNetcdfDeflateFlag(1)
cdm.setNetcdfDeflateLevelFlag(9) ; # 9 = maximum compression
cdm.setCompressionWarnings(False)
cdm.setAutoBounds(True)

#%% Set areas
earthAreaKm2 = 510.072e6 ; # Earth area in km^2
earthAreaM2 = earthAreaKm2*1e6 ; # Earth area km^2 -> m^2
earthWaterAreaKm2 = 361.132e6
earthLandAreaKm2 = 148.94e6

#%% Get input grids
os.chdir('/work/durack1/Shared/190213_data_density')
inFiles = glob.glob('*thetao*.xml')

#%% Determine machine and host files
host = socket.gethostname()
Example #32
0
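# Assumed imports for this excerpt (not shown in the scrape): numpy as npy,
# cdms2 as cdm, MV2 as mv, gc.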
def correctFile(idxcorr, ncorr, inFile, inDir, outFile, outDir):
    '''
    Correct density binned files (undefined ptop & longitude-0 issue)
    idxcorr = [idx_i,idx_i1,jmax] indices for longitude correction - if [0,0,0] ignore
    ncorr   = number of corrections: 1 or 2
    '''
    # CDMS initialisation - netCDF compression
    comp = 1 # 0 for no compression
    cdm.setNetcdfShuffleFlag(comp)
    cdm.setNetcdfDeflateFlag(comp)
    cdm.setNetcdfDeflateLevelFlag(comp)
    cdm.setAutoBounds('on')
    # Numpy initialisation
    npy.set_printoptions(precision=2)

    varList3D = ['isondepthg','isonthickg', 'sog','thetaog']
    varList2D = ['ptopsoxy','ptopdepthxy','ptopsigmaxy','ptopthetaoxy','persistmxy']

    # First test, read level by level and write level by level (memory management)
    # use ncpdq -a time,lev,lat,lon to recover the dimension order

    fi = cdm.open(inDir+'/'+inFile)
    fo = cdm.open(outDir+'/'+outFile,'w')
    isondg  = fi['isondepthg'] ; # Create variable handle
    # Get grid objects
    #axesList = isondg.getAxisList()
    #sigmaGrd = isondg.getLevel()
    lonN = isondg.shape[3]
    latN = isondg.shape[2]
    levN = isondg.shape[1]
    timN = isondg.shape[0]
    #valmask = isondg.missing_value

    if ncorr == 2:
        ic1 = idxcorr[0][0]
        ic2 = idxcorr[0][1]
        jcmax = idxcorr[0][2]
        ic12 = idxcorr[1][0]
        ic22 = idxcorr[1][1]
        jcmax2 = idxcorr[1][2]
        if ic2 >= lonN-1:
            ic2 = 0
        if ic22 >= lonN-1:
            ic22 = 0
    elif ncorr == 1:
        ic1 = idxcorr[0]
        ic2 = idxcorr[1]
        jcmax = idxcorr[2]
        if ic2 >= lonN-1:
            ic2 = 0
    #print ic1,ic2,jcmax
    corr_long = True
    if ic1 == 0 and ic2 == 0 and jcmax == 0:
        corr_long = False
    #testp = 10
    for it in range(timN):
        #if it/testp*testp == it:
        #    print ' year =',it
        # test
        #i = 90
        #j = 90
        #i2d = 6
        #j2d = 12
        #ij = j*lonN+i
        #ij2d = j2d*lonN+i2d
        #print 'ij=',ij
        # 3D variables
        for iv in varList3D:
            #print iv
            outVar = fi(iv,time = slice(it,it+1))
            # Correct for longitude interpolation issue
            if corr_long:
                for jt in range(jcmax):
                    outVar[:,:,jt,ic1] = (outVar[:,:,jt,ic1-1]+outVar[:,:,jt,ic2+1])/2
                    outVar[:,:,jt,ic2] = outVar[:,:,jt,ic1]
                if ncorr == 2:
                    for jt in range(jcmax2):
                        outVar[:,:,jt,ic12] = (outVar[:,:,jt,ic12-1]+outVar[:,:,jt,ic22+1])/2
                        outVar[:,:,jt,ic22] = outVar[:,:,jt,ic12]
            # Correct Bowl properties
            if iv =='isondepthg':
                vardepth = npy.reshape(outVar,(levN,latN*lonN))
                #print 'test'
                #print outVar[:,:,j2d,i2d]
                #print vardepth[:,ij2d]
                # find values of surface points
                vardepthBowl = npy.min(npy.reshape(outVar,(levN,latN*lonN)),axis=0)
                vardepthBowlTile = npy.repeat(vardepthBowl,levN,axis=0).reshape((latN*lonN,levN)).transpose()
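                # Broadcasting the per-column minimum depth across all levels lets the
                # equality tests below pick out the bowl (shallowest) level in each column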
                #print vardepthBowlTile.shape
                #print vardepthBowl[ij2d], vardepthBowlTile[:,ij2d]
                levs = outVar.getAxisList()[1][:]
                #print 'levs',levs
                levs3d  = mv.reshape(npy.tile(levs,latN*lonN),(latN*lonN,levN)).transpose()
                varsigmaBowl = npy.max(npy.where(vardepth == vardepthBowlTile,levs3d,0),axis=0)
                #print varsigmaBowl[ij2d],levs3d[:,ij2d]

            elif iv == 'sog':
                varsog = npy.reshape(outVar,(levN,latN*lonN))
                varsoBowl = npy.max(npy.where(vardepth == vardepthBowlTile,varsog,0),axis=0)
                #print varsoBowl[ij2d], varsog[:,ij2d]
                #print vardepth[:,ij2d],vardepthBowlTile[:,ij2d]
                del (varsog); gc.collect()
            elif iv =='thetaog':
                varthetao = npy.reshape(outVar,(levN,latN*lonN))
                varthetaoBowl = npy.max(npy.where(vardepth == vardepthBowlTile,varthetao,-1000),axis=0)
                #print varthetaoBowl[ij2d],varthetao[:,ij2d]
                del (varthetao); gc.collect()
            # Write
            fo.write(outVar.astype('float32'), extend = 1, index = it)
            fo.sync()
        del (vardepth); gc.collect()
        # 2D variables and correct isondepthg = 0
        for iv in varList2D:
            outVar = fi(iv,time = slice(it,it+1))
            # Correct for longitude interpolation issue
            if corr_long:
                for jt in range(jcmax):
                    outVar[:,jt,ic1] = (outVar[:,jt,ic1-1]+outVar[:,jt,ic2+1])/2
                    outVar[:,jt,ic2] = outVar[:,jt,ic1]
                if ncorr == 2:
                    for jt in range(jcmax2):
                        outVar[:,jt,ic12] = (outVar[:,jt,ic12-1]+outVar[:,jt,ic22+1])/2
                        outVar[:,jt,ic22] = outVar[:,jt,ic12]
            # Correct for ptopsoxy < 30 ; note testso is reused by the elif
            # branches below, so 'ptopsoxy' must come first in varList2D
            #print 'before',outVar[:,j2d,i2d]
            if iv == 'ptopsoxy':
                testso = npy.reshape(outVar,(latN*lonN)) < 30.
                #print 'testdepth', testdepth[ij2d]
                #print npy.argwhere(testdepth)[0:10]/lonN, npy.argwhere(testdepth)[0:10]-npy.argwhere(testdepth)[0:10]/lonN*lonN
                outVar.data[...] = npy.where(testso,varsoBowl,npy.reshape(outVar,(latN*lonN))).reshape(outVar.shape)[...]
            elif iv == 'ptopdepthxy':
                outVar.data[...] = npy.where(testso,vardepthBowl,npy.reshape(outVar,(latN*lonN))).reshape(outVar.shape)[...]
            elif iv == 'ptopthetaoxy':
                outVar.data[...] = npy.where(testso,varthetaoBowl,npy.reshape(outVar,(latN*lonN))).reshape(outVar.shape)[...]
            elif iv == 'ptopsigmaxy':
                outVar.data[...] = npy.where(testso,varsigmaBowl,npy.reshape(outVar,(latN*lonN))).reshape(outVar.shape)[...]
            #print 'after',outVar[:,j2d,i2d]

            # Write
            fo.write(outVar.astype('float32'), extend = 1, index = it)
            fo.sync()

    fi.close()
    fo.close()

# testing

#model = 'CCSM4'
#idxcorr=[139,140,145]
#ncorr = 1
#inFile = 'cmip5.CCSM4.historical24.r1i1p1.an.ocn.Omon.density.ver-v20121128.nc'
#inDir = '/Users/ericg/Projets/Density_bining/Raw_testing'
#outFile = 'cmip5.CCSM4.historical24.outtest.nc'

#model = 'CanESM2'
#idxcorr=[179,180,180]
#ncorr = 1
#inFile = 'cmip5.CanESM2.historical24.r1i1p1.an.ocn.Omon.density.ver-1.nc'
#inDir = '/Users/ericg/Projets/Density_bining/Raw_testing'
#outFile = 'cmip5.CanESM2.historical24.outtest.nc'

#model = 'IPSL-CM5A-LR'
#idxcorr=[0,0,0]
#ncorr=1
#inFile = 'cmip5.IPSL-CM5A-LR.historical24.r1i1p1.an.ocn.Omon.density.ver-v20111119.nc'
#inDir = '/Users/ericg/Projets/Density_bining/Raw_testing'
#outFile = 'cmip5.IPSL-CM5A-LR.historical24.outtest.nc'


#model = 'Ishii'
#idxcorr=[[359,359,39],[180,180,180]]
#idxcorr=[359,359,39]
#ncorr = 1
#inFile = 'obs.Ishii.historical.r0i0p0.an.ocn.Omon.density.ver-1.latestX.nc'
#inDir='/Volumes/hciclad/data/Density_binning/Prod_density_obs_april16'
#outFile = 'obs.Ishii.historical.r0i0p0.an.ocn.Omon.density.ver-1.latestXCorr.nc'

#model = 'EN4'
#idxcorr=[[359,359,39],[180,180,180]]
#idxcorr=[359,359,39]
#ncorr = 2
#inFile = 'obs.EN4.historical.r0i0p0.mo.ocn.Omon.density.ver-1.latestX.nc'
#inDir='/Volumes/hciclad/data/Density_binning/Prod_density_obs_april16'
#outFile = 'obs.EN4.historical.r0i0p0.mo.ocn.Omon.density.ver-1.latestXCorr.nc'

#outDir = inDir


#correctFile(idxcorr, ncorr, inFile, inDir, outFile, outDir)
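
# A minimal runnable sketch of an actual call (illustrative only: the index
# triple and the file names below are placeholders, not validated values):
if __name__ == '__main__':
    correctFile([139, 140, 145], 1, 'density_in.nc', '.', 'density_out.nc', '.')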
Example #33
#!/usr/bin/env python
# -*- coding:UTF-8 -*-

import os, sys

import cdtime
import cdms2, MV2

import constants as cc

value = 0
cdms2.setNetcdfShuffleFlag(value)  ## where value is either 0 or 1
cdms2.setNetcdfDeflateFlag(value)  ## where value is either 0 or 1
cdms2.setNetcdfDeflateLevelFlag(value)


def f_zcb(zf, zneb):

    time = zneb.getTime()
    lev = zneb.getLevel()
    nt, nlev = zneb.shape

    zcb = MV2.zeros(nt, typecode=MV2.float32) + cc.missing

    # lpos is True when heights increase with the level index (surface first)
    lpos = zf[0, 0] <= zf[0, 1]

    for it in range(0, nt):
        lfound = False
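
# The example is truncated above. A self-contained sketch of the same idea
# (an assumption, not the original continuation): scan each profile from the
# surface upward and take the height of the first cloudy level as cloud base.
def f_zcb_sketch(zf, zneb, missing=1.e20, threshold=1.e-3):
    import numpy
    nt, nlev = zneb.shape
    zcb = numpy.zeros(nt) + missing
    lpos = zf[0, 0] <= zf[0, 1]  # True if heights increase with level index
    for it in range(nt):
        levels = range(nlev) if lpos else range(nlev - 1, -1, -1)
        for ilev in levels:
            if zneb[it, ilev] > threshold:
                zcb[it] = zf[it, ilev]  # first cloudy level is the cloud base
                break
    return zcb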
Example #34
#!/usr/bin/env python

# Writes a copy of a file with one variable renamed.  Only that one variable, and supporting
# variables such as axes, will be written; any others will be ignored.  Most attributes also
# will be ignored.
# This script is filled with ad-hoc fixes to other data oddities.  Read this before running it!

import sys, argparse, numpy
import cdms2, cdutil
from pprint import pprint
import debug

cdms2.setNetcdfDeflateFlag(0)
cdms2.setNetcdfDeflateLevelFlag(0)
cdms2.setNetcdfShuffleFlag(0)   

def rename_variable( invar, outvar, infn ):
    outfn = '_'.join([outvar,infn])
    print('Writing from %s to %s' % (infn, outfn))
    fi=cdms2.open(infn)
    fo=cdms2.open(outfn,'w')
    V=fi(invar)
    
    # Various fixups, mostly ad-hoc...

    # Fix up time
    if V.getTime() is not None:
        t=V.getTime()
        ## ad-hoc fix for NCAR data with units in "days since...": put the day in the middle of the month:
        #  t[:] = t[:] - 15.0
        ## For NCAR or other data without time bounds, set them:
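        ## The example is truncated here. For orientation, a stripped-down sketch
        ## of the remaining copy-with-rename step (an assumed minimal version,
        ## omitting the ad-hoc fixups described above):
        ##   V.id = outvar
        ##   fo.write(V)
        ##   fi.close(); fo.close()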
Example #35
import logging, os, resource, time
import cdms2

logger = logging.getLogger(__name__)  # assumed module-level logger

def compute_and_write_climatologies( varkeys, reduced_variables, season, case='', variant='', path='' ):
    """Computes climatologies and writes them to a file.
    Inputs: varkeys, names of variables whose climatologies are to be computed
            reduced_variables, dict (key:rv) where key is a variable name and rv an instance
               of the class reduced_variable
            season: the season over which the climatologies will be computed
            case: a case name used in the filename; if empty, it is taken from the data
            variant: a string to be inserted in the filename
            path: directory in which the output file will be written"""
    # Compute the value of every variable we need.
    # This function does not return the variable values, or even keep them.

    # First compute all the reduced variables
    # Probably this loop consumes most of the running time.  It's what has to read in all the data.
    firsttime = True
    for key in varkeys:
        if key in reduced_variables:
            time0 = time.time()
            #print "jfp",time.ctime()
            varval = reduced_variables[key].reduce()
            #print "jfp",time.ctime(),"reduced",key,"in time",time.time()-time0
            pmemusg = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss # "maximum resident set size"
            pmemusg = pmemusg / 1024./1024.  # ru_maxrss is kilobytes on Linux (giving GB) and bytes on macOS (giving MB)
            #print "jfp   peak memory",pmemusg,"MB (GB on Linux)"
            #requires psutil process = psutil.Process(os.getpid())
            #requires psutil mem = process.get_memory_info()[0] / float(2**20)
            #print "jfp   process memory",mem,"MB"
        else:
            continue
        if varval is None:
            continue

        var = reduced_variables[key]
        if firsttime:
            firsttime = False
            if case=='':
                case = getattr( var, 'case', '' )
                if case!='':
                    case = var._file_attributes['case']+'_'
            if case=='':
                case = 'nocase_'
            if variant!='':
                variant = variant+'_'
            filename = case + variant + season + "_climo.nc"
            value=0
            cdms2.setNetcdfShuffleFlag(value) ## where value is either 0 or 1
            cdms2.setNetcdfDeflateFlag(value) ## where value is either 0 or 1
            cdms2.setNetcdfDeflateLevelFlag(value) ## where value is an integer between 0 and 9 inclusive

            g = cdms2.open( os.path.join(path,filename), 'w' )    # later, choose a better name and a path!
            # ...actually we want to write this to a full directory structure like
            #    root/institute/model/realm/run_name/season/

        logger.info("writing %s in climatology file %s", key, filename)
        varval.id = var.variableid
        varval.reduced_variable=varval.id
        if hasattr(var,'units'):
            varval.units = var.units+'*'+var.units
        g.write(varval)
        for attr,val in var._file_attributes.items():
            if not hasattr( g, attr ):
                setattr( g, attr, val )
    if firsttime:
        logger.error("No variables found.  Did you specify the right input data?")
    else:
        g.season = season
        g.close()
    return case
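
# A hedged usage sketch (the variable names and the reduced_variable instances
# below are assumptions; the real class is defined elsewhere in this package):
#   rvs = {'T': reduced_variable(...), 'PS': reduced_variable(...)}
#   case = compute_and_write_climatologies(['T', 'PS'], rvs, 'ANN', path='/tmp')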
Example #36
# Python module imports
import os
import shutil
import subprocess
import sys
import cdms2 as cdm
# Add durolib to path
sys.path.insert(1, '/export/durack1/git/pylib')  # Assumes crunchy/oceanonly
from durolib import globalAttWrite


# Set cdms preferences - deflate compression, no shuffling, no complaining
cdm.setNetcdfDeflateFlag(1)
# Deflate level 1-9, min to max - max compression comes at a heavy IO (read/write time) cost
cdm.setNetcdfDeflateLevelFlag(9)
cdm.setNetcdfShuffleFlag(0)
cdm.setCompressionWarnings(0)  # Turn off nag messages
# Set bounds automagically
# cdm.setAutoBounds(1) ; # Use with caution
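# (setAutoBounds guesses cell bounds for axes that lack them, which can be
# wrong for irregular grids - hence the caution above)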

# Set build info once
buildDate = '141126'
outPath = '/work/durack1/Shared/141126_metrics-acme'
# Create input variable lists
uvcdatInstall = ''.join(
    ['/export/durack1/', buildDate, '_pcmdi_metrics/PCMDI_METRICS/bin/'])
# Specify inputs:
#        Realm   ModelId               InputFiles    SourceDirectory
data = [
    ['atmos',
Example #37
def setup_cdms2(self):
    cdms2.setNetcdfShuffleFlag(0)  # Argument is either 0 or 1
    cdms2.setNetcdfDeflateFlag(0)  # Argument is either 0 or 1
    cdms2.setNetcdfDeflateLevelFlag(0)  # Argument is int between 0 and 9
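
# Usage note (an assumption about the surrounding class, which is not shown):
# call self.setup_cdms2() once before opening any output files, so cdms2 writes
# plain netCDF-3 classic files instead of compressed netCDF-4 output.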