Example #1
def do_avg(infile, inpath, variable, nodata, outfile):
    # netCDF4 compressed output (for netcdf3, set all three flags to 0)
    cdms2.setNetcdfShuffleFlag(1)
    cdms2.setNetcdfDeflateFlag(1)
    cdms2.setNetcdfDeflateLevelFlag(3)

    # note that this version will erase data wherever a nodata is found in the series
    avg = None
    nodatamask = None
    for ifile in infile:
        fname = os.path.join(inpath, ifile)
        if not os.path.exists(fname): messageOnExit('file {0} not found on path {1}. Exit(100).'.format(ifile, inpath), 100)
        thisfile = cdms2.open(fname, 'r')

        data = numpy.array(thisfile[variable][:])
        if avg is None:
            avg = data
            nodatamask = data >= nodata
        else:
            avg = avg + data
            nodatamask = nodatamask | (data >= nodata)  # accumulate the mask across the whole series
        thisfile.close()

    avg = avg/len(infile)
    if nodatamask.any():
        avg[nodatamask] = nodata
        
    if os.path.exists(outfile): os.remove(outfile)
    outfh = cdms2.open(outfile, 'w')
    outvar=cdms2.createVariable(avg, typecode='f', id=variable, fill_value=nodata )
    outfh.write(outvar)
    outfh.close()
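A note on the pattern above: the three flags are process-global in cdms2 and affect every file subsequently opened for writing, not just the next one. A minimal sketch of a toggle helper (set_compression is a hypothetical name, not part of cdms2) that switches between netCDF3-compatible output and compressed output:

import cdms2

def set_compression(level):
    """Set cdms2's process-global NetCDF write flags; level=0 disables compression."""
    flag = 1 if level > 0 else 0
    cdms2.setNetcdfShuffleFlag(flag)
    cdms2.setNetcdfDeflateFlag(flag)
    cdms2.setNetcdfDeflateLevelFlag(level)

set_compression(0)   # plain netCDF3-style output
set_compression(3)   # shuffle + deflate at level 3, as in Example #1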
Example #2
def save_ncfiles(set_num, test, ref, diff, parameter):
    """Saves the test, reference, and difference nc files."""
    # Save files being plotted
    # Set cdms preferences - no compression, no shuffling, no complaining
    cdms2.setNetcdfDeflateFlag(1)
    # 1-9, min to max - Comes at heavy IO (read/write time cost)
    cdms2.setNetcdfDeflateLevelFlag(0)
    cdms2.setNetcdfShuffleFlag(0)
    cdms2.setCompressionWarnings(0)  # Turn off warning messages
    # Save test file
    pth = get_output_dir(set_num, parameter)
    file_test = cdms2.open(pth + '/' + parameter.output_file + '_test.nc',
                           'w+')
    test.id = parameter.var_id
    file_test.write(test)
    file_test.close()

    # Save reference file
    file_ref = cdms2.open(pth + '/' + parameter.output_file + '_ref.nc', 'w+')
    ref.id = parameter.var_id
    file_ref.write(ref)
    file_ref.close()

    # Save difference file
    file_diff = cdms2.open(pth + '/' + parameter.output_file + '_diff.nc',
                           'w+')
    diff.id = parameter.var_id + '(test - reference)'
    file_diff.write(diff)
    file_diff.close()
Example #3
def save_ncfiles(set_num, test, ref, diff, parameter):
    """
    Saves the test, reference, and difference
    data being plotted as nc files.
    """
    if parameter.save_netcdf:
        # Save files being plotted
        # Set cdms preferences - no compression, no shuffling, no complaining
        cdms2.setNetcdfDeflateFlag(1)
        # 1-9, min to max - Comes at heavy IO (read/write time cost)
        cdms2.setNetcdfDeflateLevelFlag(0)
        cdms2.setNetcdfShuffleFlag(0)
        cdms2.setCompressionWarnings(0)  # Turn off warning messages

        pth = get_output_dir(set_num, parameter)

        # Save test file
        test.id = parameter.var_id
        test_pth = os.path.join(pth, parameter.output_file + '_test.nc')
        with cdms2.open(test_pth, 'w+') as file_test:
            file_test.write(test)

        # Save reference file
        ref.id = parameter.var_id
        ref_pth = os.path.join(pth, parameter.output_file + '_ref.nc')
        with cdms2.open(ref_pth, 'w+') as file_ref:
            file_ref.write(ref)

        # Save difference file
        diff.id = parameter.var_id + '(test - reference)'
        diff_pth = os.path.join(pth, parameter.output_file + '_diff.nc')
        with cdms2.open(diff_pth, 'w+') as file_diff:
            file_diff.write(diff)
Example #4
    def testInFileUnlimitedDimAlter(self):

        fnm = os.path.join(cdat_info.get_sampledata_path(), "clt.nc")
        f = cdms2.open(fnm)
        s = f("clt")
        f.close()
        cdms2.setNetcdfDeflateFlag(0)
        cdms2.setNetcdfDeflateLevelFlag(0)
        cdms2.setNetcdfShuffleFlag(0)
        cdms2.setNetcdf4Flag(1)
        cdms2.setNetcdfClassicFlag(1)
        f = cdms2.open("nc4.nc", "w")
        f.write(s)
        f.close()

        timesValues = s.getTime()[:]
        f = cdms2.open("nc4.nc", "r+")
        t = f["time"]
        t[:] = t[:] * 100.
        f.close()

        f = cdms2.open("nc4.nc")
        s = f("clt")
        t = s.getTime()
        self.assertEqual(len(t), len(timesValues))
        self.assertTrue(numpy.allclose(t[:], timesValues * 100.))
        os.remove("nc4.nc")
Example #5
def do_transform(infile, outfile, template):
    # for netcdf3:
    cdms2.setNetcdfShuffleFlag(0)
    cdms2.setNetcdfDeflateFlag(0)
    cdms2.setNetcdfDeflateLevelFlag(0)

    with open(infile, mode='rb') as file:
        fileContent = file.read()


    (referenceGrid, latAxis, lonAxis, latBounds, lonBounds)=makeGrid()
    if os.path.exists(outfile):os.remove(outfile)
    fout = cdms2.open(outfile, "w")


    #thisData = struct.unpack(template['read_type'] * ((template['read_nl'] * template['read_ns']) // template['read_type_size']) , fileContent[template['skip_byte']:template['skip_byte']+(template['read_nl']*template['read_ns'])*template['read_type_size']] )
    skip = 4*8

    # 64800 = 180*360 big-endian floats
    thisData = numpy.array(struct.unpack('>64800f', fileContent[skip:skip + (180*360)*4]))
    thisVar = cdms2.createVariable(thisData.reshape( (template['read_ns'], template['read_nl']) ), typecode=template['read_type'], id=template['id'], \
                                       fill_value=template['nodata'], grid=referenceGrid, copyaxes=1 )
    fout.write(thisVar)

#    thisData2 = numpy.array(struct.unpack('64800B', fileContent[skip + (180*360)*4 : skip + (180*360)*4 + 180*360]))
#    thisVar2 = cdms2.createVariable(thisData2.reshape( (template['read_ns'], template['read_nl']) ), typecode='B', id='ice', \
#                                       fill_value=0, grid=referenceGrid, copaxes=1 )
#    fout.write(thisVar2)

    fout.close()
Example #6
    def execute(self): 
        import cdms2, vcs
        cdms2.setNetcdfShuffleFlag(0)
        cdms2.setNetcdfDeflateFlag(0)
        cdms2.setNetcdfDeflateLevelFlag(0)
        start_time = time.time()
        dataIn=self.loadData()[0]
        location = self.loadDomain()
        cdms2keyargs = self.domain2cdms(location)
        url = dataIn["url"]
        id = dataIn["id"]
        var_cache_id =  ":".join( [url,id] )
        dataset = self.loadFileFromURL( url )
        logging.debug( " $$$ Data Request: '%s', '%s' ", var_cache_id, str( cdms2keyargs ) )
        variable = dataset[ id ]

        read_start_time = time.time()
        result_variable = variable(**cdms2keyargs)
        result_data = result_variable.squeeze()[...]
        time_axis = result_variable.getTime()
        read_end_time = time.time()

        x = vcs.init()
        bf = x.createboxfill('new')
        x.plot( result_data, bf, 'default', variable=result_variable, bg=1 )
        x.gif(  OutputPath + '/plot.gif' )

        result_obj = {}

        result_obj['url'] = OutputDir + '/plot.gif'
        result_json = json.dumps( result_obj )
        self.result.setValue( result_json )
        final_end_time = time.time()
        logging.debug( " $$$ Execution time: %f (with init: %f) sec", (final_end_time-start_time), (final_end_time-self.init_time) )
Example #7
def save_transient_variables_to_netcdf(set_num, variables_dict, label, parameter):
    """
    Save the transient variables to nc file.
    """
    if parameter.save_netcdf:
        for (variable_name, variable) in variables_dict.items():
            # Set cdms preferences - no compression, no shuffling, no complaining
            cdms2.setNetcdfDeflateFlag(1)
            # 1-9, min to max - Comes at heavy IO (read/write time cost)
            cdms2.setNetcdfDeflateLevelFlag(0)
            cdms2.setNetcdfShuffleFlag(0)
            cdms2.setCompressionWarnings(0)  # Turn off warning messages

            path = get_output_dir(set_num, parameter)
            # Save variable
            try:
                variable.id = parameter.var_id
            except AttributeError:
                print("Could not save variable.id for {}".format(variable_name))
            file_name = "{}_{}_{}.nc".format(
                parameter.output_file, variable_name, label
            )
            test_pth = os.path.join(path, file_name)
            with cdms2.open(test_pth, "w+") as file_test:
                try:
                    file_test.write(variable)
                except AttributeError:
                    print("Could not write variable {}".format(variable_name))
Example #8
    def execute(self, test_str, imagefilename, imagethreshold, ncfiles, rtol,
                atol):
        print test_str
        if imagethreshold is None:  # user didn't specify a value
            imagethreshold = regression.defaultThreshold
        # Silence annoying messages about how to set the NetCDF file type.  Anything will do.
        cdms2.setNetcdfShuffleFlag(0)
        cdms2.setNetcdfDeflateFlag(0)
        cdms2.setNetcdfDeflateLevelFlag(0)

        # nonstandard, suitable for testing:
        proc = subprocess.Popen([self.diagstr], shell=True)
        proc_status = proc.wait()
        if proc_status != 0:
            raise DiagError("diags run failed")

        if self.keep:
            print "save ", imagefilename, ncfiles.keys()
            print "output directory is = ", self.outpath
        else:
            # Test of graphics (png) file match:
            # This just looks at combined plot, aka summary plot, which is a compound of three plots.

            imagefname = os.path.join(self.outpath, imagefilename)
            imagebaselinefname = os.path.join(self.baselinepath, imagefilename)
            #pdb.set_trace()
            print "OK THRESHOLD IS:", imagethreshold
            graphics_result = regression.check_result_image(
                imagefname, imagebaselinefname, imagethreshold)
            print "Graphics file", imagefname, "match difference:", graphics_result

            #initialize to successful graphics check
            GR_CLOSE = (graphics_result == 0)
            assert (GR_CLOSE), 'graphic images are not close'

            # Test of NetCDF data (nc) file match:
            NC_CLOSE = True
            for ncfilename, ncvars in ncfiles.items():
                for var in ncvars:
                    #print ncfilename, var
                    try:
                        #print ">>>>>>>>>>>>>", var, ncfilename
                        close = self.closeness(var, ncfilename, rtol, atol)
                        if not close:
                            print var, ' in ', os.path.join(
                                self.outpath, ncfilename
                            ), ' is not close to the one in:', os.path.join(
                                self.baselinepath, ncfilename)
                    except:
                        print 'NetCDF comparison failed for ', var, ' in file: ', os.path.join(
                            self.outpath, ncfilename), "vs", os.path.join(
                                self.baselinepath, ncfilename)
                        close = False
                    NC_CLOSE = NC_CLOSE and close
            assert (NC_CLOSE), 'NetCDF files are not close'

            #cleanup the temp files
            if GR_CLOSE and NC_CLOSE:
                shutil.rmtree(self.outpath)
Example #9
    def testJustDeflate6(self):
        a = cdms2.MV2.zeros((1000, 2100), 'd')
        cdms2.setNetcdfShuffleFlag(0)
        cdms2.setNetcdfDeflateFlag(1)
        cdms2.setNetcdfDeflateLevelFlag(6)

        f = self.getTempFile("justdeflate6.nc", 'w')
        f.write(a)
Example #10
    def testZeroAllSettings(self):
        a = cdms2.MV2.zeros((1000, 2100), 'd')
        cdms2.setNetcdfShuffleFlag(0)
        cdms2.setNetcdfDeflateFlag(0)
        cdms2.setNetcdfDeflateLevelFlag(0)

        f = self.getTempFile("nothing.nc", 'w')
        f.write(a)
Example #11
    def execute(self, test_str, imagefilename, imagethreshold, ncfiles, rtol, atol):
        print test_str
        if imagethreshold is None:  # user didn't specify a value
            imagethreshold = regression.defaultThreshold
            # Silence annoying messages about how to set the NetCDF file type.  Anything will do.
        cdms2.setNetcdfShuffleFlag(0)
        cdms2.setNetcdfDeflateFlag(0)
        cdms2.setNetcdfDeflateLevelFlag(0)

        # nonstandard, suitable for testing:
        proc = subprocess.Popen([self.diagstr], shell=True)
        proc_status = proc.wait()
        if proc_status != 0:
            raise DiagError("diags run failed")

        if self.keep:
            print "save ", imagefilename, ncfiles.keys()
            print "output directory is = ", self.outpath
        else:
            # Test of graphics (png) file match:
            # This just looks at combined plot, aka summary plot, which is a compound of three plots.

            imagefname = os.path.join(self.outpath, imagefilename)
            imagebaselinefname = os.path.join(self.baselinepath, imagefilename)
            # pdb.set_trace()
            print "OK THRESHOLD IS:", imagethreshold
            graphics_result = regression.check_result_image(imagefname, imagebaselinefname, imagethreshold)
            print "Graphics file", imagefname, "match difference:", graphics_result

            # initialize to successful graphics check
            GR_CLOSE = graphics_result == 0
            assert GR_CLOSE, "graphic images are not close"

            # Test of NetCDF data (nc) file match:
            NC_CLOSE = True
            for ncfilename, ncvars in ncfiles.items():
                for var in ncvars:
                    # print ncfilename, var
                    try:
                        # print ">>>>>>>>>>>>>", var, ncfilename
                        close = self.closeness(var, ncfilename, rtol, atol)
                        if not close:
                            print var, " in ", os.path.join(
                                self.outpath, ncfilename
                            ), " is not close from the one in:", os.path.join(self.baselinepath, ncfilename)
                    except:
                        print "NetCDF comparison failed for ", var, " in file: ", os.path.join(
                            self.outpath, ncfilename
                        ), "vs", os.path.join(self.baselinepath, ncfilename)
                        close = False
                    NC_CLOSE = NC_CLOSE and close
            assert NC_CLOSE, "NetCDF files are not close"

            # cleanup the temp files
            if GR_CLOSE and NC_CLOSE:
                shutil.rmtree(self.outpath)
Example #12
 def testShuffleDeflateFlags(self):
     cdms2.setNetcdfShuffleFlag(1)
     cdms2.setNetcdfDeflateFlag(1)
     cdms2.setNetcdfDeflateLevelFlag(4)
     self.assertEqual(cdms2.getNetcdfShuffleFlag(), 1)
     self.assertEqual(cdms2.getNetcdfDeflateFlag(), 1)
     self.assertEqual(cdms2.getNetcdfDeflateLevelFlag(), 4)
     cdms2.setNetcdfShuffleFlag(0)
     cdms2.setNetcdfDeflateFlag(0)
     self.assertEqual(cdms2.getNetcdfShuffleFlag(), 0)
     self.assertEqual(cdms2.getNetcdfDeflateFlag(), 0)
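Because the flags are global, tests like the one above can leak settings into later writes. A hedged sketch of a save-and-restore wrapper built on the getters this test exercises (netcdf_flags is a hypothetical helper, not part of cdms2):

import cdms2
from contextlib import contextmanager

@contextmanager
def netcdf_flags(shuffle, deflate, level):
    """Temporarily override the global NetCDF write flags, restoring them afterwards."""
    old = (cdms2.getNetcdfShuffleFlag(),
           cdms2.getNetcdfDeflateFlag(),
           cdms2.getNetcdfDeflateLevelFlag())
    cdms2.setNetcdfShuffleFlag(shuffle)
    cdms2.setNetcdfDeflateFlag(deflate)
    cdms2.setNetcdfDeflateLevelFlag(level)
    try:
        yield
    finally:
        cdms2.setNetcdfShuffleFlag(old[0])
        cdms2.setNetcdfDeflateFlag(old[1])
        cdms2.setNetcdfDeflateLevelFlag(old[2])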
Example #13
def compute_and_write_climatologies_keepvars( varkeys, reduced_variables, season, case='', variant='', path='' ):
    """Computes climatologies and writes them to a file.
    Inputs: varkeys, names of variables whose climatologies are to be computed
            reduced_variables, dict (key:rv) where key is a variable name and rv an instance
               of the class reduced_variable
            season: the season on which the climatologies will be computed
            variant: a string to be inserted in the filename"""
    # Compute the value of every variable we need.
    varvals = {}
    # First compute all the reduced variables
    # Probably this loop consumes most of the running time.  It's what has to read in all the data.
    for key in varkeys:
        if key in reduced_variables:
            varvals[key] = reduced_variables[key].reduce()

    for key in varkeys:
        if key in reduced_variables:
            var = reduced_variables[key]
            if varvals[key] is not None:
                if 'case' in var._file_attributes.keys():
                    case = var._file_attributes['case']+'_'
                    break

    logger.info("writing climatology file for %s %s %s ",case,variant,season)
    if variant!='':
        variant = variant+'_'
    logger.info('case: %s',case)
    logger.info('variant: %s', variant)
    logger.info('season: %s', season)
    filename = case + variant + season + "_climo.nc"
    # ...actually we want to write this to a full directory structure like
    #    root/institute/model/realm/run_name/season/
    value = 0
    cdms2.setNetcdfShuffleFlag(value) ## where value is either 0 or 1
    cdms2.setNetcdfDeflateFlag(value) ## where value is either 0 or 1
    cdms2.setNetcdfDeflateLevelFlag(value) ## where value is an integer between 0 and 9 inclusive

    g = cdms2.open( os.path.join(path,filename), 'w' )    # later, choose a better name and a path!
    store_provenance(g)
    for key in varkeys:
        if key in reduced_variables:
            var = reduced_variables[key]
            if varvals[key] is not None:
                varvals[key].id = var.variableid
                varvals[key].reduced_variable=varvals[key].id
                if hasattr(var,'units'):
                    varvals[key].units = var.units+'*'+var.units
                g.write(varvals[key])
                for attr,val in var._file_attributes.items():
                    if not hasattr( g, attr ):
                        setattr( g, attr, val )
    g.season = season
    g.close()
    return varvals,case
Example #14
def quickSave(data, name, path):
    # for netcdf3: set flags to 0
    cdms2.setNetcdfShuffleFlag(0) #1
    cdms2.setNetcdfDeflateFlag(0) #1
    cdms2.setNetcdfDeflateLevelFlag(0) #3

    outname=os.path.join(path, name)
    if os.path.exists(outname): os.remove(outname)
    fh = cdms2.open(outname, 'w')
    variable = cdms2.createVariable(data, id='data')
    fh.write(variable)
    fh.close()
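A hypothetical call to quickSave() above, writing a small numpy array to /tmp/demo.nc (array contents and paths are illustrative only):

import numpy
data = numpy.arange(12.0).reshape(3, 4)
quickSave(data, 'demo.nc', '/tmp')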
Example #15
    def setUp(self):
        """
        Move to a temporary directory before executing the test module.
        """
        self._tmpdir = tempfile.mkdtemp('.tmp', 'test_cdms')
        os.chdir(self._tmpdir)

        # Enter NetCDF4 mode for these tests
        #!TODO: magically deactivate test if compiled with NetCDF3
        cdms2.setNetcdfShuffleFlag(1)
        cdms2.setNetcdfDeflateFlag(1)
        cdms2.setNetcdfDeflateLevelFlag(0)
Example #16
def saveData(outfilename, data, typecode, id, fill_value, grid, copyaxes, attribute1, attribute2, latAxis, lonAxis):
    
    # netCDF4 compressed output (for netcdf3, set all three flags to 0)
    cdms2.setNetcdfShuffleFlag(1)
    cdms2.setNetcdfDeflateFlag(1)
    cdms2.setNetcdfDeflateLevelFlag(3)

    if os.path.exists(outfilename): os.remove(outfilename)
    outfile = cdms2.open( outfilename, 'w')
    var = cdms2.createVariable(data, typecode=typecode, id=id, fill_value=fill_value, grid=grid, copyaxes=copyaxes, attributes=dict(long_name=attribute1, units=attribute2) )
    var.setAxisList((latAxis, lonAxis))
    outfile.write(var)
    outfile.close()
Example #17
    def initCustomize(self, customPath, styles):
        if customPath is None:
            customPath = os.path.join(os.environ["HOME"], ".uvcdat",
                                      "customizeUVCDAT.py")

        if os.path.exists(customPath):
            execfile(customPath, customizeUVCDAT.__dict__,
                     customizeUVCDAT.__dict__)

        if styles is None:
            styles = customizeUVCDAT.appStyles

        icon = QtGui.QIcon(customizeUVCDAT.appIcon)
        self.setWindowIcon(icon)

        ## cdms2 setup section
        cdms2.axis.time_aliases += customizeUVCDAT.timeAliases
        cdms2.axis.level_aliases += customizeUVCDAT.levelAliases
        cdms2.axis.latitude_aliases += customizeUVCDAT.latitudeAliases
        cdms2.axis.longitude_aliases += customizeUVCDAT.longitudeAliases
        cdms2.setNetcdfShuffleFlag(customizeUVCDAT.ncShuffle)
        cdms2.setNetcdfDeflateFlag(customizeUVCDAT.ncDeflate)
        cdms2.setNetcdfDeflateLevelFlag(customizeUVCDAT.ncDeflateLevel)

        ## StylesSheet
        st = ""
        if isinstance(styles, str):
            st = styles
        elif isinstance(styles, dict):
            for k in styles.keys():
                val = styles[k]
                if isinstance(val, QtGui.QColor):
                    val = str(val.name())
                st += "%s:%s; " % (k, val)
        if len(st) > 0: self.setStyleSheet(st)

        ###########################################################
        ###########################################################
        ## Prettiness
        ###########################################################
        ###########################################################
        #self.setGeometry(0,0, 1100,800)
        self.setWindowTitle(
            'The Ultrascale Visualization Climate Data Analysis Tools - (UV-CDAT)'
        )
        ## self.resize(1100,800)
        #self.setMinimumWidth(1100)
        self.main_window_placement()
Example #18
    def initCustomize(self, customPath, styles):
        if customPath is None:
            customPath = os.path.join(os.environ["HOME"], ".uvcdat", "customizeUVCDAT.py")

        if os.path.exists(customPath):
            execfile(customPath, customizeUVCDAT.__dict__, customizeUVCDAT.__dict__)

        if styles is None:
            styles = customizeUVCDAT.appStyles

        icon = QtGui.QIcon(customizeUVCDAT.appIcon)
        self.setWindowIcon(icon)

        ## cdms2 setup section
        cdms2.axis.time_aliases += customizeUVCDAT.timeAliases
        cdms2.axis.level_aliases += customizeUVCDAT.levelAliases
        cdms2.axis.latitude_aliases += customizeUVCDAT.latitudeAliases
        cdms2.axis.longitude_aliases += customizeUVCDAT.longitudeAliases
        cdms2.setNetcdfShuffleFlag(customizeUVCDAT.ncShuffle)
        cdms2.setNetcdfDeflateFlag(customizeUVCDAT.ncDeflate)
        cdms2.setNetcdfDeflateLevelFlag(customizeUVCDAT.ncDeflateLevel)

        ## StylesSheet
        st = ""
        if isinstance(styles, str):
            st = styles
        elif isinstance(styles, dict):
            for k in styles.keys():
                val = styles[k]
                if isinstance(val, QtGui.QColor):
                    val = str(val.name())
                st += "%s:%s; " % (k, val)
        if len(st) > 0:
            self.setStyleSheet(st)

        ###########################################################
        ###########################################################
        ## Prettiness
        ###########################################################
        ###########################################################
        # self.setGeometry(0,0, 1100,800)
        self.setWindowTitle("The Ultrascale Visualization Climate Data Analysis Tools - (UV-CDAT)")
        ## self.resize(1100,800)
        # self.setMinimumWidth(1100)
        self.main_window_placement()
Example #19
    def execute(self, test_str, imagefilename, imagethreshold, ncfiles, rtol,
                atol):
        print test_str
        # Silence annoying messages about how to set the NetCDF file type.  Anything will do.
        cdms2.setNetcdfShuffleFlag(0)
        cdms2.setNetcdfDeflateFlag(0)
        cdms2.setNetcdfDeflateLevelFlag(0)

        # nonstandard, suitable for testing:
        proc = subprocess.Popen([self.diagstr], shell=True)
        proc_status = proc.wait()
        if proc_status != 0:
            raise DiagError("diags run failed")

        if self.keep:
            print "save ", imagefilename, ncfiles.keys()
            print "output directory is = ", self.outpath
        else:
            # Test of graphics (png) file match:
            # This just looks at combined plot, aka summary plot, which is a compound of three plots.

            imagefname = os.path.join(self.outpath, imagefilename)
            imagebaselinefname = os.path.join(self.baselinepath, imagefilename)
            graphics_result = checkimage.check_result_image(
                imagefname, imagebaselinefname, imagethreshold)
            print "Graphics file", imagefname, "match difference:", graphics_result

            # Test of NetCDF data (nc) file match:
            CLOSE = True
            for ncfilename, ncvars in ncfiles.items():
                for var in ncvars:
                    #print ncfilename, var
                    try:
                        close = self.closeness(var, ncfilename, rtol, atol)
                        if not close:
                            print var, ' in ', ncfilename, ' is not close.'
                    except:
                        print 'comparison failed for ', var, ' in file: ', ncfilename
                        close = False
                    CLOSE = CLOSE and close

            #cleanup the temp files
            shutil.rmtree(self.outpath)
            assert (CLOSE), 'data are not close'
Example #20
def save_ncfiles(set_num, test, ref, diff, parameter):
    """
    Saves the test, reference, and difference
    data being plotted as nc files.
    """
    if parameter.save_netcdf:
        # Save files being plotted
        # Set cdms preferences - no compression, no shuffling, no complaining
        cdms2.setNetcdfDeflateFlag(1)
        # 1-9, min to max - Comes at heavy IO (read/write time cost)
        cdms2.setNetcdfDeflateLevelFlag(0)
        cdms2.setNetcdfShuffleFlag(0)
        cdms2.setCompressionWarnings(0)  # Turn off warning messages

        pth = get_output_dir(set_num, parameter)

        # Save test file
        if test.id.startswith("variable_"):
            test.id = parameter.var_id
        test_pth = os.path.join(pth, parameter.output_file + "_test.nc")

        cdms_arg = "w"

        if Path(test_pth).is_file():
            cdms_arg = "a"

        with cdms2.open(test_pth, cdms_arg) as file_test:
            file_test.write(test)

        # Save reference file
        if ref.id.startswith("variable_"):
            ref.id = parameter.var_id
        ref_pth = os.path.join(pth, parameter.output_file + "_ref.nc")
        with cdms2.open(ref_pth, cdms_arg) as file_ref:
            file_ref.write(ref)

        # Save difference file
        if diff is not None:
            if diff.id.startswith("variable_"):
                diff.id = parameter.var_id + "_diff"
            diff_pth = os.path.join(pth, parameter.output_file + "_diff.nc")
            with cdms2.open(diff_pth, cdms_arg) as file_diff:
                file_diff.write(diff)
Example #21
 def nc(self):
     if self.netCDF3.isChecked():
         cdms2.useNetcdf3()
         self.ncShuffle.setEnabled(False)
         self.ncDeflate.setEnabled(False)
         self.ncDeflateLevel.setEnabled(False)
     else:
         self.ncShuffle.setEnabled(True)
         self.ncDeflate.setEnabled(True)
         self.ncDeflateLevel.setEnabled(True)
         if self.ncShuffle.isChecked():
             cdms2.setNetcdfShuffleFlag(1)
         else:
             cdms2.setNetcdfShuffleFlag(0)
         if self.ncDeflate.isChecked():
             cdms2.setNetcdfDeflateFlag(1)
         else:
             cdms2.setNetcdfDeflateFlag(0)
         cdms2.setNetcdfDeflateLevelFlag(self.ncDeflateLevel.value())
Example #23
 def execute(self, test_str, imagefilename, imagethreshold, ncfiles, rtol, atol):
     print test_str
     # Silence annoying messages about how to set the NetCDF file type.  Anything will do.
     cdms2.setNetcdfShuffleFlag(0)
     cdms2.setNetcdfDeflateFlag(0)
     cdms2.setNetcdfDeflateLevelFlag(0)
     
     # nonstandard, suitable for testing:
     proc = subprocess.Popen([self.diagstr], shell=True)
     proc_status = proc.wait()
     if proc_status!=0: 
         raise DiagError("diags run failed")
 
     if self.keep:
         print "save ", imagefilename, ncfiles.keys()
         print "output directory is = ", self.outpath
     else:    
         # Test of graphics (png) file match:
         # This just looks at combined plot, aka summary plot, which is a compound of three plots.
         
         imagefname = os.path.join( self.outpath, imagefilename )
         imagebaselinefname = os.path.join( self.baselinepath, imagefilename )
         graphics_result = checkimage.check_result_image( imagefname, imagebaselinefname, imagethreshold )
         print "Graphics file", imagefname, "match difference:", graphics_result
         
         # Test of NetCDF data (nc) file match:
         CLOSE = True
         for ncfilename, ncvars in ncfiles.items():
             for var in ncvars:
                 #print ncfilename, var
                 try:
                     close = self.closeness( var, ncfilename, rtol, atol )
                     if not close:
                         print var, ' in ', ncfilename, ' is not close.'
                 except:
                     print 'comparison failed for ', var, ' in file: ', ncfilename
                     close = False
                 CLOSE = CLOSE and close
                 
         #cleanup the temp files
         shutil.rmtree(self.outpath)
         assert(CLOSE), 'data are not close'
Example #24
def do_regrid(infileName, variable, outfileName, netcdfType=4):

    nodata = 1.e20

    if netcdfType==4:
        cdms2.setNetcdfShuffleFlag(1)
        cdms2.setNetcdfDeflateFlag(1)
        cdms2.setNetcdfDeflateLevelFlag(3)
    elif netcdfType==3:
        cdms2.setNetcdfShuffleFlag(0)
        cdms2.setNetcdfDeflateFlag(0)
        cdms2.setNetcdfDeflateLevelFlag(0)
    else:
        exitWM('Unknown netcdf type {0}. Exit 2.'.format(netcdfType),2)

    infile = cdms2.open(infileName)
    unitsVar = infile[variable].units
    (referenceGrid, latAxis, lonAxis, latBounds, lonBounds, lvl_bounds, lvl) = makeGrid()
    regridded = infile[variable][:].regrid(referenceGrid)

    outvar = cdms2.createVariable(regridded, typecode='f',
                                  id=variable, fill_value=nodata,
                                  grid=referenceGrid, copyaxes=1,
                                  attributes=dict(long_name='regridded {0}'.format(variable), units=unitsVar))
    #final = do_hyperInterp(regridded, infile[variable].getLevel()[:], lvl, nodata)
    #outvar = cdms2.createVariable(final, typecode='f', id=variable, fill_value=nodata, attributes=dict(long_name='regridded {0}'.format(variable), units=unitsVar) )


    #gridBis = regridded.subSlice(longitude=0).crossSectionRegrid(lvl, latAxis, method="linear")

    #zregrid = tmpvar.crossSectionRegrid(lvl)

    #outvar.setAxisList((latAxis, lonAxis))
    if os.path.exists(outfileName): os.remove(outfileName)
    outfile=cdms2.open(outfileName, 'w')
    outfile.write(outvar)
    outfile.history='Created with '+__file__.encode('utf8')
    outfile.close()
    infile.close()
Example #25
def doRegrid(infile, varname, outfile, lon, lat, lon_bnds, lat_bnds):

    cdms2.setNetcdfShuffleFlag(1)
    cdms2.setNetcdfDeflateFlag(1)
    cdms2.setNetcdfDeflateLevelFlag(2)

    fh = cdms2.open(infile)
    if fh is None:
        exitMessage("Could not open file {0}. Exit 2.".format(infile), 2)

    if varname not in fh.variables.keys():
        exitMessage('variable named '+varname+' could not be found. Exit 4.', 4)

    yVar = fh(varname)

    latAxis = cdms2.createAxis(lat, lat_bnds)
    latAxis.designateLatitude(True)
    latAxis.units = 'degree_north'
    latAxis.long_name = 'Latitude'

    lonAxis = cdms2.createAxis(lon, lon_bnds)
    lonAxis.designateLongitude(True, 360.0)
    lonAxis.units = 'degree_east'
    lonAxis.long_name='Longitude'

    listAxisOrg = yVar.getAxisList()
    timeAxis = listAxisOrg[0]
    

    grid = cdms2.createGenericGrid(latAxis, lonAxis, lat_bnds, lon_bnds)
    regridded = yVar.regrid(grid)

    g=cdms2.open(outfile, 'w')
    
    #g.write(regridded, None, None, None, varname, None, 1.e20, None, cdms2.CdFloat)
    temp1 = cdms2.createVariable(regridded, typecode='f', id=varname, fill_value=1.e20, axes=[timeAxis, latAxis, lonAxis], copyaxes=0, attributes=dict(long_name=yVar.long_name, units=yVar.units) )
    g.write(temp1)
    g.close()
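For reference, a sketch of how the lat/lon inputs that doRegrid() expects might be built (a 2-degree global grid with explicit cell bounds; the values and file/variable names are illustrative, not from the original project):

import numpy

lat = numpy.arange(-89.0, 90.0, 2.0)
lat_bnds = numpy.column_stack((lat - 1.0, lat + 1.0))   # (N, 2) cell bounds
lon = numpy.arange(1.0, 360.0, 2.0)
lon_bnds = numpy.column_stack((lon - 1.0, lon + 1.0))

doRegrid('in.nc', 'tas', 'out.nc', lon, lat, lon_bnds, lat_bnds)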
Example #26
 def setup_cdms2(self):
     cdms2.setNetcdfShuffleFlag(0)  # Argument is either 0 or 1
     cdms2.setNetcdfDeflateFlag(0)  # Argument is either 0 or 1
     cdms2.setNetcdfDeflateLevelFlag(0)  # Argument is int between 0 and 9
Example #27
import cdms2

######################################
value = 0
cdms2.setNetcdfShuffleFlag(value)
cdms2.setNetcdfDeflateFlag(value)
cdms2.setNetcdfDeflateLevelFlag(value)
######################################

### EXECUTE MODULE WITH VARIOUS FUNCTIONS
execfile('modules_and_functions/misc_module.py')
execfile('modules_and_functions/getOurModelData.py')

### OPTIONS FOR REGRIDDING: METHOD AND TARGET GRID
exp = 'cmip5'
rgridMeth = 'regrid2'
targetGrid = '4x5'
targetGrid = '2.5x2.5'

### OUTPUT DIRECTORY
outdir = '/work/metricspackage/130522/data/inhouse_model_clims/samplerun/atm/mo/ac/'
## SEE END OF THIS CODE FOR OUTPUT FILENAMES

### VARIABLES TO LOOP OVER (NAMES ASSUMED TO BE CONSISTENT WITH CMIP5)
vars = ['rlut', 'pr']

######################################

############# GET OBS TARGET GRID
obsg = get_target_grid(targetGrid)
#############
Example #28
def monthlyAvg(variable, indir, outdir, minYear=2006, maxYear=2059):
    nodata=1.e20
    minVar=273.15 - 40
    maxVar=273.15 + 100
    unitsAvg=None
    # assume data are aligned
    pattern=re.compile('.*_BNU-ESM_.*') # problem on this grid (use shiftGrid to create a new version, discard this one).
    # maskpattern = re.compile('.*EC-EARTH.*') # nodata was set to 273.15
    
    # netCDF4 compressed output (for netcdf3, set flags to 0)
    cdms2.setNetcdfShuffleFlag(1)
    cdms2.setNetcdfDeflateFlag(1)
    cdms2.setNetcdfDeflateLevelFlag(3)

    print minYear, maxYear

    dateList=[]
    for iyear in range(minYear, maxYear+1):
        for imonth in range(1,13):
            dateList.append('{0}{1:02}'.format(iyear,imonth))

    for idate in dateList:
        print 'processing date ',idate
        # get list of files for this iyear, excluding one file:
        lstFiles = [f for f in glob.glob(indir+'/{0}_*{2}*_{1}.nc'.format(variable, idate,select)) if not pattern.match(f) ]
        print indir+'/{0}_*{2}*_{1}.nc'.format(variable, idate,select)

        if len(lstFiles) > 1:
            print 'Model ensemble mean for date {0} with {1} files'.format(idate, len(lstFiles))
            # accumulate data
            accumVar=None
            accumN=None

            for iFile in lstFiles:
                print 'processing file ', iFile
                thisFile = cdms2.open(iFile)
                dimVar = numpy.squeeze(thisFile[variable][:]).shape # remove time-single dimension if exists
                thisVar = numpy.ravel(thisFile[variable][:])
 
                if accumVar is None:
                    accumVar  = numpy.zeros( dimVar[0]*dimVar[1] ) + nodata
                    accumN    = numpy.zeros( dimVar[0]*dimVar[1] )
                    unitsAvg  = thisFile[variable].units
                    oneMatrix = numpy.ones(dimVar[0]*dimVar[1])
                    maxEnsemble = thisVar.copy()
                    minEnsemble = thisVar.copy()

                # add to accumVar if accumVar is not no-data, and incoming data are in the range
                wtadd = (thisVar >= minVar ) * (thisVar <= maxVar) * (accumVar < nodata)
                # if the value in accumVar is no-data, replace it.
                wtreplace = (thisVar >= minVar ) * (thisVar <= maxVar) * ( accumVar >= nodata)
                # min, max
                wmax = (thisVar >= maxEnsemble) * (thisVar < nodata) * (thisVar >= minVar) * (thisVar <= maxVar)
                wmaxReplace = (maxEnsemble >= nodata) * (thisVar < nodata) * (thisVar >= minVar)
                wmin = (thisVar <= minEnsemble) * (thisVar >= minVar) * (thisVar <= maxVar) * (maxEnsemble < nodata)
                wminReplace = (minEnsemble >= nodata) * (thisVar < nodata) * (thisVar >= minVar)
                if wtadd.any():
                    accumVar[wtadd] = accumVar[wtadd] + thisVar[wtadd]
                    accumN[wtadd] = accumN[wtadd] + oneMatrix[wtadd]
                if wtreplace.any():
                    accumVar[wtreplace] = thisVar[wtreplace]
                    accumN[wtreplace] = oneMatrix[wtreplace]
                if wmax.any():
                    maxEnsemble[wmax] = thisVar[wmax]
                if wmin.any():
                    minEnsemble[wmin] = thisVar[wmin]
                if wmaxReplace.any():
                    maxEnsemble[wmaxReplace] = thisVar[wmaxReplace]
                if wminReplace.any():
                    minEnsemble[wminReplace] = thisVar[wminReplace]

                thisFile.close()

            # now compute the average, where accumN is not 0
            wnz = accumN > 0
            average = numpy.zeros(dimVar[0] * dimVar[1]) + nodata
            if wnz.any():
                average[wnz] = accumVar[wnz] / accumN[wnz]

            # and let's compute the std
            std = numpy.zeros(dimVar[0] * dimVar[1])
            stdN = numpy.zeros(dimVar[0] * dimVar[1])
            for iFile in lstFiles:
                thisFile = cdms2.open(iFile)
                thisVar = numpy.ravel(thisFile[variable][:])
                wtadd = (thisVar < nodata ) * (average < nodata ) * (thisVar >= minVar) * (thisVar <= maxVar) # average should be clean, no need to implement a 'replace'
                if wtadd.any():
                    # accumulate squared deviations across the ensemble
                    std[wtadd] = std[wtadd] + (average[wtadd] - thisVar[wtadd]) * (average[wtadd] - thisVar[wtadd])
                    stdN[wtadd] = stdN[wtadd] + 1.0
                thisFile.close()

            wtstd = stdN > 0
            std[wtstd] = numpy.sqrt( std[wtstd]/stdN[wtstd] )
            std[~wtstd] = nodata  # flag cells with no valid contribution

            # save to disk
            outfilename='{0}/modelmean_{1}_{2}.nc'.format(outdir, variable, idate)
            (referenceGrid, latAxis, lonAxis, latBounds, lonBounds) = makeGrid()
            avgOut = cdms2.createVariable(numpy.reshape(average,dimVar), typecode='f', id=variable, fill_value=1.e20, grid=referenceGrid, copyaxes=1, attributes=dict(long_name='model average for {0} at date {1}'.format(variable, idate), units=unitsAvg))
            avgOut.setAxisList((latAxis, lonAxis))

            accumOut = cdms2.createVariable(numpy.reshape(accumN,dimVar), typecode='i', id='count_{0}'.format(variable), fill_value=1.e20, grid=referenceGrid, copyaxes=1, attributes=dict(long_name='count of valid for {0} at date {1}'.format(variable, idate), units=None))
            accumOut.setAxisList((latAxis, lonAxis))

            maxEns = cdms2.createVariable(numpy.reshape(maxEnsemble,dimVar), typecode='f', id='max_{0}'.format(variable), fill_value=1.e20, grid=referenceGrid, copyaxes=1, attributes=dict(long_name='max ensemble for {0} at date {1}'.format(variable, idate), units=unitsAvg))
            maxEns.setAxisList((latAxis, lonAxis))

            minEns = cdms2.createVariable(numpy.reshape(minEnsemble,dimVar), typecode='f', id='min_{0}'.format(variable), fill_value=1.e20, grid=referenceGrid, copyaxes=1, attributes=dict(long_name='min ensemble for {0} at date {1}'.format(variable, idate), units=unitsAvg))
            minEns.setAxisList((latAxis, lonAxis))

            stdVar = cdms2.createVariable(numpy.reshape(std,dimVar), typecode='f', id='std_{0}'.format(variable), fill_value=1.e20, grid=referenceGrid, copyaxes=1, attributes=dict(long_name='model std for {0} at date {1}'.format(variable, idate), units=unitsAvg))
            stdVar.setAxisList((latAxis, lonAxis))

            if os.path.exists(outfilename): os.remove(outfilename)
            print 'saving to file ', outfilename
            outfile = cdms2.open(outfilename, 'w')
            outfile.write(avgOut)
            outfile.write(accumOut)
            outfile.write(minEns)
            outfile.write(maxEns)
            outfile.write(stdVar)
            outfile.history='Created with '+__file__.encode('utf8')
            outfile.close()
Example #29
def surfTransf(fileFx, fileTos, fileSos, fileHef, fileWfo, varNames, outFile, debug=True, timeint='all',noInterp=False, domain='global'):
    '''
    The surfTransf() function takes files and variable arguments and creates
    density bined surface transformation fields which are written to a specified outfile
    Author:    Eric Guilyardi : [email protected]
    Co-author: Paul J. Durack : [email protected] : @durack1.
    
    Created on Wed Oct  8 09:15:59 CEST 2014

    Inputs:
    ------
    - fileTos(time,lat,lon)     - 3D SST array
    - fileSos(time,lat,lon)     - 3D SSS array
    - fileHef(time,lat,lon)     - 3D net surface heat flux array
    - fileWfo(time,lat,lon)     - 3D fresh water flux array
    - fileFx(lat,lon)           - 2D array containing the cell area values
    - varNames[4]               - 1D array containing the names of the variables
    - outFile(str)              - output file with full path specified.
    - debug <optional>          - boolean value
    - timeint <optional>        - specify temporal step for binning <init_idx>,<ncount>
    - noInterp <optional>       - if true no interpolation to target grid
    - domain <optional>         - specify domain for averaging when interpolated to WOA grid ('global','north',
                                  'north40', 'south' for now)

    Outputs:
    --------
    - netCDF file with monthly surface rhon, density fluxes, transformation (global and per basin)
    - use cdo yearmean to compute annual mean

    Usage:
    ------
    '>>> from binDensity import surfTransf
    '>>> surfTransf(file_fx, file_tos, file_sos, file_hef, file_wfo, [var1,var2,var3,var4], './output.nc', debug=True, timeint='all')

    Notes:
    -----
    - EG   8 Oct 2014   - Initial function write and tests ok
    - PJD 22 Nov 2014   - Code cleanup
    - EG   4 Oct 2017   - code on ciclad, more cleanup and options
    - EG  12 Sep 2018   - Add North vs. South calculation

    '''
    # Keep track of time (CPU and elapsed)
    cpu0 = timc.clock()
    #
    # netCDF compression (use 0 for netCDF3)
    comp = 1
    cdm.setNetcdfShuffleFlag(comp)
    cdm.setNetcdfDeflateFlag(comp)
    cdm.setNetcdfDeflateLevelFlag(comp)
    cdm.setAutoBounds('on')
    # 
    # == Inits
    #
    npy.set_printoptions(precision = 2)
    # Determine file name from inputs
    modeln = fileTos.split('/')[-1].split('.')[1]
    #
    if debug:
        print ' Debug - File names:'
        print '    ', fileTos
        print '    ', fileSos
        debugp = True
    else:
        debugp = False
    #
    # Open files
    ftos  = cdm.open(fileTos)
    fsos  = cdm.open(fileSos)
    fhef  = cdm.open(fileHef)
    fwfo  = cdm.open(fileWfo)
    #timeax = ftos.getAxis('time')
    timeax = ftos.getAxis('time_counter')
    #print 'timeax'
    #print timeax
    #
    # Dates to read
    if timeint == 'all':
        tmin = 0
        tmax = timeax.shape[0]
        timeaxis = timeax
    else:
        tmin = int(timeint.split(',')[0]) - 1
        tmax = tmin + int(timeint.split(',')[1])
        # update time axis
        timeaxis   = cdm.createAxis(timeax[tmin:tmax])
        timeaxis.id       = 'time'
        timeaxis.units    = timeax.units
        timeaxis.designateTime()
        #print timeaxis

    if debugp:
        print; print ' Debug mode'
 
    # Read file attributes to carry on to output files
    list_file   = ftos.attributes.keys()
    file_dic    = {}
    for i in range(0,len(list_file)):
        file_dic[i]=list_file[i],ftos.attributes[list_file[i] ]
    #
    # Read data
        
    # varnames
    tos_name = varNames[0]
    sos_name = varNames[1]
    hef_name = varNames[2]
    wfo_name = varNames[3]

    if debugp:
        print ' Read ', tos_name, sos_name, tmin, tmax
    tos = ftos(tos_name , time = slice(tmin,tmax))
    sos = fsos(sos_name , time = slice(tmin,tmax))
    if debugp:
        print ' Read ', hef_name, wfo_name
    qnet = fhef(hef_name, time = slice(tmin,tmax))
    try:
        emp  = fwfo(wfo_name , time = slice(tmin,tmax))
        empsw = 0
    except Exception,err:
        emp  = fwfo('wfos' , time = slice(tmin,tmax))
        print ' Reading concentration dilution fresh water flux'
        empsw = 0
Example #30
import os, sys
#import socket
import argparse
import string
import numpy as npy
import numpy.ma as ma
import cdms2 as cdm  # needed for the NetCDF flag calls below
import cdutil as cdu
from genutil import statistics
import support_density as sd
import time as timc
import timeit
#import matplotlib.pyplot as plt
#
# netCDF compression (use 0 for netCDF3)
comp = 0
cdm.setNetcdfShuffleFlag(comp)
cdm.setNetcdfDeflateFlag(comp)
cdm.setNetcdfDeflateLevelFlag(comp)
cdm.setAutoBounds('on')
#
# == Arguments
#
# 
# == Inits
#
home   = os.getcwd()
outdir = os.getcwd()
hist_file_dir=home
#
# == Arguments
#
Example #31
from pywps.Process import WPSProcess
import os,numpy,sys
import logging, json
import cdms2
import random
from pywps import config
import ConfigParser
import warnings  # used below if wps.cfg cannot be read
# Path where output will be stored/cached

cdms2.setNetcdfShuffleFlag(0) ## where value is either 0 or 1
cdms2.setNetcdfDeflateFlag(0) ## where value is either 0 or 1
cdms2.setNetcdfDeflateLevelFlag(0) ## where value is an integer between 0 and 9 inclusive

wps_config = ConfigParser.ConfigParser()
wps_config.read(os.path.join(os.path.dirname(__file__),"..","wps.cfg"))
try:
    DAP_DATA = wps_config.get("dapserver","dap_data")
except:
    warnings.warn("Could not READ DAP_DATA from wps.cfg will store files in /tmp")
    DAP_DATA = "/tmp"
try:
    DAP_INI = wps_config.get("dapserver","dap_ini")
except:
    DAP_INI = None
try:
    DAP_HOST = wps_config.get("dapserver","dap_host")
except:
    DAP_HOST = None
try:
    DAP_PORT = wps_config.get("dapserver","dap_port")
except:
    DAP_PORT = None
Example #32
#!/usr/local/cdat5.2/bin/python
"""Module for computing temperature extreme stats mostly using CDO utilities"""

from sys import exit
from os import path, system, mkdir
from cdms2 import setNetcdfShuffleFlag, setNetcdfDeflateLevelFlag, setNetcdfDeflateFlag
from string import split
from datetime import datetime
from daily_stats_cdms_utils import MosaicFiles

setNetcdfShuffleFlag(0)
setNetcdfDeflateFlag(0)
setNetcdfDeflateLevelFlag(0)

# needed for computed average temperatures
RootDir = '/mnt/BCSD'

OUTROOT = '/mnt/out_stats'
if not path.isdir(OUTROOT):
    mkdir(OUTROOT)

OUTTEMP = '/home/edarague'
if not path.isdir(OUTTEMP):
    mkdir(OUTTEMP)

# added as global institution attribute to output files
txtinst = "Santa Clara U.,Climate Central,The Nature Conservancy"

# input files are on 0->360 longitude convention. To switch to a -180->180 grid:
# cdo sellonlatbox,-180,180,-90,90 ifile ofile
# which works for global domains only. For smaller domains:
Example #33
    def processCmdLine(self):
        parser = argparse.ArgumentParser(
            description='UV-CDAT Climate Modeling Diagnostics',
            usage='%(prog)s --path1 [options]')

        parser.add_argument(
            '--path',
            '-p',
            action='append',
            nargs=1,
            help=
            "Path(s) to dataset(s). This is required.  If two paths need different filters, set one here and one in path2."
        )
        parser.add_argument('--path2',
                            '-q',
                            action='append',
                            nargs=1,
                            help="Path to a second dataset.")
        parser.add_argument('--obspath',
                            action='append',
                            nargs=1,
                            help="Path to an observational dataset")
        parser.add_argument(
            '--cachepath',
            nargs=1,
            help="Path for temporary and cachced files. Defaults to /tmp")
        #      parser.add_argument('--realm', '-r', nargs=1, choices=self.realm_types,
        #         help="The realm type. Current valid options are 'land' and 'atmosphere'")
        parser.add_argument(
            '--filter',
            '-f',
            nargs=1,
            help=
            "A filespec filter. This will be applied to the dataset path(s) (--path option) to narrow down file choices."
        )
        parser.add_argument(
            '--filter2',
            '-g',
            nargs=1,
            help=
            "A filespec filter. This will be applied to the second dataset path (--path2 option) to narrow down file choices."
        )
        parser.add_argument(
            '--new_filter',
            '-F',
            action='append',
            nargs=1,
            help=
            "A filespec filter. This will be applied to the corresponding dataset path to narrow down file choices."
        )
        parser.add_argument(
            '--packages',
            '--package',
            '-k',
            nargs='+',
            help=
            "The diagnostic packages to run against the dataset(s). Multiple packages can be specified."
        )
        parser.add_argument(
            '--sets',
            '--set',
            '-s',
            nargs='+',
            help=
            "The sets within a diagnostic package to run. Multiple sets can be specified. If multiple packages were specified, the sets specified will be searched for in each package"
        )
        parser.add_argument(
            '--vars',
            '--var',
            '-v',
            nargs='+',
            help=
            "Specify variables of interest to process. The default is all variables which can also be specified with the keyword ALL"
        )
        parser.add_argument(
            '--list',
            '-l',
            nargs=1,
            choices=[
                'sets', 'vars', 'variables', 'packages', 'seasons', 'regions',
                'translations', 'options'
            ],
            help=
            "Determine which packages, sets, regions, variables, and variable options are available"
        )
        # maybe eventually add compression level too....
        parser.add_argument(
            '--compress',
            nargs=1,
            choices=['no', 'yes'],
            help=
            "Turn off netCDF compression. This can be required for other utilities to be able to process the output files (e.g. parallel netCDF based tools"
        )  #no compression, add self state

        parser.add_argument(
            '--outputpre',
            nargs=1,
            help=
            "Specify an output filename prefix to be prepended to all file names created internally. For example --outputpre myout might generate myout-JAN.nc, etc"
        )
        parser.add_argument(
            '--outputpost',
            nargs=1,
            help=
            "Specify an output filename postfix to be appended to all file names created internally. For example --outputpost _OBS might generate set1-JAN_OBS.nc, etc"
        )
        parser.add_argument(
            '--outputdir',
            '-O',
            nargs=1,
            help="Directory in which output files will be written.")

        parser.add_argument(
            '--seasons',
            nargs='+',
            choices=all_seasons,
            help="Specify which seasons to generate climatoogies for")
        parser.add_argument(
            '--years',
            nargs='+',
            help="Specify which years to include when generating climatologies"
        )
        parser.add_argument(
            '--months',
            nargs='+',
            choices=all_months,
            help="Specify which months to generate climatologies for")
        parser.add_argument(
            '--climatologies',
            '-c',
            nargs=1,
            choices=['no', 'yes'],
            help="Specifies whether or not climatologies should be generated")
        parser.add_argument(
            '--plots',
            '-t',
            nargs=1,
            choices=['no', 'yes'],
            help="Specifies whether or not plots should be generated")
        parser.add_argument('--plottype', nargs=1)
        parser.add_argument(
            '--precomputed',
            nargs=1,
            choices=['no', 'yes'],
            help=
            "Specifies whether standard climatologies are stored with the dataset (*-JAN.nc, *-FEB.nc, ... *-DJF.nc, *-year0.nc, etc"
        )
        parser.add_argument(
            '--json',
            '-j',
            nargs=1,
            choices=['no', 'yes'],
            help=
            "Produce JSON output files as part of climatology/diags generation"
        )  # same
        parser.add_argument(
            '--netcdf',
            '-n',
            nargs=1,
            choices=['no', 'yes'],
            help=
            "Produce NetCDF output files as part of climatology/diags generation"
        )  # same
        parser.add_argument(
            '--xml',
            '-x',
            nargs=1,
            choices=['no', 'yes'],
            help=
            "Produce XML output files as part of climatology/diags generation")
        parser.add_argument(
            '--seasonally',
            action='store_true',
            help=
            "Produce climatologies for all of the defined seasons. To get a list of seasons, run --list seasons"
        )
        parser.add_argument(
            '--monthly',
            action='store_true',
            help="Produce climatologies for all predefined months")
        parser.add_argument(
            '--yearly',
            action='store_true',
            help="Produce annual climatogolies for all years in the dataset")
        parser.add_argument(
            '--timestart',
            nargs=1,
            help=
            "Specify the starting time for the dataset, such as 'months since Jan 2000'"
        )
        parser.add_argument('--timebounds',
                            nargs=1,
                            choices=['daily', 'monthly', 'yearly'],
                            help="Specify the time boudns for the dataset")
        parser.add_argument(
            '--verbose',
            '-V',
            action='count',
            help=
            "Increase the verbosity level. Each -v option increases the verbosity more."
        )  # count
        parser.add_argument(
            '--name',
            action='append',
            nargs=1,
            help="Specify option names for the datasets for plot titles, etc"
        )  #optional name for the set
        # This will be the standard list of region names NCAR has
        parser.add_argument(
            '--regions',
            '--region',
            nargs='+',
            choices=all_regions.keys(),
            help=
            "Specify a geographical region of interest. Note: Multi-word regions need quoted, e.g. 'Central Canada'"
        )
        parser.add_argument('--starttime',
                            nargs=1,
                            help="Specify a start time in the dataset")
        parser.add_argument('--endtime',
                            nargs=1,
                            help="Specify an end time in the dataset")
        parser.add_argument(
            '--translate',
            nargs='?',
            default='y',
            help=
            "Enable translation for obs sets to datasets. Optional provide a colon separated input to output list e.g. DSVAR1:OBSVAR1"
        )
        parser.add_argument('--varopts',
                            nargs='+',
                            help="Variable auxillary options")

        args = parser.parse_args()

        if (args.list != None):
            if args.list[0] == 'translations':
                print "Default variable translations: "
                self.listTranslations()
                quit()
            if args.list[0] == 'regions':
                print "Available geographical regions: ", all_regions.keys()
                quit()

            if args.list[0] == 'seasons':
                print "Available seasons: ", all_seasons
                quit()

            if args.list[0] == 'packages':
                print "Listing available packages:"
                print self.all_packages.keys()
                quit()

            if args.list[0] == 'sets':
                if args.packages == None:
                    print "Please specify package before requesting available diags sets"
                    quit()
                for p in args.packages:
                    print 'Available sets for package ', p, ':'
                    sets = self.listSets(p)
                    keys = sets.keys()
                    for k in keys:
                        print 'Set', k, ' - ', sets[k]
                quit()

            if args.list[0] == 'variables' or args.list[0] == 'vars':
                if args.path != None:
                    for i in args.path:
                        self._opts['path'].append(i[0])
                else:
                    print 'Must provide a dataset when requesting a variable listing'
                    quit()
                self.listVariables(args.packages, args.sets)
                quit()
            if args.list[0] == 'options':
                if args.path != None:
                    for i in args.path:
                        self._opts['path'].append(i[0])
                else:
                    print 'Must provide a dataset when requesting a variable options listing'
                    quit()
                self.listVarOptions(args.packages, args.sets, args.vars)
                quit()

        # Generally if we've gotten this far, it means no --list was specified. If we don't have
        # at least a path, we should exit.
        if (args.path != None):
            for i in args.path:
                self._opts['path'].append(i[0])
        else:
            print 'Must specify a path or the --list option at a minimum.'
            print 'For help, type "diags --help".'
            quit()
        if (args.path2 != None):
            for i in args.path2:
                self._opts['path2'].append(i[0])

        if (args.obspath != None):
            for i in args.obspath:
                self._opts['obspath'].append(i[0])

        # TODO: Should some pre-defined filters be "nameable" here?
        if (args.filter !=
                None):  # Only supports one filter argument, see filter2.
            self._opts['filter'] = args.filter[0]
            self._opts['user_filter'] = True


#         for i in args.filter:
#            self._opts['filter'].append(i[0])
        if (args.filter2 != None):  # This is a second filter argument.
            self._opts['filter2'] = args.filter2[0]
            self._opts['user_filter'] = True
        if (args.new_filter !=
                None):  # like filter but with multiple arguments
            for i in args.new_filter:
                self._opts['new_filter'].append(i[0])

        if (args.cachepath != None):
            self._opts['cachepath'] = args.cachepath[0]

        self._opts['seasonally'] = args.seasonally
        self._opts['monthly'] = args.monthly

        if (args.starttime != None):
            self._opts['start'] = args.starttime[0]

        if (args.endtime != None):
            self._opts['end'] = args.endtime[0]

        # I checked; these are global and it doesn't seem to matter if you import cdms2 multiple times;
        # they are still set after you set them once in the python process.
        if (args.compress != None):
            if (args.compress[0] == 'no'):
                self._opts['compress'] = False
            else:
                self._opts['compress'] = True

        if self._opts['compress'] == True:
            print 'Enabling compression for output netCDF files'
            cdms2.setNetcdfShuffleFlag(1)
            cdms2.setNetcdfDeflateFlag(1)
            cdms2.setNetcdfDeflateLevelFlag(9)
        else:
            print 'Disabling compression for output netCDF files'
            cdms2.setNetcdfShuffleFlag(0)
            cdms2.setNetcdfDeflateFlag(0)
            cdms2.setNetcdfDeflateLevelFlag(0)
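
        # Note: cdms2 also provides matching getters (getNetcdfShuffleFlag,
        # getNetcdfDeflateFlag, getNetcdfDeflateLevelFlag) that can be used
        # to inspect the current process-wide settings before changing them.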

        if (args.json != None):
            if (args.json[0] == 'no'):
                self._opts['json'] = False
            else:
                self._opts['json'] = True
        if (args.xml != None):
            if (args.xml[0] == 'no'):
                self._opts['xml'] = False
            else:
                self._opts['xml'] = True

        if (args.netcdf != None):
            if (args.netcdf[0] == 'no'):
                self._opts['netcdf'] = False
            else:
                self._opts['netcdf'] = True

        if (args.plots != None):
            if (args.plots[0].lower() == 'no' or args.plots[0] == 0):
                self._opts['plots'] = False
            else:
                self._opts['plots'] = True

        if (args.climatologies != None):
            if (args.climatologies[0] == 'no'):
                self._opts['climatologies'] = False
            else:
                self._opts['climatologies'] = True

        self._opts['verbose'] = args.verbose

        if (args.name != None):
            for i in args.name:
                self._opts['dsnames'].append(i[0])

        # Help create output file names
        if (args.outputpre != None):
            self._opts['outputpre'] = args.outputpre[0]
        if (args.outputpost != None):
            self._opts['outputpost'] = args.outputpost[0]

        # Output directory
        if (args.outputdir != None):
            if not os.path.isdir(args.outputdir[0]):
                print "ERROR, output directory", args.outputdir[
                    0], "does not exist!"
                quit()
            self._opts['outputdir'] = args.outputdir[0]

        if (args.translate != 'y'):
            print args.translate
            print self._opts['translate']
            quit()
        # Timestart assumes a string like "months since 2000". I can't find documentation on
        # toRelativeTime() so I have no idea how to check for valid input
        # This is required for some of the land model sets I've seen
        if (args.timestart != None):
            self._opts['reltime'] = args.timestart

        # cdutil.setTimeBounds{bounds}(variable)
        if (args.timebounds != None):
            self._opts['bounds'] = args.timebounds
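        # The 'daily'/'monthly'/'yearly' choices presumably map downstream to
        # the cdutil helpers cdutil.setTimeBoundsDaily/Monthly/Yearly(variable)
        # referenced in the comment above.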

        # Check if a user specified package actually exists
        # Note: This is case sensitive.....
        if (args.packages != None):
            plist = []
            for x in args.packages:
                if x.upper() in self.all_packages.keys():
                    plist.append(x)
                elif x in self.all_packages.keys():
                    plist.append(x.lower())

            if plist == []:
                print 'Package name(s) ', args.packages, ' not valid'
                print 'Valid package names: ', self.all_packages.keys()
                quit()
            else:
                self._opts['packages'] = plist

        # TODO: Requires exact case; probably make this more user friendly and look for mixed case
        if (args.regions != None):
            rlist = []
            for x in args.regions:
                if x in all_regions.keys():
                    rlist.append(x)
            print 'REGIONS: ', rlist
            self._opts['regions'] = rlist

        # Given user-selected packages, check for user specified sets
        # Note: If multiple packages have the same set names, then they are all added to the list.
        # This might be bad since there is no differentiation of lmwg['id==set'] and lmwg2['id==set']
        if (self._opts['packages'] == None and args.sets != None):
            print 'No package specified'
            self._opts['sets'] = args.sets

        if (args.sets != None and self._opts['packages'] != None):
            # unfortunately, we have to go through all of this....
            # there should be a non-init of the class method to list sets/packages/etc,
            # ie a dictionary perhaps?
            sets = []
            import metrics.fileio.filetable as ft
            import metrics.fileio.findfiles as fi
            import metrics.packages.diagnostic_groups
            package = self._opts['packages']
            if package[0].lower() == 'lmwg':
                import metrics.packages.lmwg.lmwg
            elif package[0].lower() == 'amwg':
                import metrics.packages.amwg.amwg
            dtree = fi.dirtree_datafiles(self, pathid=0)
            filetable = ft.basic_filetable(dtree, self)
            dm = metrics.packages.diagnostic_groups.diagnostics_menu()

            pclass = dm[package[0].upper()]()

            slist = pclass.list_diagnostic_sets()
            keys = slist.keys()
            keys.sort()
            for k in keys:
                fields = k.split()
                for user in args.sets:
                    if user == fields[0]:
                        sets.append(user)
            self._opts['sets'] = sets
            if sets != args.sets:
                print 'sets requested ', args.sets
                print 'sets available: ', slist
                exit(1)

        # check for some varopts first.
        if (args.varopts != None):
            self._opts['varopts'] = args.varopts
        # Add some hackery here to convert pressure level vars to var+varopts
        if args.vars != None:
            self._opts['vars'] = args.vars

            vpl = ['Z3_300', 'Z3_500', 'U_200', 'T_200', 'T_850']
            vl = list(set(args.vars) - set(vpl))
            if vl == args.vars:  # no pressure level vars made it this far.
                print 'No pressure level vars found in input vars list.'
            else:  # more complicated....
                print 'Pressure level vars found in input vars list.... Processing....'
                vopts = []
                if self._opts['varopts'] != [] and self._opts[
                        'varopts'] != None:  # hopefully the user didn't also specify varopts....
                    print 'User passed in varopts but there are pressure-level variables in the vars list.'
                    print 'This will append the pressure levels found to the varopts array'
                    # see which pressure level vars were passed. this will be the super set of pressure levels.
                if 'Z3_300' in self._opts['vars']:
                    vopts.append('300')
                    self._opts['vars'] = [
                        x.replace('Z3_300', 'Z3') for x in self._opts['vars']
                    ]
                if 'Z3_500' in self._opts['vars']:
                    vopts.append('500')
                    self._opts['vars'] = [
                        x.replace('Z3_500', 'Z3') for x in self._opts['vars']
                    ]
                if 'T_200' in self._opts['vars']:
                    vopts.append('200')
                    self._opts['vars'] = [
                        x.replace('T_200', 'T') for x in self._opts['vars']
                    ]
                if 'T_850' in self._opts['vars']:
                    vopts.append('850')
                    self._opts['vars'] = [
                        x.replace('T_850', 'T') for x in self._opts['vars']
                    ]
                if 'U_200' in self._opts['vars']:
                    vopts.append('200')
                    self._opts['vars'] = [
                        x.replace('U_200', 'U') for x in self._opts['vars']
                    ]
                vopts = list(set(vopts))
                if self._opts['varopts'] == [] or self._opts['varopts'] == None:
                    self._opts['varopts'] = vopts
                else:
                    self._opts['varopts'].extend(vopts)
                    self._opts['varopts'] = list(set(self._opts['varopts']))
                print 'Updated vars list: ', self._opts['vars']

        # If --yearly is set, then we will add 'ANN' to the list of climatologies
        if (args.yearly == True):
            self._opts['yearly'] = True
            self._opts['times'].append('ANN')

        # If --monthly is set, we add all months to the list of climatologies
        if (args.monthly == True):
            self._opts['monthly'] = True
            self._opts['times'].extend(all_months)

        # If --seasonally is set, we add all 4 seasons to the list of climatologies
        if (args.seasonally == True):
            self._opts['seasonally'] = True
            self._opts['times'].extend(all_seasons)

        # This allows specific individual months to be added to the list of climatologies
        if (args.months != None):
            if (args.monthly == True):
                print "Please specify just one of --monthly or --months"
                quit()
            else:
                mlist = [x for x in all_months if x in args.months]
                self._opts['times'] = self._opts['times'] + mlist

        # This allows specific individual years to be added to the list of climatologies.
        # Note: Checking for valid input is impossible until we look at the dataset
        # This has to be special cased since typically someone will be saying
        # "Generate climatologies for seasons for years X, Y, and Z of my dataset"
        if (args.years != None):
            if (args.yearly == True):
                print "Please specify just one of --yearly or --years"
                quit()
            else:
                self._opts['years'] = args.years

        if (args.seasons != None):
            if (args.seasonally == True):
                print "Please specify just one of --seasonally or --seasons"
                quit()
            else:
                slist = [x for x in all_seasons if x in args.seasons]
                self._opts['times'] = self._opts['times'] + slist
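

# A minimal, table-driven sketch of the pressure-level handling above.
# split_pressure_level_vars() is a hypothetical helper, not part of the
# original class; it assumes the same 'vars'/'varopts' conventions.
def split_pressure_level_vars(varlist):
    """Map names like 'T_850' to ('T', '850'); return (vars, varopts)."""
    plev_map = {'Z3_300': ('Z3', '300'), 'Z3_500': ('Z3', '500'),
                'U_200': ('U', '200'), 'T_200': ('T', '200'),
                'T_850': ('T', '850')}
    newvars = []
    vopts = set()
    for v in varlist:
        if v in plev_map:
            base, level = plev_map[v]
            newvars.append(base)
            vopts.add(level)
        else:
            newvars.append(v)
    return newvars, sorted(vopts)

# e.g. split_pressure_level_vars(['T_850', 'PRECT']) -> (['T', 'PRECT'], ['850'])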
Example #34
0
    def displayCell(self, res30, row, column, sheet="Sheet 1", dropInfo=True):
        """Display result into one cell defined by row/col args"""
        import pdb
        projectController = self.parent().get_current_project_controller()
        if dropInfo:
            projectController.get_sheet_widget(sheet).deleteCell(row, column)
        projectController.enable_animation = False  # I (JfP) don't know why I need this;
        # it didn't use to be necessary.
        if res30 is None:
            return
        if not hasattr(
                res30, 'presentation'
        ) or res30.presentation is None or res30.presentation == "text":
            return
        pvars = res30.vars
        labels = res30.labels
        title = res30.title
        presentation = res30.presentation
        Gtype = res30.type
        if Gtype == "Taylor":
            Gtype = "Taylordiagram"

        pm = projectController.plot_manager
        VCS_LIST = pm._plot_list["VCS"]
        gm = res30.presentation
        from packages.uvcdat_cdms.init import get_canvas, get_gm_attributes, original_gm_attributes
        from gui.uvcdat.uvcdatCommons import gmInfos

        if False:  # standard diagnostics prints:
            print "pvars:", [p.id for p in pvars]
            print "labels:", labels
            print "title:", title
            print "presentation:", presentation
            print "x min,max:", getattr(presentation, 'datawc_x1',
                                        None), getattr(presentation,
                                                       'datawc_x2', None)
            print "y min,max:", getattr(presentation, 'datawc_y1',
                                        None), getattr(presentation,
                                                       'datawc_y2', None)
            print "res", res30.type

        #define where to drag and drop
        import cdms2
        from packages.uvcdat_cdms.init import CDMSVariable
        from core.utils import InstanceObject
        from metrics.frontend.uvcdat import diagnostics_template
        tm = diagnostics_template()  # template name is 'diagnostic'
        if dropInfo:
            tmplDropInfo = ('diagnostic', sheet, row, column)
            projectController.template_was_dropped(tmplDropInfo)

        if Gtype == 'Vector':
            pvars = pvars[0]
        for varindex, V in enumerate(pvars):
            if Gtype != 'Vector':
                V.title = title  # VCS looks at the title of the variable, not the plot.
                V.long_name = V.title  # VCS overrides title with long_name!

            # Until I know better, store vars in a tempfile....
            f = tempfile.NamedTemporaryFile()
            filename = f.name
            f.close()
            value = 0
            cdms2.setNetcdfShuffleFlag(value)  # value is either 0 or 1
            cdms2.setNetcdfDeflateFlag(value)  # value is either 0 or 1
            cdms2.setNetcdfDeflateLevelFlag(value)  # value is an integer between 0 and 9 inclusive
            fd = cdms2.open(filename, "w")
            fd.write(V)
            fd.close()
            cdmsFile = cdms2.open(filename)
            #define name of variable to appear in var widget
            if Gtype == 'Vector':
                name_in_var_widget = V[0].id
            else:
                name_in_var_widget = V.id
            #get uri if exists
            url = None
            if hasattr(cdmsFile, 'uri'):
                url = cdmsFile.uri
            #create vistrails module
            cdmsVar = CDMSVariable(filename=cdmsFile.id,
                                   url=url,
                                   name=name_in_var_widget,
                                   varNameInFile=name_in_var_widget)  #V.id)
            #get variable widget and project controller
            definedVariableWidget = self.parent().dockVariable.widget()
            #add variable to display widget and controller
            definedVariableWidget.addVariable(V)
            projectController.add_defined_variable(cdmsVar)

            # simulate drop variable
            varDropInfo = (name_in_var_widget, sheet, row, column)
            projectController.variable_was_dropped(varDropInfo)

            # Trying to add method to plot list....
            #from gui.application import get_vistrails_application
            #_app = get_vistrails_application()
            #d = _app.uvcdatWindow.dockPlot
            # simulate drop plot

            G = VCS_LIST[Gtype]
            if not gm.name in G.keys():
                G[gm.name] = pm._registry.add_plot(gm.name, "VCS", None, None,
                                                   Gtype)
                G[gm.name].varnum = int(gmInfos[Gtype]["nSlabs"])

            #add initial attributes to global dict
            canvas = get_canvas()
            method_name = "get" + Gtype.lower()
            attributes = get_gm_attributes(Gtype)

            attrs = {}
            for attr in attributes:
                attrs[attr] = getattr(gm, attr)
            original_gm_attributes[Gtype][gm.name] = InstanceObject(**attrs)

            if Gtype in ["Scatter", "Vector"] and varindex == 0:
                # To plot a scatter or vector plot, both axes must be passed to plotspec,
                # so don't plot the first one until the 2nd variable is processed.
                pass
            else:
                # simulate drop plot
                plot = projectController.plot_manager.new_plot(
                    'VCS', Gtype, gm.name)
                #plot = projectController.plot_manager.new_plot('VCS', Gtype, "default" )
                plotDropInfo = (plot, sheet, row, column)
                projectController.plot_was_dropped(plotDropInfo)
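

# Minimal sketch of the tempfile round trip used in displayCell() above:
# write a variable to an uncompressed NetCDF file, then reopen it read-only.
# write_var_to_tempfile() is a hypothetical name, not part of the original.
import tempfile

import cdms2


def write_var_to_tempfile(V):
    f = tempfile.NamedTemporaryFile(suffix='.nc')
    filename = f.name
    f.close()
    # no shuffle, no deflate: plain (netCDF3-style) output
    cdms2.setNetcdfShuffleFlag(0)
    cdms2.setNetcdfDeflateFlag(0)
    cdms2.setNetcdfDeflateLevelFlag(0)
    fd = cdms2.open(filename, 'w')
    fd.write(V)
    fd.close()
    return cdms2.open(filename)  # read-only handle, as in the code above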
Example #35
0
# imports implied by the aliases used below (timc.clock, cdm.open, npy.set_printoptions)
import time as timc

import cdms2 as cdm
import numpy as npy


def surfTransf(fileFx,
               fileTos,
               fileSos,
               fileHef,
               fileWfo,
               varNames,
               outFile,
               debug=True,
               timeint='all',
               noInterp=False,
               domain='global'):
    '''
    The surfTransf() function takes files and variable arguments and creates
    density-binned surface transformation fields, which are written to a specified outfile
    Author:    Eric Guilyardi : [email protected]
    Co-author: Paul J. Durack : [email protected] : @durack1.
    
    Created on Wed Oct  8 09:15:59 CEST 2014

    Inputs:
    ------
    - fileTos(time,lat,lon)     - 3D SST array
    - fileSos(time,lat,lon)     - 3D SSS array
    - fileHef(time,lat,lon)     - 3D net surface heat flux array
    - fileWfo(time,lat,lon)     - 3D fresh water flux array
    - fileFx(lat,lon)           - 2D array containing the cell area values
    - varNames[4]               - 1D array containing the names of the variables
    - outFile(str)              - output file with full path specified.
    - debug <optional>          - boolean value
    - timeint <optional>        - specify temporal step for binning <init_idx>,<ncount>
    - noInterp <optional>       - if true no interpolation to target grid
    - domain <optional>         - specify domain for averaging when interpolated to WOA grid ('global','north',
                                  'north40', 'south' for now)

    Outputs:
    --------
    - netCDF file with monthly surface rhon, density fluxes, transformation (global and per basin)
    - use cdo yearmean to compute annual mean

    Usage:
    ------
    '>>> from binDensity import surfTransf
    '>>> surfTransf(file_fx, file_tos, file_sos, file_hef, file_wfo, [var1,var2,var3,var4], './output.nc', debug=True, timeint='all')

    Notes:
    -----
    - EG   8 Oct 2014   - Initial function write and tests ok
    - PJD 22 Nov 2014   - Code cleanup
    - EG   4 Oct 2017   - code on ciclad, more cleanup and options
    - EG  12 Sep 2018   - Add North vs. South calculation

    '''
    # Keep track of time (CPU and elapsed)
    cpu0 = timc.clock()
    #
    # netCDF compression (use 0 for netCDF3)
    comp = 1
    cdm.setNetcdfShuffleFlag(comp)
    cdm.setNetcdfDeflateFlag(comp)
    cdm.setNetcdfDeflateLevelFlag(comp)
    cdm.setAutoBounds('on')
    #
    # == Inits
    #
    npy.set_printoptions(precision=2)
    # Determine file name from inputs
    modeln = fileTos.split('/')[-1].split('.')[1]
    #
    if debug:
        print ' Debug - File names:'
        print '    ', fileTos
        print '    ', fileSos
        debugp = True
    else:
        debugp = False
    #
    # Open files
    ftos = cdm.open(fileTos)
    fsos = cdm.open(fileSos)
    fhef = cdm.open(fileHef)
    fwfo = cdm.open(fileWfo)
    #timeax = ftos.getAxis('time')
    timeax = ftos.getAxis('time_counter')
    #print 'timeax'
    #print timeax
    #
    # Dates to read
    if timeint == 'all':
        tmin = 0
        tmax = timeax.shape[0]
        timeaxis = timeax
    else:
        tmin = int(timeint.split(',')[0]) - 1
        tmax = tmin + int(timeint.split(',')[1])
        # update time axis
        timeaxis = cdm.createAxis(timeax[tmin:tmax])
        timeaxis.id = 'time'
        timeaxis.units = timeax.units
        timeaxis.designateTime()
        #print timeaxis

    if debugp:
        print
        print ' Debug mode'

    # Read file attributes to carry on to output files
    list_file = ftos.attributes.keys()
    file_dic = {}
    for i in range(0, len(list_file)):
        file_dic[i] = list_file[i], ftos.attributes[list_file[i]]
    #
    # Read data

    # varnames
    tos_name = varNames[0]
    sos_name = varNames[1]
    hef_name = varNames[2]
    wfo_name = varNames[3]

    if debugp:
        print ' Read ', tos_name, sos_name, tmin, tmax
    tos = ftos(tos_name, time=slice(tmin, tmax))
    sos = fsos(sos_name, time=slice(tmin, tmax))
    if debugp:
        print ' Read ', hef_name, wfo_name
    qnet = fhef(hef_name, time=slice(tmin, tmax))
    try:
        emp = fwfo(wfo_name, time=slice(tmin, tmax))
        empsw = 0
    except Exception, err:
        emp = fwfo('wfos', time=slice(tmin, tmax))
        print ' Reading concentration dilution fresh water flux'
        empsw = 0
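

# Minimal sketch of the timeint convention used above: 'all' selects the
# whole time axis, otherwise '<init_idx>,<ncount>' gives a 1-based start
# index and a count. parse_timeint() is a hypothetical helper.
def parse_timeint(timeint, ntimes):
    if timeint == 'all':
        return 0, ntimes
    tmin = int(timeint.split(',')[0]) - 1
    tmax = tmin + int(timeint.split(',')[1])
    return tmin, tmax

# e.g. parse_timeint('13,24', 1200) -> (12, 36): 24 steps starting at step 13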
Example #36
0
def test_driver( path1, path2=None, filt2=None ):
    """ Test driver for setting up data for plots"""

    # First, find and index the data files.
    datafiles1 = dirtree_datafiles( path1 )
    logger.info("jfp datafiles1=%s",datafiles1)
    datafiles2 = dirtree_datafiles( path2, filt2 )
    logger.info("jfp datafiles2=%s",datafiles2)
    filetable1 = basic_filetable( datafiles1 )
    filetable2 = basic_filetable( datafiles2 )

    # Next we'll compute reduced variables.  They have generally been reduced by averaging in time,
    # and often more axes as well.  Reducing the data first is the fastest way to compute, important
    # if we need to be interactive.  And it is correct if whatever we plot is linear in the
    # variables, as is almost always the case.  But if we want to plot a highly nonlinear function
    # of the data variables, the averaging will have to wait until later.

    # The reduced_variables dict names and contains all the reduced variables which we have defined.
    # They will be used in defining instances of plotspec.
    reduced_variables = {
        'hyam_1': reduced_variable(
            variableid='hyam', filetable=filetable1,
            reduction_function=(lambda x,vid=None: x) ),
        'hybm_1': reduced_variable(
            variableid='hybm', filetable=filetable1,
            reduction_function=(lambda x,vid=None: x) ),
        'PS_ANN_1': reduced_variable(
            variableid='PS', filetable=filetable1,
            reduction_function=reduce2lat ),
        'T_CAM_ANN_1': reduced_variable(
            variableid='T', filetable=filetable1,
            reduction_function=reduce2levlat ),
        'T_CAM_ANN_2': reduced_variable(
            variableid='T', filetable=filetable2,
            reduction_function=reduce2levlat ),
        'TREFHT_ANN_latlon_Npole_1': reduced_variable(
            variableid='TREFHT', filetable=filetable1,
            reduction_function=(lambda x,vid=None: restrict_lat(reduce2latlon(x,vid=vid),50,90)) ),
        'TREFHT_ANN_latlon_Npole_2': reduced_variable(
            variableid='TREFHT', filetable=filetable2,
            reduction_function=(lambda x,vid=None: restrict_lat(reduce2latlon(x,vid=vid),50,90)) ),
        'TREFHT_ANN_lat_1': reduced_variable(
            variableid='TREFHT', filetable=filetable1,
            reduction_function=reduce2lat ),
        'TREFHT_DJF_lat_1': reduced_variable(
            variableid='TREFHT',
            filetable=filetable1,
            reduction_function=(lambda x,vid=None: reduce2lat_seasonal(x,seasonsDJF,vid=vid)) ),
        'TREFHT_DJF_lat_2': reduced_variable(
            variableid='TREFHT',
            filetable=filetable2,
            reduction_function=(lambda x,vid=None: reduce2lat_seasonal(x,seasonsDJF,vid=vid)) ),
        'TREFHT_DJF_latlon_1': reduced_variable(
            variableid='TREFHT',
            filetable=filetable1,
            reduction_function=(lambda x,vid=None: reduce2latlon_seasonal(x,seasonsDJF,vid=vid)) ),
        'TREFHT_DJF_latlon_2': reduced_variable(
            variableid='TREFHT',
            filetable=filetable2,
            reduction_function=(lambda x,vid=None: reduce2latlon_seasonal(x,seasonsDJF,vid=vid)) ),
        'TREFHT_JJA': reduced_variable(
            variableid='TREFHT',
            filetable=filetable1,
            reduction_function=(lambda x,vid=None: reduce2lat_seasonal(x,seasonsJJA,vid=vid)) ),
        'PRECT_JJA_lat_1': reduced_variable(
            variableid='PRECT',
            filetable=filetable1,
            reduction_function=(lambda x,vid=None: reduce2lat_seasonal(x,seasonsJJA,vid=vid)) ),
        'PRECT_JJA_lat_2': reduced_variable(
            variableid='PRECT',
            filetable=filetable2,
            reduction_function=(lambda x,vid=None: reduce2lat_seasonal(x,seasonsJJA,vid=vid)) ),


        # CAM variables needed for heat transport:
            # FSNS, FLNS, FLUT, FSNTOA, FLNT, FSNT, SHFLX, LHFLX,
        'FSNS_1': reduced_variable(
            variableid='FSNS',filetable=filetable1,reduction_function=(lambda x,vid:x) ),
        'FSNS_ANN_latlon_1': reduced_variable(
            variableid='FSNS',
            filetable=filetable1,
            reduction_function=reduce2latlon ),
        'FLNS_1': reduced_variable(
            variableid='FLNS',filetable=filetable1,reduction_function=(lambda x,vid:x) ),
        'FLNS_ANN_latlon_1': reduced_variable(
            variableid='FLNS',
            filetable=filetable1,
            reduction_function=reduce2latlon ),
        'FLUT_ANN_latlon_1': reduced_variable(
            variableid='FLUT',
            filetable=filetable1,
            reduction_function=reduce2latlon ),
        'FSNTOA_ANN_latlon_1': reduced_variable(
            variableid='FSNTOA',
            filetable=filetable1,
            reduction_function=reduce2latlon ),
        'FLNT_1': reduced_variable(
            variableid='FLNT',filetable=filetable1,reduction_function=(lambda x,vid:x) ),
        'FLNT_ANN_latlon_1': reduced_variable(
            variableid='FLNT',
            filetable=filetable1,
            reduction_function=reduce2latlon ),
        'FSNT_1': reduced_variable(
            variableid='FSNT',filetable=filetable1,reduction_function=(lambda x,vid:x) ),
        'FSNT_ANN_latlon_1': reduced_variable(
            variableid='FSNT',
            filetable=filetable1,
            reduction_function=reduce2latlon ),
        'QFLX_1': reduced_variable(
            variableid='QFLX',filetable=filetable1,reduction_function=(lambda x,vid:x) ),
        'SHFLX_1': reduced_variable(
            variableid='SHFLX',filetable=filetable1,reduction_function=(lambda x,vid:x) ),
        'SHFLX_ANN_latlon_1': reduced_variable(
            variableid='SHFLX',
            filetable=filetable1,
            reduction_function=reduce2latlon ),
        'LHFLX_ANN_latlon_1': reduced_variable(
            variableid='LHFLX',
            filetable=filetable1,
            reduction_function=reduce2latlon ),
        'ORO_ANN_latlon_1': reduced_variable(
            variableid='ORO',
            filetable=filetable1,
            reduction_function=reduce2latlon ),
        'OCNFRAC_ANN_latlon_1': reduced_variable(
            variableid='OCNFRAC',
            filetable=filetable1,
            reduction_function=reduce2latlon ),


        'ts_lat_old': reduced_variable(
            variableid='surface_temperature', # normally a CF standard_name, even for non-CF data.
            filetable=filetable1,
            reduction_function=reduce2lat_old ),
        'ts_lat_new': reduced_variable(
            variableid='surface_temperature', # normally a CF standard_name, even for non-CF data.
            filetable=filetable1,
            reduction_function=reduce2lat 
            # The reduction function will take just one argument, a variable (MV).  But it might
            # be expressed here as a lambda wrapping a more general function.
            # Often there will be ranges in time, space, etc. specified here.  No range means
            # everything.
            ),
        'ts_scalar_tropical_o': reduced_variable(
            variableid = 'surface_temperature',
            filetable=filetable1,
            reduction_function=(lambda mv,vid=None: reduce2scalar_zonal_old(mv,-20,20,vid=vid))
            ),
        'ts_scalar_tropical_n': reduced_variable(
            variableid = 'surface_temperature',
            filetable=filetable1,
            reduction_function=(lambda mv,vid=None: reduce2scalar_zonal(mv,-20,20,vid=vid))
            )
        }

    # Derived variables have to be treated separately from reduced variables
    # because derived variables generally depend on reduced variables.
    # But N.B.: the dicts reduced_variables and derived_variables
    # must never use the same key!
    derived_variables = {
        'CAM_HEAT_TRANSPORT_ALL_1': derived_var(
            vid='CAM_HEAT_TRANSPORT_ALL_1',
            inputs=['FSNS_ANN_latlon_1', 'FLNS_ANN_latlon_1', 'FLUT_ANN_latlon_1',
                    'FSNTOA_ANN_latlon_1', 'FLNT_ANN_latlon_1', 'FSNT_ANN_latlon_1',
                    'SHFLX_ANN_latlon_1', 'LHFLX_ANN_latlon_1', 'OCNFRAC_ANN_latlon_1' ],
            outputs=['atlantic_heat_transport','pacific_heat_transport',
                     'indian_heat_transport', 'global_heat_transport' ],
            func=oceanic_heat_transport ),
        'NCEP_OBS_HEAT_TRANSPORT_ALL_2': derived_var(
            vid='NCEP_OBS_HEAT_TRANSPORT_ALL_2',
            inputs=[],
            outputs=('latitude', ['atlantic_heat_transport','pacific_heat_transport',
                     'indian_heat_transport', 'global_heat_transport' ]),
            func=(lambda: ncep_ocean_heat_transport(path2) ) ),
        'T_ANN_1': derived_var(
            vid='T_ANN_1', inputs=['T_CAM_ANN_1', 'hyam_1', 'hybm_1', 'PS_ANN_1', 'T_CAM_ANN_2'],
            outputs=('temperature'),
            func=verticalize )
        }

    plotvars = dict( reduced_variables.items() + derived_variables.items() )

    # The plotvspecs dict names and contains all plotspec objects which we have defined.
    # The plotspeckeys variable, below, names the ones for which we will generate output.
    # A dict value can be a plotspec object, or a list of such objects.  A list of
    # plotspec instances specifies a page containing multiple plots in separate panes.
    plotspecs = {
        'TREFHT_ANN_Npole_ALL':
            ['TREFHT_ANN_Npole_1', 'TREFHT_ANN_Npole_2', 'TREFHT_ANN_Npole_diff'],
        'TREFHT_ANN_Npole_1': plotspec(
            vid='TREFHT_ANN_Npole_1',
            xvars=['TREFHT_ANN_latlon_Npole_1'], xfunc = lonvar,
            yvars=['TREFHT_ANN_latlon_Npole_1'], yfunc = latvar,
            zvars=['TREFHT_ANN_latlon_Npole_1'], zfunc = (lambda z: z),
            plottype='polar contour plot' ),
        'TREFHT_ANN_Npole_2': plotspec(
            vid='TREFHT_ANN_Npole_2',
            xvars=['TREFHT_ANN_latlon_Npole_2'], xfunc = lonvar,
            yvars=['TREFHT_ANN_latlon_Npole_2'], yfunc = latvar,
            zvars=['TREFHT_ANN_latlon_Npole_2'], zfunc = (lambda z: z),
            plottype='polar contour plot' ),
        'TREFHT_ANN_Npole_diff': plotspec(
            vid='TREFHT_ANN_Npole_diff',
            xvars=['TREFHT_ANN_latlon_Npole_1','TREFHT_ANN_latlon_Npole_2'], xfunc = lonvar_min,
            yvars=['TREFHT_ANN_latlon_Npole_1','TREFHT_ANN_latlon_Npole_2'], yfunc = latvar_min,
            zvars=['TREFHT_ANN_latlon_Npole_1','TREFHT_ANN_latlon_Npole_2'], zfunc = aminusb_2ax,
            plottype='polar contour plot' ),
        'TREFHT_DJF_laton_ALL':
            ['TREFHT_DJF_latlon_1', 'TREFHT_DJF_latlon_2', 'TREFHT_DJF_latlon_diff'],
        'TREFHT_DJF_latlon_1': plotspec(
            vid='TREFHT_DJF_latlon_1',
            xvars=['TREFHT_DJF_latlon_1'], xfunc = lonvar,
            yvars=['TREFHT_DJF_latlon_1'], yfunc = latvar,
            zvars=['TREFHT_DJF_latlon_1'], zfunc = (lambda z: z),
            plottype='contour plot' ),
        'TREFHT_DJF_latlon_2': plotspec(
            vid='TREFHT_DJF_latlon_2',
            xvars=['TREFHT_DJF_latlon_2'], xfunc = lonvar,
            yvars=['TREFHT_DJF_latlon_2'], yfunc = latvar,
            zvars=['TREFHT_DJF_latlon_2'], zfunc = (lambda z: z),
            plottype='contour plot' ),
        'TREFHT_DJF_latlon_diff': plotspec(
            vid='TREFHT_DJF_latlon_diff',
            xvars=['TREFHT_DJF_latlon_1','TREFHT_DJF_latlon_2'], xfunc=lonvar_min,
            yvars=['TREFHT_DJF_latlon_1','TREFHT_DJF_latlon_2'], yfunc=latvar_min,
            zvars=['TREFHT_DJF_latlon_1','TREFHT_DJF_latlon_2'], zfunc= aminusb_2ax,
            plottype='contour plot'
            ),
        'T_ANN_VERT_CAM_OBS_ALL':
            ['T_VERT_ANN_1', 'T_VERT_ANN_2', 'T_VERT_difference' ],
        'T_VERT_difference': plotspec(
            vid='T_VERT_difference', xvars=['T_ANN_1','T_CAM_ANN_2'], xfunc = latvar_min,
            yvars=['T_ANN_1','T_CAM_ANN_2'], yfunc = levvar_min,
            ya1vars=['T_ANN_1','T_CAM_ANN_2'], ya1func = (lambda y1,y2: heightvar(levvar_min(y1,y2))),
            zvars=['T_ANN_1','T_CAM_ANN_2'], zfunc=aminusb_ax2, plottype="contour plot" ),
        'T_VERT_ANN_2': plotspec(
            vid='T_ANN_2', xvars=['T_CAM_ANN_2'], xfunc=latvar,
            yvars=['T_CAM_ANN_2'], yfunc=levvar, ya1vars=['T_CAM_ANN_2'], ya1func=heightvar,
            zvars=['T_CAM_ANN_2'], plottype='contour plot',
            zrangevars=['T_ANN_1','T_CAM_ANN_2'], zrangefunc=minmin_maxmax ),
        'T_VERT_ANN_1': plotspec(
            vid='T_ANN_1', xvars=['T_ANN_1'], xfunc=latvar,
            yvars=['T_ANN_1'], yfunc=levvar, ya1vars=['T_ANN_1'], ya1func=heightvar,
            zvars=['T_ANN_1'], plottype='contour plot',
            zrangevars=['T_ANN_1','T_CAM_ANN_2'], zrangefunc=minmin_maxmax ),
        'NCEP_OBS_HEAT_TRANSPORT_GLOBAL_2': plotspec(
            vid='NCEP_OBS_HEAT_TRANSPORT_GLOBAL_2',
            xvars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'], xfunc=(lambda x: x[0]),
            yvars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2' ],
            yfunc=(lambda y: y[1][3]), plottype='line plot'),
        'NCEP_OBS_HEAT_TRANSPORT_PACIFIC_2': plotspec(
            vid='NCEP_OBS_HEAT_TRANSPORT_PACIFIC_2',
            xvars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'], xfunc=(lambda x: x[0]),
            yvars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2' ],
            yfunc=(lambda y: y[1][0]), plottype='line plot'),
        'NCEP_OBS_HEAT_TRANSPORT_ATLANTIC_2': plotspec(
            vid='NCEP_OBS_HEAT_TRANSPORT_ATLANTIC_2',
            xvars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'], xfunc=(lambda x: x[0]),
            yvars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2' ],
            yfunc=(lambda y: y[1][1]), plottype='line plot'),
        'NCEP_OBS_HEAT_TRANSPORT_INDIAN_2': plotspec(
            vid='NCEP_OBS_HEAT_TRANSPORT_INDIAN_2',
            xvars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'], xfunc=(lambda x: x[0]),
            yvars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2' ],
            yfunc=(lambda y: y[1][2]), plottype='line plot'),
        'CAM_HEAT_TRANSPORT_GLOBAL_1': plotspec(
            vid='CAM_HEAT_TRANSPORT_GLOBAL_1',
            xvars=['FSNS_ANN_latlon_1'], xfunc=latvar,
            yvars=['CAM_HEAT_TRANSPORT_ALL_1' ],
            yfunc=(lambda y: y[3]), plottype='line plot'),
        'CAM_HEAT_TRANSPORT_PACIFIC_1': plotspec(
            vid='CAM_HEAT_TRANSPORT_PACIFIC_1',
            xvars=['FSNS_ANN_latlon_1'], xfunc=latvar,
            yvars=['CAM_HEAT_TRANSPORT_ALL_1' ],
            yfunc=(lambda y: y[0]), plottype='line plot'),
        'CAM_HEAT_TRANSPORT_ATLANTIC_1': plotspec(
            vid='CAM_HEAT_TRANSPORT_ATLANTIC_1',
            xvars=['FSNS_ANN_latlon_1'], xfunc=latvar,
            yvars=['CAM_HEAT_TRANSPORT_ALL_1' ],
            yfunc=(lambda y: y[1]), plottype='line plot'),
        'CAM_HEAT_TRANSPORT_INDIAN_1': plotspec(
            vid='CAM_HEAT_TRANSPORT_INDIAN_1',
            xvars=['FSNS_ANN_latlon_1'], xfunc=latvar,
            yvars=['CAM_HEAT_TRANSPORT_ALL_1' ],
            yfunc=(lambda y: y[2]), plottype='line plot'),
        'CAM_HEAT_TRANSPORT_ALL_1':
            ['CAM_HEAT_TRANSPORT_GLOBAL_1','CAM_HEAT_TRANSPORT_PACIFIC_1',
             'CAM_HEAT_TRANSPORT_ATLANTIC_1','CAM_HEAT_TRANSPORT_INDIAN_1'],
        'CAM_NCEP_HEAT_TRANSPORT_GLOBAL': plotspec(
            vid='CAM_NCEP_HEAT_TRANSPORT_GLOBAL',
            x1vars=['FSNS_ANN_latlon_1'], x1func=latvar,
            y1vars=['CAM_HEAT_TRANSPORT_ALL_1' ],
            y1func=(lambda y: y[3]),
            x2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'], x2func=(lambda x: x[0]),
            y2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2' ],
            y2func=(lambda y: y[1][3]),
            plottype='2 line plot'  ),
        'CAM_NCEP_HEAT_TRANSPORT_PACIFIC': plotspec(
            vid='CAM_NCEP_HEAT_TRANSPORT_PACIFIC',
            x1vars=['FSNS_ANN_latlon_1'], x1func=latvar,
            y1vars=['CAM_HEAT_TRANSPORT_ALL_1' ],
            y1func=(lambda y: y[0]),
            x2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'], x2func=(lambda x: x[0]),
            y2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2' ],
            y2func=(lambda y: y[1][0]),
            plottype='2 line plot'  ),
        'CAM_NCEP_HEAT_TRANSPORT_ATLANTIC': plotspec(
            vid='CAM_NCEP_HEAT_TRANSPORT_ATLANTIC',
            x1vars=['FSNS_ANN_latlon_1'], x1func=latvar,
            y1vars=['CAM_HEAT_TRANSPORT_ALL_1' ],
            y1func=(lambda y: y[1]),
            x2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'], x2func=(lambda x: x[0]),
            y2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2' ],
            y2func=(lambda y: y[1][1]),
            plottype='2 line plot'  ),
        'CAM_NCEP_HEAT_TRANSPORT_INDIAN': plotspec(
            vid='CAM_NCEP_HEAT_TRANSPORT_INDIAN',
            x1vars=['FSNS_ANN_latlon_1'], x1func=latvar,
            y1vars=['CAM_HEAT_TRANSPORT_ALL_1' ],
            y1func=(lambda y: y[2]),
            x2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'], x2func=(lambda x: x[0]),
            y2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2' ],
            y2func=(lambda y: y[1][2]),
            plottype='2 line plot'  ),
        'CAM_NCEP_HEAT_TRANSPORT_ALL':
            ['CAM_NCEP_HEAT_TRANSPORT_GLOBAL','CAM_NCEP_HEAT_TRANSPORT_PACIFIC',
             'CAM_NCEP_HEAT_TRANSPORT_ATLANTIC','CAM_NCEP_HEAT_TRANSPORT_INDIAN'],
        'past_CAM_HEAT_TRANSPORT_GLOBAL_1': plotspec(
            vid='CAM_HEAT_TRANSPORT_GLOBAL_1',
            xvars=['FSNS_ANN_latlon_1'], xfunc=latvar,
            yvars=['FSNS_ANN_latlon_1', 'FLNS_ANN_latlon_1', 'FLUT_ANN_latlon_1',
                    'FSNTOA_ANN_latlon_1', 'FLNT_ANN_latlon_1', 'FSNT_ANN_latlon_1',
                    'SHFLX_ANN_latlon_1', 'LHFLX_ANN_latlon_1', 'OCNFRAC_ANN_latlon_1' ],
            yfunc=oceanic_heat_transport, plottype='line plot'),
        'PRECT_JJA': ['PRECT_JJA_2line','PRECT_JJA_diff'],
        'PRECT_JJA_2line': plotspec(
            vid='PRECT_JJA_2line',
            x1vars=['PRECT_JJA_lat_1'], x1func = latvar,
            x2vars=['PRECT_JJA_lat_2'], x2func = latvar,
            y1vars=['PRECT_JJA_lat_1'], y1func=(lambda y: y),
            y2vars=['PRECT_JJA_lat_2'], y2func=(lambda y: y),
            plottype='2-line plot'),
        'PRECT_JJA_diff': plotspec(
            vid='PRECT_JJA_difference',
            xvars=['PRECT_JJA_lat_1','PRECT_JJA_lat_2'], xfunc = latvar_min,
            yvars=['PRECT_JJA_lat_1','PRECT_JJA_lat_2'],
            yfunc=aminusb_1ax,   # aminusb_1ax(y1,y2)=y1-y2; each y has 1 axis, use min axis
            plottype='line plot'),
        'TREFHT_ANN': plotspec(
            vid='TREFHT_ANN',xvars=['TREFHT_ANN_lat_1'], xfunc = latvar,
            yvars=['TREFHT_ANN_lat_1'], yfunc=(lambda y: y), plottype='line plot'),
        'TREFHT_DJF': ['TREFHT_DJF_2line','TREFHT_DJF_diff'],
        'TREFHT_DJF_2line': plotspec(
            vid='TREFHT_DJF_2line',
            x1vars=['TREFHT_DJF_lat_1'], x1func = latvar,
            x2vars=['TREFHT_DJF_lat_2'], x2func = latvar,
            y1vars=['TREFHT_DJF_lat_1'], y1func=(lambda y: y),
            y2vars=['TREFHT_DJF_lat_2'], y2func=(lambda y: y),
            plottype='2-line plot'),
        'TREFHT_DJF_diff': plotspec(
            vid='TREFHT_DJF_diff',
            xvars=['TREFHT_DJF_lat_1','TREFHT_DJF_lat_2'], xfunc = latvar_min,
            yvars=['TREFHT_DJF_lat_1','TREFHT_DJF_lat_2'],
            yfunc=aminusb_1ax,   # aminusb_1ax(y1,y2)=y1-y2; each y has 1 axis, use min axis
            plottype='line plot'),
        'TREFHT_DJF_line': plotspec(
            vid='TREFHT_DJF_line',
            xvars=['TREFHT_DJF_lat_1'], xfunc = latvar,
            yvars=['TREFHT_DJF_lat_1'], plottype='line plot'),
        'TREFHT_DJF_contour': plotspec(
            vid='TREFHT_DJF_contour',
            xvars=['TREFHT_DJF_latlon_1'], xfunc = (lambda x: x),
            plottype='line plot'),
        #plotspec( vid='TREFHT_JJA',xvars=['TREFHT_JJA'], xfiletable=filetable1, xfunc = latvar,
        #          yvars=['TREFHT_JJA'], yfunc=(lambda y: y), plottype='line plot'),
        #plotspec(
        #    vid='ts_by_lat_old',   # suitable for filenames
        #    xfiletable=filetable1,
        #    xfunc = latvar, # function to return x axis values
        #    xvars = ['ts_lat_old'],    # names of variables or axes, args of xfunc
        #    yfiletable=filetable1, # can differ from xfiletable, e.g. comparing 2 runs
        #    yfunc = (lambda y: y), # function to return y axis values
        #    yvars = ['ts_lat_old'], # names of variables or axes, args of xfunc
        #    zfiletable=filetable1,
        #    zfunc = (lambda: None),
        #    zvars = [],         # would be needed for contour or 3D plot
        #    # ... the ?vars variable will be converted (using the filetable and
        #    # plotvars) to actual variables which become the arguments for a call
        #    # of ?func, which returns the data we write out for plotting use.
        #    plottype='line plot' ),
        'ts_by_lat': plotspec(
            vid='ts_by_lat',   # suitable for filenames
            xfunc = latvar, # function to return x axis values
            xvars = ['ts_lat_new'],    # names of variables or axes, args of xfunc
            yfunc = (lambda y: y), # function to return y axis values
            yvars = ['ts_lat_new'], # names of variables or axes, args of xfunc
            zfunc = (lambda: None),
            zvars = [],         # would be needed for contour or 3D plot
            # ... the ?vars variable will be converted (using the filetable and
            # plotvars) to actual variables which become the arguments for a call
            # of ?func, which returns the data we write out for plotting use.
            plottype='line plot' ),
        #plotspec( vid="ts_global_old",xvars=['ts_scalar_tropical_o'], xfiletable=filetable1 ),
        'ts_global': plotspec( vid="ts_global",xvars=['ts_scalar_tropical_n'] ),
        }

    # Plotspeckeys specifies what plot data we will compute and write out.
    # In the future we may add a command line option, or provide other ways to
    # define plotspeckeys.
    #plotspeckeys = [['TREFHT_DJF_2line','TREFHT_DJF_difference']]
    #plotspeckeys = ['TREFHT_DJF_2line']
    #plotspeckeys = ['NCEP_OBS_HEAT_TRANSPORT_GLOBAL_2','CAM_HEAT_TRANSPORT_ALL_1']
    #plotspeckeys = ['CAM_NCEP_HEAT_TRANSPORT_GLOBAL']
    #plotspeckeys = ['CAM_NCEP_HEAT_TRANSPORT_ALL']
    #plotspeckeys = ['T_ANN_VERT_CAM_OBS_ALL']
    #plotspeckeys = ['TREFHT_DJF_laton_ALL']
    #plotspeckeys = ['TREFHT_ANN_Npole_ALL']
    plotspeckeys = ['TREFHT_DJF']
    #plotspeckeys = ['GLOBAL_AVERAGES']

    # Find the variable names required by the plotspecs.
    varkeys = []
    for psk in plotspeckeys:
        if type(psk) is str and type(plotspecs[psk]) is list:
            psk = plotspecs[psk]
        if type(psk) is str:
            write_xml = False
            psl = [ plotspecs[psk] ]
        else:
            write_xml = True
            psl = [ plotspecs[k] for k in psk ]
            xml_name = '_'.join( [ ps._strid for ps in psl ] ) +'.xml'
            h = open( xml_name, 'w' )
            h.write("<plotdata>\n")
        for ps in psl:
            varkeys = varkeys+ps.xvars+ps.x1vars+ps.x2vars+ps.x3vars
            varkeys = varkeys+ps.yvars+ps.y1vars+ps.y2vars+ps.y3vars
            varkeys = varkeys + ps.zvars + ps.zrangevars
    for key in varkeys:
        if key in derived_variables.keys():
            varkeys = varkeys + derived_variables[key]._inputs
    varkeys = list( set(varkeys) )

    # Compute the value of every variable we need.
    varvals = {}
    # First compute all the reduced variables
    for key in varkeys:
        if key in reduced_variables.keys():
            varvals[key] = reduced_variables[key].reduce()
    # Then use the reduced variables to compute the derived variables
    #   Note that the derive() method is allowed to return a tuple.  This way
    #   we can use one function to compute what's really several variables.
    for key in varkeys:
        if key in derived_variables.keys():
            varvals[key] = derived_variables[key].derive(varvals)

    # Now use the reduced and derived variables to compute the plot data.
    for psk in plotspeckeys:
        if type(psk) is str and type(plotspecs[psk]) is list:
            psk = plotspecs[psk]
        if type(psk) is str:
            write_xml = False
            psl = [ plotspecs[psk] ]
        else:
            write_xml = True
            psl = [ plotspecs[k] for k in psk ]
            xml_name = '_'.join( [ ps._strid for ps in psl ] ) +'.xml'
            h = open( xml_name, 'w' )
            h.write("<plotdata>\n")
        varkeys = []
        for ps in psl:
            logger.info("jfp preparing data for %s", ps._strid)
            xrv = [ varvals[k] for k in ps.xvars ]
            x1rv = [ varvals[k] for k in ps.x1vars ]
            x2rv = [ varvals[k] for k in ps.x2vars ]
            x3rv = [ varvals[k] for k in ps.x3vars ]
            yrv = [ varvals[k] for k in ps.yvars ]
            y1rv = [ varvals[k] for k in ps.y1vars ]
            y2rv = [ varvals[k] for k in ps.y2vars ]
            y3rv = [ varvals[k] for k in ps.y3vars ]
            yarv = [ varvals[k] for k in ps.yavars ]
            ya1rv = [ varvals[k] for k in ps.ya1vars ]
            zrv = [ varvals[k] for k in ps.zvars ]
            zrrv = [ varvals[k] for k in ps.zrangevars ]
            xax = apply( ps.xfunc, xrv )
            x1ax = apply( ps.x1func, x1rv )
            x2ax = apply( ps.x2func, x2rv )
            x3ax = apply( ps.x3func, x3rv )
            yax = apply( ps.yfunc, yrv )
            y1ax = apply( ps.y1func, y1rv )
            y2ax = apply( ps.y2func, y2rv )
            y3ax = apply( ps.y3func, y3rv )
            yaax = apply( ps.yafunc, yarv )
            ya1ax = apply( ps.ya1func, ya1rv )
            zax = apply( ps.zfunc, zrv )
            zr = apply( ps.zrangefunc, zrrv )
            if      (xax is None or len(xrv)==0) and (x1ax is None or len(x1rv)==0)\
                and (x2ax is None or len(x2rv)==0) and (x3ax is None or len(x3rv)==0)\
                and (yax is None or len(yrv)==0) and (y1ax is None or len(y1rv)==0)\
                and (y2ax is None or len(y2rv)==0) and (y3ax is None or len(y3rv)==0)\
                and (zax is None or len(zrv)==0):
                continue
            filename = ps._strid+"_test.nc"
            value=0
            cdms2.setNetcdfShuffleFlag(value)  # value is either 0 or 1
            cdms2.setNetcdfDeflateFlag(value)  # value is either 0 or 1
            cdms2.setNetcdfDeflateLevelFlag(value)  # value is an integer between 0 and 9 inclusive

            g = cdms2.open( filename, 'w' )    # later, choose a better name and a path!
            store_provenance(g)
            # Much more belongs in g, e.g. axis and graph names.
            if xax is not None and len(xrv)>0:
                xax.id = 'X'
                g.write(xax)
            if x1ax is not None and len(x1rv)>0:
                x1ax.id = 'X1'
                g.write(x1ax)
            if x2ax is not None and len(x2rv)>0:
                x2ax.id = 'X2'
                g.write(x2ax)
            if x3ax is not None and len(x3rv)>0:
                x3ax.id = 'X3'
                g.write(x3ax)
            if yax is not None and len(yrv)>0:
                yax.id = 'Y'
                g.write(yax)
            if y1ax is not None and len(y1rv)>0:
                y1ax.id = 'Y1'
                g.write(y1ax)
            if y2ax is not None and len(y2rv)>0:
                y2ax.id = 'Y2'
                g.write(y2ax)
            if y3ax is not None and len(y3rv)>0:
                y3ax.id = 'Y3'
                g.write(y3ax)
            if yaax is not None and len(yarv)>0:
                yaax.id = 'YA'
                g.write(yaax)
            if ya1ax is not None and len(ya1rv)>0:
                ya1ax.id = 'YA1'
                g.write(ya1ax)
            if zax is not None and len(zrv)>0:
                zax.id = 'Z'
                g.write(zax)
            if zr is not None:
                zr.id = 'Zrange'
                g.write(zr)
            g.presentation = ps.plottype
            # Note: For table output, it would be convenient to use a string-valued variable X
            # to specify string parts of the table.  But cdms2 doesn't support them usefully.
            # Instead we'll manage with a convention that a table row plotspec's id is the name of
            # the row, thus available to be printed in, e.g., the first column.
            if ps.plottype=="table row":
                g.rowid = ps._strid
            g.close()
            if write_xml:
                h.write( "<ncfile>"+filename+"</ncfile>\n" )

        if write_xml:
            h.write( "</plotdata>\n" )
            h.close()
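

# Sketch of a guard for the constraint noted above: the reduced_variables
# and derived_variables dicts must never share a key. check_disjoint_keys()
# is a hypothetical helper, not part of the original module.
def check_disjoint_keys(reduced_variables, derived_variables):
    shared = set(reduced_variables.keys()) & set(derived_variables.keys())
    if shared:
        raise ValueError("reduced/derived key collision: %s" % sorted(shared))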
Example #37
0
def execute(test_str, plotset, obstype, varid, season, imagefilename,
            imagethreshold, ncfiles, rtol, atol):
    print test_str
    # Silence annoying messages about how to set the NetCDF file type.  Anything will do.
    cdms2.setNetcdfShuffleFlag(0)
    cdms2.setNetcdfDeflateFlag(0)
    cdms2.setNetcdfDeflateLevelFlag(0)

    # get command line args
    p = argparse.ArgumentParser(description="Basic gm testing code for vcs")
    p.add_argument("--datadir",
                   dest="datadir",
                   help="root directory for model and obs data")
    p.add_argument("--baseline",
                   dest="baseline",
                   help="directory with baseline files for comparing results")
    p.add_argument("--keep",
                   dest="keep",
                   help="Iff True, will keep computed png and nc files")
    args = p.parse_args(sys.argv[1:])
    datadir = args.datadir
    baselinepath = args.baseline
    keep = args.keep

    #setup paths to data
    modelpath = os.path.join(datadir, 'cam_output')
    obspath = os.path.join(datadir, 'obs_atmos')
    outpath = tempfile.mkdtemp() + "/"
    print "outpath=", outpath

    #setup string to be executed and run script
    #diagstr = "diags --outputdir '%s' --model path=%s,climos=no --obs path=%s,filter=\"f_contains('NCEP')\",climos=yes --package AMWG --set 3 --var T --seasons JJA" % (outpath, modelpath, obspath)
    diagstr_parts = [
        " --outputdir %s " % (outpath),
        " --model path=%s,climos=no " % (modelpath),
        " --obs path=%s,filter=\"f_contains('%s')\",climos=yes " %
        (obspath, obstype), " --package AMWG ",
        " --set %s " % (str(plotset)),
        " --var %s" % (varid),
        " --seasons %s" % (season)
    ]
    diagstr = "diags "
    for part in diagstr_parts:
        diagstr += part
    print 'executing '
    print diagstr

    # nonstandard, suitable for testing:
    proc = subprocess.Popen([diagstr], shell=True)
    proc_status = proc.wait()
    if proc_status != 0:
        raise DiagError("diags run failed")

    if keep:
        print "save ", imagefilename, ncfiles.keys()
        print "output directory is = ", outpath
    else:
        # Test of graphics (png) file match:
        # This just looks at combined plot, aka summary plot, which is a compound of three plots.

        imagefname = os.path.join(outpath, imagefilename)
        imagebaselinefname = os.path.join(baselinepath, imagefilename)
        graphics_result = checkimage.check_result_image(
            imagefname, imagebaselinefname, imagethreshold)
        print "Graphics file", imagefname, "match difference:", graphics_result

        # Test of NetCDF data (nc) file match:
        CLOSE = True
        for ncfilename, ncvars in ncfiles.items():
            for var in ncvars:
                #print ncfilename, var
                print baselinepath
                try:
                    close = closeness(var, ncfilename, outpath, baselinepath,
                                      rtol, atol)
                    if not close:
                        print var, ' in ', ncfilename, ' is not close.'
                except:
                    print 'comparison failed ', ncfilename, var
                    close = False
                CLOSE = CLOSE and close

        #cleanup the temp files
        shutil.rmtree(outpath)
        assert (CLOSE)  #, 'data are not close'
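

# Hypothetical sketch of a variable-closeness check in the spirit of the
# closeness() call above (the real closeness() is defined elsewhere in the
# test suite); assumes both files hold the variable under the same name.
import os

import cdms2
import numpy


def vars_close(varname, ncfilename, outpath, baselinepath, rtol, atol):
    f1 = cdms2.open(os.path.join(outpath, ncfilename))
    f2 = cdms2.open(os.path.join(baselinepath, ncfilename))
    try:
        a = numpy.ma.filled(f1(varname), 0.0)
        b = numpy.ma.filled(f2(varname), 0.0)
        return numpy.allclose(a, b, rtol=rtol, atol=atol)
    finally:
        f1.close()
        f2.close()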
Example #38
0
import cdms2
from cdms2 import MV
import numpy
import glob
import sys
import os
from os import path
import re
from scipy import interpolate
import shutil

# _______________
if __name__=="__main__":


    # for netcdf3: set flags to 0
    cdms2.setNetcdfShuffleFlag(0) #1
    cdms2.setNetcdfDeflateFlag(0) #1
    cdms2.setNetcdfDeflateLevelFlag(0) #3

    infile=None
    outfile=None
    variable=None

    ii=1
    while ii < len(sys.argv):
        arg = sys.argv[ii]
        if arg=='-o':
            ii = ii + 1
            outfile=sys.argv[ii]
        elif arg=='-v':
            ii = ii + 1
            variable = sys.argv[ii]
        ii = ii + 1

# aliased imports used by mmeAveMsk1D() below
import cdms2 as cdm
import numpy as npy
import time as timc

def mmeAveMsk1D(listFiles,
                sw2d,
                years,
                inDir,
                outDir,
                outFile,
                timeInt,
                mme,
                ToeType,
                fullTS,
                debug=True):
    '''
    The mmeAveMsk1D() function averages rhon or scalar density-binned files with differing masks.
    It outputs the MME and a percentage of non-masked bins.

    Created on Tue Nov 25 13:56:20 CET 2014

    Inputs:
    -------
    - listFiles(str)         - the list of files to be averaged
    - sw2d                   - dimension of fields to consider (1 or 2)
    - years(t1,t2)           - years for slice read
    - inDir(str)             - input directory where files are stored
    - outDir(str)            - output directory
    - outFile(str)           - output file
    - timeInt(2xindices)     - indices of init period to compare with (e.g. [1,20])
    - mme(bool)              - multi-model mean (will read in single model ensemble stats)
    - fullTS                 - 0/1: if 1, uses full time series (ignores years(t1,t2))
    - debug <optional>       - boolean value

    Notes:
    -----
    - EG 25 Nov 2014   - Initial function write
    - EG  9 Dec 2014   - Add agreement on difference with init period - save as <var>Agree
    - EG 04 Oct 2016   - Add 3D files support

    TODO:
    ------

    '''

    # CDMS initialisation - netCDF compression
    comp = 1
    # 0 for no compression
    cdm.setNetcdfShuffleFlag(comp)
    cdm.setNetcdfDeflateFlag(comp)
    cdm.setNetcdfDeflateLevelFlag(comp)
    cdm.setAutoBounds('on')
    # Numpy initialisation
    npy.set_printoptions(precision=2)

    debug = bool(debug)  # normalise to a strict boolean
    # File dim and grid inits
    t1 = years[0]
    t2 = years[1]
    if t2 <= 0:
        useLastYears = True
        t2 = -t2
    else:
        useLastYears = False
    # Bound of period average to remove
    peri1 = timeInt[0]
    peri2 = timeInt[1]
    # Find dimension
    runN = len(listFiles)
    try:
        fi = cdm.open(inDir[0] + '/' + listFiles[0])
    except Exception:
        print ' *** file not found ', inDir[0] + '/' + listFiles[0]
        sys.exit(' Abort')
    if sw2d == 1:
        ptopd0 = fi['ptopdepth']
        # Create variable handle
        latN = ptopd0.shape[2]
        basN = ptopd0.shape[1]
    elif sw2d == 2:
        ptopd0 = fi['ptopdepthxy']
        # Create variable handle
        lonN = ptopd0.shape[2]
        latN = ptopd0.shape[1]

    #timN = ptopd0.shape[0]
    timN = t2 - t1
    if fullTS:
        print '  !!! Working on full Time Series (fullTS = True)'
        timN = ptopd0.shape[0]
        t1 = 0
        t2 = timN
    t10 = t1
    t20 = t2
    # Get grid objects
    axesList = ptopd0.getAxisList()
    # Declare and open files for writing
    if os.path.isfile(outDir + '/' + outFile):
        os.remove(outDir + '/' + outFile)
    outFile_f = cdm.open(outDir + '/' + outFile, 'w')

    print ' Number of members:', len(listFiles)

    valmask = ptopd0.missing_value

    # init time axis
    time = cdm.createAxis(npy.float32(range(timN)))
    time.id = 'time'
    time.units = 'years since 1861'
    time.designateTime()

    # loop on variables
    # init percent array

    if sw2d == 1:
        varList = [
            'ptopdepth', 'ptopsigma', 'ptopso', 'ptopthetao', 'volpers',
            'salpers', 'tempers'
        ]
        #varList = ['ptopdepth']
        varDim = [1, 1, 1, 1, 0, 0, 0]
        percent = npy.ma.ones([runN, timN, basN, latN], dtype='float32') * 0.
    elif sw2d == 2:
        varList = ['ptopdepthxy', 'ptopsigmaxy', 'ptopsoxy', 'ptopthetaoxy']
        #varList = ['ptopdepthxy']
        varDim = [2, 2, 2, 2]
        percent = npy.ma.ones([runN, timN, latN, lonN], dtype='float32') * 0.

    varFill = [
        valmask, valmask, valmask, valmask, valmask, valmask, valmask, valmask,
        valmask
    ]

    axis1D = [time, axesList[1], axesList[2]]
    axis0D = [time, axesList[1]]
    print ' timN = ', timN

    # loop on 1D variables
    for iv, var in enumerate(varList):
        ti0 = timc.clock()

        # Array inits
        if varDim[iv] == 2:
            isonvar = npy.ma.ones([runN, timN, latN, lonN],
                                  dtype='float32') * valmask
            vardiff = npy.ma.ones([runN, timN, latN, lonN],
                                  dtype='float32') * valmask
            varones = npy.ma.ones([runN, timN, latN, lonN],
                                  dtype='float32') * 1.
            axisVar = axis1D
        elif varDim[iv] == 1:
            isonvar = npy.ma.ones([runN, timN, basN, latN],
                                  dtype='float32') * valmask
            vardiff = npy.ma.ones([runN, timN, basN, latN],
                                  dtype='float32') * valmask
            varones = npy.ma.ones([runN, timN, basN, latN],
                                  dtype='float32') * 1.
            axisVar = axis1D
        else:
            isonvar = npy.ma.ones([runN, timN, basN],
                                  dtype='float32') * valmask
            vardiff = npy.ma.ones([runN, timN, basN],
                                  dtype='float32') * valmask
            varones = npy.ma.ones([runN, timN, basN], dtype='float32') * 1.
            axisVar = axis0D
        print ' Variable ', iv, var, varDim[iv]
        # loop over files to fill up array
        for ic, file in enumerate(listFiles):
            ft = cdm.open(inDir[0] + '/' + file)
            timeax = ft.getAxis('time')
            try:
                tmax = timeax.shape[0]
            except Exception:
                print ic, file, timeax
            if ic == 0:
                tmax0 = tmax
                #print ic,file, tmax
            #adapt [t1,t2] time bounds to piControl last NN years
            if useLastYears:
                t1 = tmax - t20
                t2 = tmax
            else:
                if tmax != tmax0:
                    print 'tmax <> tmax0', tmax, tmax0
                    print 'wrong time axis: exiting...'
                    return
            #print 'Time dims:',ic, t1,t2,tmax
            # read array
            computeVar = True
            allVars = ft.variables.keys()
            if 'ptopsigmaxy' in allVars:
                computeVar = False
            if (var == 'ptopsigmaxy') & computeVar:
                #print '  ic = ',ic
                # reconstruct from isondepthg and ptopdepthxy

                isond = ft('isondepthg', time=slice(t1, t2))
                #print isond.data.shape, timN*latN*lonN
                itest = 94 * 360 + 150
                axesList = isond.getAxisList()
                levs = axesList[1][:]
                levN = len(levs)
                #ti02 = timc.clock()
                levs3d0 = mv.reshape(npy.tile(levs, latN * lonN),
                                     (latN * lonN, levN))
                #ti05 = timc.clock()
                isonRead = npy.ma.ones([timN, latN, lonN],
                                       dtype='float32') * valmask
                for it in range(timN):  # loop on time to limit memory usage
                    levs3d = levs3d0 * 1.
                    depthlo = mv.reshape(vardepth[ic, it, ...], latN * lonN)
                    depth3d = npy.reshape(npy.repeat(depthlo, levN),
                                          (latN * lonN, levN))
                    isond3d = mv.reshape(
                        npy.transpose(isond.data[it, ...], (1, 2, 0)),
                        (latN * lonN, levN))
                    #print isond3d[itest,:]
                    isond3d[isond3d > valmask / 10] = 0.
                    #print isond3d[itest,:]
                    isond3dp1 = npy.roll(isond3d, -1, axis=1)
                    isond3dp1[:, -1] = isond3d[:, -1]
                    #print isond3dp1[itest,:]
                    #levs3d[levs3d > 30. ] = 0. # to distinguish bottom masked points from surface masked points
                    #print levs3d[itest,:]
                    levs3d[(depth3d <= isond3d)] = 0.
                    #print levs3d[itest,:]
                    levs3d[(depth3d > isond3dp1)] = 0.
                    #print levs3d[itest,:]
                    #isonwrk = npy.sum(levs3d,axis=1)
                    isonwrk = npy.max(levs3d, axis=1)
                    if it < 0:
                        print ic, it
                        print depthlo[itest]
                        print isond3d[itest, :]
                        print isonwrk[itest]
                        print
                    isonRead[it, ...] = mv.reshape(isonwrk, (latN, lonN))
                # <-- end of loop on time
                del (isond3d, isond3dp1)
                gc.collect()
                # mask with depthxy and where sigmaxy = 0
                isonRead.mask = vardepth.mask[ic, ...]
                isonRead = mv.masked_where(isonRead == 0, isonRead)
                isonRead.long_name = var
                isonRead.units = 'sigma_n'
                isonRead.id = var
                del (isond, depth3d, levs3d, levs3d0, isonwrk)
                gc.collect()
                #ti3 = timc.clock()
                #print ti02-ti0,ti05-ti02, ti1-ti05,ti12-ti1,ti15-ti12,ti2-ti15,ti3-ti2
                #print ti3-ti0
                # write ptopsigmaxy
                if os.path.isfile(inDir[0] + '/work_ptopsigmaxy/' + file):
                    os.remove(inDir[0] + '/work_ptopsigmaxy/' + file)
                fiout = cdm.open(inDir[0] + '/work_ptopsigmaxy/' + file, 'w')
                if ic == 0:
                    print ' Creating ', inDir[0] + '/work_ptopsigmaxy/' + file
                isonsigxy = cdm.createVariable(isonRead,
                                               axes=axis1D,
                                               id='ptopsigmaxy')
                isonsigxy.long_name = 'Density of shallowest persistent ocean on ison'
                isonsigxy.units = 'sigma_n'
                fiout.write(isonsigxy.astype('float32'))
                fiout.close()
            else:
                # Direct read of variable
                isonRead = ft(var, time=slice(t1, t2))
            #print isonRead.shape, timN
            if varFill[iv] != valmask:
                isonvar[ic, ...] = isonRead.filled(varFill[iv])
            else:
                isonvar[ic, ...] = isonRead
            #print isonvar[ic,:,40,100]
            # compute percentage of non-masked points across MME
            if iv == 0:
                maskvar = mv.masked_values(isonRead.data, valmask).mask
                percent[ic, ...] = npy.float32(npy.equal(maskvar, 0))
            if mme:
                # if mme then just average Bowl and Agree fields
                varst = var + 'Agree'
                vardiff[ic, ...] = ft(varst, time=slice(t1, t2))
            else:
                # Compute difference with average of first initN years, use mask of last month
                varinit = cdu.averager(isonvar[ic, peri1:peri2, ...], axis=0)
                for tr in range(timN):
                    vardiff[ic, tr, ...] = isonvar[ic, tr, ...] - varinit
                vardiff[ic, ...].mask = isonvar[ic, ...].mask

            ft.close()
        # <-- end of loop on files
        # TODO remove masked points at longitudes 0 or 180deg for some models
        # if ptopdepthxy, keep for ptopsigmaxy computation (reconstruct from isondepthg and ptopdepthxy)
        if var == 'ptopdepthxy':
            vardepth = isonvar
        # Compute percentage of bin presence
        # Only keep points where percent > 50%
        if iv == 0:
            percenta = (cdu.averager(percent, axis=0)) * 100.
            percenta = mv.masked_less(percenta, 50)
            percentw = cdm.createVariable(percenta,
                                          axes=axis1D,
                                          id='ptoppercent')
            percentw._FillValue = valmask
            percentw.long_name = 'percentage of MME bin'
            percentw.units = '%'
            outFile_f.write(percentw.astype('float32'))
        # Sign of difference
        if mme:
            vardiffsgSum = cdu.averager(vardiff, axis=0)
            vardiffsgSum = cdm.createVariable(vardiffsgSum,
                                              axes=axisVar,
                                              id='foo')
            vardiffsgSum = maskVal(vardiffsgSum, valmask)
            vardiffsgSum.mask = percentw.mask
        else:
            vardiffsg = npy.copysign(varones, vardiff)
            # average signs
            vardiffsgSum = cdu.averager(vardiffsg, axis=0)
            vardiffsgSum = mv.masked_greater(vardiffsgSum, 10000.)
            vardiffsgSum.mask = percentw.mask
            vardiffsgSum._FillValue = valmask

        # average across members
        isonVarAve = cdu.averager(isonvar, axis=0)
        isonVarAve = cdm.createVariable(isonVarAve, axes=axisVar, id='foo')
        # mask
        if varFill[iv] == valmask:
            isonVarAve = maskVal(isonVarAve, valmask)

        isonVarAve.mask = percentw.mask

        # Write
        isonave = cdm.createVariable(isonVarAve, axes=axisVar, id=isonRead.id)
        isonave.long_name = isonRead.long_name
        isonave.units = isonRead.units
        isonavediff = cdm.createVariable(vardiffsgSum,
                                         axes=axisVar,
                                         id=isonRead.id + 'Agree')
        isonavediff.long_name = isonRead.long_name
        isonavediff.units = isonRead.units

        outFile_f.write(isonave.astype('float32'))
        outFile_f.write(isonavediff.astype('float32'))
        tf = timc.clock()
        #print '   time var',tf-ti0
    # <--- end of loop on variables

    outFile_f.close()
    fi.close()
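
A hypothetical call to mmeAveMsk1D(); every path and file name below is a placeholder, not taken from the example:

# Hypothetical usage of mmeAveMsk1D(); all arguments are placeholders.
listFiles = ['cmip5.model1.historical.r1i1p1.an.ocn.1D.nc',
             'cmip5.model2.historical.r1i1p1.an.ocn.1D.nc']
mmeAveMsk1D(listFiles,
            sw2d=1,                # (basin, lat) fields
            years=[0, 140],        # time slice to read
            inDir=['/work/binned'],
            outDir='/work/mme',
            outFile='cmip5.mme.historical.an.ocn.1D.nc',
            timeInt=[1, 20],       # reference period for differences
            mme=False,             # single-model ensemble stage
            ToeType='F',
            fullTS=False)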
Example #40
0
# Python module imports
import os
import shutil
import subprocess
import sys
import cdms2 as cdm
# Add durolib to path
sys.path.insert(1, '/export/durack1/git/pylib')  # Assumes crunchy/oceanonly
from durolib import globalAttWrite


# Set cdms preferences - maximum compression, no shuffling, no complaining
cdm.setNetcdfDeflateFlag(1)
# 1-9, min to max - Comes at heavy IO (read/write time cost)
cdm.setNetcdfDeflateLevelFlag(9)
cdm.setNetcdfShuffleFlag(0)
cdm.setCompressionWarnings(0)  # Turn off nag messages
# Set bounds automagically
# cdm.setAutoBounds(1) ; # Use with caution

# Set build info once
buildDate = '141126'
outPath = '/work/durack1/Shared/141126_metrics-acme'
# Create input variable lists
uvcdatInstall = ''.join(
    ['/export/durack1/', buildDate, '_pcmdi_metrics/PCMDI_METRICS/bin/'])
# Specify inputs:
#        Realm   ModelId               InputFiles    SourceDirectory
data = [
    ['atmos',
     'ACME-CAM5-SE_v0pt1',
Example #41
0
   def processCmdLine(self):
      parser = argparse.ArgumentParser(
         description='UV-CDAT Climate Modeling Diagnostics', 
         usage='%(prog)s --path1 [options]')

      parser.add_argument('--path', '-p', action='append', nargs=1, 
         help="Path(s) to dataset(s). This is required.  If two paths need different filters, set one here and one in path2.")
      parser.add_argument('--path2', '-q', action='append', nargs=1, 
         help="Path to a second dataset.")
      parser.add_argument('--obspath', action='append', nargs=1,
                          help="Path to an observational dataset")
      parser.add_argument('--cachepath', nargs=1,
         help="Path for temporary and cachced files. Defaults to /tmp")
#      parser.add_argument('--realm', '-r', nargs=1, choices=self.realm_types,
#         help="The realm type. Current valid options are 'land' and 'atmosphere'")
      parser.add_argument('--filter', '-f', nargs=1, 
         help="A filespec filter. This will be applied to the dataset path(s) (--path option) to narrow down file choices.")
      parser.add_argument('--filter2', '-g', nargs=1, 
         help="A filespec filter. This will be applied to the second dataset path (--path2 option) to narrow down file choices.")
      parser.add_argument('--new_filter', '-F', action='append', nargs=1, 
         help="A filespec filter. This will be applied to the corresponding dataset path to narrow down file choices.")
      parser.add_argument('--packages', '--package', '-k', nargs='+', 
         help="The diagnostic packages to run against the dataset(s). Multiple packages can be specified.")
      parser.add_argument('--sets', '--set', '-s', nargs='+', 
         help="The sets within a diagnostic package to run. Multiple sets can be specified. If multiple packages were specified, the sets specified will be searched for in each package") 
      parser.add_argument('--vars', '--var', '-v', nargs='+', 
         help="Specify variables of interest to process. The default is all variables which can also be specified with the keyword ALL") 
      parser.add_argument('--list', '-l', nargs=1, choices=['sets', 'vars', 'variables', 'packages', 'seasons', 'regions', 'translations', 'options'], 
         help="Determine which packages, sets, regions, variables, and variable options are available")
         # maybe eventually add compression level too....
      parser.add_argument('--compress', nargs=1, choices=['no', 'yes'],
         help="Turn off netCDF compression. This can be required for other utilities to be able to process the output files (e.g. parallel netCDF based tools") #no compression, add self state

      parser.add_argument('--outputpre', nargs=1,
         help="Specify an output filename prefix to be prepended to all file names created internally. For example --outputpre myout might generate myout-JAN.nc, etc")
      parser.add_argument('--outputpost', nargs=1,
         help="Specify an output filename postfix to be appended to all file names created internally. For example --outputpost _OBS might generate set1-JAN_OBS.nc, etc")
      parser.add_argument('--outputdir', '-O', nargs=1,
         help="Directory in which output files will be written." )

      parser.add_argument('--seasons', nargs='+', choices=all_seasons,
         help="Specify which seasons to generate climatoogies for")
      parser.add_argument('--years', nargs='+',
         help="Specify which years to include when generating climatologies") 
      parser.add_argument('--months', nargs='+', choices=all_months,
         help="Specify which months to generate climatologies for")
      parser.add_argument('--climatologies', '-c', nargs=1, choices=['no','yes'],
         help="Specifies whether or not climatologies should be generated")
      parser.add_argument('--plots', '-t', nargs=1, choices=['no','yes'],
         help="Specifies whether or not plots should be generated")
      parser.add_argument('--plottype', nargs=1)
      parser.add_argument('--precomputed', nargs=1, choices=['no','yes'], 
         help="Specifies whether standard climatologies are stored with the dataset (*-JAN.nc, *-FEB.nc, ... *-DJF.nc, *-year0.nc, etc")
      parser.add_argument('--json', '-j', nargs=1, choices=['no', 'yes'],
         help="Produce JSON output files as part of climatology/diags generation") # same
      parser.add_argument('--netcdf', '-n', nargs=1, choices=['no', 'yes'],
         help="Produce NetCDF output files as part of climatology/diags generation") # same
      parser.add_argument('--xml', '-x', nargs=1, choices=['no', 'yes'],
         help="Produce XML output files as part of climatology/diags generation")
      parser.add_argument('--seasonally', action='store_true',
         help="Produce climatologies for all of the defined seasons. To get a list of seasons, run --list seasons")
      parser.add_argument('--monthly', action='store_true',
         help="Produce climatologies for all predefined months")
      parser.add_argument('--yearly', action='store_true',
         help="Produce annual climatogolies for all years in the dataset")
      parser.add_argument('--timestart', nargs=1,
         help="Specify the starting time for the dataset, such as 'months since Jan 2000'")
      parser.add_argument('--timebounds', nargs=1, choices=['daily', 'monthly', 'yearly'],
         help="Specify the time boudns for the dataset")
      parser.add_argument('--verbose', '-V', action='count',
         help="Increase the verbosity level. Each -v option increases the verbosity more.") # count
      parser.add_argument('--name', action='append', nargs=1,
         help="Specify option names for the datasets for plot titles, etc") #optional name for the set
      # This will be the standard list of region names NCAR has
      parser.add_argument('--regions', '--region', nargs='+', choices=all_regions.keys(),
         help="Specify a geographical region of interest. Note: Multi-word regions need quoted, e.g. 'Central Canada'")
      parser.add_argument('--starttime', nargs=1,
         help="Specify a start time in the dataset")
      parser.add_argument('--endtime', nargs=1, 
         help="Specify an end time in the dataset")
      parser.add_argument('--translate', nargs='?', default='y',
         help="Enable translation for obs sets to datasets. Optional provide a colon separated input to output list e.g. DSVAR1:OBSVAR1")
      parser.add_argument('--varopts', nargs='+',
         help="Variable auxillary options")

      args = parser.parse_args()

      if(args.list != None):
         if args.list[0] == 'translations':
            print "Default variable translations: "
            self.listTranslations()
            quit()
         if args.list[0] == 'regions':
            print "Available geographical regions: ", all_regions.keys()
            quit()

         if args.list[0] == 'seasons':
            print "Available seasons: ", all_seasons
            quit()

         if args.list[0] == 'packages':
            print "Listing available packages:"
            print self.all_packages.keys()
            quit()

         
         if args.list[0] == 'sets':
            if args.packages == None:
               print "Please specify package before requesting available diags sets"
               quit()
            for p in args.packages:
               print 'Available sets for package ', p, ':'
               sets = self.listSets(p)
               keys = sets.keys()
               for k in keys:
                  print 'Set',k, ' - ', sets[k]
            quit()
               
         if args.list[0] == 'variables' or args.list[0] == 'vars':
            if args.path != None:
               for i in args.path:
                  self._opts['path'].append(i[0])
            else:
               print 'Must provide a dataset when requesting a variable listing'
               quit()
            self.listVariables(args.packages, args.sets)
            quit()
         if args.list[0] == 'options':
            if args.path!= None:
               for i in args.path:
                  self._opts['path'].append(i[0])
            else:
               print 'Must provide a dataset when requesting a variable listing'
               quit()
            self.listVarOptions(args.packages, args.sets, args.vars)
            quit()

      # Generally if we've gotten this far, it means no --list was specified. If we don't have
      # at least a path, we should exit.
      if(args.path != None):
         for i in args.path:
            self._opts['path'].append(i[0])
      else:
         print 'Must specify a path or the --list option at a minimum.'
         print 'For help, type "diags --help".'
         quit()
      if(args.path2 != None):
         for i in args.path2:
            self._opts['path2'].append(i[0])

      if(args.obspath != None):
         for i in args.obspath:
            self._opts['obspath'].append(i[0])

      # TODO: Should some pre-defined filters be "nameable" here?
      if(args.filter != None): # Only supports one filter argument, see filter2.
         self._opts['filter'] = args.filter[0]
         self._opts['user_filter'] = True
#         for i in args.filter:
#            self._opts['filter'].append(i[0])
      if(args.filter2 != None): # This is a second filter argument.
         self._opts['filter2'] = args.filter2[0]
         self._opts['user_filter'] = True
      if(args.new_filter != None):  # like filter but with multiple arguments
         for i in args.new_filter:
            self._opts['new_filter'].append(i[0])

      if(args.cachepath != None):
         self._opts['cachepath'] = args.cachepath[0]

      self._opts['seasonally'] = args.seasonally
      self._opts['monthly'] = args.monthly

      if(args.varopts != None):
         self._opts['varopts'] = args.varopts

      if(args.starttime != None):
         self._opts['start'] = args.starttime[0]

      if(args.endtime != None):
         self._opts['end'] = args.endtime[0]

      # I checked; these are global and it doesn't seem to matter if you import cdms2 multiple times;
      # they are still set after you set them once in the python process.
      if(args.compress != None):
         if(args.compress[0] == 'no'):
            self._opts['compress'] = False
         else:
            self._opts['compress'] = True

      if self._opts['compress']:
         print 'Enabling compression for output netCDF files'
         cdms2.setNetcdfShuffleFlag(1)
         cdms2.setNetcdfDeflateFlag(1)
         cdms2.setNetcdfDeflateLevelFlag(9)
      else:
         print 'Disabling compression for output netCDF files'
         cdms2.setNetcdfShuffleFlag(0)
         cdms2.setNetcdfDeflateFlag(0)
         cdms2.setNetcdfDeflateLevelFlag(0)
         

      if(args.json != None):
         if(args.json[0] == 'no'):
            self._opts['json'] = False
         else:
            self._opts['json'] = True
      if(args.xml != None):
         if(args.xml[0] == 'no'):
            self._opts['xml'] = False
         else:
            self._opts['xml'] = True

      if(args.netcdf != None):
         if(args.netcdf[0] == 'no'):
            self._opts['netcdf'] = False
         else:
            self._opts['netcdf'] = True

      if(args.plots != None):
         if(args.plots[0].lower() == 'no' or args.plots[0] == 0):
            self._opts['plots'] = False
         else:
            self._opts['plots'] = True

      if(args.climatologies != None):
         if(args.climatologies[0] == 'no'):
            self._opts['climatologies'] = False
         else:
            self._opts['climatologies'] = True

      self._opts['verbose'] = args.verbose

      if(args.name != None):
         for i in args.name:
            self._opts['dsnames'].append(i[0])

      # Help create output file names
      if(args.outputpre != None):
         self._opts['outputpre'] = args.outputpre[0]
      if(args.outputpost != None):
         self._opts['outputpost'] = args.outputpost[0]

      # Output directory
      if(args.outputdir != None):
         if not os.path.isdir(args.outputdir[0]):
            print "ERROR, output directory",args.outputdir[0],"does not exist!"
            quit()
         self._opts['outputdir'] = args.outputdir[0]

      if(args.translate != 'y'):
         print args.translate
         print self._opts['translate']
         quit()
      # Timestart assumes a string like "months since 2000". I can't find documentation on
      # toRelativeTime() so I have no idea how to check for valid input
      # This is required for some of the land model sets I've seen
      if(args.timestart != None):
         self._opts['reltime'] = args.timestart
         
      # cdutil.setTimeBounds{bounds}(variable)
      if(args.timebounds != None):
         self._opts['bounds'] = args.timebounds

      # Check if a user specified package actually exists
      # Note: This is case sensitive.....
      if(args.packages != None):
         plist = []
         for x in args.packages:
            if x.upper() in self.all_packages.keys():
               plist.append(x)
            elif x in self.all_packages.keys():
               plist.append(x.lower())

         if plist == []:
            print 'Package name(s) ', args.packages, ' not valid'
            print 'Valid package names: ', self.all_packages.keys()
            quit()
         else:
            self._opts['packages'] = plist


      # TODO: Requires exact case; probably make this more user friendly and look for mixed case
      if(args.regions != None):
         rlist = []
         for x in args.regions:
            if x in all_regions.keys():
               rlist.append(x)
         print 'REGIONS: ', rlist
         self._opts['regions'] = rlist

      # Given user-selected packages, check for user specified sets
      # Note: If multiple packages have the same set names, then they are all added to the list.
      # This might be bad since there is no differentiation of lmwg['id==set'] and lmwg2['id==set']
      if(self._opts['packages'] == None and args.sets != None):
         print 'No package specified'
         self._opts['sets'] = args.sets

      if(args.sets != None and self._opts['packages'] != None):
         # unfortunately, we have to go through all of this....
         # there should be a non-init of the class method to list sets/packages/etc,
         # ie a dictionary perhaps?
         sets = []
         import metrics.fileio.filetable as ft
         import metrics.fileio.findfiles as fi
         import metrics.packages.diagnostic_groups 
         package = self._opts['packages']
         if package[0].lower() == 'lmwg':
            import metrics.packages.lmwg.lmwg
         elif package[0].lower()=='amwg':
            import metrics.packages.amwg.amwg
         dtree = fi.dirtree_datafiles(self, pathid=0)
         filetable = ft.basic_filetable(dtree, self)
         dm = metrics.packages.diagnostic_groups.diagnostics_menu()

         pclass = dm[package[0].upper()]()

         slist = pclass.list_diagnostic_sets()
         keys = slist.keys()
         keys.sort()
         for k in keys:
            fields = k.split()
            for user in args.sets:
               if user == fields[0]:
                  sets.append(user)
         self._opts['sets'] = sets
         if sets != args.sets:
            print 'sets requested ', args.sets
            print 'sets available: ', slist
            exit(1)

      # TODO: Check against an actual list of variables from the set
      if args.vars != None:
         self._opts['vars'] = args.vars

      # If --yearly is set, then we will add 'ANN' to the list of climatologies
      if(args.yearly == True):
         self._opts['yearly'] = True
         self._opts['times'].append('ANN')

      # If --monthly is set, we add all months to the list of climatologies
      if(args.monthly == True):
         self._opts['monthly'] = True
         self._opts['times'].extend(all_months)

      # If --seasonally is set, we add all 4 seasons to the list of climatologies
      if(args.seasonally == True):
         self._opts['seasonally'] = True
         self._opts['times'].extend(all_seasons)

      # This allows specific individual months to be added to the list of climatologies
      if(args.months != None):
         if(args.monthly == True):
            print "Please specify just one of --monthly or --months"
            quit()
         else:
            mlist = [x for x in all_months if x in args.months]
            self._opts['times'] = self._opts['times']+mlist

      # This allows specific individual years to be added to the list of climatologies.
      # Note: Checking for valid input is impossible until we look at the dataset
      # This has to be special cased since typically someone will be saying
      # "Generate climatologies for seasons for years X, Y, and Z of my dataset"
      if(args.years != None):
         if(args.yearly == True):
            print "Please specify just one of --yearly or --years"
            quit()
         else:
            self._opts['years'] = args.years

      if(args.seasons != None):
         if(args.seasonally == True):
            print "Please specify just one of --seasonally or --seasons"
            quit()
         else:
            slist = [x for x in all_seasons if x in args.seasons]
            self._opts['times'] = self._opts['times']+slist
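
Because the shuffle/deflate flags are process-global (as the comment before the compress block notes), a caller that needs to change them temporarily can snapshot and restore them. A sketch, assuming the getter counterparts getNetcdfShuffleFlag(), getNetcdfDeflateFlag() and getNetcdfDeflateLevelFlag() are available in the installed cdms2:

import cdms2

def with_compression(level):
    # Snapshot the process-global cdms2 netCDF flags, then enable
    # compression at the given deflate level; return the snapshot so
    # the caller can restore it later (e.g. in a finally block).
    saved = (cdms2.getNetcdfShuffleFlag(),
             cdms2.getNetcdfDeflateFlag(),
             cdms2.getNetcdfDeflateLevelFlag())
    cdms2.setNetcdfShuffleFlag(1)
    cdms2.setNetcdfDeflateFlag(1)
    cdms2.setNetcdfDeflateLevelFlag(level)
    return saved

def restore_flags(saved):
    # Put the flags back exactly as they were captured.
    shuffle, deflate, level = saved
    cdms2.setNetcdfShuffleFlag(shuffle)
    cdms2.setNetcdfDeflateFlag(deflate)
    cdms2.setNetcdfDeflateLevelFlag(level)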
def mmeAveMsk3D(listFiles,
                years,
                inDir,
                outDir,
                outFile,
                timeInt,
                mme,
                ToeType,
                debug=True):
    '''
    The mmeAveMsk3D() function averages rhon/lat density binned files with differing masks
    It outputs
     - the MME
     - a percentage of non-masked bins
     - the sign agreement of period2-period1 differences
     - ToE per run and for MME

    Author:    Eric Guilyardi : [email protected]

    Created on Tue Nov 21 2016

    Inputs:
    -------
    - listFiles(str)         - the list of files to be averaged
    - years(t1,t2)           - years for slice read
    - inDir[](str)           - input directory where files are stored (add histnat as inDir[1] for ToE)
    - outDir(str)            - output directory
    - outFile(str)           - output file
    - timeInt(2xindices)     - indices of init period to compare with (e.g. [1,20])
    - mme(bool)              - multi-model mean (will read in single model ensemble stats)
    - ToeType(str)           - ToE type ('F': none, 'histnat')
                               -> requires running first mm+mme without ToE to compute Stddev
    - debug <optional>       - boolean value

    Notes:
    -----
    - EG 21 Nov 2016   - Initial function write

    - TODO :
                 - add computation of ToE per model (toe 1 and toe 2) see ticket #50
                 - add isonhtc (see ticket #48)
    '''

    # CDMS initialisation - netCDF compression
    comp = 1  # 0 for no compression
    cdm.setNetcdfShuffleFlag(comp)
    cdm.setNetcdfDeflateFlag(comp)
    cdm.setNetcdfDeflateLevelFlag(comp)
    cdm.setAutoBounds('on')
    # Numpy initialisation
    npy.set_printoptions(precision=2)

    debug = bool(debug)  # normalise to a strict boolean
    # File dim and grid inits
    t1 = years[0]
    t2 = years[1]
    # Bound of period average to remove
    peri1 = timeInt[0]
    peri2 = timeInt[1]
    fi = cdm.open(inDir[0] + '/' + listFiles[0])
    # Switch if only variables below the bowl are present/treated
    nobowl = True
    if nobowl:
        isond0 = fi['isondepthgBowl']
        # Create variable handle
    else:
        isond0 = fi['isondepthg']
        # Create variable handle
    # Get grid objects
    axesList = isond0.getAxisList()
    sigmaGrd = isond0.getLevel()
    #time = isond0.getTime()
    lonN = isond0.shape[3]
    latN = isond0.shape[2]
    levN = isond0.shape[1]
    varsig = 'ptopsigmaxy'

    # Limit number of models to 3 for testing of mme
    #if mme:
    #    listFiles = listFiles[0:2]
    #    print ' !!! ### Testing 3 models ###',  listFiles

    # Declare and open files for writing
    if os.path.isfile(outDir + '/' + outFile):
        os.remove(outDir + '/' + outFile)
    outFile_f = cdm.open(outDir + '/' + outFile, 'w')

    #timN = isond0.shape[0]
    timN = t2 - t1
    runN = len(listFiles)

    print ' Number of members:', len(listFiles)

    valmask = isond0.missing_value

    varList = ['isondepthg', 'persistmxy', 'sog', 'thetaog', 'isonthickg']
    varFill = [valmask, valmask, valmask, valmask, valmask]
    percent = npy.ma.ones([runN, timN, latN, lonN], dtype='float32') * 0.
    varbowl = npy.ma.ones([runN, timN, latN, lonN], dtype='float32') * 1.
    #varList = ['isondepthg']
    #print ' !!! ### Testing one variable ###', varList

    # init sigma axis
    sigma = cdm.createAxis(npy.float32(range(1)))
    sigma.id = axesList[1].id
    sigma.units = axesList[1].units
    sigma.designateTime()
    # init time axis
    time = cdm.createAxis(npy.float32(range(timN)))
    time.id = 'time'
    time.units = 'years since 1861'
    # init ensemble axis
    ensembleAxis = cdm.createAxis(npy.float32(range(runN)))
    ensembleAxis.id = 'members'
    ensembleAxis.units = 'N'
    # Output axis
    sigmaList = [sigma, axesList[2], axesList[3]]
    # sigma, lat, lon
    sigmaTimeList = [sigma, time, axesList[2], axesList[3]]
    # sigma, time, lat, lon
    # init arrays
    isonvar = npy.ma.ones([runN, timN, latN, lonN], dtype='float32') * valmask
    varbowl2D = npy.ma.ones([runN, timN, latN, lonN],
                            dtype='float32') * valmask
    varstd, varToE1, varToE2 = [
        npy.ma.ones([runN, latN, lonN], dtype='float32') * valmask
        for _ in range(3)
    ]

    # Loop on density levels (for memory management, becomes UNLIMITED axis and requires an ncpdq to reorder dimensions)

    delta_ib = 1
    print ' Sigma index:'
    for ib in range(levN):
        ib1 = ib + delta_ib
        print ib,
        tim0 = timc.clock()
        # loop on variables
        for iv, var in enumerate(varList):
            if nobowl:
                varb = var + 'Bowl'
            else:
                varb = var
            if ib == 0:
                print ' Variable ', iv, varb
            # loop over files to fill up array
            for i, file in enumerate(listFiles):
                tim01 = timc.clock()
                ft = cdm.open(inDir[0] + '/' + file)
                model = file.split('.')[1]
                timeax = ft.getAxis('time')
                if i == 0:
                    tmax0 = timeax.shape[0]
                tmax = timeax.shape[0]
                if tmax != tmax0:
                    print 'wrong time axis: exiting...'
                    return
                # read array
                isonRead = ft(varb, time=slice(t1, t2),
                              lev=slice(ib, ib1)).squeeze()
                if varFill[iv] != valmask:
                    isonvar[i, ...] = isonRead.filled(varFill[iv])
                else:
                    isonvar[i, ...] = isonRead
                tim02 = timc.clock()
                # compute percentage of non-masked points across MME
                if iv == 0:
                    maskvar = mv.masked_values(isonRead.data, valmask).mask
                    percent[i, ...] = npy.float32(npy.equal(maskvar, 0))
                tim03 = timc.clock()
                if mme:
                    # if mme then just accumulate Bowl, Agree and Std fields
                    #varst = var+'Agree'
                    #vardiff[i,...] = ft(varst,time = slice(t1,t2),lev = slice(ib,ib1)).squeeze()
                    isonRead = ft(varb, time=slice(t1, t2),
                                  lev=slice(ib, ib1)).squeeze()
                    varbowl2D[i, ...] = isonRead
                else:
                    # Compute difference with average of first initN years
                    #varinit = cdu.averager(isonvar[i,peri1:peri2,...],axis=0)
                    #for t in range(timN):
                    #    vardiff[i,t,...] = isonvar[i,t,...] - varinit
                    #vardiff[i,...].mask = isonvar[i,...].mask
                    # Read bowl to truncate field above bowl
                    if ib == 0 and iv == 0:
                        varbowl[i, ...] = ft(varsig, time=slice(t1, t2))
                        #varbowl[i,...] = bowlRead
                    # Compute Stddev
                    varstd[i, ...] = npy.ma.std(isonvar[i, ...], axis=0)
                    # Compute ToE
                    if ToeType == 'histnat':
                        toto = 1
                        # TODO
                        # Read mean and Std dev from histnat
                        #    if i == 0:
                        #        filehn  = glob.glob(inDir[1]+'/cmip5.'+model+'.*zon2D*')[0]
                        #        #filehn = replace(outFile,'historical','historicalNat')
                        #        fthn = cdm.open(filehn)
                        #        varmeanhn = fthn(var)
                        #        varst = var+'Std'
                        #        varmaxstd = fthn(varst)
                        #    toemult = 1.
                        #    signal = npy.reshape(isonvar[i,...]-varmeanhn,(timN,basN*levN*latN))
                        #    noise = npy.reshape(varmaxstd,(basN*levN*latN))
                        #    varToE1[i,...] = npy.reshape(findToE(signal, noise, toemult),(basN,levN,latN))
                        #    toemult = 2.
                        #    varToE2[i,...] = npy.reshape(findToE(signal, noise, toemult),(basN,levN,latN))
                tim04 = timc.clock()
                ft.close()
                #print 'ib, section 1 timing',ib, tim02-tim01,tim03-tim02,tim04-tim03
            # <-- end of loop on files (i)

            tim1 = timc.clock()

            # Compute percentage of bin presence
            # Only keep points where percent > 50%
            if iv == 0:
                percenta = (cdu.averager(percent, axis=0)) * 100.
                percenta = mv.masked_less(percenta, 50)
                percenta = npy.reshape(percenta, [delta_ib, timN, latN, lonN])
                percentw = cdm.createVariable(percenta,
                                              axes=sigmaTimeList,
                                              id='isonpercent')
                percentw._FillValue = valmask
                percentw.long_name = 'percentage of MME bin'
                percentw.units = '%'
                outFile_f.write(percentw.astype('float32'), extend=1, index=ib)

            # Sign of difference
            #if mme:
            #    vardiffsgSum = cdu.averager(vardiff, axis=0)
            #    vardiffsgSum = cdm.createVariable(vardiffsgSum , axes = sigmaTimeList , id = 'foo')
            #    vardiffsgSum = maskVal(vardiffsgSum, valmask)
            #    vardiffsgSum.mask = percentw.mask
            #else:
            #    vardiffsg = npy.copysign(varones,vardiff)
            #    # average signs
            #    vardiffsgSum = cdu.averager(vardiffsg, axis=0)
            #    vardiffsgSum = mv.masked_greater(vardiffsgSum, 10000.)
            #    vardiffsgSum.mask = percentw.mask
            #    vardiffsgSum._FillValue = valmask

            # average variable across members
            isonVarAve = cdu.averager(isonvar, axis=0)
            isonVarAve = npy.reshape(isonVarAve, [delta_ib, timN, latN, lonN])
            isonVarAve = cdm.createVariable(isonVarAve,
                                            axes=sigmaTimeList,
                                            id='foo')
            # mask
            if varFill[iv] == valmask:
                isonVarAve = maskVal(isonVarAve, valmask)

            isonVarAve.mask = percentw.mask
            tim2 = timc.clock()

            # Only keep points with rhon >  bowl-delta_rho
            delta_rho = 0.
            # mme case
            if mme:  # start from average of <var>Agree
                isonVarBowl = cdu.averager(varbowl2D, axis=0)
                isonVarBowl = npy.reshape(isonVarBowl,
                                          [delta_ib, timN, latN, lonN])
                isonVarBowl = cdm.createVariable(isonVarBowl,
                                                 axes=sigmaTimeList,
                                                 id='foo')
                isonVarBowl = maskVal(isonVarBowl, valmask)
                isonVarBowl.mask = percentw.mask
                # Compute intermodel stddev
                isonVarStd = statistics.std(varbowl2D, axis=0)
                isonVarStd = npy.reshape(isonVarStd,
                                         [delta_ib, timN, latN, lonN])
                isonVarStd = cdm.createVariable(isonVarStd,
                                                axes=sigmaTimeList,
                                                id='foo')
                isonVarStd = maskVal(isonVarStd, valmask)
                isonVarStd.mask = percentw.mask

                # Write
                isonvarbowlw = cdm.createVariable(isonVarBowl,
                                                  axes=sigmaTimeList,
                                                  id=isonRead.id)
                isonvarbowlw.long_name = isonRead.long_name
                isonvarbowlw.units = isonRead.units
                isonvarstdw = cdm.createVariable(isonVarStd,
                                                 axes=sigmaTimeList,
                                                 id=isonRead.id + 'Std')
                isonvarstdw.long_name = isonRead.long_name + ' intermodel std'
                isonvarstdw.units = isonRead.units

                outFile_f.write(isonvarbowlw.astype('float32'),
                                extend=1,
                                index=ib)
                outFile_f.write(isonvarstdw.astype('float32'),
                                extend=1,
                                index=ib)

                #if ib == 0 and iv == 0:
                #    # TODO review
                #    # Read multimodel sigma on bowl and average in time
                #    file1d  =  replace(outDir+'/'+outFile,'2D','1D')
                #    if os.path.isfile(file1d):
                #        f1d = cdm.open(file1d)
                #    else:
                #        print 'ERROR:',file1d,'missing (if mme, run 2D first)'
                #        sys.exit(1)
                #    bowlRead = f1d(varsig,time = slice(t1,t2),lev = slice(ib,ib1))
                #    f1d.close()
                #    siglimit = cdu.averager(bowlRead, axis=0)  - delta_rho
                # TODO: remove loop by building global array with 1/0
                #if sw2d == 1:
                #    for il in range(latN):
                #        for ib in range(basN):
                #            #if ib == 2:
                #            #    print il, siglimit[ib,il]
                #            if siglimit[ib,il] < valmask/1000.:
                #                 # if mme bowl density defined, mask above bowl
                #                index = (npy.argwhere(sigmaGrd[:] >= siglimit[ib,il]))
                #                isonVarBowl [:,ib,0:index[0],il].mask = True
                #                isonVarStd  [:,ib,0:index[0],il].mask = True
                #                vardiffsgSum[:,ib,0:index[0],il].mask = True
                #            else:
                #                # mask all points
                #                isonVarBowl [:,ib,:,il].mask = True
                #                isonVarStd  [:,ib,:,il].mask = True
                #                vardiffsgSum[:,ib,:,il].mask = True
            # mm case
            else:
                isonVarBowl = isonVarAve * 1.  # start from variable
                #isonVarStd  = isonVarAve*1. # start from variable
                if ib == 0 and iv == 0:
                    # build bowl position
                    siglimit = cdu.averager(varbowl,
                                            axis=0)  # average across members
                    siglimit = npy.reshape(siglimit,
                                           [timN * latN * lonN]) - delta_rho
                if iv == 0:
                    sigarr = siglimit * 1.
                    sigarr[:] = sigmaGrd[ib]
                # test
                i = 60
                j = 60
                ij = j * lonN + i
                isonVarBowl = npy.reshape(isonVarBowl, [timN * latN * lonN])
                #vardiffsgSum = npy.reshape(vardiffsgSum,[timN*latN*lonN])

                isonVarBowl.mask = npy.where(sigarr < siglimit, True,
                                             isonVarBowl.mask)
                #vardiffsgSum.mask = npy.where(sigarr < siglimit, True, vardiffsgSum.mask)

                isonVarBowl = npy.reshape(isonVarBowl, [timN, latN, lonN])
                #vardiffsgSum = npy.reshape(vardiffsgSum,[timN,latN,lonN])

                isonVarBowl = maskVal(isonVarBowl, valmask)
                #vardiffsgSum = maskVal(vardiffsgSum, valmask)
                # Find max of Std dev of all members
                isonVarStd = npy.ma.max(varstd, axis=0)
                # mask
                isonVarStd = maskVal(isonVarStd, valmask)

                # Write
                #isonave = cdm.createVariable(isonVarAve, axes = sigmaTimeList, id = isonRead.id)
                #isonave.long_name = isonRead.long_name
                #isonave.units     = isonRead.units
                #vardiffsgSum = npy.reshape(vardiffsgSum,[delta_ib,timN,latN,lonN])
                #isonavediff = cdm.createVariable(vardiffsgSum, axes = sigmaTimeList, id = isonRead.id+'Agree')
                #isonavediff.long_name = isonRead.long_name
                #isonavediff.units     = isonRead.units
                isonVarBowl = npy.reshape(isonVarBowl,
                                          [delta_ib, timN, latN, lonN])
                isonavebowl = cdm.createVariable(isonVarBowl,
                                                 axes=sigmaTimeList,
                                                 id=isonRead.id + 'Bowl')
                isonavebowl.long_name = isonRead.long_name
                isonavebowl.units = isonRead.units
                isonVarStd = npy.reshape(isonVarStd, [delta_ib, latN, lonN])
                isonmaxstd = cdm.createVariable(isonVarStd,
                                                axes=sigmaList,
                                                id=isonRead.id + 'Std')
                isonmaxstd.long_name = isonRead.long_name
                isonmaxstd.units = isonRead.units

                #outFile_f.write(    isonave.astype('float32'), extend = 1, index = ib)
                #outFile_f.write(isonavediff.astype('float32'), extend = 1, index = ib)
                outFile_f.write(isonavebowl.astype('float32'),
                                extend=1,
                                index=ib)
                outFile_f.write(isonmaxstd.astype('float32'),
                                extend=1,
                                index=ib)

            tim3 = timc.clock()

            if ToeType == 'histnat':
                isontoe1 = cdm.createVariable(
                    varToE1,
                    axes=[ensembleAxis, axesList[1], axesList[2], axesList[3]],
                    id=isonRead.id + 'ToE1')
                isontoe1.long_name = 'ToE 1 for ' + isonRead.long_name
                isontoe1.units = 'Year'
                isontoe2 = cdm.createVariable(
                    varToE2,
                    axes=[ensembleAxis, axesList[1], axesList[2], axesList[3]],
                    id=isonRead.id + 'ToE2')
                isontoe2.long_name = 'ToE 2 for ' + isonRead.long_name
                isontoe2.units = 'Year'
                outFile_f.write(isontoe1.astype('float32'), extend=1, index=ib)
                outFile_f.write(isontoe2.astype('float32'), extend=1, index=ib)

            tim4 = timc.clock()
        # <--- end of loop on variables

        #print 'ib, timing',ib, tim01-tim0,tim1-tim01,tim2-tim1,tim3-tim2,tim4-tim3
    # <--- end of loop on density
    print ' '

    outFile_f.close()
    fi.close()
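
A hypothetical invocation of mmeAveMsk3D(), followed by the ncpdq reordering step that the comment inside the density loop refers to; all paths, file names and axis names below are placeholders:

import subprocess

# Hypothetical usage of mmeAveMsk3D(); arguments are placeholders.
listFiles = ['cmip5.model1.historical.r1i1p1.an.ocn.2D.nc',
             'cmip5.model2.historical.r1i1p1.an.ocn.2D.nc']
mmeAveMsk3D(listFiles,
            years=[0, 140],
            inDir=['/work/binned'],
            outDir='/work/mme',
            outFile='cmip5.mme.historical.an.ocn.2D.nc',
            timeInt=[1, 20],
            mme=False,
            ToeType='F')  # 'F' = skip ToE computation

# The density axis was written as the record (UNLIMITED) dimension;
# reorder so that time leads, using NCO's ncpdq (assumed to be
# installed, and assuming the axes are named 'time' and 'lev').
subprocess.check_call(['ncpdq', '-a', 'time,lev',
                       '/work/mme/cmip5.mme.historical.an.ocn.2D.nc',
                       '/work/mme/cmip5.mme.historical.an.ocn.2D.reordered.nc'])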
Example #43
0
 def setup_cdms2(self):
     cdms2.setNetcdfShuffleFlag(0)  # Argument is either 0 or 1
     cdms2.setNetcdfDeflateFlag(0)  # Argument is either 0 or 1
     cdms2.setNetcdfDeflateLevelFlag(0)  # Argument is int between 0 and 9
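
With all three flags at 0 as above, anything written through cdms2 afterwards comes out as plain, uncompressed netCDF; a minimal sketch (file and variable names are made up):

import cdms2
import numpy

# After setup_cdms2() has run, this write produces an uncompressed,
# unshuffled netCDF file.
var = cdms2.createVariable(numpy.zeros((4, 4), dtype='f'), id='dummy')
f = cdms2.open('uncompressed_example.nc', 'w')
f.write(var)
f.close()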
Example #44
0
    def processCmdLine(self):
        parser = argparse.ArgumentParser(
            description="UV-CDAT Climate Modeling Diagnostics", usage="%(prog)s --path1 [options]"
        )

        parser.add_argument(
            "--path",
            "-p",
            action="append",
            nargs=1,
            help="Path(s) to dataset(s). This is required.  If two paths need different filters, set one here and one in path2.",
        )
        parser.add_argument("--path2", "-q", action="append", nargs=1, help="Path to a second dataset.")
        parser.add_argument("--obspath", action="append", nargs=1, help="Path to an observational dataset")
        parser.add_argument("--cachepath", nargs=1, help="Path for temporary and cachced files. Defaults to /tmp")
        #      parser.add_argument('--realm', '-r', nargs=1, choices=self.realm_types,
        #         help="The realm type. Current valid options are 'land' and 'atmosphere'")
        parser.add_argument(
            "--filter",
            "-f",
            nargs=1,
            help="A filespec filter. This will be applied to the dataset path(s) (--path option) to narrow down file choices.",
        )
        parser.add_argument(
            "--filter2",
            "-g",
            nargs=1,
            help="A filespec filter. This will be applied to the second dataset path (--path2 option) to narrow down file choices.",
        )
        parser.add_argument(
            "--new_filter",
            "-F",
            action="append",
            nargs=1,
            help="A filespec filter. This will be applied to the corresponding dataset path to narrow down file choices.",
        )
        parser.add_argument(
            "--packages",
            "--package",
            "-k",
            nargs="+",
            help="The diagnostic packages to run against the dataset(s). Multiple packages can be specified.",
        )
        parser.add_argument(
            "--sets",
            "--set",
            "-s",
            nargs="+",
            help="The sets within a diagnostic package to run. Multiple sets can be specified. If multiple packages were specified, the sets specified will be searched for in each package",
        )
        parser.add_argument(
            "--vars",
            "--var",
            "-v",
            nargs="+",
            help="Specify variables of interest to process. The default is all variables which can also be specified with the keyword ALL",
        )
        parser.add_argument(
            "--list",
            "-l",
            nargs=1,
            choices=["sets", "vars", "variables", "packages", "seasons", "regions", "translations", "options"],
            help="Determine which packages, sets, regions, variables, and variable options are available",
        )
        # maybe eventually add compression level too....
        parser.add_argument(
            "--compress",
            nargs=1,
            choices=["no", "yes"],
            help="Turn off netCDF compression. This can be required for other utilities to be able to process the output files (e.g. parallel netCDF based tools",
        )  # no compression, add self state

        parser.add_argument(
            "--outputpre",
            nargs=1,
            help="Specify an output filename prefix to be prepended to all file names created internally. For example --outputpre myout might generate myout-JAN.nc, etc",
        )
        parser.add_argument(
            "--outputpost",
            nargs=1,
            help="Specify an output filename postfix to be appended to all file names created internally. For example --outputpost _OBS might generate set1-JAN_OBS.nc, etc",
        )
        parser.add_argument("--outputdir", "-O", nargs=1, help="Directory in which output files will be written.")

        parser.add_argument(
            "--seasons", nargs="+", choices=all_seasons, help="Specify which seasons to generate climatoogies for"
        )
        parser.add_argument("--years", nargs="+", help="Specify which years to include when generating climatologies")
        parser.add_argument(
            "--months", nargs="+", choices=all_months, help="Specify which months to generate climatologies for"
        )
        parser.add_argument(
            "--climatologies",
            "-c",
            nargs=1,
            choices=["no", "yes"],
            help="Specifies whether or not climatologies should be generated",
        )
        parser.add_argument(
            "--plots", "-t", nargs=1, choices=["no", "yes"], help="Specifies whether or not plots should be generated"
        )
        parser.add_argument("--plottype", nargs=1)
        parser.add_argument(
            "--precomputed",
            nargs=1,
            choices=["no", "yes"],
            help="Specifies whether standard climatologies are stored with the dataset (*-JAN.nc, *-FEB.nc, ... *-DJF.nc, *-year0.nc, etc",
        )
        parser.add_argument(
            "--json",
            "-j",
            nargs=1,
            choices=["no", "yes"],
            help="Produce JSON output files as part of climatology/diags generation",
        )  # same
        parser.add_argument(
            "--netcdf",
            "-n",
            nargs=1,
            choices=["no", "yes"],
            help="Produce NetCDF output files as part of climatology/diags generation",
        )  # same
        parser.add_argument(
            "--xml",
            "-x",
            nargs=1,
            choices=["no", "yes"],
            help="Produce XML output files as part of climatology/diags generation",
        )
        parser.add_argument(
            "--seasonally",
            action="store_true",
            help="Produce climatologies for all of the defined seasons. To get a list of seasons, run --list seasons",
        )
        parser.add_argument("--monthly", action="store_true", help="Produce climatologies for all predefined months")
        parser.add_argument(
            "--yearly", action="store_true", help="Produce annual climatogolies for all years in the dataset"
        )
        parser.add_argument(
            "--timestart", nargs=1, help="Specify the starting time for the dataset, such as 'months since Jan 2000'"
        )
        parser.add_argument(
            "--timebounds",
            nargs=1,
            choices=["daily", "monthly", "yearly"],
            help="Specify the time boudns for the dataset",
        )
        parser.add_argument(
            "--verbose",
            "-V",
            action="count",
            help="Increase the verbosity level. Each -v option increases the verbosity more.",
        )  # count
        parser.add_argument(
            "--name", action="append", nargs=1, help="Specify option names for the datasets for plot titles, etc"
        )  # optional name for the set
        # This will be the standard list of region names NCAR has
        parser.add_argument(
            "--regions",
            "--region",
            nargs="+",
            choices=all_regions.keys(),
            help="Specify a geographical region of interest. Note: Multi-word regions need quoted, e.g. 'Central Canada'",
        )
        parser.add_argument("--starttime", nargs=1, help="Specify a start time in the dataset")
        parser.add_argument("--endtime", nargs=1, help="Specify an end time in the dataset")
        parser.add_argument(
            "--translate",
            nargs="?",
            default="y",
            help="Enable translation for obs sets to datasets. Optional provide a colon separated input to output list e.g. DSVAR1:OBSVAR1",
        )
        parser.add_argument("--varopts", nargs="+", help="Variable auxillary options")

        args = parser.parse_args()

        if args.list != None:
            if args.list[0] == "translations":
                print "Default variable translations: "
                self.listTranslations()
                quit()
            if args.list[0] == "regions":
                print "Available geographical regions: ", all_regions.keys()
                quit()

            if args.list[0] == "seasons":
                print "Available seasons: ", all_seasons
                quit()

            if args.list[0] == "packages":
                print "Listing available packages:"
                print self.all_packages.keys()
                quit()

            if args.list[0] == "sets":
                if args.packages == None:
                    print "Please specify package before requesting available diags sets"
                    quit()
                for p in args.packages:
                    print "Avaialble sets for package ", p, ":"
                    sets = self.listSets(p)
                    keys = sets.keys()
                    for k in keys:
                        print "Set", k, " - ", sets[k]
                quit()

            if args.list[0] == "variables" or args.list[0] == "vars":
                if args.path != None:
                    for i in args.path:
                        self._opts["path"].append(i[0])
                else:
                    print "Must provide a dataset when requesting a variable listing"
                    quit()
                self.listVariables(args.packages, args.sets)
                quit()
            if args.list[0] == "options":
                if args.path != None:
                    for i in args.path:
                        self._opts["path"].append(i[0])
                else:
                    print "Must provide a dataset when requesting a variable listing"
                    quit()
                self.listVarOptions(args.packages, args.sets, args.vars)
                quit()

        # Generally if we've gotten this far, it means no --list was specified. If we don't have
        # at least a path, we should exit.
        if args.path != None:
            for i in args.path:
                self._opts["path"].append(i[0])
        else:
            print "Must specify a path or the --list option at a minimum."
            print 'For help, type "diags --help".'
            quit()
        if args.path2 != None:
            for i in args.path2:
                self._opts["path2"].append(i[0])

        if args.obspath != None:
            for i in args.obspath:
                self._opts["obspath"].append(i[0])

        # TODO: Should some pre-defined filters be "nameable" here?
        if args.filter != None:  # Only supports one filter argument, see filter2.
            self._opts["filter"] = args.filter[0]
            self._opts["user_filter"] = True
        #         for i in args.filter:
        #            self._opts['filter'].append(i[0])
        if args.filter2 != None:  # This is a second filter argument.
            self._opts["filter2"] = args.filter2[0]
            self._opts["user_filter"] = True
        if args.new_filter != None:  # like filter but with multiple arguments
            for i in args.new_filter:
                self._opts["new_filter"].append(i[0])

        if args.cachepath != None:
            self._opts["cachepath"] = args.cachepath[0]

        self._opts["seasonally"] = args.seasonally
        self._opts["monthly"] = args.monthly

        if args.starttime != None:
            self._opts["start"] = args.starttime[0]

        if args.endtime != None:
            self._opts["end"] = args.endtime[0]

        # I checked: these flags are process-global; re-importing cdms2 does not
        # reset them, so setting them once covers the whole Python process.
        if args.compress != None:
            if args.compress[0] == "no":
                self._opts["compress"] = False
            else:
                self._opts["compress"] = True

        if self._opts["compress"] == True:
            print "Enabling compression for output netCDF files"
            cdms2.setNetcdfShuffleFlag(1)
            cdms2.setNetcdfDeflateFlag(1)
            cdms2.setNetcdfDeflateLevelFlag(9)
        else:
            print "Disabling compression for output netCDF files"
            cdms2.setNetcdfShuffleFlag(0)
            cdms2.setNetcdfDeflateFlag(0)
            cdms2.setNetcdfDeflateLevelFlag(0)
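        # Sketch (not in the original): cdms2 also exposes matching getters, so the
        # process-global state noted above can be spot-checked, e.g.
        #   print cdms2.getNetcdfShuffleFlag(), cdms2.getNetcdfDeflateFlag(), \
        #         cdms2.getNetcdfDeflateLevelFlag()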

        if args.json != None:
            if args.json[0] == "no":
                self._opts["json"] = False
            else:
                self._opts["json"] = True
        if args.xml != None:
            if args.xml[0] == "no":
                self._opts["xml"] = False
            else:
                self._opts["xml"] = True

        if args.netcdf != None:
            if args.netcdf[0] == "no":
                self._opts["netcdf"] = False
            else:
                self._opts["netcdf"] = True

        if args.plots != None:
            if args.plots[0].lower() == "no" or args.plots[0] == 0:
                self._opts["plots"] = False
            else:
                self._opts["plots"] = True

        if args.climatologies != None:
            if args.climatologies[0] == "no":
                self._opts["climatologies"] = False
            else:
                self._opts["climatologies"] = True

        self._opts["verbose"] = args.verbose

        if args.name != None:
            for i in args.name:
                self._opts["dsnames"].append(i[0])

        # Help create output file names
        if args.outputpre != None:
            self._opts["outputpre"] = args.outputpre[0]
        if args.outputpost != None:
            self._opts["outputpost"] = args.outputpost[0]

        # Output directory
        if args.outputdir != None:
            if not os.path.isdir(args.outputdir[0]):
                print "ERROR, output directory", args.outputdir[0], "does not exist!"
                quit()
            self._opts["outputdir"] = args.outputdir[0]

        if args.translate != "y":
            print args.translate
            print self._opts["translate"]
            quit()
        # Timestart assumes a string like "months since 2000". I can't find documentation on
        # toRelativeTime() so I have no idea how to check for valid input
        # This is required for some of the land model sets I've seen
        if args.timestart != None:
            self._opts["reltime"] = args.timestart

        # cdutil.setTimeBounds{bounds}(variable)
        if args.timebounds != None:
            self._opts["bounds"] = args.timebounds

        # Check if a user specified package actually exists
        # Note: This is case sensitive.....
        if args.packages != None:
            plist = []
            for x in args.packages:
                if x.upper() in self.all_packages.keys():
                    plist.append(x)
                elif x in self.all_packages.keys():
                    plist.append(x.lower())

            if plist == []:
                print "Package name(s) ", args.packages, " not valid"
                print "Valid package names: ", self.all_packages.keys()
                quit()
            else:
                self._opts["packages"] = plist

        # TODO: Requires exact case; probably make this more user friendly and look for mixed case
        if args.regions != None:
            rlist = []
            for x in args.regions:
                if x in all_regions.keys():
                    rlist.append(x)
            print "REGIONS: ", rlist
            self._opts["regions"] = rlist

        # Given user-selected packages, check for user specified sets
        # Note: If multiple packages have the same set names, then they are all added to the list.
        # This might be bad since there is no differentiation of lmwg['id==set'] and lmwg2['id==set']
        if self._opts["packages"] == None and args.sets != None:
            print "No package specified"
            self._opts["sets"] = args.sets

        if args.sets != None and self._opts["packages"] != None:
            # unfortunately, we have to go through all of this....
            # there should be a non-init of the class method to list sets/packages/etc,
            # ie a dictionary perhaps?
            sets = []
            import metrics.fileio.filetable as ft
            import metrics.fileio.findfiles as fi
            import metrics.packages.diagnostic_groups

            package = self._opts["packages"]
            if package[0].lower() == "lmwg":
                import metrics.packages.lmwg.lmwg
            elif package[0].lower() == "amwg":
                import metrics.packages.amwg.amwg
            dtree = fi.dirtree_datafiles(self, pathid=0)
            filetable = ft.basic_filetable(dtree, self)
            dm = metrics.packages.diagnostic_groups.diagnostics_menu()

            pclass = dm[package[0].upper()]()

            slist = pclass.list_diagnostic_sets()
            keys = slist.keys()
            keys.sort()
            for k in keys:
                fields = k.split()
                for user in args.sets:
                    if user == fields[0]:
                        sets.append(user)
            self._opts["sets"] = sets
            if sets != args.sets:
                print "sets requested ", args.sets
                print "sets available: ", slist
                exit(1)

        # check for some varopts first.
        if args.varopts != None:
            self._opts["varopts"] = args.varopts
        # Add some hackery here to convert pressure level vars to var+varopts
        if args.vars != None:
            self._opts["vars"] = args.vars

            vpl = ["Z3_300", "Z3_500", "U_200", "T_200", "T_850"]
            vl = list(set(args.vars) - set(vpl))
            if vl == args.vars:  # no pressure level vars made it this far.
                print "No pressure level vars found in input vars list."
            else:  # more complicated....
                print "Pressure level vars found in input vars list.... Processing...."
                vopts = []
                if (
                    self._opts["varopts"] != [] and self._opts["varopts"] != None
                ):  # hopefully the user didn't also specify varopts....
                    print "User passed in varopts but there are pressure-level variables in the vars list."
                    print "This will append the pressure levels found to the varopts array"
                    # see which pressure level vars were passed. this will be the super set of pressure levels.
                if "Z3_300" in self._opts["vars"]:
                    vopts.append("300")
                    self._opts["vars"] = [x.replace("Z3_300", "Z3") for x in self._opts["vars"]]
                if "Z3_500" in self._opts["vars"]:
                    vopts.append("500")
                    self._opts["vars"] = [x.replace("Z3_500", "Z3") for x in self._opts["vars"]]
                if "T_200" in self._opts["vars"]:
                    vopts.append("200")
                    self._opts["vars"] = [x.replace("T_200", "T") for x in self._opts["vars"]]
                if "T_850" in self._opts["vars"]:
                    vopts.append("850")
                    self._opts["vars"] = [x.replace("T_850", "T") for x in self._opts["vars"]]
                if "U_200" in self._opts["vars"]:
                    vopts.append("200")
                    self._opts["vars"] = [x.replace("U_200", "U") for x in self._opts["vars"]]
                vopts = list(set(vopts))
                if self._opts["varopts"] == [] or self._opts["varopts"] == None:
                    self._opts["varopts"] = vopts
                else:
                    self._opts["varopts"].extend(vopts)
                    self._opts["varopts"] = list(set(self._opts["varopts"]))
                print "Updated vars list: ", self._opts["vars"]

        # If --yearly is set, then we will add 'ANN' to the list of climatologies
        if args.yearly == True:
            self._opts["yearly"] = True
            self._opts["times"].append("ANN")

        # If --monthly is set, we add all months to the list of climatologies
        if args.monthly == True:
            self._opts["monthly"] = True
            self._opts["times"].extend(all_months)

        # If --seasonally is set, we add all 4 seasons to the list of climatologies
        if args.seasonally == True:
            self._opts["seasonally"] = True
            self._opts["times"].extend(all_seasons)

        # This allows specific individual months to be added to the list of climatologies
        if args.months != None:
            if args.monthly == True:
                print "Please specify just one of --monthly or --months"
                quit()
            else:
                mlist = [x for x in all_months if x in args.months]
                self._opts["times"] = self._opts["times"] + mlist

        # This allows specific individual years to be added to the list of climatologies.
        # Note: Checking for valid input is impossible until we look at the dataset
        # This has to be special cased since typically someone will be saying
        # "Generate climatologies for seasons for years X, Y, and Z of my dataset"
        if args.years != None:
            if args.yearly == True:
                print "Please specify just one of --yearly or --years"
                quit()
            else:
                self._opts["years"] = args.years

        if args.seasons != None:
            if args.seasonally == True:
                print "Please specify just one of --seasonally or --seasons"
                quit()
            else:
                slist = [x for x in all_seasons if x in args.seasons]
                self._opts["times"] = self._opts["times"] + slist
    outdim=outGY.shape

    outvar=numpy.zeros( (dim[0], outdim[0], outdim[1] )) + 1.e20
    for itime in range(dim[0]):
        tmp = interpolate.griddata(numpy.reshape(points, (dim[1]*dim[2],2)), numpy.ravel(yvar[itime]), (outGY, outGX), method=interpol)
        outvar[itime] = numpy.flipud(tmp)

    infile.close()

    return outvar
# ______________________
if __name__=='__main__':
    nodata = 1.e20
    (referenceGrid, latAxis, lonAxis, latBounds, lonBounds) = makeGrid()
    # for netcdf3: set flags to 0
    cdms2.setNetcdfShuffleFlag(1)
    cdms2.setNetcdfDeflateFlag(1)
    cdms2.setNetcdfDeflateLevelFlag(3)

    indir='/data/sst/reynolds_climatology/noaa_oist_v2/'
    outdir='/data/sst/reynolds_climatology/noaa_oist_v2/resized_fitted/'
    
    infile=indir+'/max_sst.ltm.1971-2000.nc'
    outvar = do_resize('sst', infile)
    saveData(outdir+'/max_sst.ltm.1971-2000_resized.nc', outvar, typecode='f', id='sst', fill_value=nodata, grid=referenceGrid, copyaxes=1, attribute1='real Climato max', attribute2='Degrees Celsius', latAxis=latAxis, lonAxis=lonAxis)
    
    infile=indir+'sst.ltm.1971-2000.nc'
    outvar = do_resize_multi('sst',infile)
    outfile=outdir+'sst.ltm.1971-2000_resized.nc'
    if os.path.exists(outfile): os.remove(outfile)
    fh=cdms2.open(outfile, 'w')
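    # (The original example is truncated here; a plausible completion, assuming
    #  do_resize_multi returns a plain array like do_resize does, would be:)
    #   outv = cdms2.createVariable(outvar, typecode='f', id='sst', fill_value=nodata)
    #   fh.write(outv)
    #   fh.close()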
Example #46
0
def correctFile(idxcorr, ncorr, inFile, inDir, outFile, outDir):
    '''
    Correct density binned files (undefined ptop & longitude-0 issue)
    idxcorr = [idx_i,idx_i1,jmax] indices for longitude correction - if [0,0,0] ignore
    ncorr   = number of corrections: 1 or 2
    '''
    # CDMS initialisation - netCDF compression
    comp = 1  # 0 for no compression
    cdm.setNetcdfShuffleFlag(comp)
    cdm.setNetcdfDeflateFlag(comp)
    cdm.setNetcdfDeflateLevelFlag(comp)
    cdm.setAutoBounds('on')
    # Numpy initialisation
    npy.set_printoptions(precision=2)

    varList3D = ['isondepthg', 'isonthickg', 'sog', 'thetaog']
    varList2D = [
        'ptopsoxy', 'ptopdepthxy', 'ptopsigmaxy', 'ptopthetaoxy', 'persistmxy'
    ]

    # First pass: read and write one time step at a time (memory management)
    # use ncpdq -a time,lev,lat,lon to recover the dimension order

    fi = cdm.open(inDir + '/' + inFile)
    fo = cdm.open(outDir + '/' + outFile, 'w')
    isondg = fi['isondepthg']
    # Create variable handle
    # Get grid objects
    #axesList = isondg.getAxisList()
    #sigmaGrd = isondg.getLevel()
    lonN = isondg.shape[3]
    latN = isondg.shape[2]
    levN = isondg.shape[1]
    timN = isondg.shape[0]
    #valmask = isondg.missing_value

    if ncorr == 2:
        ic1 = idxcorr[0][0]
        ic2 = idxcorr[0][1]
        jcmax = idxcorr[0][2]
        ic12 = idxcorr[1][0]
        ic22 = idxcorr[1][1]
        jcmax2 = idxcorr[1][2]
        if ic2 >= lonN - 1:
            ic2 = 0
        if ic22 >= lonN - 1:
            ic22 = 0
    elif ncorr == 1:
        ic1 = idxcorr[0]
        ic2 = idxcorr[1]
        jcmax = idxcorr[2]
        if ic2 >= lonN - 1:
            ic2 = 0
    #print ic1,ic2,jcmax
    corr_long = True
    if ic1 == 0 and ic2 == 0 and jcmax == 0:
        corr_long = False
    #testp = 10
    for it in range(timN):
        #if it/testp*testp == it:
        #    print ' year =',it
        # test
        #i = 90
        #j = 90
        #i2d = 6
        #j2d = 12
        #ij = j*lonN+i
        #ij2d = j2d*lonN+i2d
        #print 'ij=',ij
        # 3D variables
        for iv in varList3D:
            #print iv
            outVar = fi(iv, time=slice(it, it + 1))
            # Correct for longitude interpolation issue
            if corr_long:
                for jt in range(jcmax):
                    outVar[:, :, jt, ic1] = (outVar[:, :, jt, ic1 - 1] +
                                             outVar[:, :, jt, ic2 + 1]) / 2
                    outVar[:, :, jt, ic2] = outVar[:, :, jt, ic1]
                if ncorr == 2:
                    for jt in range(jcmax2):
                        outVar[:, :, jt,
                               ic12] = (outVar[:, :, jt, ic12 - 1] +
                                        outVar[:, :, jt, ic22 + 1]) / 2
                        outVar[:, :, jt, ic22] = outVar[:, :, jt, ic12]
#             # Correct Bowl properties
#             if iv =='isondepthg':
#                 vardepth = npy.reshape(outVar,(levN,latN*lonN))
#                 #print 'test'
#                 #print outVar[:,:,j2d,i2d]
#                 #print vardepth[:,ij2d]
#                 # find values of surface points
#                 vardepthBowl = npy.min(npy.reshape(outVar,(levN,latN*lonN)),axis=0)
#                 vardepthBowlTile = npy.repeat(vardepthBowl,levN,axis=0).reshape((latN*lonN,levN)).transpose()
#                 #print vardepthBowlTile.shape
#                 #print vardepthBowl[ij2d], vardepthBowlTile[:,ij2d]
#                 levs = outVar.getAxisList()[1][:]
#                 #print 'levs',levs
#                 levs3d  = mv.reshape(npy.tile(levs,latN*lonN),(latN*lonN,levN)).transpose()
#                 varsigmaBowl = npy.max(npy.where(vardepth == vardepthBowlTile,levs3d,0),axis=0)
#                 #print varsigmaBowl[ij2d],levs3d[:,ij2d]

#             elif iv == 'sog':
#                 varsog = npy.reshape(outVar,(levN,latN*lonN))
#                 varsoBowl = npy.max(npy.where(vardepth == vardepthBowlTile,varsog,0),axis=0)
#                 #print varsoBowl[ij2d], varsog[:,ij2d]
#                 #print vardepth[:,ij2d],vardepthBowlTile[:,ij2d]
#                 del (varsog); gc.collect()
#             elif iv =='thetaog':
#                 varthetao = npy.reshape(outVar,(levN,latN*lonN))
#                 varthetaoBowl = npy.max(npy.where(vardepth == vardepthBowlTile,varthetao,-1000),axis=0)
#                 #print varthetaoBowl[ij2d],varthetao[:,ij2d]
#                 del (varthetao); gc.collect()
            # Write
            fo.write(outVar.astype('float32'), extend=1, index=it)
            fo.sync()
        # vardepth is only defined in the commented-out bowl-correction block
        # above, so deleting it here would raise a NameError; just collect.
        gc.collect()
        # 2D variables and correct isondepthg = 0
        for iv in varList2D:
            outVar = fi(iv, time=slice(it, it + 1))
            # Correct for longitude interpolation issue
            if corr_long:
                for jt in range(jcmax):
                    outVar[:, jt, ic1] = (outVar[:, jt, ic1 - 1] +
                                          outVar[:, jt, ic2 + 1]) / 2
                    outVar[:, jt, ic2] = outVar[:, jt, ic1]
                if ncorr == 2:
                    for jt in range(jcmax2):
                        outVar[:, jt, ic12] = (outVar[:, jt, ic12 - 1] +
                                               outVar[:, jt, ic22 + 1]) / 2
                        outVar[:, jt, ic22] = outVar[:, jt, ic12]
            # Correct for ptopsoxy < 30
            #print 'before',outVar[:,j2d,i2d]
#             if iv == 'ptopsoxy':
#                 testso = npy.reshape(outVar,(latN*lonN)) < 30.
#                 #print 'testdepth', testdepth[ij2d]
#                 #print npy.argwhere(testdepth)[0:10]/lonN, npy.argwhere(testdepth)[0:10]-npy.argwhere(testdepth)[0:10]/lonN*lonN
#                 outVar.data[...] = npy.where(testso,varsoBowl,npy.reshape(outVar,(latN*lonN))).reshape(outVar.shape)[...]
#             elif iv == 'ptopdepthxy':
#                 outVar.data[...] = npy.where(testso,vardepthBowl,npy.reshape(outVar,(latN*lonN))).reshape(outVar.shape)[...]
#             elif iv == 'ptopthetaoxy':
#                 outVar.data[...] = npy.where(testso,varthetaoBowl,npy.reshape(outVar,(latN*lonN))).reshape(outVar.shape)[...]
#             elif iv == 'ptopsigmaxy':
#                 outVar.data[...] = npy.where(testso,varsigmaBowl,npy.reshape(outVar,(latN*lonN))).reshape(outVar.shape)[...]
#print 'after',outVar[:,j2d,i2d]

            # Write
            fo.write(outVar.astype('float32'), extend=1, index=it)
            fo.sync()

    fi.close()
    fo.close()


# testing

#model = 'CCSM4'
#idxcorr=[139,140,145]
#ncorr = 1
#inFile = 'cmip5.CCSM4.historical24.r1i1p1.an.ocn.Omon.density.ver-v20121128.nc'
#inDir = '/Users/ericg/Projets/Density_bining/Raw_testing'
#outFile = 'cmip5.CCSM4.historical24.outtest.nc'

#model = 'CanESM2'
#idxcorr=[179,180,180]
#ncorr = 1
#inFile = 'cmip5.CanESM2.historical24.r1i1p1.an.ocn.Omon.density.ver-1.nc'
#inDir = '/Users/ericg/Projets/Density_bining/Raw_testing'
#outFile = 'cmip5.CanESM2.historical24.outtest.nc'

#model = 'IPSL-CM5A-LR'
#idxcorr=[0,0,0]
#ncorr=1
#inFile = 'cmip5.IPSL-CM5A-LR.historical24.r1i1p1.an.ocn.Omon.density.ver-v20111119.nc'
#inDir = '/Users/ericg/Projets/Density_bining/Raw_testing'
#outFile = 'cmip5.IPSL-CM5A-LR.historical24.outtest.nc'

#model = 'Ishii'
#idxcorr=[[359,359,39],[180,180,180]]
#idxcorr=[359,359,39]
#ncorr = 1
#inFile = 'obs.Ishii.historical.r0i0p0.an.ocn.Omon.density.ver-1.latestX.nc'
#inDir='/Volumes/hciclad/data/Density_binning/Prod_density_obs_april16'
#outFile = 'obs.Ishii.historical.r0i0p0.an.ocn.Omon.density.ver-1.latestXCorr.nc'

#model = 'EN4'
#idxcorr=[[359,359,39],[180,180,180]]
#idxcorr=[359,359,39]
#ncorr = 2
#inFile = 'obs.EN4.historical.r0i0p0.mo.ocn.Omon.density.ver-1.latestX.nc'
#inDir='/Volumes/hciclad/data/Density_binning/Prod_density_obs_april16'
#outFile = 'obs.EN4.historical.r0i0p0.mo.ocn.Omon.density.ver-1.latestXCorr.nc'

#outDir = inDir

#correctFile(idxcorr, ncorr, inFile, inDir, outFile, outDir)
Example #47
0
def _ERA5_2_N480(year_start, month_start, day_start, hr_start, year_end,
                 month_end, day_end, hr_end, data_dir, cdo_path):
    #--------------------------------------------------------------

    dt = datetime.datetime(year_start, month_start, day_start, hr_start)
    dt_end = datetime.datetime(year_end, month_end, day_end, hr_end)
    delt_1hr = datetime.timedelta(hours=1)
    print(dt)
    print(dt_end)
    print(delt_1hr)
    print(' ')

    while dt <= dt_end:
        ymd = '%04d%02d%02d' % (dt.year, dt.month, dt.day)
        filedate = '%04d-%02d-%02d' % (dt.year, dt.month, dt.day)
        hour_str = '%02d' % dt.hour

        in_filename = 'UVTQPS-' + ymd + '_' + hour_str + '.grib'

        print(in_filename)
        print(filedate)
        print(ymd)

        dirname = data_dir + '/'
        os.chdir(dirname)

        # cdo sinfon *.grib    #check grib variable information
        # select spectral fields of sfc z(129), ln sfc p(152)
        #--------------------------------
        sel2d_call = cdo_path + 'cdo selvar,z,lnsp' + ' ' + \
                     in_filename + ' ' + 'ml_2d.grib'
        print(sel2d_call)
        sel2d = subprocess.Popen(sel2d_call, shell=True)
        sel2d.wait()

        # select spectral fields of T(130)
        #--------------------------------
        #selSp_call = cdo_path + 'cdo selcode,129,152,130' + ' ' + \
        selt_call = cdo_path + 'cdo selvar,t' + ' ' + \
                    in_filename + ' ' + 'ml_t.grib'
        print(selt_call)
        selt = subprocess.Popen(selt_call, shell=True)
        selt.wait()

        # Convert spectral to full gg
        #--------------------------------
        sp2gp_call = cdo_path + 'cdo sp2gp' + ' ' + \
                     'ml_2d.grib' + ' ' + 'ml_2d_fgg.grib'
        print(sp2gp_call)
        spp = subprocess.Popen(sp2gp_call, shell=True)
        spp.wait()

        sp2gp_call = cdo_path + 'cdo sp2gp' + ' ' + \
                     'ml_t.grib' + ' ' + 'ml_t_fgg.grib'
        print(sp2gp_call)
        spp = subprocess.Popen(sp2gp_call, shell=True)
        spp.wait()

        # select div(155), vor(138)  in spectral
        #--------------------------------
        divVor_call = cdo_path + 'cdo selvar,d,vo' + ' ' + \
                      in_filename + ' ' + 'ml_divvo.grib'
        print(divVor_call)
        divv = subprocess.Popen(divVor_call, shell=True)
        divv.wait()

        # convert div, vor spectral to u,v full gaussian
        #--------------------------------
        dv2uv_call = cdo_path + 'cdo dv2uv' + ' ' + \
                     'ml_divvo.grib' + ' ' + 'ml_uv_fgg.grib'
        print(dv2uv_call)
        dvuv = subprocess.Popen(dv2uv_call, shell=True)
        dvuv.wait()

        # select reduced gg fields Q (133)
        #--------------------------------
        selRgg_call = cdo_path + 'cdo selvar,q' + ' ' + \
                      in_filename + ' ' + 'ml_q_rgg.grib'
        print(selRgg_call)
        slgg = subprocess.Popen(selRgg_call, shell=True)
        slgg.wait()

        # convert reduced gg to full gg
        #----------------------------
        #fgg_call = cdo_path + 'cdo -R copy' + ' ' + \
        fgg_call = cdo_path + 'cdo setgridtype,regular' + ' ' + \
                   'ml_q_rgg.grib' + ' ' + 'ml_q_fggN320.grib'
        print(fgg_call)
        fgg = subprocess.Popen(fgg_call, shell=True)
        fgg.wait()

        # Remap Q onto N480 grid (Q is on a different grid).
        #------------------------------------------------
        remap_call = cdo_path + 'cdo remapbil,n480' + ' ' + \
                     'ml_q_fggN320.grib' + ' ' + 'ml_q_fgg.grib'
        print(remap_call)
        rmp = subprocess.Popen(remap_call, shell=True)
        rmp.wait()

        # Combine all the files
        #------------------------------------------------
        cat2_call = 'cat ml_uv_fgg.grib ml_t_fgg.grib ml_2d_fgg.grib > ' + \
                    'UVTQPS-' + ymd + '_' + hour_str + '_N480_UVTn2D.grib'
        print(cat2_call)
        ct2 = subprocess.Popen(cat2_call, shell=True)
        ct2.wait()

        cp1_call = 'cp ml_q_fgg.grib' + ' ' + \
                   'UVTQPS-' + ymd + '_'  + hour_str + '_N480_Q.grib'
        print(cp1_call)
        cp1 = subprocess.Popen(cp1_call, shell=True)
        cp1.wait()

        # Convert grib2 to grib1 and make grads ctl for hourly files.
        #----------------------------------
        # Use cdo gradsdes to generate ctl files.
        # This only works for the grib1 format, so grib2 has to be converted to
        # grib1 first. In the process, cdo -f grb copy would mess up the variable
        # names, so the variables have to be separated into two files.
        grib2to1_call = cdo_path + 'cdo -f grb copy' + ' ' + \
                'UVTQPS-' + ymd + '_' + hour_str + '_N480_UVTn2D.grib' + ' ' + \
                'UVTQPS-' + ymd + '_' + hour_str + '_N480_UVTn2D.grib1'
        print(grib2to1_call)
        grb21 = subprocess.Popen(grib2to1_call, shell=True)
        grb21.wait()

        grib2to1_call = cdo_path + 'cdo -f grb copy' + ' ' + \
                'UVTQPS-' + ymd + '_'  + hour_str + '_N480_Q.grib' + ' ' + \
                'UVTQPS-' + ymd + '_'  + hour_str + '_N480_Q.grib1'
        print(grib2to1_call)
        grb21 = subprocess.Popen(grib2to1_call, shell=True)
        grb21.wait()

        gradsdes1_call = cdo_path + 'cdo gradsdes' + ' ' + \
                         'UVTQPS-' + ymd + '_' + hour_str + '_N480_UVTn2D.grib1'
        print(gradsdes1_call)
        grd1 = subprocess.Popen(gradsdes1_call, shell=True)
        grd1.wait()

        gradsdes2_call = cdo_path + 'cdo gradsdes' + ' ' + \
                         'UVTQPS-' + ymd + '_'  + hour_str + '_N480_Q.grib1'
        print(gradsdes2_call)
        grd2 = subprocess.Popen(gradsdes2_call, shell=True)
        grd2.wait()

        # Generate NC file
        #----------------------------------

        infile1 = 'UVTQPS-' + ymd + '_' + hour_str + '_N480_UVTn2D.grib1.ctl'
        infile2 = 'UVTQPS-' + ymd + '_' + hour_str + '_N480_Q.grib1.ctl'
        out_filename = 'UVTQPS-' + ymd + '_' + hour_str + '.nc'

        print(infile1)
        print(infile2)
        print(out_filename)

        fid1 = cdms2.open(infile1)
        fid2 = cdms2.open(infile2)

        var_u = fid1('var131')
        var_v = fid1('var132')
        var_t = fid1('var0')
        var_lnsp = fid1('var25')
        var_z = fid1('var4')
        var_q = fid2('var0')

        lev = var_u.getLevel()
        lat = var_u.getLatitude()
        lon = var_u.getLongitude()

        cdms2.setNetcdfShuffleFlag(0)
        cdms2.setNetcdfDeflateFlag(0)
        cdms2.setNetcdfDeflateLevelFlag(0)

        c = cdms2.open(out_filename, 'w')

        time = cdms2.createAxis([0.])
        time.id = 'time'
        time.units = 'hours since ' + filedate + ' ' + hour_str + ':00:00'
        time.long_name = 'time'
        time.axis = 'T'
        time.calendar = 'gregorian'

        print(time.units)

        var1 = cdms2.createVariable(var_u,
                                    axes=(time, lev, lat, lon),
                                    typecode='f',
                                    id='U')
        var1.long_name = 'zonal wind component'
        var1.units = 'm/s'

        var2 = cdms2.createVariable(var_v,
                                    axes=(time, lev, lat, lon),
                                    typecode='f',
                                    id='V')
        var2.long_name = 'meridional wind component'
        var2.units = 'm/s'

        var3 = cdms2.createVariable(var_t,
                                    axes=(time, lev, lat, lon),
                                    typecode='f',
                                    id='T')
        var3.long_name = 'temperature'
        var3.units = 'K'

        var4 = cdms2.createVariable(var_q,
                                    axes=(time, lev, lat, lon),
                                    typecode='f',
                                    id='Q')
        var4.long_name = 'specific humidity'
        var4.units = 'kg/kg'

        var5 = cdms2.createVariable(var_lnsp,
                                    axes=(time, lat, lon),
                                    typecode='f',
                                    id='PS')
        var5.long_name = 'surface pressure'
        var5.units = 'Pa'

        var6 = cdms2.createVariable(var_z,
                                    axes=(time, lat, lon),
                                    typecode='f',
                                    id='PHIS')
        var6.long_name = 'surface geopotential'
        var6.units = 'm2/s2'

        c.write(var1)
        c.write(var2)
        c.write(var3)
        c.write(var4)
        c.write(var5)
        c.write(var6)

        fid1.close()
        fid2.close()
        c.close()

        ######### Remove all temporary files and continue on to the next hour
        remove_call1 = 'rm -f' + ' ' + 'ml_2d.grib' + ' ' + 'ml_t.grib' + ' ' + \
                          'ml_2d_fgg.grib' + ' ' + 'ml_t_fgg.grib' + ' ' + \
                          'ml_divvo.grib' + ' ' + 'ml_uv_fgg.grib' + ' ' + \
                          'ml_q_rgg.grib' + ' ' + 'ml_q_fggN320.grib' + ' ' + 'ml_q_fgg.grib'
        print(remove_call1)
        rmc1 = subprocess.Popen(remove_call1, shell=True)
        rmc1.wait()

        remove_call2 = 'rm -f' + ' ' + '*_N480_*grib*'
        print(remove_call2)
        rmc2 = subprocess.Popen(remove_call2, shell=True)
        rmc2.wait()
        ##################################
        dt = dt + delt_1hr

    return
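
# Sketch (assumption, not from the original): the repeated Popen/wait pairs above
# could be wrapped in a small helper that also fails loudly on errors:
#
#   import subprocess
#
#   def _run(cmd):
#       """Run a shell command, echo it, and raise if it exits nonzero."""
#       print(cmd)
#       rc = subprocess.call(cmd, shell=True)
#       if rc != 0:
#           raise RuntimeError('command failed (%d): %s' % (rc, cmd))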
Example #48
0
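# (This snippet is assumed to continue from earlier code in which cdms2 was
#  imported and f = cdms2.open(...) was called on a file holding variable 'xe'.)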
nt = f['xe'].shape[0]
print f['xe'].getTime().asComponentTime()[0:nt:nt - 1]
#  -> [2008-8-15 0:0:0.0, 2008-8-15 23:0:0.0]

# Read a selection of the variable
import cdtime
xe = f('xe', ('2008-8-15', cdtime.comptime(2008, 8, 15, 12), 'cc'),
       lon=slice(5, 6),
       lat=(48.1, 48.5),
       squeeze=1)
print xe.shape
# -> (13, 29)
# squeeze removed the length-1 longitude axis

# Close the file we read
f.close()

# Set up netcdf4 compression
cdms2.setNetcdfShuffleFlag(1)
cdms2.setNetcdfDeflateFlag(1)
cdms2.setNetcdfDeflateLevelFlag(3)

# Create a new file
ncfile = 'misc-io-netcdf.nc'
import os
if os.path.exists(ncfile): os.remove(ncfile)
f = cdms2.open('misc-io-netcdf.nc', 'w')  # open for writing
f.write(xe)  # write a variable
f.history = 'Created with ' + __file__.encode('utf8')  # global attribute
f.close()  # close the file
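
# Quick round-trip check (sketch, not part of the original example):
#   f = cdms2.open(ncfile)
#   print f['xe'].shape
#   f.close()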
Example #49
0
def test_driver(path1, path2=None, filt2=None):
    """ Test driver for setting up data for plots"""

    # First, find and index the data files.
    datafiles1 = dirtree_datafiles(path1)
    print "jfp datafiles1=", datafiles1
    datafiles2 = dirtree_datafiles(path2, filt2)
    print "jfp datafiles2=", datafiles2
    filetable1 = basic_filetable(datafiles1)
    filetable2 = basic_filetable(datafiles2)

    # Next we'll compute reduced variables.  They have generally been reduced by averaging in time,
    # and often more axes as well.  Reducing the data first is the fastest way to compute, important
    # if we need to be interactive.  And it is correct if whatever we plot is linear in the
    # variables, as is almost always the case.  But if we want to plot a highly nonlinear function
    # of the data variables, the averaging will have to wait until later.
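
    # A tiny numeric illustration of that caveat (sketch, not part of the driver):
    # averaging commutes with linear maps but not with nonlinear ones, e.g. with
    # x = numpy.array([1.0, 3.0]):
    #   numpy.mean(2*x + 1) == 2*numpy.mean(x) + 1   # 5.0 == 5.0, linear is safe
    #   numpy.mean(x**2)    != numpy.mean(x)**2      # 5.0 != 4.0, nonlinear is not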

    # The reduced_variables dict names and contains all the reduced variables which we have defined.
    # They will be used in defining instances of plotspec.
    reduced_variables = {
        'hyam_1':
        reduced_variable(variableid='hyam',
                         filetable=filetable1,
                         reduction_function=(lambda x, vid=None: x)),
        'hybm_1':
        reduced_variable(variableid='hybm',
                         filetable=filetable1,
                         reduction_function=(lambda x, vid=None: x)),
        'PS_ANN_1':
        reduced_variable(variableid='PS',
                         filetable=filetable1,
                         reduction_function=reduce2lat),
        'T_CAM_ANN_1':
        reduced_variable(variableid='T',
                         filetable=filetable1,
                         reduction_function=reduce2levlat),
        'T_CAM_ANN_2':
        reduced_variable(variableid='T',
                         filetable=filetable2,
                         reduction_function=reduce2levlat),
        'TREFHT_ANN_latlon_Npole_1':
        reduced_variable(variableid='TREFHT',
                         filetable=filetable1,
                         reduction_function=(lambda x, vid=None: restrict_lat(
                             reduce2latlon(x, vid=vid), 50, 90))),
        'TREFHT_ANN_latlon_Npole_2':
        reduced_variable(variableid='TREFHT',
                         filetable=filetable2,
                         reduction_function=(lambda x, vid=None: restrict_lat(
                             reduce2latlon(x, vid=vid), 50, 90))),
        'TREFHT_ANN_lat_1':
        reduced_variable(variableid='TREFHT',
                         filetable=filetable1,
                         reduction_function=reduce2lat),
        'TREFHT_DJF_lat_1':
        reduced_variable(
            variableid='TREFHT',
            filetable=filetable1,
            reduction_function=(lambda x, vid=None: reduce2lat_seasonal(
                x, seasonsDJF, vid=vid))),
        'TREFHT_DJF_lat_2':
        reduced_variable(
            variableid='TREFHT',
            filetable=filetable2,
            reduction_function=(lambda x, vid=None: reduce2lat_seasonal(
                x, seasonsDJF, vid=vid))),
        'TREFHT_DJF_latlon_1':
        reduced_variable(
            variableid='TREFHT',
            filetable=filetable1,
            reduction_function=(lambda x, vid=None: reduce2latlon_seasonal(
                x, seasonsDJF, vid=vid))),
        'TREFHT_DJF_latlon_2':
        reduced_variable(
            variableid='TREFHT',
            filetable=filetable2,
            reduction_function=(lambda x, vid=None: reduce2latlon_seasonal(
                x, seasonsDJF, vid=vid))),
        'TREFHT_JJA':
        reduced_variable(
            variableid='TREFHT',
            filetable=filetable1,
            reduction_function=(lambda x, vid=None: reduce2lat_seasonal(
                x, seasonsJJA, vid=vid))),
        'PRECT_JJA_lat_1':
        reduced_variable(
            variableid='PRECT',
            filetable=filetable1,
            reduction_function=(lambda x, vid=None: reduce2lat_seasonal(
                x, seasonsJJA, vid=vid))),
        'PRECT_JJA_lat_2':
        reduced_variable(
            variableid='PRECT',
            filetable=filetable2,
            reduction_function=(lambda x, vid=None: reduce2lat_seasonal(
                x, seasonsJJA, vid=vid))),

        # CAM variables needed for heat transport:
        # FSNS, FLNS, FLUT, FSNTOA, FLNT, FSNT, SHFLX, LHFLX,
        'FSNS_1':
        reduced_variable(variableid='FSNS',
                         filetable=filetable1,
                         reduction_function=(lambda x, vid: x)),
        'FSNS_ANN_latlon_1':
        reduced_variable(variableid='FSNS',
                         filetable=filetable1,
                         reduction_function=reduce2latlon),
        'FLNS_1':
        reduced_variable(variableid='FLNS',
                         filetable=filetable1,
                         reduction_function=(lambda x, vid: x)),
        'FLNS_ANN_latlon_1':
        reduced_variable(variableid='FLNS',
                         filetable=filetable1,
                         reduction_function=reduce2latlon),
        'FLUT_ANN_latlon_1':
        reduced_variable(variableid='FLUT',
                         filetable=filetable1,
                         reduction_function=reduce2latlon),
        'FSNTOA_ANN_latlon_1':
        reduced_variable(variableid='FSNTOA',
                         filetable=filetable1,
                         reduction_function=reduce2latlon),
        'FLNT_1':
        reduced_variable(variableid='FLNT',
                         filetable=filetable1,
                         reduction_function=(lambda x, vid: x)),
        'FLNT_ANN_latlon_1':
        reduced_variable(variableid='FLNT',
                         filetable=filetable1,
                         reduction_function=reduce2latlon),
        'FSNT_1':
        reduced_variable(variableid='FSNT',
                         filetable=filetable1,
                         reduction_function=(lambda x, vid: x)),
        'FSNT_ANN_latlon_1':
        reduced_variable(variableid='FSNT',
                         filetable=filetable1,
                         reduction_function=reduce2latlon),
        'QFLX_1':
        reduced_variable(variableid='QFLX',
                         filetable=filetable1,
                         reduction_function=(lambda x, vid: x)),
        'SHFLX_1':
        reduced_variable(variableid='SHFLX',
                         filetable=filetable1,
                         reduction_function=(lambda x, vid: x)),
        'SHFLX_ANN_latlon_1':
        reduced_variable(variableid='SHFLX',
                         filetable=filetable1,
                         reduction_function=reduce2latlon),
        'LHFLX_ANN_latlon_1':
        reduced_variable(variableid='LHFLX',
                         filetable=filetable1,
                         reduction_function=reduce2latlon),
        'ORO_ANN_latlon_1':
        reduced_variable(variableid='ORO',
                         filetable=filetable1,
                         reduction_function=reduce2latlon),
        'OCNFRAC_ANN_latlon_1':
        reduced_variable(variableid='OCNFRAC',
                         filetable=filetable1,
                         reduction_function=reduce2latlon),
        'ts_lat_old':
        reduced_variable(
            variableid=
            'surface_temperature',  # normally a CF standard_name, even for non-CF data.
            filetable=filetable1,
            reduction_function=reduce2lat_old),
        'ts_lat_new':
        reduced_variable(
            variableid=
            'surface_temperature',  # normally a CF standard_name, even for non-CF data.
            filetable=filetable1,
            reduction_function=reduce2lat
            # The reduction function will take just one argument, a variable (MV).  But it might
            # be expressed here as a lambda wrapping a more general function.
            # Often there will be ranges in time, space, etc. specified here.  No range means
            # everything.
        ),
        'ts_scalar_tropical_o':
        reduced_variable(
            variableid='surface_temperature',
            filetable=filetable1,
            reduction_function=(lambda mv, vid=None: reduce2scalar_zonal_old(
                mv, -20, 20, vid=vid))),
        'ts_scalar_tropical_n':
        reduced_variable(
            variableid='surface_temperature',
            filetable=filetable1,
            reduction_function=(lambda mv, vid=None: reduce2scalar_zonal(
                mv, -20, 20, vid=vid)))
    }

    # Derived variables have to be treated separately from reduced variables
    # because derived variables generally depend on reduced variables.
    # But N.B.: the dicts reduced_variables and derived_variables
    # must never use the same key!
    derived_variables = {
        'CAM_HEAT_TRANSPORT_ALL_1':
        derived_var(vid='CAM_HEAT_TRANSPORT_ALL_1',
                    inputs=[
                        'FSNS_ANN_latlon_1', 'FLNS_ANN_latlon_1',
                        'FLUT_ANN_latlon_1', 'FSNTOA_ANN_latlon_1',
                        'FLNT_ANN_latlon_1', 'FSNT_ANN_latlon_1',
                        'SHFLX_ANN_latlon_1', 'LHFLX_ANN_latlon_1',
                        'OCNFRAC_ANN_latlon_1'
                    ],
                    outputs=[
                        'atlantic_heat_transport', 'pacific_heat_transport',
                        'indian_heat_transport', 'global_heat_transport'
                    ],
                    func=oceanic_heat_transport),
        'NCEP_OBS_HEAT_TRANSPORT_ALL_2':
        derived_var(vid='NCEP_OBS_HEAT_TRANSPORT_ALL_2',
                    inputs=[],
                    outputs=('latitude', [
                        'atlantic_heat_transport', 'pacific_heat_transport',
                        'indian_heat_transport', 'global_heat_transport'
                    ]),
                    func=(lambda: ncep_ocean_heat_transport(path2))),
        'T_ANN_1':
        derived_var(vid='T_ANN_1',
                    inputs=[
                        'T_CAM_ANN_1', 'hyam_1', 'hybm_1', 'PS_ANN_1',
                        'T_CAM_ANN_2'
                    ],
                    outputs=('temperature'),
                    func=verticalize)
    }

    plotvars = dict(reduced_variables.items() + derived_variables.items())
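    # Sketch (assumption): a cheap guard for the "must never use the same key"
    # rule noted above, placed just before the merge:
    #   dup = set(reduced_variables) & set(derived_variables)
    #   assert not dup, 'duplicate variable keys: %s' % sorted(dup)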

    # The plotvspecs dict names and contains all plotspec objects which we have defined.
    # The plotspeckeys variable, below, names the ones for which we will generate output.
    # A dict value can be a plotspec object, or a list of such objects.  A list of
    # plotspec instances specifies a page containing multiple plots in separate panes.
    plotspecs = {
        'TREFHT_ANN_Npole_ALL':
        ['TREFHT_ANN_Npole_1', 'TREFHT_ANN_Npole_2', 'TREFHT_ANN_Npole_diff'],
        'TREFHT_ANN_Npole_1':
        plotspec(vid='TREFHT_ANN_Npole_1',
                 xvars=['TREFHT_ANN_latlon_Npole_1'],
                 xfunc=lonvar,
                 yvars=['TREFHT_ANN_latlon_Npole_1'],
                 yfunc=latvar,
                 zvars=['TREFHT_ANN_latlon_Npole_1'],
                 zfunc=(lambda z: z),
                 plottype='polar contour plot'),
        'TREFHT_ANN_Npole_2':
        plotspec(vid='TREFHT_ANN_Npole_2',
                 xvars=['TREFHT_ANN_latlon_Npole_2'],
                 xfunc=lonvar,
                 yvars=['TREFHT_ANN_latlon_Npole_2'],
                 yfunc=latvar,
                 zvars=['TREFHT_ANN_latlon_Npole_2'],
                 zfunc=(lambda z: z),
                 plottype='polar contour plot'),
        'TREFHT_ANN_Npole_diff':
        plotspec(
            vid='TREFHT_ANN_Npole_diff',
            xvars=['TREFHT_ANN_latlon_Npole_1', 'TREFHT_ANN_latlon_Npole_2'],
            xfunc=lonvar_min,
            yvars=['TREFHT_ANN_latlon_Npole_1', 'TREFHT_ANN_latlon_Npole_2'],
            yfunc=latvar_min,
            zvars=['TREFHT_ANN_latlon_Npole_1', 'TREFHT_ANN_latlon_Npole_2'],
            zfunc=aminusb_2ax,
            plottype='polar contour plot'),
        'TREFHT_DJF_latlon_ALL': [
            'TREFHT_DJF_latlon_1', 'TREFHT_DJF_latlon_2',
            'TREFHT_DJF_latlon_diff'
        ],
        'TREFHT_DJF_latlon_1':
        plotspec(vid='TREFHT_DJF_latlon_1',
                 xvars=['TREFHT_DJF_latlon_1'],
                 xfunc=lonvar,
                 yvars=['TREFHT_DJF_latlon_1'],
                 yfunc=latvar,
                 zvars=['TREFHT_DJF_latlon_1'],
                 zfunc=(lambda z: z),
                 plottype='contour plot'),
        'TREFHT_DJF_latlon_2':
        plotspec(vid='TREFHT_DJF_latlon_2',
                 xvars=['TREFHT_DJF_latlon_2'],
                 xfunc=lonvar,
                 yvars=['TREFHT_DJF_latlon_2'],
                 yfunc=latvar,
                 zvars=['TREFHT_DJF_latlon_2'],
                 zfunc=(lambda z: z),
                 plottype='contour plot'),
        'TREFHT_DJF_latlon_diff':
        plotspec(vid='TREFHT_DJF_latlon_diff',
                 xvars=['TREFHT_DJF_latlon_1', 'TREFHT_DJF_latlon_2'],
                 xfunc=lonvar_min,
                 yvars=['TREFHT_DJF_latlon_1', 'TREFHT_DJF_latlon_2'],
                 yfunc=latvar_min,
                 zvars=['TREFHT_DJF_latlon_1', 'TREFHT_DJF_latlon_2'],
                 zfunc=aminusb_2ax,
                 plottype='contour plot'),
        'T_ANN_VERT_CAM_OBS_ALL':
        ['T_VERT_ANN_1', 'T_VERT_ANN_2', 'T_VERT_difference'],
        'T_VERT_difference':
        plotspec(vid='T_VERT_difference',
                 xvars=['T_ANN_1', 'T_CAM_ANN_2'],
                 xfunc=latvar_min,
                 yvars=['T_ANN_1', 'T_CAM_ANN_2'],
                 yfunc=levvar_min,
                 ya1vars=['T_ANN_1', 'T_CAM_ANN_2'],
                 ya1func=(lambda y1, y2: heightvar(levvar_min(y1, y2))),
                 zvars=['T_ANN_1', 'T_CAM_ANN_2'],
                 zfunc=aminusb_ax2,
                 plottype="contour plot"),
        'T_VERT_ANN_2':
        plotspec(vid='T_ANN_2',
                 xvars=['T_CAM_ANN_2'],
                 xfunc=latvar,
                 yvars=['T_CAM_ANN_2'],
                 yfunc=levvar,
                 ya1vars=['T_CAM_ANN_2'],
                 ya1func=heightvar,
                 zvars=['T_CAM_ANN_2'],
                 plottype='contour plot',
                 zrangevars=['T_ANN_1', 'T_CAM_ANN_2'],
                 zrangefunc=minmin_maxmax),
        'T_VERT_ANN_1':
        plotspec(vid='T_ANN_1',
                 xvars=['T_ANN_1'],
                 xfunc=latvar,
                 yvars=['T_ANN_1'],
                 yfunc=levvar,
                 ya1vars=['T_ANN_1'],
                 ya1func=heightvar,
                 zvars=['T_ANN_1'],
                 plottype='contour plot',
                 zrangevars=['T_ANN_1', 'T_CAM_ANN_2'],
                 zrangefunc=minmin_maxmax),
        'NCEP_OBS_HEAT_TRANSPORT_GLOBAL_2':
        plotspec(vid='NCEP_OBS_HEAT_TRANSPORT_GLOBAL_2',
                 xvars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'],
                 xfunc=(lambda x: x[0]),
                 yvars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'],
                 yfunc=(lambda y: y[1][3]),
                 plottype='line plot'),
        'NCEP_OBS_HEAT_TRANSPORT_PACIFIC_2':
        plotspec(vid='NCEP_OBS_HEAT_TRANSPORT_PACIFIC_2',
                 xvars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'],
                 xfunc=(lambda x: x[0]),
                 yvars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'],
                 yfunc=(lambda y: y[1][0]),
                 plottype='line plot'),
        'NCEP_OBS_HEAT_TRANSPORT_ATLANTIC_2':
        plotspec(vid='NCEP_OBS_HEAT_TRANSPORT_ATLANTIC_2',
                 xvars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'],
                 xfunc=(lambda x: x[0]),
                 yvars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'],
                 yfunc=(lambda y: y[1][1]),
                 plottype='line plot'),
        'NCEP_OBS_HEAT_TRANSPORT_INDIAN_2':
        plotspec(vid='NCEP_OBS_HEAT_TRANSPORT_INDIAN_2',
                 xvars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'],
                 xfunc=(lambda x: x[0]),
                 yvars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'],
                 yfunc=(lambda y: y[1][2]),
                 plottype='line plot'),
        'CAM_HEAT_TRANSPORT_GLOBAL_1':
        plotspec(vid='CAM_HEAT_TRANSPORT_GLOBAL_1',
                 xvars=['FSNS_ANN_latlon_1'],
                 xfunc=latvar,
                 yvars=['CAM_HEAT_TRANSPORT_ALL_1'],
                 yfunc=(lambda y: y[3]),
                 plottype='line plot'),
        'CAM_HEAT_TRANSPORT_PACIFIC_1':
        plotspec(vid='CAM_HEAT_TRANSPORT_PACIFIC_1',
                 xvars=['FSNS_ANN_latlon_1'],
                 xfunc=latvar,
                 yvars=['CAM_HEAT_TRANSPORT_ALL_1'],
                 yfunc=(lambda y: y[0]),
                 plottype='line plot'),
        'CAM_HEAT_TRANSPORT_ATLANTIC_1':
        plotspec(vid='CAM_HEAT_TRANSPORT_ATLANTIC_1',
                 xvars=['FSNS_ANN_latlon_1'],
                 xfunc=latvar,
                 yvars=['CAM_HEAT_TRANSPORT_ALL_1'],
                 yfunc=(lambda y: y[1]),
                 plottype='line plot'),
        'CAM_HEAT_TRANSPORT_INDIAN_1':
        plotspec(vid='CAM_HEAT_TRANSPORT_INDIAN_1',
                 xvars=['FSNS_ANN_latlon_1'],
                 xfunc=latvar,
                 yvars=['CAM_HEAT_TRANSPORT_ALL_1'],
                 yfunc=(lambda y: y[2]),
                 plottype='line plot'),
        'CAM_HEAT_TRANSPORT_ALL_1': [
            'CAM_HEAT_TRANSPORT_GLOBAL_1', 'CAM_HEAT_TRANSPORT_PACIFIC_1',
            'CAM_HEAT_TRANSPORT_ATLANTIC_1', 'CAM_HEAT_TRANSPORT_INDIAN_1'
        ],
        'CAM_NCEP_HEAT_TRANSPORT_GLOBAL':
        plotspec(vid='CAM_NCEP_HEAT_TRANSPORT_GLOBAL',
                 x1vars=['FSNS_ANN_latlon_1'],
                 x1func=latvar,
                 y1vars=['CAM_HEAT_TRANSPORT_ALL_1'],
                 y1func=(lambda y: y[3]),
                 x2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'],
                 x2func=(lambda x: x[0]),
                 y2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'],
                 y2func=(lambda y: y[1][3]),
                 plottype='2 line plot'),
        'CAM_NCEP_HEAT_TRANSPORT_PACIFIC':
        plotspec(vid='CAM_NCEP_HEAT_TRANSPORT_PACIFIC',
                 x1vars=['FSNS_ANN_latlon_1'],
                 x1func=latvar,
                 y1vars=['CAM_HEAT_TRANSPORT_ALL_1'],
                 y1func=(lambda y: y[0]),
                 x2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'],
                 x2func=(lambda x: x[0]),
                 y2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'],
                 y2func=(lambda y: y[1][0]),
                 plottype='2 line plot'),
        'CAM_NCEP_HEAT_TRANSPORT_ATLANTIC':
        plotspec(vid='CAM_NCEP_HEAT_TRANSPORT_ATLANTIC',
                 x1vars=['FSNS_ANN_latlon_1'],
                 x1func=latvar,
                 y1vars=['CAM_HEAT_TRANSPORT_ALL_1'],
                 y1func=(lambda y: y[0]),
                 x2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'],
                 x2func=(lambda x: x[0]),
                 y2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'],
                 y2func=(lambda y: y[1][1]),
                 plottype='2 line plot'),
        'CAM_NCEP_HEAT_TRANSPORT_INDIAN':
        plotspec(vid='CAM_NCEP_HEAT_TRANSPORT_INDIAN',
                 x1vars=['FSNS_ANN_latlon_1'],
                 x1func=latvar,
                 y1vars=['CAM_HEAT_TRANSPORT_ALL_1'],
                 y1func=(lambda y: y[0]),
                 x2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'],
                 x2func=(lambda x: x[0]),
                 y2vars=['NCEP_OBS_HEAT_TRANSPORT_ALL_2'],
                 y2func=(lambda y: y[1][2]),
                 plottype='2 line plot'),
        'CAM_NCEP_HEAT_TRANSPORT_ALL': [
            'CAM_NCEP_HEAT_TRANSPORT_GLOBAL',
            'CAM_NCEP_HEAT_TRANSPORT_PACIFIC',
            'CAM_NCEP_HEAT_TRANSPORT_ATLANTIC',
            'CAM_NCEP_HEAT_TRANSPORT_INDIAN'
        ],
        'past_CAM_HEAT_TRANSPORT_GLOBAL_1':
        plotspec(vid='CAM_HEAT_TRANSPORT_GLOBAL_1',
                 xvars=['FSNS_ANN_latlon_1'],
                 xfunc=latvar,
                 yvars=[
                     'FSNS_ANN_latlon_1', 'FLNS_ANN_latlon_1',
                     'FLUT_ANN_latlon_1', 'FSNTOA_ANN_latlon_1',
                     'FLNT_ANN_latlon_1', 'FSNT_ANN_latlon_1',
                     'SHFLX_ANN_latlon_1', 'LHFLX_ANN_latlon_1',
                     'OCNFRAC_ANN_latlon_1'
                 ],
                 yfunc=oceanic_heat_transport,
                 plottype='line plot'),
        'PRECT_JJA': ['PRECT_JJA_2line', 'PRECT_JJA_diff'],
        'PRECT_JJA_2line':
        plotspec(vid='PRECT_JJA_2line',
                 x1vars=['PRECT_JJA_lat_1'],
                 x1func=latvar,
                 x2vars=['PRECT_JJA_lat_2'],
                 x2func=latvar,
                 y1vars=['PRECT_JJA_lat_1'],
                 y1func=(lambda y: y),
                 y2vars=['PRECT_JJA_lat_2'],
                 y2func=(lambda y: y),
                 plottype='2-line plot'),
        'PRECT_JJA_diff':
        plotspec(
            vid='PRECT_JJA_difference',
            xvars=['PRECT_JJA_lat_1', 'PRECT_JJA_lat_2'],
            xfunc=latvar_min,
            yvars=['PRECT_JJA_lat_1', 'PRECT_JJA_lat_2'],
            yfunc=
            aminusb_1ax,  # aminusb_1ax(y1,y2)=y1-y2; each y has 1 axis, use min axis
            plottype='line plot'),
        'TREFHT_ANN':
        plotspec(vid='TREFHT_ANN',
                 xvars=['TREFHT_ANN_lat_1'],
                 xfunc=latvar,
                 yvars=['TREFHT_ANN_lat_1'],
                 yfunc=(lambda y: y),
                 plottype='line plot'),
        'TREFHT_DJF': ['TREFHT_DJF_2line', 'TREFHT_DJF_diff'],
        'TREFHT_DJF_2line':
        plotspec(vid='TREFHT_DJF_2line',
                 x1vars=['TREFHT_DJF_lat_1'],
                 x1func=latvar,
                 x2vars=['TREFHT_DJF_lat_2'],
                 x2func=latvar,
                 y1vars=['TREFHT_DJF_lat_1'],
                 y1func=(lambda y: y),
                 y2vars=['TREFHT_DJF_lat_2'],
                 y2func=(lambda y: y),
                 plottype='2-line plot'),
        'TREFHT_DJF_diff':
        plotspec(
            vid='TREFHT_DJF_diff',
            xvars=['TREFHT_DJF_lat_1', 'TREFHT_DJF_lat_2'],
            xfunc=latvar_min,
            yvars=['TREFHT_DJF_lat_1', 'TREFHT_DJF_lat_2'],
            yfunc=
            aminusb_1ax,  # aminusb_1ax(y1,y2)=y1-y2; each y has 1 axis, use min axis
            plottype='line plot'),
        'TREFHT_DJF_line':
        plotspec(vid='TREFHT_DJF_line',
                 xvars=['TREFHT_DJF_lat_1'],
                 xfunc=latvar,
                 yvars=['TREFHT_DJF_lat_1'],
                 plottype='line plot'),
        'TREFHT_DJF_contour':
        plotspec(vid='TREFHT_DJF_contour',
                 xvars=['TREFHT_DJF_latlon_1'],
                 xfunc=(lambda x: x),
                 plottype='line plot'),
        #plotspec( vid='TREFHT_JJA',xvars=['TREFHT_JJA'], xfiletable=filetable1, xfunc = latvar,
        #          yvars=['TREFHT_JJA'], yfunc=(lambda y: y), plottype='line plot'),
        #plotspec(
        #    vid='ts_by_lat_old',   # suitable for filenames
        #    xfiletable=filetable1,
        #    xfunc = latvar, # function to return x axis values
        #    xvars = ['ts_lat_old'],    # names of variables or axes, args of xfunc
        #    yfiletable=filetable1, # can differ from xfiletable, e.g. comparing 2 runs
        #    yfunc = (lambda y: y), # function to return y axis values
        #    yvars = ['ts_lat_old'], # names of variables or axes, args of yfunc
        #    zfiletable=filetable1,
        #    zfunc = (lambda: None),
        #    zvars = [],         # would be needed for contour or 3D plot
        #    # ... the ?vars variable will be converted (using the filetable and
        #    # plotvars) to actual variables which become the arguments for a call
        #    # of ?func, which returns the data we write out for plotting use.
        #    plottype='line plot' ),
        'ts_by_lat':
        plotspec(
            vid='ts_by_lat',  # suitable for filenames
            xfunc=latvar,  # function to return x axis values
            xvars=['ts_lat_new'],  # names of variables or axes, args of xfunc
            yfunc=(lambda y: y),  # function to return y axis values
            yvars=['ts_lat_new'],  # names of variables or axes, args of yfunc
            zfunc=(lambda: None),
            zvars=[],  # would be needed for contour or 3D plot
            # ... the ?vars variable will be converted (using the filetable and
            # plotvars) to actual variables which become the arguments for a call
            # of ?func, which returns the data we write out for plotting use.
            plottype='line plot'),
        #plotspec( vid="ts_global_old",xvars=['ts_scalar_tropical_o'], xfiletable=filetable1 ),
        'ts_global':
        plotspec(vid="ts_global", xvars=['ts_scalar_tropical_n']),
    }

    # Plotspeckeys specifies what plot data we will compute and write out.
    # In the future we may add a command line option, or provide other ways to
    # define plotspeckeys.
    #plotspeckeys = [['TREFHT_DJF_2line','TREFHT_DJF_difference']]
    #plotspeckeys = ['TREFHT_DJF_2line']
    #plotspeckeys = ['NCEP_OBS_HEAT_TRANSPORT_GLOBAL_2','CAM_HEAT_TRANSPORT_ALL_1']
    #plotspeckeys = ['CAM_NCEP_HEAT_TRANSPORT_GLOBAL']
    #plotspeckeys = ['CAM_NCEP_HEAT_TRANSPORT_ALL']
    #plotspeckeys = ['T_ANN_VERT_CAM_OBS_ALL']
    #plotspeckeys = ['TREFHT_DJF_laton_ALL']
    #plotspeckeys = ['TREFHT_ANN_Npole_ALL']
    plotspeckeys = ['TREFHT_DJF']
    #plotspeckeys = ['GLOBAL_AVERAGES']
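    # A hedged sketch of one such command-line override (argparse here is an
    # assumption, not something this script already uses):
    # import argparse
    # parser = argparse.ArgumentParser()
    # parser.add_argument('--plotspeckeys', nargs='+', default=['TREFHT_DJF'])
    # plotspeckeys = parser.parse_args().plotspeckeys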

    # Find the variable names required by the plotspecs.
    varkeys = []
    for psk in plotspeckeys:
        if type(psk) is str and type(plotspecs[psk]) is list:
            psk = plotspecs[psk]
        if type(psk) is str:
            write_xml = False
            psl = [plotspecs[psk]]
        else:
            write_xml = True
            psl = [plotspecs[k] for k in psk]
            xml_name = '_'.join([ps._strid for ps in psl]) + '.xml'
            h = open(xml_name, 'w')
            h.write("<plotdata>\n")
        for ps in psl:
            varkeys = varkeys + ps.xvars + ps.x1vars + ps.x2vars + ps.x3vars
            varkeys = varkeys + ps.yvars + ps.y1vars + ps.y2vars + ps.y3vars
            varkeys = varkeys + ps.zvars + ps.zrangevars
    for key in varkeys:
        if key in derived_variables.keys():
            varkeys = varkeys + derived_variables[key]._inputs
    varkeys = list(set(varkeys))

    # Compute the value of every variable we need.
    varvals = {}
    # First compute all the reduced variables
    for key in varkeys:
        if key in reduced_variables.keys():
            varvals[key] = reduced_variables[key].reduce()
    # Then use the reduced variables to compute the derived variables
    #   Note that the derive() method is allowed to return a tuple.  This way
    #   we can use one function to compute what's really several variables.
    for key in varkeys:
        if key in derived_variables.keys():
            varvals[key] = derived_variables[key].derive(varvals)
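    # The tuple convention lets one derive() call stand in for several outputs.
    # A hypothetical sketch (the unpacking order is illustrative only):
    # transports = derived_variables['CAM_HEAT_TRANSPORT_ALL_1'].derive(varvals)
    # global_ht, pacific_ht, atlantic_ht, indian_ht = transports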

    # Now use the reduced and derived variables to compute the plot data.
    for psk in plotspeckeys:
        if type(psk) is str and type(plotspecs[psk]) is list:
            psk = plotspecs[psk]
        if type(psk) is str:
            write_xml = False
            psl = [plotspecs[psk]]
        else:
            write_xml = True
            psl = [plotspecs[k] for k in psk]
            xml_name = '_'.join([ps._strid for ps in psl]) + '.xml'
            h = open(xml_name, 'w')
            h.write("<plotdata>\n")
        varkeys = []
        for ps in psl:
            print "jfp preparing data for", ps._strid
            xrv = [varvals[k] for k in ps.xvars]
            x1rv = [varvals[k] for k in ps.x1vars]
            x2rv = [varvals[k] for k in ps.x2vars]
            x3rv = [varvals[k] for k in ps.x3vars]
            yrv = [varvals[k] for k in ps.yvars]
            y1rv = [varvals[k] for k in ps.y1vars]
            y2rv = [varvals[k] for k in ps.y2vars]
            y3rv = [varvals[k] for k in ps.y3vars]
            yarv = [varvals[k] for k in ps.yavars]
            ya1rv = [varvals[k] for k in ps.ya1vars]
            zrv = [varvals[k] for k in ps.zvars]
            zrrv = [varvals[k] for k in ps.zrangevars]
            xax = apply(ps.xfunc, xrv)
            x1ax = apply(ps.x1func, x1rv)
            x2ax = apply(ps.x2func, x2rv)
            x3ax = apply(ps.x3func, x3rv)
            yax = apply(ps.yfunc, yrv)
            y1ax = apply(ps.y1func, y1rv)
            y2ax = apply(ps.y2func, y2rv)
            y3ax = apply(ps.y3func, y3rv)
            yaax = apply(ps.yafunc, yarv)
            ya1ax = apply(ps.ya1func, ya1rv)
            zax = apply(ps.zfunc, zrv)
            zr = apply(ps.zrangefunc, zrrv)
            if      (xax is None or len(xrv)==0) and (x1ax is None or len(x1rv)==0)\
                and (x2ax is None or len(x2rv)==0) and (x3ax is None or len(x3rv)==0)\
                and (yax is None or len(yrv)==0) and (y1ax is None or len(y1rv)==0)\
                and (y2ax is None or len(y2rv)==0) and (y3ax is None or len(y3rv)==0)\
                and (zax is None or len(zrv)==0):
                continue
            filename = ps._strid + "_test.nc"
            value = 0
            cdms2.setNetcdfShuffleFlag(value)  ## where value is either 0 or 1
            cdms2.setNetcdfDeflateFlag(value)  ## where value is either 0 or 1
            cdms2.setNetcdfDeflateLevelFlag(
                value)  ## where value is an integer between 0 and 9 inclusive

            g = cdms2.open(filename,
                           'w')  # later, choose a better name and a path!
            store_provenance(g)
            # Much more belongs in g, e.g. axis and graph names.
            if xax is not None and len(xrv) > 0:
                xax.id = 'X'
                g.write(xax)
            if x1ax is not None and len(x1rv) > 0:
                x1ax.id = 'X1'
                g.write(x1ax)
            if x2ax is not None and len(x2rv) > 0:
                x2ax.id = 'X2'
                g.write(x2ax)
            if x3ax is not None and len(x3rv) > 0:
                x3ax.id = 'X3'
                g.write(x3ax)
            if yax is not None and len(yrv) > 0:
                yax.id = 'Y'
                g.write(yax)
            if y1ax is not None and len(y1rv) > 0:
                y1ax.id = 'Y1'
                g.write(y1ax)
            if y2ax is not None and len(y2rv) > 0:
                y2ax.id = 'Y2'
                g.write(y2ax)
            if y3ax is not None and len(y3rv) > 0:
                y3ax.id = 'Y3'
                g.write(y3ax)
            if yaax is not None and len(yarv) > 0:
                yaax.id = 'YA'
                g.write(yaax)
            if ya1ax is not None and len(ya1rv) > 0:
                ya1ax.id = 'YA1'
                g.write(ya1ax)
            if zax is not None and len(zrv) > 0:
                zax.id = 'Z'
                g.write(zax)
            if zr is not None:
                zr.id = 'Zrange'
                g.write(zr)
            g.presentation = ps.plottype
            # Note: For table output, it would be convenient to use a string-valued variable X
            # to specify string parts of the table.  But cdms2 doesn't support them usefully.
            # Instead we'll manage with a convention that a table row plotspec's id is the name of
            # the row, thus available to be printed in, e.g., the first column.
            if ps.plottype == "table row":
                g.rowid = ps._strid
            g.close()
            if write_xml:
                h.write("<ncfile>" + filename + "</ncfile>\n")

        if write_xml:
            h.write("</plotdata>\n")
            h.close()
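    # Hedged sketch of reading one of the files written above back for plotting
    # (the filename is illustrative):
    # f = cdms2.open('TREFHT_DJF_2line_test.nc')
    # x1 = f('X1'); y1 = f('Y1')
    # print f.presentation   # e.g. '2 line plot'
    # f.close()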
import cdms2

###################################### 
value = 0 
cdms2.setNetcdfShuffleFlag(value)
cdms2.setNetcdfDeflateFlag(value)
cdms2.setNetcdfDeflateLevelFlag(value)
######################################

### EXECUTE MODULE WITH VARIOUS FUNCTIONS
execfile('modules_and_functions/misc_module.py')
execfile('modules_and_functions/getOurModelData.py')

### OPTIONS FOR REGRIDDING: METHOD AND TARGET GRID
exp = 'cmip5'
rgridMeth = 'regrid2'
#targetGrid = '4x5'
targetGrid = '2.5x2.5'

### OUTPUT DIRECTORY
outdir = '/work/metricspackage/130522/data/inhouse_model_clims/samplerun/atm/mo/ac/'
## SEE END OF THIS CODE FOR OUTPUT FILENAMES

### VARIABLES TO LOOP OVER (NAMES ASSUMED TO BE CONSISTENT WITH CMIP5)
vars = ['rlut','pr']

######################################

############# GET OBS TARGET GRID
obsg = get_target_grid(targetGrid)
############# 
from Scientific.IO.NetCDF import *
from pyclimate.svdeofs import *

from pyclimate.ncstruct import *

import sys, getopt  # for external loop; sys is needed for the path insert below
sys.path.insert(0,"/export/bonfils2/NEWPYFORT/TRANSFO/build/lib.linux-i686-2.5")
#import Lynch
import cdms  # provides the setNetcdf* calls used below
import numpy.oldnumeric as Numeric
import time
from Scientific.IO import FortranFormat
import numpy.oldnumeric.ma as MA


value=0
cdms.setNetcdfShuffleFlag(value) ## where value is either 0 or 1
cdms.setNetcdfDeflateFlag(value) ## where value is either 0 or 1
cdms.setNetcdfDeflateLevelFlag(value) ## where value is an integer between 0 and 9 inclusive

###################################################
args=sys.argv[1:]
letters='e:i:f:r'
keywords=['exper=','ice=','filt=','rang=']
oexpt='default'
oice='default'

lowpass='******'

Pdateclimo='default'

opts,pargs=getopt.getopt(args,letters,keywords)
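
# Hedged sketch of how the parsed options are typically consumed (variable
# names follow the defaults set above):
# for o, a in opts:
#     if o in ('-e', '--exper'):
#         oexpt = a
#     elif o in ('-i', '--ice'):
#         oice = a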
Example #52
0
    leap_year = 0

f_mask = cdms.open('SWGDN.nc')
v = f_mask('SWGDN')
lat = v.getAxis(1)
lon = v.getAxis(2)
f_mask.close()

fw = cdms.open(data_patch + case_name)
wcf = MV.array(fw('wcf', squeeze=1))
wcf[wcf < 0] = 0.
wcf[wcf > 1] = 1.
fw.close()

# use NetCDF3 Classic format
cdms.setNetcdfShuffleFlag(0)  # netcdf3 classic...
cdms.setNetcdfDeflateFlag(0)  # netcdf3 classic...
cdms.setNetcdfDeflateLevelFlag(0)  # netcdf3 classic...

fm = cdms.open('selected_mask_NYS.nc')
region_mask_list = {0: 'wmask_NYS'}
mask_idx = MV.array(fm(region_mask_list[idx]))
# Pre-define the output variable and output file
g = cdms.open('averaged_NYS_wcf' + str(year) + '.nc', 'w')
new_data = MV.array(np.zeros(len_axis))
new_data.id = 'averaged_' + region_mask_list[0]
for i in range(len_axis):
    scf_idx = scf[i] * mask_idx
    scf_idx.setAxis(0, lat)
    scf_idx.setAxis(1, lon)
    # If lat/lon info is given, the following average function calculates the
    # area-weighted regional mean.
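    # Hedged sketch of that call (cdutil.averager weights by grid-cell area when
    # the axes carry bounds; the exact call is an assumption for this fragment):
    # import cdutil
    # ave_i = cdutil.averager(scf_idx, axis='xy')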
Example #53
0
def compute_and_write_climatologies( varkeys, reduced_variables, season, case='', variant='', path='' ):
    """Computes climatologies and writes them to a file.
    Inputs: varkeys, names of variables whose climatologies are to be computed
            reduced_variables, dict (key:rv) where key is a variable name and rv an instance
               of the class reduced_variable
            season: the season on which the climatologies will be computed
            variant: a string to be inserted in the filename"""
    # Compute the value of every variable we need.
    # This function does not return the variable values, or even keep them.

    # First compute all the reduced variables
    # Probably this loop consumes most of the running time.  It's what has to read in all the data.
    firsttime = True
    for key in varkeys:
        if key in reduced_variables:
            time0 = time.time()
            #print "jfp",time.ctime()
            varval = reduced_variables[key].reduce()
            #print "jfp",time.ctime(),"reduced",key,"in time",time.time()-time0
            pmemusg = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss # "maximum resident set size"
            pmemusg = pmemusg / 1024./1024.  # On Linux, should be 1024 for MB
            #print "jfp   peak memory",pmemusg,"MB (GB on Linux)"
            #requires psutil process = psutil.Process(os.getpid())
            #requires psutil mem = process.get_memory_info()[0] / float(2**20)
            #print "jfp   process memory",mem,"MB"
        else:
            continue
        if varval is None:
            continue

        var = reduced_variables[key]
        if firsttime:
            firsttime = False
            if case=='':
                case = getattr( var, 'case', '' )
                if case!='':
                    case = var._file_attributes['case']+'_'
            if case=='':
                case = 'nocase_'
            if variant!='':
                variant = variant+'_'
            filename = case + variant + season + "_climo.nc"
            value=0
            cdms2.setNetcdfShuffleFlag(value) ## where value is either 0 or 1
            cdms2.setNetcdfDeflateFlag(value) ## where value is either 0 or 1
            cdms2.setNetcdfDeflateLevelFlag(value) ## where value is an integer between 0 and 9 inclusive

            g = cdms2.open( os.path.join(path,filename), 'w' )    # later, choose a better name and a path!
            # ...actually we want to write this to a full directory structure like
            #    root/institute/model/realm/run_name/season/

        logger.info("writing %s",key,"in climatology file %s",filename)
        varval.id = var.variableid
        varval.reduced_variable=varval.id
        if hasattr(var,'units'):
            varval.units = var.units+'*'+var.units
        g.write(varval)
        for attr,val in var._file_attributes.items():
            if not hasattr( g, attr ):
                setattr( g, attr, val )
    if firsttime:
        logger.error("No variables found.  Did you specify the right input data?")
    else:
        g.season = season
        g.close()
    return case
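
# Hedged usage sketch (the reduced_variable instances, season, and path are
# placeholders, not taken from a real run):
# rvs = {'TREFHT': reduced_variable(...), 'PRECT': reduced_variable(...)}
# case = compute_and_write_climatologies(['TREFHT', 'PRECT'], rvs, 'ANN',
#                                        variant='v1', path='/tmp')
# # -> writes <case>_v1_ANN_climo.nc under /tmp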
Example #54
0
import cdms2
import hashlib
import numpy
from collections import OrderedDict, Mapping
import pcmdi_metrics
import cdp.cdp_io
import subprocess
import sys
import shlex
import datetime
from pcmdi_metrics import LOG_LEVEL
import copy
import re
import logging

value = 0
cdms2.setNetcdfShuffleFlag(value)  # where value is either 0 or 1
cdms2.setNetcdfDeflateFlag(value)  # where value is either 0 or 1
# where value is an integer between 0 and 9 inclusive
cdms2.setNetcdfDeflateLevelFlag(value)
logging.getLogger("pcmdi_metrics").setLevel(LOG_LEVEL)

try:
    basestring  # noqa
except Exception:
    basestring = str


# Convert cdms MVs to json
def MV2Json(data, dic={}, struct=None):
    if struct is None:
        struct = []
Example #55
0
import gc
import cdms2 as cdm
import MV2 as mv
import numpy as npy

def correctFile(idxcorr, ncorr, inFile, inDir, outFile, outDir):
    '''
    Correct density-binned files (undefined ptop & longitude-0 interpolation issue)
    idxcorr = [idx_i,idx_i1,jmax] indices for longitude correction - if [0,0,0] ignore
    ncorr   = number of corrections: 1 or 2
    '''
    # CDMS initialisation - netCDF compression
    comp = 1 # 0 for no compression
    cdm.setNetcdfShuffleFlag(comp)
    cdm.setNetcdfDeflateFlag(comp)
    cdm.setNetcdfDeflateLevelFlag(comp)
    cdm.setAutoBounds('on')
    # Numpy initialisation
    npy.set_printoptions(precision=2)

    varList3D = ['isondepthg','isonthickg', 'sog','thetaog']
    varList2D = ['ptopsoxy','ptopdepthxy','ptopsigmaxy','ptopthetaoxy','persistmxy']

    # First test, read level by level and write level by level (memory management)
    # use ncpdq -a time,lev,lat,lon to recover the dimension order

    fi = cdm.open(inDir+'/'+inFile)
    fo = cdm.open(outDir+'/'+outFile,'w')
    isondg  = fi['isondepthg'] ; # Create variable handle
    # Get grid objects
    #axesList = isondg.getAxisList()
    #sigmaGrd = isondg.getLevel()
    lonN = isondg.shape[3]
    latN = isondg.shape[2]
    levN = isondg.shape[1]
    timN = isondg.shape[0]
    #valmask = isondg.missing_value

    if ncorr == 2:
        ic1 = idxcorr[0][0]
        ic2 = idxcorr[0][1]
        jcmax = idxcorr[0][2]
        ic12 = idxcorr[1][0]
        ic22 = idxcorr[1][1]
        jcmax2 = idxcorr[1][2]
        if ic2 >= lonN-1:
            ic2 = 0
        if ic22 >= lonN-1:
            ic22 = 0
    elif ncorr == 1:
        ic1 = idxcorr[0]
        ic2 = idxcorr[1]
        jcmax = idxcorr[2]
        if ic2 >= lonN-1:
            ic2 = 0
    #print ic1,ic2,jcmax
    corr_long = True
    if ic1 == 0 and ic2 == 0 and jcmax == 0:
        corr_long = False
    #testp = 10
    for it in range(timN):
        #if it/testp*testp == it:
        #    print ' year =',it
        # test
        #i = 90
        #j = 90
        #i2d = 6
        #j2d = 12
        #ij = j*lonN+i
        #ij2d = j2d*lonN+i2d
        #print 'ij=',ij
        # 3D variables
        for iv in varList3D:
            #print iv
            outVar = fi(iv,time = slice(it,it+1))
            # Correct for longitude interpolation issue
            if corr_long:
                for jt in range(jcmax):
                    outVar[:,:,jt,ic1] = (outVar[:,:,jt,ic1-1]+outVar[:,:,jt,ic2+1])/2
                    outVar[:,:,jt,ic2] = outVar[:,:,jt,ic1]
                if ncorr == 2:
                    for jt in range(jcmax2):
                        outVar[:,:,jt,ic12] = (outVar[:,:,jt,ic12-1]+outVar[:,:,jt,ic22+1])/2
                        outVar[:,:,jt,ic22] = outVar[:,:,jt,ic12]
            # Correct Bowl properties
            if iv =='isondepthg':
                vardepth = npy.reshape(outVar,(levN,latN*lonN))
                #print 'test'
                #print outVar[:,:,j2d,i2d]
                #print vardepth[:,ij2d]
                # find values of surface points
                vardepthBowl = npy.min(npy.reshape(outVar,(levN,latN*lonN)),axis=0)
                vardepthBowlTile = npy.repeat(vardepthBowl,levN,axis=0).reshape((latN*lonN,levN)).transpose()
                #print vardepthBowlTile.shape
                #print vardepthBowl[ij2d], vardepthBowlTile[:,ij2d]
                levs = outVar.getAxisList()[1][:]
                #print 'levs',levs
                levs3d  = mv.reshape(npy.tile(levs,latN*lonN),(latN*lonN,levN)).transpose()
                varsigmaBowl = npy.max(npy.where(vardepth == vardepthBowlTile,levs3d,0),axis=0)
                #print varsigmaBowl[ij2d],levs3d[:,ij2d]

            elif iv == 'sog':
                varsog = npy.reshape(outVar,(levN,latN*lonN))
                varsoBowl = npy.max(npy.where(vardepth == vardepthBowlTile,varsog,0),axis=0)
                #print varsoBowl[ij2d], varsog[:,ij2d]
                #print vardepth[:,ij2d],vardepthBowlTile[:,ij2d]
                del (varsog); gc.collect()
            elif iv =='thetaog':
                varthetao = npy.reshape(outVar,(levN,latN*lonN))
                varthetaoBowl = npy.max(npy.where(vardepth == vardepthBowlTile,varthetao,-1000),axis=0)
                #print varthetaoBowl[ij2d],varthetao[:,ij2d]
                del (varthetao); gc.collect()
            # Write
            fo.write(outVar.astype('float32'), extend = 1, index = it)
            fo.sync()
        del (vardepth); gc.collect()
        # 2D variables and correct isondepthg = 0
        for iv in varList2D:
            outVar = fi(iv,time = slice(it,it+1))
            # Correct for longitude interpolation issue
            if corr_long:
                for jt in range(jcmax):
                    outVar[:,jt,ic1] = (outVar[:,jt,ic1-1]+outVar[:,jt,ic2+1])/2
                    outVar[:,jt,ic2] = outVar[:,jt,ic1]
                if ncorr == 2:
                    for jt in range(jcmax2):
                        outVar[:,jt,ic12] = (outVar[:,jt,ic12-1]+outVar[:,jt,ic22+1])/2
                        outVar[:,jt,ic22] = outVar[:,jt,ic12]
            # Correct for ptopsoxy < 30
            #print 'before',outVar[:,j2d,i2d]
            if iv == 'ptopsoxy':
                testso = npy.reshape(outVar,(latN*lonN)) < 30.
                #print 'testdepth', testdepth[ij2d]
                #print npy.argwhere(testdepth)[0:10]/lonN, npy.argwhere(testdepth)[0:10]-npy.argwhere(testdepth)[0:10]/lonN*lonN
                outVar.data[...] = npy.where(testso,varsoBowl,npy.reshape(outVar,(latN*lonN))).reshape(outVar.shape)[...]
            elif iv == 'ptopdepthxy':
                outVar.data[...] = npy.where(testso,vardepthBowl,npy.reshape(outVar,(latN*lonN))).reshape(outVar.shape)[...]
            elif iv == 'ptopthetaoxy':
                outVar.data[...] = npy.where(testso,varthetaoBowl,npy.reshape(outVar,(latN*lonN))).reshape(outVar.shape)[...]
            elif iv == 'ptopsigmaxy':
                outVar.data[...] = npy.where(testso,varsigmaBowl,npy.reshape(outVar,(latN*lonN))).reshape(outVar.shape)[...]
            #print 'after',outVar[:,j2d,i2d]

            # Write
            fo.write(outVar.astype('float32'), extend = 1, index = it)
            fo.sync()

    fi.close()
    fo.close()

# testing

#model = 'CCSM4'
#idxcorr=[139,140,145]
#ncorr = 1
#inFile = 'cmip5.CCSM4.historical24.r1i1p1.an.ocn.Omon.density.ver-v20121128.nc'
#inDir = '/Users/ericg/Projets/Density_bining/Raw_testing'
#outFile = 'cmip5.CCSM4.historical24.outtest.nc'

#model = 'CanESM2'
#idxcorr=[179,180,180]
#ncorr = 1
#inFile = 'cmip5.CanESM2.historical24.r1i1p1.an.ocn.Omon.density.ver-1.nc'
#inDir = '/Users/ericg/Projets/Density_bining/Raw_testing'
#outFile = 'cmip5.CanESM2.historical24.outtest.nc'

#model = 'IPSL-CM5A-LR'
#idxcorr=[0,0,0]
#ncorr=1
#inFile = 'cmip5.IPSL-CM5A-LR.historical24.r1i1p1.an.ocn.Omon.density.ver-v20111119.nc'
#inDir = '/Users/ericg/Projets/Density_bining/Raw_testing'
#outFile = 'cmip5.IPSL-CM5A-LR.historical24.outtest.nc'


#model = 'Ishii'
#idxcorr=[[359,359,39],[180,180,180]]
#idxcorr=[359,359,39]
#ncorr = 1
#inFile = 'obs.Ishii.historical.r0i0p0.an.ocn.Omon.density.ver-1.latestX.nc'
#inDir='/Volumes/hciclad/data/Density_binning/Prod_density_obs_april16'
#outFile = 'obs.Ishii.historical.r0i0p0.an.ocn.Omon.density.ver-1.latestXCorr.nc'

#model = 'EN4'
#idxcorr=[[359,359,39],[180,180,180]]
#idxcorr=[359,359,39]
#ncorr = 2
#inFile = 'obs.EN4.historical.r0i0p0.mo.ocn.Omon.density.ver-1.latestX.nc'
#inDir='/Volumes/hciclad/data/Density_binning/Prod_density_obs_april16'
#outFile = 'obs.EN4.historical.r0i0p0.mo.ocn.Omon.density.ver-1.latestXCorr.nc'

#outDir = inDir


#correctFile(idxcorr, ncorr, inFile, inDir, outFile, outDir)
Example #56
0
#!/usr/local/cdat5.2/bin/python
# v2 adds seasonal and quarters calculations for the ptot, r02, and sdii functions
# For sdii it's necessary to run everything again

"""Module for computing precipitation extreme stats mostly using CDO utilities"""

from sys import exit
from os import path, system, mkdir
from cdms2 import setNetcdfShuffleFlag, setNetcdfDeflateLevelFlag, setNetcdfDeflateFlag
from string import split
from datetime import datetime
from daily_stats_cdms_utils import MosaicFiles

setNetcdfShuffleFlag(0)
setNetcdfDeflateFlag(0)
setNetcdfDeflateLevelFlag(0)


RootDir = '/mnt/BCSD'

OUTROOT = '/mnt/data_climatewizard/AR5_Global_Daily_25k/out_stats'
if not path.isdir(OUTROOT):
    mkdir(OUTROOT)

OUTTEMP = '/mnt/workspace_cluster_12/ClimateWizard/AR5_Global_Daily_25k'  # '/mnt/data_climatewizard/AR5_Global_Daily_25k'
if not path.isdir(OUTTEMP):
    mkdir(OUTTEMP)

# added as global institution attribute to output files
txtinst = "Santa Clara U.,Climate Central,The Nature Conservancy,International Center for Tropical Agriculture"
Example #57
0
    def write_plot_data( self, format="", where="" ):
        """Writes the plot's data in the specified file format and to the location given."""
        if format=="" or format=="NetCDF" or format=="NetCDF file":
            format = "NetCDF file"
        elif format=="JSON string":
            pass
        elif format=="JSON file":
            pass
        else:
            logger.warning("write_plot_data cannot recognize format name %s",format)
            logger.warning("will write a NetCDF file.")
            format = "NetCDF file"

        filename = self.outfile( format, where )

        if format=="NetCDF file":
            value=0
            cdms2.setNetcdfShuffleFlag(value) ## where value is either 0 or 1
            cdms2.setNetcdfDeflateFlag(value) ## where value is either 0 or 1
            cdms2.setNetcdfDeflateLevelFlag(value) ## where value is an integer between 0 and 9 inclusive

            writer = cdms2.open( filename, 'w' )    # later, choose a better name and a path!
            store_provenance(writer)
        elif format=="JSON file":
            logger.error("JSON file not implemented yet")
        elif format=="JSON string":
            return json.dumps(self,cls=DiagsEncoder)

        writer.source = "UV-CDAT Diagnostics"
        writer.presentation = self.ptype
        plot_these = []
        for zax in self.vars:
            try:
                if not hasattr(zax,'filetableid'):
                    zax.filetableid = zax.filetable.id()
                del zax.filetable  # we'll write var soon, and can't write a filetable
                if hasattr(zax,'filetable2'):
                    zax.filetable2id = zax.filetable2.id()
                    del zax.filetable2 # we'll write var soon, and can't write a filetable
            except:
                pass
            try:
                zax._filetableid= zax.filetableid  # and the named tuple ids aren't writeable as such
                zax.filetableid= str(zax.filetableid)  # and the named tuple ids aren't writeable as such
            except:
                pass
            try:
                zax._filetable2id= zax.filetable2id  # and the named tuple ids aren't writeable as such
                zax.filetable2id= str(zax.filetable2id)  # and the named tuple ids aren't writeable as such
            except:
                pass
            for ax in zax.getAxisList():
                try:
                    del ax.filetable
                except:
                    pass
            writer.write( zax )
            plot_these.append( str(seqgetattr(zax,'id','')) )
        writer.plot_these = ' '.join(plot_these)
        # Once the finalized method guarantees that varmax,varmin are numbers...
        #if self.finalized==True:
        #    writer.varmax = self.varmax
        #    writer.varmin = self.varmin

        writer.close()
        return [filename]
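
    # Hedged usage sketch (assumes `plot` is an instance of the enclosing class
    # with self.vars and self.ptype already populated):
    # files = plot.write_plot_data(format="NetCDF file", where="/tmp/diags")
    # json_str = plot.write_plot_data(format="JSON string")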
Example #58
0
#!/usr/bin/env python
# Adapted for numpy/ma/cdms2 by convertcdms.py
# Input arguments to the script are read from sys.argv below.

import sys, string, cdtime, cdutil
import cdms2 as cdms
import numpy.ma as MA
import numpy.oldnumeric as Numeric
from cdtime import reltime
# reset DefaultCalendar below
cdtime.DefaultCalendar = cdtime.NoLeapCalendar

cdms.setNetcdfShuffleFlag(0)
cdms.setNetcdfDeflateFlag(0)
cdms.setNetcdfDeflateLevelFlag(0)

pth = sys.argv[1]
infile = sys.argv[2]
varin = sys.argv[3]
ABfile = sys.argv[4]
outfile = 'x' + sys.argv[2]
varout = varin
psfile = sys.argv[5]
levels = string.join(sys.argv[6:])

Avar = 'hyam'
Bvar = 'hybm'
P0var = 'P0'

varps = 'PS'
levels = eval(levels)  # evaluate the string
levels = MA.asarray(list(levels)) * 100.
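
# The A/B coefficients read above enter the usual hybrid-level pressure formula
# p(k) = hyam(k)*P0 + hybm(k)*PS.  A hedged sketch of that step (shapes assumed:
# hyam/hybm 1-D in lev, PS 2-D in lat/lon; the file layout is an assumption):
# fab = cdms.open(pth + '/' + ABfile)
# A = MA.asarray(fab(Avar)); B = MA.asarray(fab(Bvar)); P0 = float(fab(P0var))
# ps = MA.asarray(cdms.open(pth + '/' + psfile)(varps, squeeze=1))
# p = A[:, None, None] * P0 + B[:, None, None] * ps   # Pa, on model levels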
Example #59
0
import os, sys, glob
import cdms2 as cdm
import cdutil as cdu
import MV2 as mv
import numpy as npy
from string import replace
from genutil import statistics
# maskVal and findToE are helpers from the author's density-binning modules (not shown here)
def mmeAveMsk2D(listFiles,
                years,
                inDir,
                outDir,
                outFile,
                timeInt,
                mme,
                timeBowl,
                ToeType,
                debug=True):
    '''
    The mmeAveMsk2D() function averages rhon/lat density-binned files with differing masks.
    It outputs:
     - the MME
     - a percentage of non-masked bins
     - the sign agreement of period2-period1 differences
     - ToE per run and for MME

    Author:    Eric Guilyardi : [email protected]

    Created on Tue Nov 25 13:56:20 CET 2014

    Inputs:
    -------
    - listFiles(str)         - the list of files to be averaged
    - years(t1,t2)           - years for slice read
    - inDir[](str)           - input directory where files are stored (add histnat as inDir[1] for ToE)
    - outDir(str)            - output directory
    - outFile(str)           - output file
    - timeInt(2xindices)     - indices of init period to compare with (e.g. [1,20])
    - mme(bool)              - multi-model mean (will read in single model ensemble stats)
    - timeBowl               - either time 'mean' or time 'max' bowl used to mask out bowl
    - ToeType(str)           - ToE type ('F': none, 'histnat')
                               -> requires running first mm+mme without ToE to compute Stddev
    - debug <optional>       - boolean value

    Notes:
    -----
    - EG 25 Nov 2014   - Initial function write
    - EG 27 Nov 2014   - Rewrite with loop on variables
    - EG 06 Dec 2014   - Added agreement on difference with init period - save as <var>Agree
    - EG 07 Dec 2014   - Read bowl to remove points above bowl - save as <var>Bowl
    - EG 19 Apr 2016   - ToE computation (just for 2D files)
    - EG 07 Oct 2016   - add 3D file support
    - EG 21 Nov 2016   - move 3D support to new function
    - EG 10 jan 2017   - added timeBowl option

    - TODO :
                 - remove loops
                 - add computation of ToE per model (toe 1 and toe 2) see ticket #50
                 - add isonhtc (see ticket #48)
    '''

    # CDMS initialisation - netCDF compression
    comp = 1  # 0 for no compression
    cdm.setNetcdfShuffleFlag(comp)
    cdm.setNetcdfDeflateFlag(comp)
    cdm.setNetcdfDeflateLevelFlag(comp)
    cdm.setAutoBounds('on')
    # Numpy initialisation
    npy.set_printoptions(precision=2)

    debug = bool(debug)  # normalise to a strict boolean
    # File dim and grid inits
    t1 = years[0]
    t2 = years[1]
    if t2 <= 0:
        useLastYears = True
        t2 = -t2
    else:
        useLastYears = False
    t10 = t1
    t20 = t2
    # Bound of period average to remove
    peri1 = timeInt[0]
    peri2 = timeInt[1]
    fi = cdm.open(inDir[0] + '/' + listFiles[0])
    isond0 = fi['isondepth']
    # Create variable handle
    # Get grid objects
    axesList = isond0.getAxisList()
    sigmaGrd = isond0.getLevel()
    latN = isond0.shape[3]
    levN = isond0.shape[2]
    basN = isond0.shape[1]
    varsig = 'ptopsigma'

    # Declare and open files for writing
    if os.path.isfile(outDir + '/' + outFile):
        os.remove(outDir + '/' + outFile)
    outFile_f = cdm.open(outDir + '/' + outFile, 'w')

    # Testing mme with less models
    #listFiles=listFiles[0:4]

    #timN = isond0.shape[0]
    timN = t2 - t1
    runN = len(listFiles)

    print ' Number of members:', len(listFiles)

    valmask = isond0.missing_value[0]
    varList = [
        'isondepth', 'isonpers', 'isonso', 'isonthetao', 'isonthick', 'isonvol'
    ]
    varFill = [0., 0., valmask, valmask, 0., 0.]
    # init arrays (2D rho/lat)
    percent = npy.ma.ones([runN, timN, basN, levN, latN], dtype='float32') * 0.
    #minbowl  = npy.ma.ones([basN,latN], dtype='float32')*1000.
    varbowl = npy.ma.ones([runN, timN, basN, latN], dtype='float32') * 1.
    #varList = ['isondepth']
    #print ' !!! ### Testing one variable ###'
    #varList = ['isonthetao']

    # init time axis
    time = cdm.createAxis(npy.float32(range(timN)))
    time.id = 'time'
    time.units = 'years since 1861'
    time.designateTime()
    # init ensemble axis
    ensembleAxis = cdm.createAxis(npy.float32(range(runN)))
    ensembleAxis.id = 'members'
    ensembleAxis.units = 'N'

    # loop on variables
    for iv, var in enumerate(varList):

        # Array inits (2D rho/lat 3D rho/lat/lon)
        #shapeR = [basN,levN,latN]
        isonvar = npy.ma.ones([runN, timN, basN, levN, latN],
                              dtype='float32') * valmask
        print 'isonvar shape: ', isonvar.shape
        vardiff, varbowl2D = [
            npy.ma.ones([runN, timN, basN, levN, latN], dtype='float32')
            for _ in range(2)
        ]
        varstd, varToE1, varToE2 = [
            npy.ma.ones([runN, basN, levN, latN], dtype='float32') * valmask
            for _ in range(3)
        ]
        varones = npy.ma.ones([runN, timN, basN, levN, latN],
                              dtype='float32') * 1.

        print ' Variable ', iv, var
        # loop over files to fill up array
        for i, file in enumerate(listFiles):
            ft = cdm.open(inDir[0] + '/' + file)
            model = file.split('.')[1]
            timeax = ft.getAxis('time')
            file1d = replace(inDir[0] + '/' + file, '2D', '1D')
            if os.path.isfile(file1d):
                f1d = cdm.open(file1d)
            else:
                print 'ERROR:', file1d, 'missing (if mme, run 1D first)'
                sys.exit(1)
            tmax = timeax.shape[0]
            if i == 0:
                tmax0 = tmax
            #adapt [t1,t2] time bounds to piControl last NN years
            if useLastYears:
                t1 = tmax - t20
                t2 = tmax
            else:
                if tmax != tmax0:
                    print 'wrong time axis: exiting...'
                    return

            # read array
            # loop over time/density for memory management
            for it in range(timN):
                t1r = t1 + it
                t2r = t1r + 1
                isonRead = ft(var, time=slice(t1r, t2r))
                if varFill[iv] != valmask:
                    isonvar[i, it, ...] = isonRead.filled(varFill[iv])
                else:
                    isonvar[i, it, ...] = isonRead
            # compute percentage of non-masked points across MME
            if iv == 0:
                maskvar = mv.masked_values(isonRead.data, valmask).mask
                percent[i, ...] = npy.float32(npy.equal(maskvar, 0))
            if mme:
                # if mme then just accumulate Bowl, Agree fields
                varst = var + 'Agree'
                vardiff[i, ...] = ft(varst, time=slice(t1, t2))
                varb = var + 'Bowl'
                varbowl2D[i, ...] = ft(varb, time=slice(t1, t2))
            else:
                # Compute difference with average of first initN years
                varinit = cdu.averager(isonvar[i, peri1:peri2, ...], axis=0)
                for t in range(timN):
                    vardiff[i, t, ...] = isonvar[i, t, ...] - varinit
                vardiff[i, ...].mask = isonvar[i, ...].mask
                # Read bowl and truncate 2D field above bowl
                if iv == 0:
                    bowlRead = f1d(varsig, time=slice(t1, t2))
                    varbowl[i, ...] = bowlRead
                # Compute Stddev
                varstd[i, ...] = npy.ma.std(isonvar[i, ...], axis=0)
                # Compute ToE
                if ToeType == 'histnat':
                    # Read mean and Std dev from histnat
                    if i == 0:
                        filehn = glob.glob(inDir[1] + '/cmip5.' + model +
                                           '.*zon2D*')[0]
                        #filehn = replace(outFile,'historical','historicalNat')
                        fthn = cdm.open(filehn)
                        varmeanhn = fthn(var)
                        varst = var + 'Std'
                        varmaxstd = fthn(varst)
                    toemult = 1.
                    signal = npy.reshape(isonvar[i, ...] - varmeanhn,
                                         (timN, basN * levN * latN))
                    noise = npy.reshape(varmaxstd, (basN * levN * latN))
                    varToE1[i,
                            ...] = npy.reshape(findToE(signal, noise, toemult),
                                               (basN, levN, latN))
                    toemult = 2.
                    varToE2[i,
                            ...] = npy.reshape(findToE(signal, noise, toemult),
                                               (basN, levN, latN))
            ft.close()
            f1d.close()
        # <-- end of loop on files

        # Compute percentage of bin presence
        # Only keep points where percent > 50%
        if iv == 0:
            percenta = (cdu.averager(percent, axis=0)) * 100.
            percenta = mv.masked_less(percenta, 50)
            percentw = cdm.createVariable(
                percenta,
                axes=[time, axesList[1], axesList[2], axesList[3]],
                id='isonpercent')
            percentw._FillValue = valmask
            percentw.long_name = 'percentage of MME bin'
            percentw.units = '%'
            outFile_f.write(percentw.astype('float32'))

        # Sign of difference
        if mme:
            vardiffsgSum = cdu.averager(vardiff, axis=0)
            vardiffsgSum = cdm.createVariable(
                vardiffsgSum,
                axes=[time, axesList[1], axesList[2], axesList[3]],
                id='foo')
            vardiffsgSum = maskVal(vardiffsgSum, valmask)
            vardiffsgSum.mask = percentw.mask
        else:
            vardiffsg = npy.copysign(varones, vardiff)
            # average signs
            vardiffsgSum = cdu.averager(vardiffsg, axis=0)
            vardiffsgSum = mv.masked_greater(vardiffsgSum, 10000.)
            vardiffsgSum.mask = percentw.mask
            vardiffsgSum._FillValue = valmask

        # average variable accross members
        isonVarAve = cdu.averager(isonvar, axis=0)
        isonVarAve = cdm.createVariable(
            isonVarAve,
            axes=[time, axesList[1], axesList[2], axesList[3]],
            id='foo')
        # mask
        if varFill[iv] == valmask:
            isonVarAve = maskVal(isonVarAve, valmask)

        isonVarAve.mask = percentw.mask

        # Only keep points with rhon >  bowl-delta_rho
        delta_rho = 0.
        if mme:  # start from average of <var>Agree
            isonVarBowl = cdu.averager(varbowl2D, axis=0)
            isonVarBowl = cdm.createVariable(
                isonVarBowl,
                axes=[time, axesList[1], axesList[2], axesList[3]],
                id='foo')
            isonVarBowl = maskVal(isonVarBowl, valmask)
            isonVarBowl.mask = percentw.mask
            # Compute intermodel stddev
            isonVarStd = statistics.std(varbowl2D, axis=0)
            isonVarStd = cdm.createVariable(
                isonVarStd,
                axes=[time, axesList[1], axesList[2], axesList[3]],
                id='foo')
            isonVarStd = maskVal(isonVarStd, valmask)
            isonVarStd.mask = percentw.mask
            if iv == 0:
                # Read multimodel sigma on bowl and average in time
                file1d = replace(outDir + '/' + outFile, '2D', '1D')
                if os.path.isfile(file1d):
                    f1d = cdm.open(file1d)
                else:
                    print 'ERROR:', file1d, 'missing (if mme, run 1D first)'
                    sys.exit(1)
                bowlRead = f1d(varsig, time=slice(t1, t2))
                f1d.close()
                siglimit = cdu.averager(bowlRead, axis=0) - delta_rho
            # TODO: remove loop by building global array with 1/0
            for il in range(latN):
                for ib in range(basN):
                    #if ib == 2:
                    #    print il, siglimit[ib,il]
                    if siglimit[ib, il] < valmask / 1000.:
                        # if mme bowl density defined, mask above bowl
                        index = (npy.argwhere(sigmaGrd[:] >= siglimit[ib, il]))
                        isonVarBowl[:, ib, 0:index[0], il].mask = True
                        isonVarStd[:, ib, 0:index[0], il].mask = True
                        vardiffsgSum[:, ib, 0:index[0], il].mask = True
                    else:
                        # mask all points
                        isonVarBowl[:, ib, :, il].mask = True
                        isonVarStd[:, ib, :, il].mask = True
                        vardiffsgSum[:, ib, :, il].mask = True
        else:
            isonVarBowl = isonVarAve * 1.  # start from variable
            isonVarStd = isonVarAve * 1.  # start from variable
            if iv == 0:
                siglimit = cdu.averager(varbowl,
                                        axis=0)  # average across members
                # Average bowl in time
                if timeBowl == 'mean':
                    siglimit = cdu.averager(siglimit, axis=0) - delta_rho
                # or take largest sigma over time
                else:
                    siglimit = npy.ma.max(siglimit, axis=0) - delta_rho
            # TODO: remove loop by building global array with 1/0
            for il in range(latN):
                for ib in range(basN):
                    if siglimit[ib, il] < valmask / 1000.:
                        # if bowl density defined, mask above bowl
                        index = (npy.argwhere(sigmaGrd[:] >= siglimit[ib, il])
                                 )[:, 0]  #Add [:,0] for python Yona
                        #import code
                        #code.interact(banner='index', local=dict(locals(), **globals()))
                        isonVarBowl[:, ib, 0:index[0], il].mask = True
                        vardiffsgSum[:, ib, 0:index[0], il].mask = True
                    else:
                        # mask all points
                        vardiffsgSum[:, ib, :, il].mask = True

            isonVarBowl = maskVal(isonVarBowl, valmask)
            # Find max of Std dev of all members
            isonVarStd = npy.ma.max(varstd, axis=0)
            # mask
            if varFill[iv] == valmask:
                isonVarStd = maskVal(isonVarStd, valmask)

        # Write
        isonave = cdm.createVariable(
            isonVarAve,
            axes=[time, axesList[1], axesList[2], axesList[3]],
            id=isonRead.id)
        isonave.long_name = isonRead.long_name
        isonave.units = isonRead.units
        isonavediff = cdm.createVariable(
            vardiffsgSum,
            axes=[time, axesList[1], axesList[2], axesList[3]],
            id=isonRead.id + 'Agree')
        isonavediff.long_name = isonRead.long_name
        isonavediff.units = isonRead.units
        isonavebowl = cdm.createVariable(
            isonVarBowl,
            axes=[time, axesList[1], axesList[2], axesList[3]],
            id=isonRead.id + 'Bowl')
        isonavebowl.long_name = isonRead.long_name
        isonavebowl.units = isonRead.units
        if not mme:
            isonmaxstd = cdm.createVariable(
                isonVarStd,
                axes=[axesList[1], axesList[2], axesList[3]],
                id=isonRead.id + 'Std')
            isonmaxstd.long_name = isonRead.long_name
            isonmaxstd.units = isonRead.units

        outFile_f.write(isonave.astype('float32'))
        outFile_f.write(isonavediff.astype('float32'))
        outFile_f.write(isonavebowl.astype('float32'))
        if not mme:
            outFile_f.write(isonmaxstd.astype('float32'))

        if ToeType == 'histnat':
            isontoe1 = cdm.createVariable(
                varToE1,
                axes=[ensembleAxis, axesList[1], axesList[2], axesList[3]],
                id=isonRead.id + 'ToE1')
            isontoe1.long_name = 'ToE 1 for ' + isonRead.long_name
            isontoe1.units = 'Year'
            isontoe2 = cdm.createVariable(
                varToE2,
                axes=[ensembleAxis, axesList[1], axesList[2], axesList[3]],
                id=isonRead.id + 'ToE2')
            isontoe2.long_name = 'ToE 2 for ' + isonRead.long_name
            isontoe2.units = 'Year'
            outFile_f.write(isontoe1.astype('float32'))
            outFile_f.write(isontoe2.astype('float32'))

        if mme:
            isonvarstd = cdm.createVariable(
                isonVarStd,
                axes=[time, axesList[1], axesList[2], axesList[3]],
                id=isonRead.id + 'ModStd')
            isonvarstd.long_name = isonRead.long_name + ' intermodel std'
            isonvarstd.units = isonRead.units
            outFile_f.write(isonvarstd.astype('float32'))

    # <--- end of loop on variables

    outFile_f.close()
    fi.close()
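
# Hedged usage sketch, mirroring the docstring's inputs (file and directory
# names are illustrative only):
# files = ['cmip5.ModelA.historical.r1i1p1.an.ocn.Omon.density_zon2D.nc',
#          'cmip5.ModelB.historical.r1i1p1.an.ocn.Omon.density_zon2D.nc']
# mmeAveMsk2D(files, (0, 140), ['/data/density/mm'], '/data/density/mme',
#             'cmip5.multimodel.historical.an.ocn.Omon.density_zon2D.nc',
#             [1, 20], mme=True, timeBowl='mean', ToeType='F')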