Example #1
      def test_smooth(self):
        cdo = Cdo()
        if (parse_version(cdo.version()) >= parse_version('1.7.2') and cdo.hasNetcdf):
          ifile = "-select,level=0 " + DATA_DIR + '/icon/phc.nc'
          cdo.debug = DEBUG
          #cdo.merge(input='/home/ram/data/icon/input/phc3.0/PHC__3.0__TempO__1x1__annual.nc /home/ram/data/icon/input/phc3.0/PHC__3.0__SO__1x1__annual.nc',
          #          output=ifile,
          #          options='-O')
          smooth = cdo.smooth(input=" -sellonlatbox,0,30,0,90 -chname,SO,s,TempO,t " + ifile, returnMaArray='s',options='-f nc')
          plot(np.flipud(smooth[0,:,:]),ofile='smooth',title='smooth')

          smooth2 = cdo.smooth('nsmooth=2',input="-sellonlatbox,0,30,0,90 -chname,SO,s,TempO,t " + ifile, returnMaArray='s',options='-f nc')
          plot(np.flipud(smooth2[0,:,:]),ofile='smooth2',title='smooth,nsmooth=2')

          smooth4 = cdo.smooth('nsmooth=4',input="-sellonlatbox,0,30,0,90 -chname,SO,s,TempO,t " + ifile, returnMaArray='s',options='-f nc')
          plot(np.flipud(smooth4[0,:,:]),ofile='smooth4',title='smooth,nsmooth=4')

          smooth9 = cdo.smooth9(input="-sellonlatbox,0,30,0,90 -chname,SO,s,TempO,t " + ifile, returnMaArray='s',options='-f nc')
          plot(np.flipud(smooth9[0,:,:]),ofile='smooth9',title='smooth9')

          smooth3deg = cdo.smooth('radius=6deg',input="-sellonlatbox,0,30,0,90 -chname,SO,s,TempO,t " + ifile, returnMaArray='s',options='-f nc')
          plot(np.flipud(smooth3deg[0,:,:]),ofile='smooth3deg',title='smooth,radius=6deg')

          smooth20 = cdo.smooth('nsmooth=20',input="-sellonlatbox,0,30,0,90 -chname,SO,s,TempO,t " + ifile, returnMaArray='s',options='-f nc')
          plot(np.flipud(smooth20[0,:,:]),ofile='smooth20',title='smooth,nsmooth=20')
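The calling convention above generalizes across cdo.py: operator parameters are passed as the first positional argument, upstream operators are prepended to the input string, and returnMaArray hands the named variable back as a numpy masked array. A minimal self-contained sketch of the same pattern, using the built-in -topo generator so that no input file is assumed:

from cdo import Cdo

cdo = Cdo()
# parameter first, chained operator inside 'input', masked array out
topo = cdo.remapbil('r90x45', input='-topo', options='-f nc',
                    returnMaArray='topo')
print(topo.shape)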
Example #2
    def test_fillmiss(self):
        cdo = Cdo()

        if not SHOW:
          return
        if cdo.hasNetcdf:
          if 'CDO' in os.environ:
            cdo.setCdo(os.environ.get('CDO'))

          cdo.debug = DEBUG
          rand = cdo.setname('v',input = '-random,r25x25 ', options = ' -f nc')

          missRange = '0.25,0.85'
          withMissRange = tempfile.NamedTemporaryFile(delete=True,prefix='cdoPy').name
          arOrg = cdo.copy(input = rand,returnMaArray = 'v')
          arWmr = cdo.setrtomiss(missRange,input = rand,output = withMissRange,returnMaArray='v')
          arFm  = cdo.fillmiss(            input = withMissRange,returnMaArray = 'v')
          arFm1s= cdo.fillmiss2(2,         input = withMissRange,returnMaArray = 'v')
          if 'setmisstonn' in cdo.operators:
            arM2NN= cdo.setmisstonn(         input = withMissRange,returnMaArray = 'v')

          pool = multiprocessing.Pool(8)
          pool.apply_async(plot, (arOrg, ),{"title":'org'      })#ofile='fmOrg')
          pool.apply_async(plot, (arWmr, ),{"title":'missing'  })#ofile='fmWmr')
          pool.apply_async(plot, (arFm,  ),{"title":'fillmiss' })#ofile= 'fmFm')
          pool.apply_async(plot, (arFm1s,),{"title":'fillmiss2'})#ofile='fmFm2')
          if 'setmisstonn' in cdo.operators:
            pool.apply_async(plot, (arM2NN,), {"title":'setmisstonn'})#, ofile='fmsetMNN')

          pool.close()
          pool.join()

        else:
          print("test_fillmiss disables because of missing python-netCDF4")
Example #3
 def test_longChain(self):
   cdo = Cdo()
   if cdo.hasNetcdf:
     ifile = "-enlarge,global_0.3 -settaxis,2000-01-01 -expr,'t=sin(for*3.141529/180.0)' -for,1,10"
     t = cdo.fldmax(input="-div -sub -timmean -seltimestep,2,3 %s -seltimestep,1 %s -gridarea %s"%(ifile,ifile,ifile),
         returnMaArray="t")
     self.assertTrue(abs(8.9813e-09 - t[0][0][0]) < 1.0e-10, 'Found non-zero diff')
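CDO chains evaluate right to left and cdo.py simply concatenates the operator strings, so even long pipelines stay a single process call. A tiny sketch of the same idea; the mean of the series 1..10 is 5.5:

from cdo import Cdo

cdo = Cdo()
# -for,1,10 builds a ten-step series, -timmean averages over time,
# and the 'output' operator prints the remaining value (5.5)
print(cdo.output(input='-fldmean -timmean -for,1,10'))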
Example #4
 def test_readCdf(self):
   cdo = Cdo()
   input= "-settunits,days  -setyear,2000 -for,1,4"
   cdfFile = cdo.copy(options="-f nc",input=input)
   if cdo.hasNetcdf:
     cdf     = cdo.readCdf(cdfFile)
     self.assertEqual(sorted(['lat','lon','for','time']),sorted(list(cdf.variables.keys())))
Example #5
 def test_CDO_options(self):
     cdo = Cdo()
     cdo.debug = DEBUG
     names = cdo.showname(input = "-stdatm,0",options = "-f nc")
     self.assertEqual(["P T"],names)
     if cdo.hasLib("sz"):
       ofile = cdo.topo(options = "-z szip")
Example #6
 def test_verticalLevels(self):
     cdo = Cdo()
      # check if a given input file has vertical layers of a given thickness array
     targetThicknesses = [50.0,  100.0,  200.0,  300.0,  450.0,  600.0,  800.0, 1000.0, 1000.0, 1000.0]
     sourceLevels = "25 100 250 500 875 1400 2100 3000 4000 5000".split()
     thicknesses = cdo.thicknessOfLevels(input = "-selname,T " + cdo.stdatm(','.join(sourceLevels),options = "-f nc"))
     self.assertEqual(targetThicknesses,thicknesses)
Example #7
 def test_bndLevels(self):
     cdo = Cdo()
     ofile = cdo.stdatm(25,100,250,500,875,1400,2100,3000,4000,5000,options = "-f nc")
     self.assertEqual([0, 50.0, 150.0, 350.0, 650.0, 1100.0, 1700.0, 2500.0, 3500.0, 4500.0, 5500.0],
                 cdo.boundaryLevels(input = "-selname,T " + ofile))
     self.assertEqual([50.0, 100.0, 200.0, 300.0, 450.0, 600.0, 800.0, 1000.0, 1000.0, 1000.0],
                  cdo.thicknessOfLevels(input = ofile))
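The two helpers are consistent with each other: each thickness is the difference of successive boundary levels, which the test values above confirm directly:

import numpy as np

boundaries = [0, 50.0, 150.0, 350.0, 650.0, 1100.0,
              1700.0, 2500.0, 3500.0, 4500.0, 5500.0]
# thickness of layer i = boundaries[i+1] - boundaries[i]
print(np.diff(boundaries).tolist())
# [50.0, 100.0, 200.0, 300.0, 450.0, 600.0, 800.0, 1000.0, 1000.0, 1000.0]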
Example #8
    def test_outputOperators(self):
        cdo = Cdo()
        levels = cdo.showlevel(input = "-stdatm,0")
        info   = cdo.sinfo(input = "-stdatm,0")
        self.assertEqual([0,0],list(map(float,levels)))
        self.assertEqual("GRIB",info[0].split(' ')[-1])

        values = cdo.outputkey("value",input="-stdatm,0")[1::]
        self.assertEqual(["1013.25", "288"],values)
        values = cdo.outputkey("value",input="-stdatm,0,10000")[1::]
        self.assertEqual(["1013.2", "271.9", "288.0", "240.6"],['{:.1f}'.format(float(s)) for s in values])
        values = cdo.outputkey("lev",input="-stdatm,0,10000")[1::]
        self.assertEqual(["0", "10000","0", "10000"],values)

        # test autoSplit usage
        levels = cdo.showlevel(input="-stdatm,0,10,20",autoSplit=' ')
        self.assertEqual([['0','10','20'],['0','10','20']],levels)

        timesExpected = ['2001-01-01T12:00:00',
          '2001-01-01T13:00:00',
          '2001-01-01T14:00:00',
          '2001-01-01T15:00:00',
          '2001-01-01T16:00:00',
          '2001-01-01T17:00:00',
          '2001-01-01T18:00:00',
          '2001-01-01T19:00:00',
          '2001-01-01T20:00:00',
          '2001-01-01T21:00:00']
        self.assertEqual(timesExpected,
                         cdo.showtimestamp(input="-settaxis,2001-01-01,12:00,1hour -for,1,10", autoSplit='  '))

        self.assertEqual(['P T'],cdo.showname(input="-stdatm,0"))
        self.assertEqual(['P','T'],cdo.showname(input="-stdatm,0",autoSplit=' '))
Example #9
def seacyc(archive, simulation, method='base'):
  """
  substracts the seasonal cycle
  :param archive: netCDF file containing the reference period
  :param simulation: netCDF file containg the period to be analysed
  :param method: method to generat the seasonal cycle files
                 base = seasonal cycle generated from reference period
                 sim = seasonal cycle generated from period to be analysed
                 own = seasonal cycle generated for both time windows
                 
  """
  from shutil import copy
  from cdo import Cdo 
  cdo = Cdo()

  # calculate seasonal cycle and add an additional file into the working directory
  # base) cdo -s ydaymean ${basedir}base_${varname}_${dtrstr}${region}_${datestring1}_${namestring}.nc seasoncyc_base.nc
  #       cp seasoncyc_base.nc seasoncyc_sim.nc ;;
  # sim)  cdo -s ydaymean ${simdir}sim_${varname}_${dtrstr}${region}_${datestring2}_${namestring}.nc seasoncyc_sim.nc
  #       cp seasoncyc_sim.nc seasoncyc_base.nc ;;
  # own)  cdo -s ydaymean ${basedir}base_${varname}_${dtrstr}${region}_${datestring1}_${namestring}.nc seasoncyc_base.nc
  #       cdo -s ydaymean ${simdir}sim_${varname}_${dtrstr}${region}_${datestring2}_${namestring}.nc seasoncyc_sim.nc ;;
  if method == 'base':
    seasoncyc_base = cdo.ydaymean(input=archive, output='seasoncyc_base.nc' )
    seasoncyc_sim = 'seasoncyc_sim.nc'
    copy(seasoncyc_base, seasoncyc_sim)
  if method == 'sim':
    seasoncyc_sim  = cdo.ydaymean(input=simulation, output='seasoncyc_sim.nc' )
    seasoncyc_base = 'seasoncyc_base.nc'
    copy(seasoncyc_sim, seasoncyc_base)
  if method == 'own':
    seasoncyc_base = cdo.ydaymean(input=archive, output='seasoncyc_base.nc' )
    seasoncyc_sim  = cdo.ydaymean(input=simulation, output='seasoncyc_sim.nc' )
  return seasoncyc_base, seasoncyc_sim
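A minimal usage sketch for the function above (the file names are hypothetical); with method='base' both returned files contain the seasonal cycle of the reference period:

base_nc, sim_nc = seacyc('reference_period.nc', 'analysis_period.nc',
                         method='base')
print(base_nc, sim_nc)  # seasoncyc_base.nc seasoncyc_sim.nc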
Example #10
 def testDbg(self):
     if not 'DEBUG' in os.environ:
       cdo = Cdo()
       self.assertEqual(False,cdo.debug)
       cdo.debug = True
       self.assertEqual(True,cdo.debug)
       cdo.debug = False
Example #11
 def __getLonLat(self, ifile):
     """
     Get vectors with lon and lat values from a netdf file using cdo.griddes
     Was introduced because we were using a damaged grid
     lon,lat valued are located in the center of a gridbox
     
     :param ifile: netcdf fn
     :result: lon,lat vectors
     """
     def searchGriddes(grid, needle):
         tmp = [x for x in grid if x.find(needle) != -1]
         return float(tmp[0].split(' ')[-1])
     
     from cdo import Cdo
     cdo = Cdo()  
     grid = cdo.griddes(input=ifile)
     try:
         xinc = searchGriddes(grid, 'xinc')
         xsize = searchGriddes(grid, 'xsize')
         xfirst = searchGriddes(grid, 'xfirst')
      except (IndexError, ValueError):
         xinc = searchGriddes(grid, 'yinc')
         xsize = searchGriddes(grid, 'ysize')
         xfirst = searchGriddes(grid, 'yfirst')
     yfirst = searchGriddes(grid, 'yfirst')
     ysize = searchGriddes(grid, 'ysize')
     yinc = searchGriddes(grid, 'yinc')
      # keep the values as reported by griddes: xfirst/yfirst are the
      # coordinates of the first grid point (the first gridbox center)
      lon = np.arange(xfirst, xsize*xinc+xfirst, xinc, dtype=float)
      lat = np.arange(yfirst, ysize*yinc+yfirst, yinc, dtype=float)
     return lon, lat
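cdo.griddes returns the grid description as a list of 'key = value' text lines, which is all searchGriddes relies on. A sketch of the parsing on a hand-written excerpt (the values are illustrative, not from a real file):

grid = ['gridtype  = lonlat', 'xsize     = 360', 'xfirst    = -179.5',
        'xinc      = 1', 'ysize     = 180', 'yfirst    = -89.5',
        'yinc      = 1']

def searchGriddes(grid, needle):
    # keep lines containing the key, take the last whitespace-separated token
    tmp = [x for x in grid if x.find(needle) != -1]
    return float(tmp[0].split(' ')[-1])

print(searchGriddes(grid, 'xsize'), searchGriddes(grid, 'yinc'))  # 360.0 1.0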
Example #12
 def test_cdf(self):
     cdo = Cdo()
      self.assertTrue(hasattr(cdo, "cdf"))  # not in cdo.__dict__
     if cdo.hasNetcdf:
       sum = cdo.fldsum(input = cdo.stdatm("0",options="-f nc"),returnCdf=True)
       self.assertEqual(1013.25,sum.variables["P"][:])
     else:
       self.assertRaises(ImportError,cdo.fldsum,input = cdo.stdatm("0",options="-f nc"),returnCdf=True)
Example #13
 def testOps(self):
     cdo = Cdo()
     self.assertTrue("sinfov" in cdo.operators)
     self.assertTrue("for" in cdo.operators)
     self.assertTrue("mask" in cdo.operators)
     if (parse_version('1.7.0') >= parse_version(cdo.version())):
         self.assertTrue("studentt" in cdo.operators)
     self.assertTrue(len(cdo.operators) > 700)
Example #14
 def test_readArray(self):
     cdo = Cdo()
     ifile = cdo.enlarge('r44x35',
                         input=' -stdatm,0,100,1000',
                         options='-f nc')
     if cdo.hasNetcdf:
       self.assertEqual((3,35,44), cdo.readArray(ifile, 'T').shape)
     else:
       self.assertRaises(ImportError,cdo.readArray,ifile,'T')
Example #15
 def test_returnXDataset(self):
     cdo = Cdo()
     if cdo.hasXarray:
       sum = cdo.fldsum(input = cdo.stdatm("0",options="-f nc"),returnXDataset=True)
       self.assertEqual(1013.25,sum.variables["P"][:])
     else:
       self.assertRaises(ImportError,
           cdo.fldsum,
           input = '-topo',returnXDataset=True)
Example #16
 def test_cdo_general(self):
      # test if cdo works in general
     cdo = Cdo()
     out_file = self._tmpdir + os.sep + 'cdo_test.nc'
     if os.path.exists(out_file):
         os.remove(out_file)
     cdo.timmean(options='-f nc', output=out_file, input=self.file)
     self.assertTrue(os.path.exists(out_file))
     if os.path.exists(out_file):
         os.remove(out_file)
Example #17
    def test_returnMaArray(self):
        cdo = Cdo()
        cdo.debug = DEBUG
        if not cdo.hasNetcdf:
          print("no tests run for test_returnMaArray")
          return

        topo = cdo.topo(returnMaArray='topo')
        self.assertEqual(-1890.0,round(topo.mean()))
        self.assertEqual(259200,topo.count())
        bathy = cdo.setrtomiss(0,10000, input = "-topo",returnMaArray='topo')
        #print(bathy)
        self.assertEqual(173565,bathy.count())

        self.assertEqual(-3386.0,round(bathy.mean()))
        oro = cdo.setrtomiss(-10000,0, input = "-topo",returnMaArray='topo')
        self.assertEqual(1142.0,round(oro.mean()))
        self.assertEqual(85567,oro.count())
        bathy = cdo.remapnn('r2x2',input = "-topo", returnMaArray = 'topo')
        self.assertEqual(-4298.0,bathy[0,0])
        self.assertEqual(-2669.0,bathy[0,1])
        ta = cdo.remapnn('r2x2',input = "-topo", options = '-f nc')
        tb = cdo.subc(-2669.0,input = ta,options = '-f nc')
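        # subc,-2669 subtracts the constant, so the gridbox holding -2669
        # becomes 0 in tb; dividing ta by tb makes that cell missing (masked)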
        withMask = cdo.div(input=ta+" "+tb,returnMaArray='topo')
        self.assertEqual('--',withMask[0,1].__str__())
        self.assertEqual(False,withMask.mask[0,0])
        self.assertEqual(False,withMask.mask[1,0])
        self.assertEqual(False,withMask.mask[1,1])
        self.assertEqual(True,withMask.mask[0,1])
Example #18
    def test_xarray_output(self):
      cdo = Cdo()
      try:
        import xarray
      except ImportError:
        print("no xarray installation available!")
        return

      tArray = cdo.topo('global_10.0',returnXArray = 'topo')
      if DEBUG:
        print(tArray)
Example #19
def main(in_folder = "/skynet1_rech3/huziy/EXP_0.1deg/DFS4.3_interpolated",
         out_folder = "/skynet3_rech1/huziy/NEMO_OFFICIAL/dev_v3_4_STABLE_2012/NEMOGCM/CONFIG/GLK/DFS4.3_clim"):

    create_links_to_forcing_files.create_links(expdir=out_folder, forcing_dir=in_folder)
    cdo_obj = Cdo()
    for vname in varnames:
        opath = os.path.join(out_folder, "{0}.nc".format(vname))
        inpaths = os.path.join(out_folder, "{0}_y*.nc".format(vname))
        cdo_obj.ensmean(input = inpaths, output = opath, options = "-f nc")

        print("processed: {0}".format(vname))
Example #20
    def test_xdataset_output(self):
      cdo = Cdo()
      try:
        import xarray
      except ImportError:
        print("no xarray installation available!")
        return

      tDataset = cdo.topo('global_10.0',returnXDataset = True)
      if DEBUG:
        print(tDataset)
Example #21
def main_improved(in_folder=""):
    print("Processing {}".format(in_folder))
    out_folder = in_folder + "_clim"

    if not os.path.isdir(out_folder):
        os.makedirs(out_folder)

    cdo_obj = Cdo()
    opath = os.path.join(out_folder, "mean_year.nc")
    inpaths = os.path.join(in_folder, "*")
    cdo_obj.ensmean(input=inpaths, output=opath, options="-f nc")
Example #22
    def test_returnArray(self):
        cdo = Cdo()
        cdo.debug = DEBUG
        if cdo.hasNetcdf:
          self.assertRaises(LookupError, cdo.stdatm,0, returnArray = 'TT')
          temperature = cdo.stdatm(0,returnArray = 'T')
          self.assertEqual(288.0,temperature.flatten()[0])
#TODO       pressure = cdo.stdatm("0,1000",options = '-f nc -b F64',returnArray = 'P')
#TODO       self.assertEqual("[ 1013.25         898.54345604]",pressure.flatten().__str__())
        else:
          self.assertRaises(ImportError, cdo.stdatm,0, returnArray = 'TT')
          self.assertRaises(ImportError, cdo.stdatm,0, returnArray = 'T')
Example #23
 def test_simple(self):
     cdo = Cdo()
     cdo.debug = DEBUG
     s   = cdo.sinfov(input="-topo",options="-f nc")
     s   = cdo.sinfov(input="-remapnn,r36x18 -topo",options="-f nc")
     f   = tempfile.NamedTemporaryFile(delete=True,prefix='cdoPy').name
     cdo.expr("'z=log(abs(topo)+1)*9.81'",input="-topo", output=f, options="-f nc")
     s   = cdo.infov(input=f)
     cdo.stdatm("0",output=f,options="-f nc")
     rm([f,])
Example #24
      def test_icon_coords(self):
        cdo = Cdo()
        if cdo.hasNetcdf:
          ifile = DATA_DIR +'/icon/oce_AquaAtlanticBoxACC.nc'
          ivar  = 't_acc'
          varIn = cdo.readCdf(ifile)
          varIn = varIn.variables[ivar]
          expected =  u'clon clat'
          self.assertEqual(expected,varIn.coordinates)

          varOut = cdo.readCdf(cdo.selname(ivar,input=ifile))
          varOut = varOut.variables[ivar]
          expected =  u'clat clon'
          self.assertEqual(expected,varOut.coordinates)
Example #25
def create_climo_file(fp_in, fp_out, t_start, t_end, variable):
    '''
    Generates climatological files from an input file and a selected time range

    Parameters:
        fp_in: input file path
        fp_out: output file path
        t_start (datetime.datetime): start date of climo period
        t_end (datetime.datetime): end date of climo period
        variable (str): name of the variable which is being processed

    Requested date range MUST exist in the input file

    '''
    supported_vars = {
        'cddETCCDI', 'csdiETCCDI', 'cwdETCCDI', 'dtrETCCDI', 'fdETCCDI',
        'gslETCCDI', 'idETCCDI', 'prcptotETCCDI', 'r10mmETCCDI', 'r1mmETCCDI',
        'r20mmETCCDI', 'r95pETCCDI', 'r99pETCCDI', 'rx1dayETCCDI',
        'rx5dayETCCDI', 'sdiiETCCDI', 'suETCCDI', 'thresholds', 'tn10pETCCDI',
        'tn90pETCCDI', 'tnnETCCDI', 'tnxETCCDI', 'trETCCDI', 'tx10pETCCDI',
        'tx90pETCCDI', 'txnETCCDI', 'txxETCCDI', 'wsdiETCCDI', 'tasmin',
        'tasmax', 'pr'
    }

    if variable not in supported_vars:
        raise Exception("Unsupported variable: cant't yet process {}".format(variable))

    # Allow different ops by variable? # op = 'sum' if variable == 'pr' else 'mean'
    op = 'mean'

    cdo = Cdo()
    date_range = '{},{}'.format(d2s(t_start), d2s(t_end))

    if not os.path.exists(os.path.dirname(fp_out)):
        os.makedirs(os.path.dirname(fp_out))

    with NamedTemporaryFile(suffix='.nc') as tempf:
        cdo.seldate(date_range, input=fp_in, output=tempf.name)

        # Add extra postprocessing for specific variables.
        vt = var_trans(variable)

        if 'yr' in fp_in:
            cdo_cmd = '{vt} -tim{op} {fname}'.format(fname=tempf.name, op=op, vt=vt)
        else:
            cdo_cmd = '{vt} -ymon{op} {fname} {vt} -yseas{op} {fname} {vt} -tim{op} {fname}'\
                .format(fname=tempf.name, op=op, vt=vt)

        cdo.copy(input=cdo_cmd, output=fp_out)
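A hedged usage sketch for create_climo_file; the paths and dates are hypothetical, and d2s and var_trans are assumed to be helpers from the surrounding module:

from datetime import datetime

create_climo_file('pr_day_model_19810101-20101231.nc',
                  '/tmp/climo/pr_climo.nc',
                  datetime(1981, 1, 1), datetime(2010, 12, 31), 'pr')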
Example #26
    def test_keep_coordinates(self):
        cdo = Cdo()
        ifile = '/pool/data/ICON/ocean_data/ocean_grid/iconR2B02-ocean_etopo40_planet.nc'
        if (os.path.isfile(ifile)):
          ivar  = 'ifs2icon_cell_grid'
          varIn = cdo.readCdf(ifile)
          varIn = varIn.variables[ivar]
          expected =  'clon clat'
          self.assertEqual(expected,varIn.coordinates)

          varOut = cdo.readCdf(cdo.selname(ivar,input=ifile))
          varOut = varOut.variables[ivar]
          expected = expected.split(' ')
          expected.reverse()
          self.assertEqual(expected,varOut.coordinates.split(' '))
Example #27
 def test_cdiMeta(self):
   cdo = Cdo()
   if cdo.hasNetcdf:
     ofile = cdo.stdatm("0", returnCdf = True)
     if DEBUG:
       print(ofile)
   if cdo.hasXarray:
     ofile = cdo.stdatm("0", returnXArray = 'T')
     if DEBUG:
       print(ofile)
       print(ofile.attrs)
     ofile = cdo.stdatm("0", returnXDataset=True)
     if DEBUG:
       print(ofile)
       print(ofile.attrs)
Example #28
 def test_returnCdf(self):
     cdo = Cdo()
     ofile = tempfile.NamedTemporaryFile(delete=True,prefix='cdoPy').name
     press = cdo.stdatm("0",output=ofile,options="-f nc")
     self.assertEqual(ofile,press)
     if cdo.hasNetcdf:
       variables = cdo.stdatm("0",returnCdf=True).variables
       print(variables)
       cdf = cdo.stdatm("0",returnCdf=True)
       press = cdf.variables['P'][:]
       self.assertEqual(1013.25,press.min())
       press = cdo.stdatm("0",output=ofile,options="-f nc")
       self.assertEqual(ofile,press)
     else:
       self.assertRaises(ImportError,cdo.stdatm,0,returnCdf=True)
     rm([ofile,])
Example #29
    def test_returnNone(self):
        cdo = Cdo()
        self.assertFalse(cdo.returnNoneOnError,"'returnNoneOnError' is _not_ False after initialization")
        cdo.returnNoneOnError = True
        self.assertTrue(cdo.returnNoneOnError,"'returnNoneOnError' is _not_ True after manual setting")
        ret  = cdo.sinfo(input="-topf")
        self.assertEqual(None,ret)
        if DEBUG:
          print(ret)

        cdo_ = Cdo(returnNoneOnError=True)
        self.assertTrue(cdo_.returnNoneOnError)
        ret  = cdo_.sinfo(input=" ifile.grb")
        self.assertEqual(None,ret)
        if DEBUG:
          print(ret)
Example #30
 def testTempdir(self):
   # manual set path
   tempPath = os.path.abspath('.')+'/tempPy_{0}'.format( random.randrange(1,100000))
   cdo = Cdo(tempdir=tempPath)
   cdo.topo('r10x10',options = '-f nc')
   self.assertEqual(1,len(os.listdir(tempPath)))
   cdo.topo('r10x10',options = '-f nc')
   cdo.topo('r10x10',options = '-f nc')
   self.assertEqual(3,len(os.listdir(tempPath)))
   cdo.topo('r10x10',options = '-f nc')
   cdo.topo('r10x10',options = '-f nc')
   self.assertEqual(5,len(os.listdir(tempPath)))
   cdo.cleanTempDir()
   self.assertEqual(0,len(os.listdir(tempPath)))
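The test relies on two behaviors: every call without an explicit output= writes exactly one file into the instance's tempdir, and cleanTempDir() empties it. A minimal sketch of giving cdo.py a private scratch directory (the directory name is illustrative):

import tempfile
from cdo import Cdo

scratch = tempfile.mkdtemp(prefix='cdoPy')
cdo = Cdo(tempdir=scratch)
cdo.topo('r10x10', options='-f nc')  # one temp file appears in scratch
cdo.cleanTempDir()                   # scratch is emptied again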
Example #31
def seacyc(archive, simulation, method='base'):
    """
    Subtracts the seasonal cycle.

    :param archive: netCDF file containing the reference period
    :param simulation: netCDF file containing the period to be analysed
    :param method: method to generate the seasonal cycle files
                   base = seasonal cycle generated from reference period
                   sim = seasonal cycle generated from period to be analysed
                   own = seasonal cycle generated for both time windows

    :return [str,str]: two netCDF filenames for analysis and reference period (located in working directory)
    """
    try:
        logger.debug('seacyc started with method: %s' % method)

        from shutil import copy
        from flyingpigeon.ocgis_module import call
        from flyingpigeon.utils import get_variable
        from cdo import Cdo
        cdo = Cdo()

        if method == 'base':
            seasoncyc_base = cdo.ydaymean(
                input=archive, output='seasoncyc_base.nc')
            variable = get_variable(archive)
            # seasoncyc_base = call(resource=archive,
            # variable=variable,
            # prefix='seasoncyc_base',
            #calc=[{'func': 'mean', 'name': variable}],
            # calc_grouping=['day','month'] )

            logger.debug('seasoncyc_base calculated : %s' % seasoncyc_base)
            seasoncyc_sim = 'seasoncyc_sim.nc'
            copy(seasoncyc_base, seasoncyc_sim)
        elif method == 'sim':
            # seasoncyc_sim  = call(resource=archive,
              # variable=variable,
              # prefix='seasoncyc_sim',
              #calc=[{'func': 'mean', 'name': variable}],
              # calc_grouping=['day','month'] )
            seasoncyc_sim = cdo.ydaymean(input=simulation, output='seasoncyc_sim.nc')
            seasoncyc_base = 'seasoncyc_base.nc'
            copy(seasoncyc_sim, seasoncyc_base)
        elif method == 'own':
            # seasoncyc_base = call(resource=archive,
              # variable=variable,
              # prefix='seasoncyc_base',
              #calc=[{'func': 'mean', 'name': variable}],
              # calc_grouping=['day','month'] )
            seasoncyc_base = cdo.ydaymean(
                input=archive, output='seasoncyc_base.nc')
            # seasoncyc_sim  = call(resource=archive,
            # variable=variable,
            # prefix='seasoncyc_sim',
            #calc=[{'func': 'mean', 'name': variable}],
            # calc_grouping=['day','month'] )
            seasoncyc_sim = cdo.ydaymean(
                input=simulation, output='seasoncyc_sim.nc')
        else:
            raise Exception('normalisation method not found')

    except Exception as e:
        msg = 'seacyc function failed : %s ' % e
        logger.debug(msg)
        raise Exception(msg)

    return seasoncyc_base, seasoncyc_sim
Example #32
    def test_outputOperators(self):
        cdo = Cdo(cdfMod=CDF_MOD)
        levels = cdo.showlevel(input="-stdatm,0")
        info = cdo.sinfo(input="-stdatm,0")
        self.assertEqual([0, 0], list(map(float, levels)))
        self.assertEqual("GRIB", info[0].split(' ')[-1])

        values = cdo.outputkey("value", input="-stdatm,0")[1::]
        self.assertEqual(["1013.25", "288"], values)
        values = cdo.outputkey("value", input="-stdatm,0,10000")[1::]
        self.assertEqual(["1013.25", "271.913", "288", "240.591"], values)
        values = cdo.outputkey("lev", input="-stdatm,0,10000")[1::]
        self.assertEqual(["0", "10000", "0", "10000"], values)

        # test autoSplit usage
        levels = cdo.showlevel(input="-stdatm,0,10,20", autoSplit=' ')
        self.assertEqual([['0', '10', '20'], ['0', '10', '20']], levels)

        timesExpected = [
            '2001-01-01T12:00:00', '2001-01-01T13:00:00',
            '2001-01-01T14:00:00', '2001-01-01T15:00:00',
            '2001-01-01T16:00:00', '2001-01-01T17:00:00',
            '2001-01-01T18:00:00', '2001-01-01T19:00:00',
            '2001-01-01T20:00:00', '2001-01-01T21:00:00'
        ]
        self.assertEqual(
            timesExpected,
            cdo.showtimestamp(
                input="-settaxis,2001-01-01,12:00,1hour -for,1,10",
                autoSplit='  '))

        self.assertEqual(['P T'], cdo.showname(input="-stdatm,0"))
        self.assertEqual(['P', 'T'],
                         cdo.showname(input="-stdatm,0", autoSplit=' '))
Example #33
def test_cdo_import():
    from cdo import Cdo
    cdo = Cdo()
Example #34
    def _handler(self, request, response):
        init_process_logger('log.txt')
        response.outputs['output_log'].file = 'log.txt'

        LOGGER.info('Start process')
        response.update_status('execution started at : {}'.format(dt.now()), 5)

        process_start_time = time.time()  # measure process execution time ...
        start_time = time.time()  # measure init ...

        ################################
        # reading in the input arguments
        ################################

        try:
            response.update_status('read input parameter : %s ' % dt.now(), 7)

            refSt = request.inputs['refSt'][0].data
            refEn = request.inputs['refEn'][0].data
            dateSt = request.inputs['dateSt'][0].data
            dateEn = request.inputs['dateEn'][0].data
            seasonwin = request.inputs['seasonwin'][0].data
            nanalog = request.inputs['nanalog'][0].data
            timres = request.inputs['timeres'][0].data

            # bbox = [-80, 20, 50, 70]
            # TODO: Add checking for wrong coordinates and apply default if necessary
            bbox = []
            bboxStr = request.inputs['BBox'][0].data
            bboxStr = bboxStr.split(',')
            bbox.append(float(bboxStr[0]))
            bbox.append(float(bboxStr[2]))
            bbox.append(float(bboxStr[1]))
            bbox.append(float(bboxStr[3]))
            LOGGER.debug('BBOX for ocgis: %s ' % (bbox))
            LOGGER.debug('BBOX original: %s ' % (bboxStr))

            normalize = request.inputs['normalize'][0].data
            detrend = request.inputs['detrend'][0].data
            distance = request.inputs['dist'][0].data
            outformat = request.inputs['outformat'][0].data
            timewin = request.inputs['timewin'][0].data

            model_var = request.inputs['reanalyses'][0].data
            model, var = model_var.split('_')

            # experiment = self.getInputValues(identifier='experiment')[0]
            # dataset, var = experiment.split('_')
            # LOGGER.info('environment set')
            LOGGER.info('input parameters set')
            response.update_status('Read in and convert the arguments', 8)
        except Exception as e:
            msg = 'failed to read input parameter %s ' % e
            LOGGER.exception(msg)
            raise Exception(msg)

        ######################################
        # convert types and set environment
        ######################################
        try:
            response.update_status('Preparing environment, converting arguments',
                                   9)
            LOGGER.debug('date: %s %s %s %s ' %
                         (type(refSt), refEn, dateSt, dateEn))

            start = min(refSt, dateSt)
            end = max(refEn, dateEn)

            #
            # refSt = dt.strftime(refSt, '%Y-%m-%d')
            # refEn = dt.strftime(refEn, '%Y-%m-%d')
            # dateSt = dt.strftime(dateSt, '%Y-%m-%d')
            # dateEn = dt.strftime(dateEn, '%Y-%m-%d')

            if normalize == 'None':
                seacyc = False
            else:
                seacyc = True

            if outformat == 'ascii':
                outformat = '.txt'
            elif outformat == 'netCDF':
                outformat = '.nc'
            else:
                LOGGER.exception('output format not valid')

        except Exception as e:
            msg = 'failed to set environment %s ' % e
            LOGGER.exception(msg)
            raise Exception(msg)

        ###########################
        # set the environment
        ###########################

        response.update_status('fetching data from archive', 10)

        try:
            if model == 'NCEP':
                getlevel = False
                if 'z' in var:
                    level = var.strip('z')
                    conform_units_to = None
                else:
                    level = None
                    conform_units_to = 'hPa'
            elif '20CRV2' in model:
                getlevel = False
                if 'z' in var:
                    level = var.strip('z')
                    conform_units_to = None
                else:
                    level = None
                    conform_units_to = 'hPa'
            else:
                LOGGER.exception('Reanalyses dataset not known')
            LOGGER.info('environment set for model: %s' % model)
        except Exception:
            msg = 'failed to set environment'
            LOGGER.exception(msg)
            raise Exception(msg)

        ##########################################
        # fetch Data from original data archive
        ##########################################

        # NOTE: If ref is say 1950 - 1990, and sim is just 1 week in 2017 - ALL the data will be downloaded, 1950 - 2017
        try:
            model_nc = rl(start=start.year,
                          end=end.year,
                          dataset=model,
                          variable=var,
                          timres=timres,
                          getlevel=getlevel)
            LOGGER.info('reanalyses data fetched')
        except Exception:
            msg = 'failed to get reanalyses data'
            LOGGER.exception(msg)
            raise Exception(msg)

        response.update_status('subsetting region of interest', 17)
        # from flyingpigeon.weatherregimes import get_level
        LOGGER.debug("start and end time: %s - %s" % (start, end))
        time_range = [start, end]

        # For 20CRV2 geopotential height, daily dataset for 100 years is about 50 Gb
        # So it makes sense, to operate it step-by-step
        # TODO: need to create dictionary for such datasets (for models as well)
        # TODO: benchmark the method below for NCEP z500 for 60 years

        #        if ('20CRV2' in model) and ('z' in var):
        if ('z' in var):
            tmp_total = []
            origvar = get_variable(model_nc)

            for z in model_nc:
                tmp_n = 'tmp_%s' % (uuid.uuid1())
                b0 = call(resource=z,
                          variable=origvar,
                          level_range=[int(level), int(level)],
                          geom=bbox,
                          spatial_wrapping='wrap',
                          prefix='levdom_' + os.path.basename(z)[0:-3])
                tmp_total.append(b0)

            tmp_total = sorted(
                tmp_total,
                key=lambda i: os.path.splitext(os.path.basename(i))[0])
            inter_subset_tmp = call(resource=tmp_total,
                                    variable=origvar,
                                    time_range=time_range)

            # Clean
            for i in tmp_total:
                tbr = 'rm -f %s' % (i)
                os.system(tbr)

            # Create new variable
            ds = Dataset(inter_subset_tmp, mode='a')
            z_var = ds.variables.pop(origvar)
            dims = z_var.dimensions
            new_var = ds.createVariable('z%s' % level,
                                        z_var.dtype,
                                        dimensions=(dims[0], dims[2], dims[3]))
            new_var[:, :, :] = squeeze(z_var[:, 0, :, :])
            # new_var.setncatts({k: z_var.getncattr(k) for k in z_var.ncattrs()})
            ds.close()
            model_subset_tmp = call(inter_subset_tmp, variable='z%s' % level)
        else:
            model_subset_tmp = call(
                resource=model_nc,
                variable=var,
                geom=bbox,
                spatial_wrapping='wrap',
                time_range=time_range,
                # conform_units_to=conform_units_to
            )

        # If dataset is 20CRV2 the 6 hourly file should be converted to daily.
        # Option to use previously 6h data from cache (if any) and not download daily files.

        if '20CRV2' in model:
            if timres == '6h':
                from cdo import Cdo

                cdo = Cdo()
                model_subset = '%s.nc' % uuid.uuid1()
                tmp_f = '%s.nc' % uuid.uuid1()

                cdo_op = getattr(cdo, 'daymean')
                cdo_op(input=model_subset_tmp, output=tmp_f)
                sti = '00:00:00'
                cdo_op = getattr(cdo, 'settime')
                cdo_op(sti, input=tmp_f, output=model_subset)
                LOGGER.debug('File Converted from: %s to daily' % (timres))
            else:
                model_subset = model_subset_tmp
        else:
            model_subset = model_subset_tmp

        LOGGER.info('Dataset subset done: %s ', model_subset)

        response.update_status('dataset subsetted', 19)

        # BLOCK OF DETRENDING of model_subset !
        # Original model subset kept for further visualisation if needed
        # There is currently an issue with SLP:
        # TODO 1 Keep trend as separate file
        # TODO 2 Think how to add options to plot anomalies AND original data...
        #        Maybe do archive and simulation = call.. over NOT detrended data and keep it as well
        # TODO 3 Check with faster smoother and removing trend of each grid

        if detrend == 'None':
            orig_model_subset = model_subset
        else:
            orig_model_subset = remove_mean_trend(model_subset, varname=var)

        # ======================================

        ############################################################
        #  get the required bbox and time region from resource data
        ############################################################
        #
        #
        # try:
        #     if dataset == 'NCEP':
        #         if 'z' in var:
        #             variable = 'hgt'
        #             level = var.strip('z')
        #             # conform_units_to=None
        #         else:
        #             variable = 'slp'
        #             level = None
        #             # conform_units_to='hPa'
        #     elif '20CRV2' in var:
        #         if 'z' in level:
        #             variable = 'hgt'
        #             level = var.strip('z')
        #             # conform_units_to=None
        #         else:
        #             variable = 'prmsl'
        #             level = None
        #             # conform_units_to='hPa'
        #     else:
        #         LOGGER.exception('Reanalyses dataset not known')
        #     LOGGER.info('environment set')
        # except Exception as e:
        #     msg = 'failed to set environment %s ' % e
        #     LOGGER.exception(msg)
        #     # raise Exception(msg)
        #
        # LOGGER.debug("init took %s seconds.", time.time() - start_time)
        # response.update_status('Read in and convert the arguments done', 8)
        #
        # #################
        # # get input data
        # #################
        # start_time = time.time()  # measure get_input_data ...
        # response.update_status('fetching input data', 7)
        # try:
        #     input = reanalyses(start=start.year, end=end.year,
        #                        variable=var, dataset=dataset)
        #     LOGGER.info('input files %s' % input)
        #     nc_subset = call(resource=input, variable=var,
        #                      geom=bbox, spatial_wrapping='wrap')
        # except Exception as e:
        #     msg = 'failed to fetch or subset input files %s' % e
        #     LOGGER.exception(msg)
        #     # raise Exception(msg)

        LOGGER.debug("get_input_subset_dataset took %s seconds.",
                     time.time() - start_time)
        response.update_status('**** Input data fetched', 20)

        ########################
        # input data preparation
        ########################
        response.update_status('Start preparing input data', 22)
        start_time = time.time()  # measure data preparation ...

        try:
            # Construct descriptive filenames for the three files
            # listed in config file
            # TODO check strftime for years <1900 (!)

            refDatesString = dt.strftime(
                refSt, '%Y-%m-%d') + "_" + dt.strftime(refEn, '%Y-%m-%d')
            simDatesString = dt.strftime(
                dateSt, '%Y-%m-%d') + "_" + dt.strftime(dateEn, '%Y-%m-%d')
            archiveNameString = "base_" + var + "_" + refDatesString + '_%.1f_%.1f_%.1f_%.1f' \
                                % (bbox[0], bbox[2], bbox[1], bbox[3])
            simNameString = "sim_" + var + "_" + simDatesString + '_%.1f_%.1f_%.1f_%.1f' \
                            % (bbox[0], bbox[2], bbox[1], bbox[3])
            archive = call(resource=model_subset,
                           time_range=[refSt, refEn],
                           prefix=archiveNameString)
            simulation = call(resource=model_subset,
                              time_range=[dateSt, dateEn],
                              prefix=simNameString)
            LOGGER.info('archive and simulation files generated: %s, %s' %
                        (archive, simulation))
        except Exception as e:
            msg = 'failed to prepare archive and simulation files %s ' % e
            LOGGER.exception(msg)
            raise Exception(msg)

        try:
            if seacyc is True:
                LOGGER.info('normalization function with method: %s ' %
                            normalize)
                seasoncyc_base, seasoncyc_sim = analogs.seacyc(
                    archive, simulation, method=normalize)
            else:
                seasoncyc_base = seasoncyc_sim = None
        except Exception as e:
            msg = 'failed to generate normalization files %s ' % e
            LOGGER.exception(msg)
            raise Exception(msg)

        output_file = 'output.txt'
        files = [
            os.path.abspath(archive),
            os.path.abspath(simulation), output_file
        ]
        LOGGER.debug("Data preperation took %s seconds.",
                     time.time() - start_time)

        ############################
        # generate the config file
        ############################
        config_file = analogs.get_configfile(
            files=files,
            seasoncyc_base=seasoncyc_base,
            seasoncyc_sim=seasoncyc_sim,
            base_id=model,
            sim_id=model,
            timewin=timewin,
            varname=var,
            seacyc=seacyc,
            cycsmooth=91,
            nanalog=nanalog,
            seasonwin=seasonwin,
            distfun=distance,
            outformat=outformat,
            calccor=True,
            silent=False,
            period=[
                dt.strftime(refSt, '%Y-%m-%d'),
                dt.strftime(refEn, '%Y-%m-%d')
            ],
            bbox="{0[0]},{0[2]},{0[1]},{0[3]}".format(bbox))
        response.update_status('generated config file', 25)
        #######################
        # CASTf90 call
        #######################
        start_time = time.time()  # measure call castf90

        # -----------------------
        try:
            import ctypes
            # TODO: This lib is for linux
            mkl_rt = ctypes.CDLL('libmkl_rt.so')
            nth = mkl_rt.mkl_get_max_threads()
            LOGGER.debug('Current number of threads: %s' % (nth))
            mkl_rt.mkl_set_num_threads(ctypes.byref(ctypes.c_int(64)))
            nth = mkl_rt.mkl_get_max_threads()
            LOGGER.debug('NEW number of threads: %s' % (nth))
            # TODO: Does it \/\/\/ work with default shell=False in subprocess... (?)
            os.environ['MKL_NUM_THREADS'] = str(nth)
            os.environ['OMP_NUM_THREADS'] = str(nth)
        except Exception as e:
            msg = 'Failed to set THREADS %s ' % e
            LOGGER.debug(msg)
        # -----------------------

        response.update_status('Start CASTf90 call', 30)
        try:
            # response.update_status('execution of CASTf90', 50)
            cmd = ['analogue.out', config_file]
            LOGGER.debug("castf90 command: %s", cmd)
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
            LOGGER.info('analogue output:\n %s', output)
            response.update_status('**** CASTf90 succeeded', 70)
        except CalledProcessError as e:
            msg = 'CASTf90 failed:\n{0}'.format(e.output)
            LOGGER.exception(msg)
            raise Exception(msg)
        LOGGER.debug("castf90 took %s seconds.", time.time() - start_time)

        # TODO: Add try - except for pdfs
        analogs_pdf = analogs.plot_analogs(configfile=config_file)
        response.update_status('preparing output', 75)
        # response.outputs['config'].storage = FileStorage()
        response.outputs['analog_pdf'].file = analogs_pdf
        response.outputs['config'].file = config_file
        response.outputs['analogs'].file = output_file
        response.outputs['output_netcdf'].file = simulation
        response.outputs['target_netcdf'].file = archive

        if seacyc is True:
            response.outputs['base_netcdf'].file = seasoncyc_base
            response.outputs['sim_netcdf'].file = seasoncyc_sim
        else:
            # TODO: Still unclear how to overpass unknown number of outputs
            dummy_base = 'dummy_base.nc'
            dummy_sim = 'dummy_sim.nc'
            with open(dummy_base, 'a'):
                os.utime(dummy_base, None)
            with open(dummy_sim, 'a'):
                os.utime(dummy_sim, None)
            response.outputs['base_netcdf'].file = dummy_base
            response.outputs['sim_netcdf'].file = dummy_sim

        ########################
        # generate analog viewer
        ########################

        formated_analogs_file = analogs.reformat_analogs(output_file)
        # response.outputs['formated_analogs'].storage = FileStorage()
        response.outputs['formated_analogs'].file = formated_analogs_file
        LOGGER.info('analogs reformatted')
        response.update_status('reformatted analog file', 80)

        viewer_html = analogs.render_viewer(
            # configfile=response.outputs['config'].get_url(),
            configfile=config_file,
            # datafile=response.outputs['formated_analogs'].get_url())
            datafile=formated_analogs_file)
        response.outputs['output'].file = viewer_html
        response.update_status('Successfully generated analogs viewer', 90)
        LOGGER.info('rendered pages: %s ', viewer_html)

        response.update_status('execution ended', 100)
        LOGGER.debug("total execution took %s seconds.",
                     time.time() - process_start_time)
        return response
Example #35
from netCDF4 import Dataset as NC
from cdo import Cdo
from datetime import datetime, timedelta
from osgeo import gdal
from glob import glob
from os.path import join, split
cdo = Cdo()
reftime = "2008-1-1"

components = ["vx", "vy", "vv"]

download_dir = "data"

for glacier in ["W69.10N"]:
    f_tiffs = glob(f"{download_dir}/TSX_{glacier}_*_v02.0.tif")

    for f_tiff in f_tiffs:
        f_nc = f_tiff.replace(".tif", ".nc")
        print(f"Converting {f_tiff} to {f_nc}")
        # use gdal's python binding to convert GeoTiff to netCDF
        # advantage of GDAL: it gets the projection information right
        # disadvantage: the variable is named "Band1", lacks metadata
        ds = gdal.Open(f_tiff)
        ds = gdal.Translate(f_nc, ds)
        ds = None

        # This deduces the mid-point (nominal) date from the filename
        _, _, start_date_str, end_date_str, _, var, _ = f_nc.split("_")
        start_date = datetime.strptime(start_date_str, "%d%b%y")
        end_date = datetime.strptime(end_date_str, "%d%b%y")
        nominal_date = start_date + (end_date - start_date) / 2
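The snippet stops after computing the nominal date; a plausible continuation inside the loop (an assumption, not shown in the source) would stamp that date onto the converted file with the standard CDO operators setreftime and settaxis:

        # hypothetical continuation: the output name is illustrative
        f_out = f_nc.replace(".nc", "_timestamped.nc")
        cdo.settaxis(nominal_date.strftime("%Y-%m-%d,%H:%M:%S"),
                     input=f"-setreftime,{reftime} {f_nc}", output=f_out)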
Example #36
def balances(wdir, plotpath, filena, name, model):
    """Plot everything related to energy and water mass budgets.

    This method provides climatological annual mean maps of TOA, atmospheric
    and surface energy budgets, time series of annual mean anomalies in the
    two hemispheres and meridional sections of meridional enthalpy
    transports. Scatter plots of oceanic vs. atmospheric meridional
    enthalpy transports are also provided.

    Arguments:
    - wdir: the working directory;
    - plotpath: the path where the plot has to be saved;
    - filena: the files containing input fields;
    - name: the name of the variable associated with the input field;
    - model: the name of the model to be analysed;
    """
    cdo = Cdo()
    nsub = len(filena)
    pdir = plotpath
    plotentname = pdir + '/{}_heat_transp.png'.format(model)
    plotwmbname = pdir + '/{}_wmb_transp.png'.format(model)
    plotlatname = pdir + '/{}_latent_transp.png'.format(model)

    # timesery = np.zeros([nsub, 2])
    dims, ndims, tmean, zmean, timeser = global_averages(nsub, filena, name)
    transp_mean = np.zeros([nsub, ndims[1]])
    lat_maxm = np.zeros([nsub, 2, len(dims[3])])
    tr_maxm = np.zeros([nsub, 2, len(dims[3])])
    lim = [55, 55, 25]
    for i_f in np.arange(nsub):
        transp = transport(zmean[i_f, :, :], timeser[i_f, :, 0], dims[1])
        transp_mean[i_f, :], list_peak = transports_preproc(
            dims[1], ndims[3], lim[i_f], transp)
        lat_maxm[i_f, :, :] = list_peak[0]
        tr_maxm[i_f, :, :] = list_peak[1]
    if nsub == 3:
        ext_name = [
            'TOA Energy Budget', 'Atmospheric Energy Budget',
            'Surface Energy Budget'
        ]
        transpty = (-6E15, 6E15)
        coords = [dims[0], dims[1]]
        plot_climap_eb(model, pdir, coords, tmean, ext_name)
        fig = plt.figure()
        strings = ['Meridional enthalpy transports', 'Latitude [deg]', '[W]']
        lats = dims[1]
        for i in np.arange(nsub):
            filename = filena[i] + '.nc'
            if name[i] == 'toab':
                nameout = 'total'
            elif name[i] == 'atmb':
                nameout = 'atmos'
            elif name[i] == 'surb':
                nameout = 'ocean'
            nc_f = wdir + '/{}_transp_mean_{}.nc'.format(nameout, model)
            removeif(nc_f)
            lat_model = 'lat_{}'.format(model)
            pr_output(transp_mean[i, :], filename, nc_f, nameout, lat_model)
            name_model = '{}_{}'.format(nameout, model)
            cdo.chname('{},{}'.format(nameout, name_model),
                       input=nc_f,
                       output='aux.nc')
            move('aux.nc', nc_f)
            cdo.chname('lat,{}'.format(lat_model), input=nc_f, output='aux.nc')
            move('aux.nc', nc_f)
            plot_1m_transp(lats, transp_mean[i, :], transpty, strings)
        plt.grid()
        plt.savefig(plotentname)
        plt.close(fig)
        plot_1m_scatter(model, pdir, lat_maxm, tr_maxm)
    elif nsub == 2:
        ext_name = ['Water mass budget', 'Latent heat budget']
        transpwy = (-2E9, 2E9)
        transply = (-6E15, 6E15)
        coords = [dims[0], dims[1]]
        plot_climap_wm(model, pdir, coords, tmean, ext_name, name)
        nc_f = wdir + '/{}_transp_mean_{}.nc'.format('wmb', model)
        removeif(nc_f)
        filena[0] = filena[0].split('.nc', 1)[0]
        filename = filena[0] + '.nc'
        pr_output(transp_mean[0, :], filename, nc_f, 'wmb', 'lat')
        nc_f = wdir + '/{}_transp_mean_{}.nc'.format('latent', model)
        removeif(nc_f)
        filena[1] = filena[1].split('.nc', 1)[0]
        filename = filena[1] + '.nc'
        pr_output(transp_mean[1, :], filename, nc_f, 'latent', 'lat')
        strings = ['Water mass transports', 'Latitude [deg]', '[kg*s-1]']
        fig = plt.figure()
        plot_1m_transp(dims[1], transp_mean[0, :], transpwy, strings)
        plt.grid()
        plt.savefig(plotwmbname)
        plt.close(fig)
        strings = ['Latent heat transports', 'Latitude [deg]', '[W]']
        fig = plt.figure()
        plot_1m_transp(dims[1], transp_mean[1, :], transply, strings)
        plt.grid()
        plt.savefig(plotlatname)
        plt.close(fig)
    for i_f in np.arange(nsub):
        fig = plt.figure()
        axi = plt.subplot(111)
        axi.plot(dims[3], timeser[i_f, :, 0], 'k', label='Global')
        axi.plot(dims[3], timeser[i_f, :, 1], 'r', label='SH')
        axi.plot(dims[3], timeser[i_f, :, 2], 'b', label='NH')
        plt.title('Annual mean {}'.format(ext_name[i_f]))
        plt.xlabel('Years')
        if ext_name[i_f] == 'Water mass budget':
            plt.ylabel('[Kg m-2 s-1]')
        else:
            plt.ylabel('[W/m2]')
        axi.legend(loc='upper center',
                   bbox_to_anchor=(0.5, -0.13),
                   shadow=True,
                   ncol=3)
        plt.tight_layout()
        plt.grid()
        plt.savefig(pdir + '/{}_{}_timeser.png'.format(model, name[i_f]))
        plt.close(fig)
Example #37
def _aggregate_specific_years(work_dir, infile, times, remove=True):
    """ aggregate infile to times with mean and sd"""

    cdo = Cdo()
    onameM = work_dir + os.sep + "temp" + os.sep + tempfile.NamedTemporaryFile(
    ).name.split('/')[-1]
    onameS = work_dir + os.sep + "temp" + os.sep + tempfile.NamedTemporaryFile(
    ).name.split('/')[-1]
    oname = work_dir + os.sep + "temp" + os.sep + tempfile.NamedTemporaryFile(
    ).name.split('/')[-1]
    tmpname = work_dir + os.sep + "temp" + os.sep + tempfile.NamedTemporaryFile(
    ).name.split('/')[-1]
    cdo.selyear(",".join([str(t) for t in times]),
                input=infile,
                output=tmpname,
                options='-f nc4 -b F32')
    cdo.timselmean(12, input=tmpname, output=onameM, options='-f nc4 -b F32')
    #cdo.timselstd(12,input=tmpname,output=onameS,options='-f nc4 -b F32')
    name = cdo.showname(input=onameM)
    cdo.setname(name[0] + "_std -timselstd,12",
                input=tmpname,
                output=onameS,
                options='-L -f nc4 -b F32')
    cdo.merge(input=[onameM, onameS], output=oname)
    if remove:
        os.remove(infile)
    os.remove(tmpname)
    os.remove(onameM)
    os.remove(onameS)

    return oname
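A hedged usage sketch for the aggregation helper above; the paths and years are hypothetical, and work_dir must already contain a 'temp' subdirectory:

merged = _aggregate_specific_years('/tmp/work', '/tmp/work/input.nc',
                                   [2000, 2001, 2002], remove=False)
print(merged)  # merged mean/std file written under /tmp/work/temp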
Example #38
def call(resource=[],
         variable=None,
         dimension_map=None,
         calc=None,
         calc_grouping=None,
         conform_units_to=None,
         memory_limit=None,
         prefix=None,
         regrid_destination=None,
         regrid_options='bil',
         level_range=None,
         geom=None,
         output_format_options=None,
         search_radius_mult=2.,
         select_nearest=False,
         select_ugid=None,
         spatial_wrapping=None,
         t_calendar=None,
         time_region=None,
         time_range=None,
         dir_output=None,
         output_format='nc'):
    '''
    ocgis operation call

    :param resource:
    :param variable: variable in the input file to be picked
    :param dimension_map: dimension map in case of unconventional storage of data
    :param calc: ocgis calc syntax for the calculation portion
    :param calc_grouping: time aggregate grouping
    :param conform_units_to:
    :param memory_limit: limit the amount of data to be loaded into the memory at once \
        if None (default) free memory is detected by birdhouse
    :param level_range: subset of given levels
    :param prefix: string for the file base name
    :param regrid_destination: file containing the target grid (griddes.txt or netCDF file)
    :param geom: name of shapefile stored in birdhouse shape cabinet
    :param output_format_options: output options for netCDF, e.g. compression level
    :param regrid_options: methods for regridding:
                          'bil' = Bilinear interpolation
                          'bic' = Bicubic interpolation
                          'dis' = Distance-weighted average remapping
                          'nn' = nearest neighbour
                          'con' = First-order conservative remapping
                          'laf' = largest area fraction remapping
    :param search_radius_mult: search radius for point geometries. All included gridboxes will be returned
    :param select_nearest: nearest neighbour selection for point geometries
    :param select_ugid: ugid for appropriate polygons
    :param spatial_wrapping: how to handle coordinates in case of subsets, options: None (default), 'wrap', 'unwrap'
    :param time_region: select single month
    :param time_range: sequence of two datetime.datetime objects to mark start and end point
    :param dir_output (default= curdir):
    :param output_format:
    :return: output file path
    '''
    LOGGER.info('Start ocgis module call function')
    from ocgis import OcgOperations, RequestDataset, env
    from ocgis.util.large_array import compute
    from datetime import datetime as dt
    import uuid

    # prepare the environment
    env.DIR_SHPCABINET = DIR_SHP
    env.OVERWRITE = True
    # env.DIR_OUTPUT = dir_output
    # LOGGER.debug(' **** env.DIR_OUTPUT  = %s ' % env.DIR_OUTPUT)

    if dir_output is None:
        dir_output = abspath(curdir)

    # check time_range format:

    if time_range is not None:
        try:
            LOGGER.debug('time_range type= %s , %s ' %
                         (type(time_range[0]), type(time_range[1])))
            if not isinstance(time_range[0], dt):  # plain dates -> full datetimes
                time_range = [
                    dt.combine(time_range[0], dt.min.time()),
                    dt.combine(time_range[1], dt.min.time())
                ]
            LOGGER.debug('time_range changed to type= %s , %s ' %
                         (type(time_range[0]), type(time_range[1])))
        except:
            LOGGER.exception('failed to convert data to datetime')

    #
    # if geom is not None:
    #     spatial_reorder = True
    #     spatial_wrapping = 'wrap'
    # else:
    #     spatial_reorder = False
    #     spatial_wrapping = None
    #

    if spatial_wrapping == 'wrap':
        spatial_reorder = True
    else:
        spatial_reorder = False
    LOGGER.debug('spatial_reorder: %s and spatial_wrapping: %s ' %
                 (spatial_reorder, spatial_wrapping))

    if prefix is None:
        prefix = str(uuid.uuid1())
        env.PREFIX = prefix
    #
    # if output_format_options is False:
    #     output_format_options = None
    # elif output_format_options is True:
    #     output_format_options = {'data_model': 'NETCDF4',  # NETCDF4_CLASSIC
    #                              'variable_kwargs': {'zlib': True, 'complevel': 9}}
    # else:
    if output_format_options is not None:
        LOGGER.info('output_format_options are set to %s ' %
                    (output_format_options))

    if type(resource) != list:
        resource = list([resource])
    # execute ocgis
    LOGGER.info('Execute ocgis module call function')

    # if has_Lambert_Conformal(resource) is True and geom is not None:
    #     LOGGER.debug('input has Lambert_Conformal projection and can not be processed with ocgis:\
    #      https://github.com/NCPP/ocgis/issues/424')
    #     return None
    # else:
    try:
        LOGGER.debug('call module curdir = %s ' % abspath(curdir))
        rd = RequestDataset(resource,
                            variable=variable,
                            level_range=level_range,
                            dimension_map=dimension_map,
                            conform_units_to=conform_units_to,
                            time_region=time_region,
                            t_calendar=t_calendar,
                            time_range=time_range)

        # from ocgis.constants import DimensionMapKey
        # rd.dimension_map.set_bounds(DimensionMapKey.TIME, None)

        ops = OcgOperations(
            dataset=rd,
            output_format_options=output_format_options,
            dir_output=dir_output,
            spatial_wrapping=spatial_wrapping,
            spatial_reorder=spatial_reorder,
            # regrid_destination=rd_regrid,
            # options=options,
            calc=calc,
            calc_grouping=calc_grouping,
            geom=geom,
            output_format=output_format,
            prefix=prefix,
            search_radius_mult=search_radius_mult,
            select_nearest=select_nearest,
            select_ugid=select_ugid,
            add_auxiliary_files=False)
        LOGGER.info('OcgOperations set')
    except Exception:
        LOGGER.exception('failed to setup OcgOperations')
        return None

    try:
        LOGGER.info('ocgis module call as ops.execute()')
        geom_file = ops.execute()
    except Exception:
        LOGGER.exception('failed to execute ocgis operation')
        return None
    #
    # try:
    #     from numpy import sqrt
    #     from flyingpigeon.utils import FreeMemory
    #
    #     if memory_limit is None:
    #         f = FreeMemory()
    #         mem_kb = f.user_free
    #         mem_mb = mem_kb / 1024.
    #         mem_limit = mem_mb / 2.  # set limit to half of the free memory
    #     else:
    #         mem_limit = memory_limit
    #
    #     if mem_limit >= 1024. * 4:
    #         mem_limit = 1024. * 4
    #         # 475.0 MB for openDAP
    #
    #     LOGGER.info('memory_limit = %s Mb' % (mem_limit))
    #
    #     data_kb = ops.get_base_request_size()['total']
    #     data_mb = data_kb / 1024.
    #
    #     # data_kb = size['total']/reduce(lambda x,y: x*y,size['variables'][variable]['value']['shape'])
    #     LOGGER.info('data_mb  = %s Mb' % (data_mb))
    #
    #     if data_mb <= mem_limit:  # input is smaler than the half of free memory size
    #         try:
    #             LOGGER.info('ocgis module call as ops.execute()')
    #             geom_file = ops.execute()
    #         except Exception as e:
    #             LOGGER.debug('failed to execute ocgis operation')
    #             raise
    #             return None
    #
    #     else:
    #         ##########################
    #         # calcultion of chunk size
    #         ##########################
    #         try:
    #             size = ops.get_base_request_size()
    #             nb_time_coordinates_rd = size['variables'][variable]['temporal']['shape'][0]
    #             element_in_kb = size['total']/reduce(lambda x, y: x*y, size['variables'][variable]['value']['shape'])
    #             element_in_mb = element_in_kb / 1024.
    #             tile_dim = sqrt(mem_limit/(element_in_mb*nb_time_coordinates_rd))  # maximum chunk size
    #
    #             LOGGER.info('ocgis module call compute with chunks')
    #             if calc is None:
    #                 calc = '%s=%s*1' % (variable, variable)
    #                 LOGGER.info('calc set to = %s ' % calc)
    #             ops = OcgOperations(dataset=rd,
    #                                 output_format_options=output_format_options,
    #                                 dir_output=dir_output,
    #                                 spatial_wrapping=spatial_wrapping,
    #                                 spatial_reorder=spatial_reorder,
    #                                 # regrid_destination=rd_regrid,
    #                                 # options=options,
    #                                 calc=calc,
    #                                 calc_grouping=calc_grouping,
    #                                 geom=geom,
    #                                 output_format=output_format,
    #                                 prefix=prefix,
    #                                 search_radius_mult=search_radius_mult,
    #                                 select_nearest=select_nearest,
    #                                 select_ugid=select_ugid,
    #                                 add_auxiliary_files=False)
    #             geom_file = compute(ops, tile_dimension=int(tile_dim), verbose=True)
    #             print 'ocgis calculated'
    #         except Exception as e:
    #             LOGGER.debug('failed to compute ocgis with chunks')
    #             raise
    #             return None
    #     LOGGER.info('Succeeded with ocgis module call function')
    # except:
    #     LOGGER.exception('failed to compare dataload with free memory, calling as execute instead')

    ############################################
    # remapping according to regrid informations
    ############################################
    if regrid_destination is not None:
        try:
            from cdo import Cdo
            cdo = Cdo()
            output = '%s.nc' % uuid.uuid1()
            remap = 'remap%s' % regrid_options
            # pick the matching remap operator and call it via getattr
            # instead of assembling a command string for exec
            call = [op for op in dir(cdo) if remap in op]
            remap_op = getattr(cdo, str(call[0]))
            output = remap_op(regrid_destination, input=geom_file,
                              output=output)
        except Exception:
            LOGGER.exception('failed to remap')
            return None
    else:
        output = geom_file

    # try:
    #     from flyingpigeon.utils import unrotate_pole
    #     lat, lon = unrotate_pole(output)
    # except:
    #     LOGGER.exception('failed to unrotate pole')
    return output
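A minimal usage sketch of the wrapper above (it appears to be the `call` helper that example #41 invokes); the input file and grid description are hypothetical:

from datetime import datetime as dt

out_nc = call(resource='tas_EUR-11_day.nc',        # hypothetical input file
              variable='tas',
              time_range=[dt(1971, 1, 1), dt(2000, 12, 31)],
              regrid_destination='grid_desc.txt',  # hypothetical CDO grid description
              regrid_options='con',                # picks the remapcon operator
              output_format='nc')
print(out_nc)  # path of the subsetted, remapped file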
Example #39
File: remap.py  Project: yuanyao0804/RVIC
# -*- coding: utf-8 -*-
'''
remap.py
'''

import os
from cdo import Cdo
cdo = Cdo()
from logging import getLogger
from .log import LOG_NAME

# -------------------------------------------------------------------- #
# create logger
log = getLogger(LOG_NAME)
# -------------------------------------------------------------------- #


# -------------------------------------------------------------------- #
# Remap a file using CDO
def remap(grid_file, in_file, out_file, operator='remapcon',
          remap_options=None):
    '''Remap infile using cdo'''

    log.info('Remapping %s to %s', in_file, out_file)

    remap_method = getattr(cdo, operator)

    if remap_options:
        remap_method(grid_file, input=in_file, output=out_file,
                     options=remap_options)
    else:
        remap_method(grid_file, input=in_file, output=out_file)
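A short usage sketch with hypothetical file names; per the signature above, 'remapcon' (first-order conservative remapping) is the default operator:

remap('target_grid.txt', 'runoff_in.nc', 'runoff_out.nc',
      operator='remapbil', remap_options='-f nc4')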
Example #40
 def test_returnCdf(self):
     cdo = Cdo(cdfMod=CDF_MOD)
     ofile = tempfile.NamedTemporaryFile(delete=True, prefix='cdoPy').name
     press = cdo.stdatm("0", output=ofile, options="-f nc")
     self.assertEqual(ofile, press)
     variables = cdo.stdatm("0", options="-f nc", returnCdf=True).variables
     print(variables)
     cdf = cdo.stdatm("0", options="-f nc", returnCdf=True)
     press = cdf.variables['P'][:]
     self.assertEqual(1013.25, press.min())
     press = cdo.stdatm("0", output=ofile, options="-f nc")
     self.assertEqual(ofile, press)
     cdo.setReturnArray()
     outfile = 'test_returnCdf.nc'
     cdf = cdo.stdatm("0", output=outfile, options="-f nc")
     press = cdf.variables["P"][:]
     self.assertEqual(1013.25, press.min())
     cdo.unsetReturnArray()
     press = cdo.stdatm("0", output=outfile, options="-f nc")
     self.assertEqual(press, outfile)
     cdf = cdo.stdatm("0", output=outfile, options="-f nc", returnCdf=True)
     press = cdf.variables["P"][:]
     self.assertEqual(1013.25, press.min())
     print("press = " + press.min().__str__())
     cdo.unsetReturnArray()
     press = cdo.stdatm("0", output=ofile, options="-f nc")
     self.assertEqual(ofile, press)
     rm([
         outfile,
     ])
Example #41
def get_anomalies(nc_file, frac=0.2, reference=None, method='ocgis', sseas='serial', variable=None):
    """
    Anomalisation of data subsets for weather classification by subtracting a smoothed annual cycle

    :param nc_file: input netCDF file
    :param frac: Number between 0-1 for strength of smoothing
               (0 = close to the original data, 1 = flat line)
               default = 0.2
    :param reference: Period to calculate annual cycle
    :param method: backend used to compute the annual cycle ('ocgis' or 'cdo')
    :param sseas: 'serial' for the serial smoothing loop; any other value
                  enables multiprocessing
    :param variable: variable name (detected from the file if None)

    :returns str: path to output netCDF file
    """
    from netCDF4 import Dataset

    if variable is None:
        variable = utils.get_variable(nc_file)
        # if get_variable returned several candidates, pick the 3D one
        if not isinstance(variable, str):
            _ds = Dataset(nc_file)
            # works only if there is exactly one 3D variable
            for j in variable:
                if len(_ds.variables[j].dimensions) == 3:
                    _var = j
            variable = _var
            _ds.close()
    LOGGER.debug('3D variable selected: %s' % variable)

    try:
        if (method == 'cdo'):
            from cdo import Cdo
            from os import system

            ip2, nc_anual_cycle = mkstemp(dir='.', suffix='.nc')

            cdo = Cdo()
            #ip, nc_anual_cycle_tmp = mkstemp(dir='.', suffix='.nc')
            # TODO: if reference is none, use utils.get_time for nc_file to set the ref range
            #       But will need to fix 360_day issue (use get_time_nc from analogs)

            # com = 'seldate'
            # comcdo = 'cdo %s,%s-%s-%s,%s-%s-%s %s %s' % (com, reference[0].year, reference[0].month, reference[0].day,
            #                                              reference[1].year, reference[1].month, reference[1].day,
            #                                              nc_file, nc_anual_cycle_tmp)
            # LOGGER.debug('CDO: %s' % (comcdo))
            # system(comcdo)

            # Sub cdo with this trick... Cdo keeps the precision and anomalies are integers...
            calc = '%s=%s'%(variable, variable)
            nc_anual_cycle_tmp = call(nc_file, time_range=reference, variable=variable, calc=calc)
            nc_anual_cycle = cdo.ydaymean(input=nc_anual_cycle_tmp, output=nc_anual_cycle)
        else:
            calc = [{'func': 'mean', 'name': variable}]
            calc_grouping = ['day', 'month']
            nc_anual_cycle = call(nc_file,
                                  calc=calc,
                                  calc_grouping=calc_grouping,
                                  variable=variable,
                                  time_range=reference)
        LOGGER.info('annual cycle calculated: %s' % (nc_anual_cycle))

    except Exception as e:
        msg = 'failed to calculate annual cycle %s' % e
        LOGGER.error(msg)
        raise Exception(msg)

    try:
        # spline for smoothing; these imports are used below
        import statsmodels.api as sm
        from numpy import tile, empty, linspace
        from cdo import Cdo
        cdo = Cdo()
        # variable = utils.get_variable(nc_file)
        ds = Dataset(nc_anual_cycle, mode='a')
        vals = ds.variables[variable]
        vals_sm = empty(vals.shape)
        ts = vals.shape[0]
        x = linspace(1, ts*3, num=ts*3, endpoint=True)

        if ('serial' not in sseas):
            # Multiprocessing =======================

            from multiprocessing import Pool
            pool = Pool()

            valex = [0.]
            valex = valex*vals.shape[1]*vals.shape[2]

            # TODO redo with reshape
            ind = 0
            for lat in range(vals.shape[1]):
                for lon in range(vals.shape[2]):
                    valex[ind] = vals[:, lat, lon]
                    ind += 1

            LOGGER.debug('Start smoothing with multiprocessing')
            # TODO fraction option frac=... is not used here
            tmp_sm = pool.map(_smooth, valex)
            pool.close()
            pool.join()

            # TODO redo with reshape
            ind = 0
            for lat in range(vals.shape[1]):
                for lon in range(vals.shape[2]):
                    vals_sm[:, lat, lon] = tmp_sm[ind]
                    ind += 1
        else:
            # Serial ==================================
            vals_sm = empty(vals.shape)
            for lat in range(vals.shape[1]):
                for lon in range(vals.shape[2]):
                    try:
                        y = tile(vals[:, lat, lon], 3)
                        # ys = smooth(y, window_size=91, order=2, deriv=0, rate=1)[ts:ts*2]
                        ys = sm.nonparametric.lowess(y, x, frac=frac)[ts:ts*2, 1]
                        vals_sm[:, lat, lon] = ys
                    except Exception:
                        msg = 'failed for lat %s lon %s' % (lat, lon)
                        LOGGER.exception(msg)
                        raise Exception(msg)
                LOGGER.debug('done for %s - %s ' % (lat, lon))

        vals[:, :, :] = vals_sm[:, :, :]
        ds.close()
        LOGGER.info('smoothing of annual cycle done')
    except Exception:
        msg = 'failed smoothing of annual cycle'
        LOGGER.exception(msg)
        raise Exception(msg)
    try:
        ip, nc_anomal = mkstemp(dir='.', suffix='.nc')
        try:
            nc_anomal = cdo.sub(input=[nc_file, nc_anual_cycle], output=nc_anomal)
            LOGGER.info('cdo.sub; anomalisation done: %s ' % nc_anomal)
        except Exception:
            # bug cdo: https://code.mpimet.mpg.de/boards/1/topics/3909
            ip3, nc_in1 = mkstemp(dir='.', suffix='.nc')
            ip4, nc_in2 = mkstemp(dir='.', suffix='.nc')
            ip5, nc_out = mkstemp(dir='.', suffix='.nc')
            nc_in1 = cdo.selvar(variable, input=nc_file, output=nc_in1)
            nc_in2 = cdo.selvar(variable, input=nc_anual_cycle, output=nc_in2)
            nc_out = cdo.sub(input=[nc_in1, nc_in2], output=nc_out)
            nc_anomal = nc_out
    except Exception:
        msg = 'failed subtraction of annual cycle'
        LOGGER.exception(msg)
        raise Exception(msg)
    return nc_anomal
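A brief usage sketch; the input file name is hypothetical and the reference period follows the datetime handling shown above:

from datetime import datetime as dt

anomalies_nc = get_anomalies('tas_day_model.nc', frac=0.2,
                             reference=[dt(1971, 1, 1), dt(2000, 12, 31)],
                             method='ocgis', sseas='serial')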
Example #42
def write_output(wdir, model, file_list, varlist):
    """Write auxiliary variables to new NC files, write new attributes.

    Arguments:
    - wdir: the work directory where the outputs are stored;
    - model: the name of the model;
    - file_list: the list containing the input fields;
    - varlist: a list containing the variables to be written to NC files, i.e.
      tlcl (the temperature at the LCL), t_z (the temperature at the boundary
      layer top), htop (the height of the boundary layer top); their dimensions
      are as (time, lat, lon);

    Author:
    Valerio Lembo, University of Hamburg (2019).
    """
    import fourier_coefficients
    cdo = Cdo()
    fourc = fourier_coefficients

    dataset = Dataset(file_list[0])
    ztlcl = varlist[0]
    t_z = varlist[1]
    htop = varlist[2]
    tlcl_temp = wdir + '/tlcl.nc'
    removeif(tlcl_temp)
    with Dataset(tlcl_temp, 'w', format='NETCDF4') as w_nc_fid:
        w_nc_fid.description = (
            "Monthly mean LCL temperature from {} model. ".format(model) +
            "Calculated by Thermodynamics model diagnostics " +
            "in ESMValTool. Author Valerio Lembo, " +
            "Meteorologisches Institut, Universitaet Hamburg.")
        with Dataset(file_list[0]) as dataset:
            fourc.extr_time(dataset, w_nc_fid)
            fourc.extr_lat(dataset, w_nc_fid, 'lat')
            fourc.extr_lon(dataset, w_nc_fid)
        w_nc_var = w_nc_fid.createVariable(
            'tlcl', 'f8', ('time', 'lat', 'lon'))
        w_nc_var.setncatts({
            'long_name':
            "LCL Temperature",
            'units':
            "K",
            'level_desc':
            "surface",
            'var_desc':
            "LCL temperature from LCL height "
            "(Magnus formulas and dry adiabatic lapse ratio)",
            'statistic':
            'monthly mean'
        })
        w_nc_fid.variables['tlcl'][:] = ztlcl
    tabl_temp = wdir + '/tabl.nc'
    removeif(tabl_temp)
    with Dataset(tabl_temp, 'w', format='NETCDF4') as w_nc_fid:
        w_nc_fid.description = (
            "Monthly mean BL top temperature for {} model. ".format(model) +
            "Calculated by Thermodynamics model diagnostics " +
            "in ESMValTool. Author Valerio Lembo, " +
            "Meteorologisches Institut, Universitaet Hamburg.")
        with Dataset(file_list[0]) as dataset_tabl:
            fourc.extr_time(dataset_tabl, w_nc_fid)
            fourc.extr_lat(dataset_tabl, w_nc_fid, 'lat')
            fourc.extr_lon(dataset_tabl, w_nc_fid)
        w_nc_var = w_nc_fid.createVariable(
            'tabl', 'f8', ('time', 'lat', 'lon'))
        w_nc_var.setncatts({
            'long_name':
            "Temperature at BL top",
            'units':
            "K",
            'level_desc':
            "surface",
            'var_desc':
            "Temperature at the Boundary Layer top, "
            "from boundary layer thickness and barometric equation",
            'statistic':
            'monthly mean'
        })
        w_nc_fid.variables['tabl'][:] = t_z
    htop_temp = wdir + '/htop.nc'
    removeif(htop_temp)
    with Dataset(htop_temp, 'w', format='NETCDF4') as w_nc_fid:
        w_nc_fid.description = (
            "Monthly mean height of the BL top for {} model. ".format(model) +
            "Calculated by Thermodynamics model diagnostics " +
            "in ESMValTool. Author Valerio Lembo, " +
            "Meteorologisches Institut, Universitaet Hamburg.")
        with Dataset(file_list[0]) as dataset_htop:
            fourc.extr_time(dataset_htop, w_nc_fid)
            fourc.extr_lat(dataset_htop, w_nc_fid, 'lat')
            fourc.extr_lon(dataset_htop, w_nc_fid)
        w_nc_var = w_nc_fid.createVariable(
            'htop', 'f8', ('time', 'lat', 'lon'))
        w_nc_var.setncatts({
            'long_name':
            "Height at BL top",
            'units':
            "m",
            'level_desc':
            "surface",
            'var_desc':
            "Height at the Boundary Layer top, "
            "from boundary layer thickness and barometric equation",
            'statistic':
            'monthly mean'
        })
        w_nc_fid.variables['htop'][:] = htop
    tlcl_file = wdir + '/{}_tlcl.nc'.format(model)
    cdo.setrtomiss('400,1e36', input=tlcl_temp, output=tlcl_file)
    tabl_file = wdir + '/{}_tabl.nc'.format(model)
    cdo.setrtomiss('400,1e36', input=tabl_temp, output=tabl_file)
    htop_file = wdir + '/{}_htop.nc'.format(model)
    cdo.setrtomiss('12000,1e36', input=htop_temp, output=htop_file)
    os.remove(tlcl_temp)
    os.remove(tabl_temp)
    os.remove(htop_temp)
    return htop_file, tabl_file, tlcl_file
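A hypothetical invocation; the work directory and model name are placeholders, and ztlcl, t_z and htop are the (time, lat, lon) arrays described in the docstring:

htop_file, tabl_file, tlcl_file = write_output(
    '/tmp/workdir', 'MPI-ESM-LR', file_list, [ztlcl, t_z, htop])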
Example #43
 def test_combine(self):
     cdo = Cdo(cdfMod=CDF_MOD)
     cdo.debug = DEBUG
     stdatm = cdo.stdatm("0", options="-f nc")
     stdatm_ = cdo.stdatm("0", options="-f nc")
Example #44
 def test_returnMaArray(self):
     cdo = Cdo(cdfMod=CDF_MOD)
     cdo.debug = DEBUG
     topo = cdo.topo(options='-f nc', returnMaArray='topo')
     self.assertEqual(-1890.0, round(topo.mean()))
     bathy = cdo.setrtomiss(0,
                            10000,
                            input=cdo.topo(options='-f nc'),
                            returnMaArray='topo')
     self.assertEqual(-3386.0, round(bathy.mean()))
     oro = cdo.setrtomiss(-10000,
                          0,
                          input=cdo.topo(options='-f nc'),
                          returnMaArray='topo')
     self.assertEqual(1142.0, round(oro.mean()))
     bathy = cdo.remapnn('r2x2',
                         input=cdo.topo(options='-f nc'),
                         returnMaArray='topo')
     self.assertEqual(-4298.0, bathy[0, 0])
     self.assertEqual(-2669.0, bathy[0, 1])
     ta = cdo.remapnn('r2x2', input=cdo.topo(options='-f nc'))
     tb = cdo.subc(-2669.0, input=ta)
     # dividing by a field that contains a zero produces a missing value
     # there, which returnMaArray exposes as a masked element
     withMask = cdo.div(input=ta + " " + tb, returnMaArray='topo')
     self.assertEqual('--', withMask[0, 1].__str__())
     self.assertEqual(False, withMask.mask[0, 0])
     self.assertEqual(False, withMask.mask[1, 0])
     self.assertEqual(False, withMask.mask[1, 1])
     self.assertEqual(True, withMask.mask[0, 1])
Example #45
def input_data(wdir, file_list):
    """Manipulate input fields and read datasets.

    Arguments:
    - wdir: the working directory path;
    - file_list: the list of file containing ts, hus,
    ps, uas, vas, hfss, te;

    Author:
    Valerio Lembo, University of Hamburg, 2019
    """
    cdo = Cdo()
    ts_file = file_list[0]
    #removeif(ts_miss_file)
    #cdo.setctomiss('0', input=file_list[0], output=ts_miss_file)
    hus_file = file_list[1]
    #removeif(hus_miss_file)
    #cdo.setctomiss('0', input=file_list[1], output=hus_miss_file)
    ps_file = file_list[2]
    #removeif(ps_miss_file)
    #cdo.setctomiss('0', input=file_list[2], output=ps_miss_file)
    #vv_missfile = wdir + '/V.nc'
    #removeif(vv_missfile)
    vv_file = wdir + '/V.nc'
    #removeif(vv_file)
    cdo.sqrt(
        input='-add -sqr {} -sqr {}'.format(file_list[3], file_list[4]),
        options='-b F32',
        output=vv_file)
    #cdo.setctomiss('0', input=vv_file, output=vv_missfile)
    #os.remove(vv_file)
    hfss_file = file_list[5]
    #removeif(hfss_miss_file)
    #cdo.setctomiss('0', input=file_list[5], output=hfss_miss_file)
    te_file = file_list[6]
    #removeif(te_miss_file)
    #cdo.setctomiss('0', input=file_list[6], output=te_miss_file)
    with Dataset(ts_file) as dataset:
        t_s = dataset.variables['ts'][:, :, :]
    with Dataset(hus_file) as dataset:
        hus = dataset.variables['hus'][:, :, :, :]
        lev = dataset.variables['plev'][:]
    with Dataset(ps_file) as dataset:
        p_s = dataset.variables['ps'][:, :, :]
    with Dataset(vv_file) as dataset:
        vv_hor = dataset.variables['uas'][:, :, :]
    with Dataset(hfss_file) as dataset:
        hfss = dataset.variables['hfss'][:, :, :]
    with Dataset(te_file) as dataset:
        t_e = dataset.variables['rlut'][:, :, :]
    huss = hus[:, 0, :, :]
    huss = np.where(lev[0] >= p_s, huss, 0.)
    nlev = len(lev)
    for l_l in range(nlev):
        aux = hus[:, l_l, :, :]
        aux = np.where((p_s >= lev[l_l]), aux, 0.)
        huss = huss + aux
    #remove_files = [
        #ts_file, hus_file, ps_file, vv_file, hfss_file,
        #te_file
    #]
    #for filen in remove_files:
    #    os.remove(filen)
    return hfss, huss, p_s, t_e, t_s, vv_hor
Example #46
File: cmip5.py  Project: weilin2018/pycmbs
    def get_model_data_generic(self, interval='season', **kwargs):
        """
        unique parameters are:
            filename - file basename
            variable - name of the variable as the short_name in the netcdf file

            kwargs is a dictionary with keys for each model. Then a dictionary with properties follows

        """

        if self.type not in kwargs:
            print('')
            print('WARNING: it is not possible to get data using generic '
                  'function, as method missing: ', self.type, kwargs.keys())
            assert False

        locdict = kwargs[self.type]

        # read settings and details from the keyword arguments
        # no defaults; everything should be explicitly specified in either the config file or the dictionaries
        varname = locdict.pop('variable', None)
        #~ print self.type
        #~ print locdict.keys()
        assert varname is not None, 'ERROR: provide varname!'

        units = locdict.pop('unit', None)
        assert units is not None, 'ERROR: provide unit!'

        lat_name = locdict.pop('lat_name', 'lat')
        lon_name = locdict.pop('lon_name', 'lon')
        model_suffix = locdict.pop('model_suffix', None)
        model_prefix = locdict.pop('model_prefix', None)
        file_format = locdict.pop('file_format')
        scf = locdict.pop('scale_factor')
        valid_mask = locdict.pop('valid_mask')
        custom_path = locdict.pop('custom_path', None)
        thelevel = locdict.pop('level', None)

        target_grid = self._actplot_options['targetgrid']
        interpolation = self._actplot_options['interpolation']

        if custom_path is None:
            filename1 = self.get_raw_filename(
                varname,
                **kwargs)  # routine needs to be implemented by each subclass
        else:
            filename1 = custom_path + self.get_raw_filename(varname, **kwargs)

        if filename1 is None:
            print_log(WARNING, 'No valid model input data')
            return None

        force_calc = False

        if self.start_time is None:
            raise ValueError('Start time needs to be specified')
        if self.stop_time is None:
            raise ValueError('Stop time needs to be specified')

        #/// PREPROCESSING ///
        cdo = Cdo()
        s_start_time = str(self.start_time)[0:10]
        s_stop_time = str(self.stop_time)[0:10]

        #1) select timeperiod and generate monthly mean file
        if target_grid == 't63grid':
            gridtok = 'T63'
        else:
            gridtok = 'SPECIAL_GRID'

        # target filename
        file_monthly = (filename1[:-3] + '_' + s_start_time + '_' +
                        s_stop_time + '_' + gridtok + '_monmean.nc')
        file_monthly = get_temporary_directory() + os.path.basename(
            file_monthly)

        sys.stdout.write('\n *** Model file monthly: %s\n' % file_monthly)

        if not os.path.exists(filename1):
            print('WARNING: File not existing: ' + filename1)
            return None

        cdo.monmean(options='-f nc',
                    output=file_monthly,
                    input='-' + interpolation + ',' + target_grid +
                    ' -seldate,' + s_start_time + ',' + s_stop_time + ' ' +
                    filename1,
                    force=force_calc)

        sys.stdout.write('\n *** Reading model data... \n')
        sys.stdout.write('     Interval: ' + interval + '\n')

        #2) calculate monthly or seasonal climatology
        if interval == 'monthly':
            mdata_clim_file = file_monthly[:-3] + '_ymonmean.nc'
            mdata_sum_file = file_monthly[:-3] + '_ymonsum.nc'
            mdata_N_file = file_monthly[:-3] + '_ymonN.nc'
            mdata_clim_std_file = file_monthly[:-3] + '_ymonstd.nc'
            cdo.ymonmean(options='-f nc -b 32',
                         output=mdata_clim_file,
                         input=file_monthly,
                         force=force_calc)
            cdo.ymonsum(options='-f nc -b 32',
                        output=mdata_sum_file,
                        input=file_monthly,
                        force=force_calc)
            cdo.ymonstd(options='-f nc -b 32',
                        output=mdata_clim_std_file,
                        input=file_monthly,
                        force=force_calc)
            cdo.div(options='-f nc',
                    output=mdata_N_file,
                    input=mdata_sum_file + ' ' + mdata_clim_file,
                    force=force_calc)  # number of samples
        elif interval == 'season':
            mdata_clim_file = file_monthly[:-3] + '_yseasmean.nc'
            mdata_sum_file = file_monthly[:-3] + '_yseassum.nc'
            mdata_N_file = file_monthly[:-3] + '_yseasN.nc'
            mdata_clim_std_file = file_monthly[:-3] + '_yseasstd.nc'
            cdo.yseasmean(options='-f nc -b 32',
                          output=mdata_clim_file,
                          input=file_monthly,
                          force=force_calc)
            cdo.yseassum(options='-f nc -b 32',
                         output=mdata_sum_file,
                         input=file_monthly,
                         force=force_calc)
            cdo.yseasstd(options='-f nc -b 32',
                         output=mdata_clim_std_file,
                         input=file_monthly,
                         force=force_calc)
            cdo.div(options='-f nc -b 32',
                    output=mdata_N_file,
                    input=mdata_sum_file + ' ' + mdata_clim_file,
                    force=force_calc)  # number of samples
        else:
            raise ValueError(
                'Unknown temporal interval. Can not perform preprocessing!')

        if not os.path.exists(mdata_clim_file):
            return None

        #3) read data
        if interval == 'monthly':
            thetime_cycle = 12
        elif interval == 'season':
            thetime_cycle = 4
        else:
            print(interval)
            raise ValueError('Unsupported interval!')
        mdata = Data(mdata_clim_file,
                     varname,
                     read=True,
                     label=self._unique_name,
                     unit=units,
                     lat_name=lat_name,
                     lon_name=lon_name,
                     shift_lon=False,
                     scale_factor=scf,
                     level=thelevel,
                     time_cycle=thetime_cycle)
        mdata_std = Data(mdata_clim_std_file,
                         varname,
                         read=True,
                         label=self._unique_name + ' std',
                         unit='-',
                         lat_name=lat_name,
                         lon_name=lon_name,
                         shift_lon=False,
                         level=thelevel,
                         time_cycle=thetime_cycle)
        mdata.std = mdata_std.data.copy()
        del mdata_std
        mdata_N = Data(mdata_N_file,
                       varname,
                       read=True,
                       label=self._unique_name + ' N',
                       unit='-',
                       lat_name=lat_name,
                       lon_name=lon_name,
                       shift_lon=False,
                       scale_factor=scf,
                       level=thelevel)
        mdata.n = mdata_N.data.copy()
        del mdata_N

        # ensure that climatology always starts with January, therefore set date and then sort
        mdata.adjust_time(year=1700,
                          day=15)  # set arbitrary time for climatology
        mdata.timsort()

        #4) read monthly data
        mdata_all = Data(file_monthly,
                         varname,
                         read=True,
                         label=self._unique_name,
                         unit=units,
                         lat_name=lat_name,
                         lon_name=lon_name,
                         shift_lon=False,
                         time_cycle=12,
                         scale_factor=scf,
                         level=thelevel)
        mdata_all.adjust_time(day=15)

        #mask_antarctica masks everything below 60 degrees S.
        #here we only mask Antarctica, if only LAND points shall be used
        if valid_mask == 'land':
            mask_antarctica = True
        elif valid_mask == 'ocean':
            mask_antarctica = False
        else:
            mask_antarctica = False

        if target_grid == 't63grid':
            mdata._apply_mask(
                get_T63_landseamask(False,
                                    area=valid_mask,
                                    mask_antarctica=mask_antarctica))
            mdata_all._apply_mask(
                get_T63_landseamask(False,
                                    area=valid_mask,
                                    mask_antarctica=mask_antarctica))
        else:
            tmpmsk = get_generic_landseamask(False,
                                             area=valid_mask,
                                             target_grid=target_grid,
                                             mask_antarctica=mask_antarctica)
            mdata._apply_mask(tmpmsk)
            mdata_all._apply_mask(tmpmsk)
            del tmpmsk

        mdata_mean = mdata_all.fldmean()

        mdata._raw_filename = filename1
        mdata._monthly_filename = file_monthly
        mdata._clim_filename = mdata_clim_file
        mdata._varname = varname

        # return data as a tuple list
        retval = (mdata_all.time, mdata_mean, mdata_all)

        del mdata_all
        return mdata, retval
Example #47
def init_mkthe_direntr(model, wdir, filedict, te_file, flags):
    """Compute the MEP with the direct method.

    Arguments:
    ---------
    model: the model name;
    wdir: the working directory where the outputs are stored;
    filedict: a dictionary of file names containing the input fields;
    te_file: a file containing the emission temperature computed from OLR;
    flags: (wat: a flag for the water mass budget module (y or n),
            entr: a flag for the material entropy production (y or n);
            met: a flag for the material entropy production method
            (1: indirect, 2, direct, 3: both);
            evap: a flag for the evaporation flux);

    Returns:
    -------
    A list of files containing the components of the MEP with the direct
    method.
    """
    cdo = Cdo()
    met = flags[3]
    if met in {'2', '3'}:
        evspsbl_file, prr_file = wfluxes(model, wdir, filedict, flags)
        hfss_file = filedict['/hfss_']
        hus_file = filedict['/hus_']
        ps_file = filedict['/ps_']
        ts_file = filedict['/ts_']
        uas_file = filedict['/uas_']
        vas_file = filedict['/vas_']
        uas_file = mon_from_day(wdir, model, 'uas', uas_file)
        vas_file = mon_from_day(wdir, model, 'vas', vas_file)
        mk_list = [
            ts_file, hus_file, ps_file, uas_file, vas_file, hfss_file, te_file
        ]
        htop_file, tabl_file, tlcl_file = mkthe_main(wdir, mk_list, model)
        # Working temperatures for the hydrological cycle
        tcloud_file = (wdir + '/{}_tcloud.nc'.format(model))
        removeif(tcloud_file)
        cdo.mulc('0.5',
                 input='-add {} {}'.format(tlcl_file, te_file),
                 options='-b F32',
                 output=tcloud_file)
        tcolumn_file = (wdir + '/{}_t_vertav_pot.nc'.format(model))
        removeif(tcolumn_file)
        cdo.mulc('0.5',
                 input='-add {} {}'.format(ts_file, tcloud_file),
                 options='-b F32',
                 output=tcolumn_file)
        # Working temperatures for the kin. en. diss. (updated)
        tasvert_file = (wdir + '/{}_tboundlay.nc'.format(model))
        removeif(tasvert_file)
        cdo.fldmean(input='-mulc,0.5 -add {} {}'.format(ts_file, tabl_file),
                    options='-b F32',
                    output=tasvert_file)
        aux_files = [
            evspsbl_file, htop_file, prr_file, tabl_file, tasvert_file,
            tcloud_file, tcolumn_file, tlcl_file
        ]
    else:
        aux_files = []
    return aux_files
Example #48
levels = 0
positive = "up"
long_name = "GEOS-Chem levels"
axis = "Z"
""")

# # Initialize the CDO Library and Prepare Command
#
# * Initialization is easy
# * You may get a warning
#

# In[ ]:

from cdo import Cdo
cdo = Cdo()
# cdo.CDO = '/work/ROMO/anaconda3/envs/geo/bin/cdo'

# # Now Configure the CDO Command
#
# cmd has 5 parts (run and described in bottom-to-top order)
#
# * Set the vertical axis,
# * rename TSTEP axis to time, remove TFLAG, set the time
# * convert from tons/year to tons/s, tons/m2/s, then kg/m2/s
# * Set units to mass fluxes
# * Set grid to LCC and Regrid to the lon/lat grid
#

# In[ ]:
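# A minimal sketch of the chained command described above; every file name
# and the unit-conversion factor are hypothetical placeholders, not the
# notebook's actual values. CDO applies the chain right-to-left.
out = cdo.remapbil(
    'lonlat_grid.txt',                          # regrid to the lon/lat grid
    input=('-setgrid,lcc_grid.txt '             # set grid to LCC
           '-setattribute,FLUX@units=kg/m2/s '  # set units to mass fluxes
           '-divc,31536000 '                    # tons/year -> per-second flux
           '-settaxis,2016-01-01,00:00,1mon '   # set the time axis
           '-setzaxis,geoschem_levels.txt '     # set the vertical axis
           'emissions.nc'),
    output='emissions_lonlat.nc')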
Example #49
def method_A(resource=[],
             start=None,
             end=None,
             timeslice=20,
             variable=None,
             title=None,
             cmap='seismic'):
    """returns the result

    :param resource: list of paths to netCDF files
    :param start: beginning of reference period (if None (default),
                  the first year of the consistent ensemble will be detected)
    :param end: end of comparison period (if None (default), the last year of the consistent ensemble will be detected)
    :param timeslice: period length for mean calculation of reference and comparison period
    :param variable: variable name to be detected in the netCDF file. If not set (not recommended),
                     the variable name will be detected
    :param title: str to be used as title for the signal map
    :param cmap: define the color scheme for signal map plotting

    :return: signal.nc, low_agreement_mask.nc, high_agreement_mask.nc, graphic.png, text.txt
    """
    from os.path import split
    from cdo import Cdo
    cdo = Cdo()
    cdo.forceOutput = True

    try:
        # preparing the resource
        file_dic = sort_by_filename(resource, historical_concatination=True)
        logger.info('file names sorted; experiments: %s' % len(file_dic.keys()))
    except Exception as e:
        msg = 'failed to sort the input files'
        logger.exception(msg)
        raise Exception(msg)

    try:
        mergefiles = []
        for key in file_dic.keys():

            if type(file_dic[key]) == list and len(file_dic[key]) > 1:
                input = []
                for i in file_dic[key]:
                    input.extend([i.replace(' ', '\\\ ')])
                # merge once per experiment, after collecting all its files
                mergefiles.append(
                    cdo.mergetime(input=input,
                                  output=key + '_mergetime.nc'))
            else:
                mergefiles.extend(file_dic[key])
        logger.info('datasets merged %s ' % mergefiles)
    except Exception as e:
        msg = 'seltime and mergetime failed %s' % e
        logger.exception(msg)
        raise Exception(e)

    try:
        text_src = open('infiles.txt', 'a')
        for key in file_dic.keys():
            text_src.write(key + '\n')
        text_src.close()
    except Exception as e:
        msg = 'failed to write source textfile'
        logger.exception(msg)
        raise Exception(msg)

# configure reference and compare period
    try:
        if start is None or end is None:
            st_set = set()
            en_set = set()
            for f in mergefiles:
                times = get_time(f)
                st_set.update([times[0].year])
                en_set.update([times[-1].year])
        if start is None:
            start = max(st_set)
        if end is None:
            end = min(en_set)
        logger.info('Start and End: %s - %s ' % (start, end))
        if start >= end:
            logger.error(
                'ensemble is inconsistent!!! start year is later than end year'
            )
    except Exception as e:
        msg = 'failed to detect start and end times of the ensemble'
        logger.exception(msg)
        raise Exception(msg)

# set the periods:
    try:
        start = int(start)
        end = int(end)
        if timeslice is None:
            timeslice = int((end - start) / 3)
            if timeslice == 0:
                timeslice = 1
        else:
            timeslice = int(timeslice)
        start1 = start
        start2 = start1 + timeslice - 1
        end1 = end - timeslice + 1
        end2 = end
        logger.info('timeslice and periods set')
    except Exception as e:
        msg = 'failed to set the periods'
        logger.exception(msg)
        raise Exception(msg)

    try:
        files = []
        for i, mf in enumerate(mergefiles):
            files.append(
                cdo.selyear('{0}/{1}'.format(start1, end2),
                            input=[mf.replace(' ', '\ ')],
                            output='file_{0}_.nc'.format(i)))  # python version
        logger.info('timeseries selected from defined start to end year')
    except Exception as e:
        msg = 'seltime and mergetime failed'
        logger.exception(msg)
        raise Exception(msg)

    try:
        # ensemble mean
        nc_ensmean = cdo.ensmean(input=files, output='nc_ensmean.nc')
        logger.info('ensemble mean calculation done')
    except Exception as e:
        msg = 'ensemble mean failed'
        logger.exception(msg)
        raise Exception(msg)

    try:
        # ensemble std
        nc_ensstd = cdo.ensstd(input=files, output='nc_ensstd.nc')
        logger.info('ensemble std calculation done')
    except Exception as e:
        msg = 'ensemble std failed'
        logger.exception(msg)
        raise Exception(msg)

#  get the signal as the difference between the end period (last years) and the start period (first years):
    try:
        selyearstart = cdo.selyear('%s/%s' % (start1, start2),
                                   input=nc_ensmean,
                                   output='selyearstart.nc')
        selyearend = cdo.selyear('%s/%s' % (end1, end2),
                                 input=nc_ensmean,
                                 output='selyearend.nc')
        meanyearst = cdo.timmean(input=selyearstart, output='meanyearst.nc')
        meanyearend = cdo.timmean(input=selyearend, output='meanyearend.nc')
        signal = cdo.sub(input=[meanyearend, meanyearst], output='signal.nc')
        logger.info('Signal calculation done')
    except Exception as e:
        msg = 'calculation of signal failed'
        logger.exception(msg)
        raise Exception(msg)

    # get the intermodel standard deviation (mean over whole period)
    try:
        # std_selyear = cdo.selyear('%s/%s' % (end1,end2), input=nc_ensstd, output='std_selyear.nc')
        # std = cdo.timmean(input = std_selyear, output = 'std.nc')

        std = cdo.timmean(input=nc_ensstd, output='std.nc')
        std2 = cdo.mulc('2', input=std, output='std2.nc')
        logger.info('calculation of internal model std for time period done')
    except Exception as e:
        msg = 'calculation of internal model std failed'
        logger.exception(msg)
        raise Exception(msg)
    try:
        absolut = cdo.abs(input=signal, output='absolut_signal.nc')
        high_agreement_mask = cdo.gt(
            input=[absolut, std2],
            output='large_change_with_high_model_agreement.nc')
        low_agreement_mask = cdo.lt(
            input=[absolut, std],
            output='small_signal_or_low_agreement_of_models.nc')
        logger.info('high and low mask done')
    except Exception as e:
        msg = 'calculation of robustness mask failed'
        logger.exception(msg)
        raise Exception(msg)

    try:
        if variable is None:
            variable = get_variable(signal)
        logger.info('variable to be plotted: %s' % variable)

        if title is None:
            title = 'Change of %s (difference of mean %s-%s to %s-%s)' % (
                variable, end1, end2, start1, start2)
        graphic = None
        graphic = map_ensembleRobustness(signal,
                                         high_agreement_mask,
                                         low_agreement_mask,
                                         variable=variable,
                                         cmap=cmap,
                                         title=title)

        logger.info('graphic generated')
    except Exception as e:
        msg = 'graphic generation failed: %s' % e
        logger.exception(msg)
        raise Exception(msg)

    return signal, low_agreement_mask, high_agreement_mask, graphic, text_src
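A hypothetical call of the routine above; the ensemble member file names are placeholders:

signal, low_mask, high_mask, graphic, txt = method_A(
    resource=['tas_model1.nc', 'tas_model2.nc', 'tas_model3.nc'],
    timeslice=20, variable='tas', cmap='seismic')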
Example #50
def remove_mean_trend(fana, varname):
    """
    Removing the smooth trend from 3D netcdf file
    """

    if type(fana) == list:
        fana = fana[0]

    backup_ana = 'orig_mod_' + path.basename(fana)

    cdo = Cdo(env=environ)

    # create backup of input file
    # Again, an issue with cdo versioning.
    # TODO: Fix CDO versioning workaround...

    try:
        cdo_cp = getattr(cdo, 'copy')
        cdo_cp(input=fana, output=backup_ana)
    except Exception:
        if not path.isfile(backup_ana):
            com = 'copy'
            comcdo = 'cdo -O %s %s %s' % (com, fana, backup_ana)
            system(comcdo)
        else:
            backup_ana = 'None'

    # create fmana - mean field
    fmana = '%s.nc' % uuid.uuid1()

    cdo_op = getattr(cdo, 'fldmean')
    cdo_op(input=fana, output=fmana)

    mean_arc_dataset = Dataset(fmana)
    mean_arcvar = mean_arc_dataset.variables[varname][:]
    data = mean_arcvar[:, 0, 0]
    mean_arc_dataset.close()
    x = np.linspace(0, len(data) - 1, len(data))
    y = data

    # Very slow method.
    # TODO: sub by fast one
    # (there is one in R, but doesn't want to add R to analogs...)
    spl = UnivariateSpline(x, y)

    smf = (len(y)) * np.var(y)
    spl.set_smoothing_factor(smf)
    trend = np.zeros(len(y), dtype=float)
    trend[:] = spl(x)

    #    orig_arc_dataset = Dataset(fana,'r+')
    orig_arc_dataset = Dataset(fana, 'a')
    orig_arcvar = orig_arc_dataset.variables.pop(varname)
    orig_data = orig_arcvar[:]

    det = (orig_data.T - trend).T

    orig_arcvar[:] = det

    at = {k: orig_arcvar.getncattr(k) for k in orig_arcvar.ncattrs()}
    maxat = np.max(det)
    minat = np.min(det)
    act = np.zeros((2), dtype=np.float32)
    valid = np.zeros((2), dtype=np.float32)
    act[0] = minat
    act[1] = maxat
    valid[0] = minat - abs(0.2 * minat)
    valid[1] = maxat + abs(0.2 * maxat)
    act_attr = {}
    val_attr = {}

    act_attr['actual_range'] = act
    val_attr['valid_range'] = valid
    orig_arcvar.setncatts(act_attr)
    orig_arcvar.setncatts(val_attr)

    orig_arc_dataset.close()

    return backup_ana
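A hypothetical call (input file and variable name are placeholders); the function detrends the file in place and returns the name of the backup copy:

backup = remove_mean_trend('slp_daily.nc', 'slp')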
Example #51
 def test_listAllOperators(self):
     cdo = Cdo(cdfMod=CDF_MOD)
     operators = cdo.operators
     operators.sort()
Example #52
            .format(mips[exper], exper, key))
        modelss = df_experkey.source_id.unique()
        modelss = modelss[modelss != 'NorCPM1']
        modelss = modelss[modelss != 'MCM-UA-1-0']
        for model in modelss:
            print(model)
            os.system("mkdir -p {}/AImelt/CMIP_6/{}/{}/{}".format(
                scratchdir, exper, key, model))
            dltab = df_experkey.query("source_id=='{}'".format(model))
            for ens in range(len(dltab.zstore.values)):
                zstore = dltab.zstore.values[ens]
                mapper = gcs.get_mapper(zstore)
                ds = xr.open_zarr(mapper, consolidated=True)
                ds.to_netcdf("/glade/scratch/raymonds/temp/cdoinput.nc")
                ds = Cdo().seasavg(
                    input="/glade/scratch/raymonds/temp/cdoinput.nc",
                    returnXDataset=True)
                ds = ds.sel(time=ds["time.season"] == "JJA")
                attrs = ds.attrs

                if key == 'zg':
                    plev = np.abs(ds.plev.values - 50000.0)
                    p = np.argmin(plev)
                    ds = ds[key]
                    ds = ds[:, p, :, :]
                    ds = ds.to_dataset()
                    ds.attrs = attrs
                    ds.encoding["unlimited_dims"] = "time"
                else:
                    ds = ds[key]
                    ds = ds.to_dataset()
Example #53
 def test_chain(self):
     cdo = Cdo(cdfMod=CDF_MOD)
     ofile = cdo.setname("veloc",
                         input=" -copy -random,r1x1",
                         options="-f nc")
     self.assertEqual(["veloc"], cdo.showname(input=ofile))
Example #54
gridded data.
"""
from netCDF4 import Dataset, num2date

from cdo import Cdo
from nco import Nco
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
import sys
import metpy.calc as mpcalc
from metpy.interpolate import cross_section

cdo = Cdo()
cdo.timmean(input='PFile.nc', output='AFile.nc')
nco = Nco()
nco.ncwa(input='AFile.nc', output='BFile.nc', options=['-a', 'bnds'])
nco.ncks(input='BFile.nc',
         output='CFile.nc',
         options=['-C', '-O', '-x', '-v', 'time_bnds'])

cdo.timmean(input='rhumFile.nc', output='dFile.nc')
nco.ncwa(input='dFile.nc', output='eFile.nc', options=['-a', 'bnds'])
nco.ncks(input='eFile.nc',
         output='fFile.nc',
         options=['-C', '-O', '-x', '-v', 'time_bnds'])
fFile = Dataset('fFile.nc', 'r')

rhum = fFile.variables['rhum'][:]
Example #55
    def test_forceOutput(self):
        cdo = Cdo(cdfMod=CDF_MOD)
        cdo.debug = DEBUG
        outs = []
        # tempfiles
        outs.append(cdo.stdatm("0,10,20"))
        outs.append(cdo.stdatm("0,10,20"))
        self.assertNotEqual(outs[0], outs[1])
        outs = []

        # dedicated output, force = true (= default setting)
        ofile = 'test_force'
        outs.append(cdo.stdatm("0,10,20", output=ofile))
        mtime0 = os.stat(ofile).st_mtime
        # to make it compatible with systems providing no nanosecond timestamps
        import time
        time.sleep(1)
        outs.append(cdo.stdatm("0,10,20", output=ofile))
        mtime1 = os.stat(ofile).st_mtime
        self.assertNotEqual(mtime0, mtime1)
        self.assertEqual(outs[0], outs[1])
        os.remove(ofile)
        outs = []

        # dedicated output, force = false
        ofile = 'test_force_false'
        outs.append(cdo.stdatm("0,10,20", output=ofile, force=False))
        mtime0 = os.stat(outs[0]).st_mtime
        outs.append(cdo.stdatm("0,10,20", output=ofile, force=False))
        mtime1 = os.stat(outs[1]).st_mtime
        self.assertEqual(mtime0, mtime1)
        self.assertEqual(outs[0], outs[1])
        os.remove(ofile)
        outs = []

        # dedicated output, global force setting
        ofile = 'test_force_global'
        cdo.forceOutput = False
        outs.append(cdo.stdatm("0,10,20", output=ofile))
        mtime0 = os.stat(outs[0]).st_mtime
        outs.append(cdo.stdatm("0,10,20", output=ofile))
        mtime1 = os.stat(outs[1]).st_mtime
        self.assertEqual(mtime0, mtime1)
        self.assertEqual(outs[0], outs[1])
        os.remove(ofile)
        outs = []
Example #56
def get_generic_landseamask(shift_lon,
                            mask_antarctica=True,
                            area='land',
                            interpolation_method='remapnn',
                            target_grid='t63grid',
                            force=False):
    """
    get generic land/sea mask. The routine uses the CDO command 'topo'
    to generate a 0.5 degree land/sea mask and remaps this
    using nearest neighbor
    to the target grid

    NOTE: using inconsistent land/sea masks between datasets can
    result in considerable biases. Note also that
    the application of l/s mask is dependent on the spatial resolution

    This routine implements a VERY simple approach, but assuming
    that all areas >0 m height are land and the rest is ocean.

    Parameters
    ----------
    shift_lon : bool
        specifies if longitudes shall be shifted
    interpolation_method : str
        specifies the interpolation method
        that shall be used for remapping the 0.5degree data
        to the target grid. This can be any of ['remapnn','remapcon',
        'remapbil']
    target_grid : str
        specifies target grid to interpolate to as
        similar to CDO remap functions. This can be either a string or
        a filename which includes valid geometry information
    force : bool
        force calculation (removes previous file) = slower

    area : str
        ['land','ocean']. When 'land', then the mask returned
        is True on land pixels, for ocean it is vice versa.
        in any other case, you get a valid field everywhere
        (globally)

    mask_antarctica : bool
        mask antarctica; if True, then the mask is
        FALSE over Antarctice (<60S)

    Returns
    -------
    returns a Data object
    """

    print('WARNING: Automatic generation of land/sea mask. \
            Ensure that this is what you want!')

    cdo = Cdo()

    #/// construct output filename.
    #If a filename was given for the grid, replace path separators ///
    target_grid1 = target_grid.replace(os.sep, '_')
    outputfile = get_temporary_directory() + 'land_sea_fractions_' \
        + interpolation_method + '_' + target_grid1 + '.nc'

    print('outfile: ', outputfile)
    print('cmd: ', '-' + interpolation_method + ',' + target_grid + ' -topo')

    #/// interpolate data to grid using CDO ///
    cdo.monmean(options='-f nc',
                output=outputfile,
                input='-' + interpolation_method + ',' + target_grid +
                      ' -topo',
                force=force)

    #/// generate L/S mask from topography (land = height > 0.) ///
    ls_mask = Data(outputfile,
                   'topo',
                   read=True,
                   label='generic land-sea mask',
                   lat_name='lat',
                   lon_name='lon',
                   shift_lon=shift_lon)
    print('Land/sea mask can be found on file: %s' % outputfile)

    if area == 'land':
        msk = ls_mask.data > 0.  # gives land
    elif area == 'ocean':
        msk = ls_mask.data <= 0.
    else:
        msk = np.ones(ls_mask.data.shape).astype('bool')
    ls_mask.data[~msk] = 0.
    ls_mask.data[msk] = 1.
    ls_mask.data = ls_mask.data.astype('bool')

    #/// mask Antarctica if desired ///
    if mask_antarctica:
        ls_mask.data[ls_mask.lat < -60.] = False

    # ensure that also the mask attribute is set properly
    ls_mask._apply_mask(~msk)

    return ls_mask
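A short usage sketch following the docstring above:

ls_mask = get_generic_landseamask(False, area='land',
                                  interpolation_method='remapnn',
                                  target_grid='t63grid')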
Example #57
 def test_cdf_mod(self):
     cdo = Cdo(cdfMod=CDF_MOD)
     cdo.setReturnArray()
     print('cdo.cdfMod:' + cdo.cdfMod)
     self.assertEqual(cdo.cdfMod, CDF_MOD)
Example #58
from utils import get_variable, get_values, get_time
import config
import os
from os.path import join, basename
from shutil import copyfile

import logging
LOGGER = logging.getLogger("PYWPS")

from cdo import Cdo
cdo_version = Cdo().version()
cdo = Cdo(env=os.environ)

import pandas


def select_run_tokeep(f_xhist, f_xrcp, f_yhist, f_yrcp):
    run_xrcp = [basename(f).split("_")[4] for f in f_xrcp]
    run_xhist = [basename(f).split("_")[4] for f in f_xhist]
    run_yrcp = [basename(f).split("_")[4] for f in f_yrcp]
    run_yhist = [basename(f).split("_")[4] for f in f_yhist]
    rset_xrcp = set(run_xrcp)
    rset_xhist = set(run_xhist)
    rset_yrcp = set(run_yrcp)
    rset_yhist = set(run_yhist)
    rset_all = set(run_xhist + run_xrcp + run_yhist + run_yrcp)
    rset_others = [rset_xhist, rset_yrcp, rset_yhist]
    run_tokeep = rset_xrcp.intersection(*rset_others)
    return run_tokeep

Example #59
 def test_output_set_to_none(self):
     cdo = Cdo(cdfMod=CDF_MOD)
     self.assertEqual(str, type(cdo.topo(output=None)))
     self.assertEqual(
         "GRIB",
         cdo.sinfov(input="-topo", output=None)[0].split(' ')[-1])
Example #60
 def test_cdo_general(self):
     # test if cdos work in general
     cdo = Cdo()