Esempio n. 1
0
    def __init__(self, rf, rows=None, cols=None):
        """
        Read the file header and time step info, then set up dimensions
        and the AIRTEMP/SURFTEMP variable interface.

        rf - path (or file) handed to OpenRecordFile
        rows, cols - optional grid shape; when omitted they are derived
                     from the file's cell count
        Raises ValueError when rows*cols does not match the cell count.
        """
        self.rffile = OpenRecordFile(rf)
        self.id_size = struct.calcsize(self.id_fmt)
        self.__readheader()
        self.__gettimestep()
        if rows is None and cols is None:
            rows = self.cell_count
            cols = 1
        elif rows is None:
            # floor division keeps the dimension size an int under
            # Python 3 (true division would produce a float)
            rows = self.cell_count // cols
        elif cols is None:
            cols = self.cell_count // rows
        elif cols * rows != self.cell_count:
            raise ValueError(
                ("The product of cols (%d) and rows (%d) " +
                 "must equal cells (%d)") % (cols, rows, self.cell_count))

        self.createDimension('TSTEP', self.time_step_count)
        self.createDimension('COL', cols)
        self.createDimension('ROW', rows)
        self.createDimension('LAY', self.nlayers)
        self.createDimension('SURF', 1)
        self.variables = PseudoNetCDFVariables(self.__var_get,
                                               ['AIRTEMP', 'SURFTEMP'])
Esempio n. 2
0
    def __init__(self, rf, chemparam=None):
        """
        Read the file header and time step metadata, then build the
        dimension and variable interface.

        see __readheader and __gettimestep() for more info
        """
        self.rffile = OpenRecordFile(rf)
        # Aerosol names are only available when a chemparam file is given.
        if chemparam is not None:
            self._aerosol_names = get_chemparam_names(chemparam)
        else:
            self._aerosol_names = None

        self.padded_time_hdr_size = struct.calcsize(self.time_hdr_fmt + "ii")
        self.__readheader()
        self.__gettimestep()
        self.dimensions = {}
        for dimname, dimsize in (('LAY', self.nlayers),
                                 ('COL', self.nx),
                                 ('ROW', self.ny),
                                 ('TSTEP', self.time_step_count),
                                 ('DATE-TIME', 2)):
            self.createDimension(dimname, dimsize)

        spckeys = [sn.strip() for sn in self.spcnames]
        self.variables = PseudoNetCDFVariables(self.__var_get, spckeys)
Esempio n. 3
0
    def __init__(self, rf, rows, cols):
        """
        Initialization included reading the header and learning
        about the format.

        rows, cols - grid shape; either may be None, in which case it is
                     derived from the file's cell count
        Raises ValueError when rows*cols does not match the cell count.

        see __readheader and __gettimestep() for more info
        """
        self.rffile = OpenRecordFile(rf)

        self.id_size = struct.calcsize(self.id_fmt)
        self.__readheader()
        self.__gettimestep()
        # PEP 8: compare to None with `is`, not `==`
        if rows is None and cols is None:
            rows = self.cell_count
            cols = 1
        elif rows is None:
            # floor division keeps dimension sizes integral under Python 3
            rows = self.cell_count // cols
        elif cols is None:
            cols = self.cell_count // rows
        elif cols * rows != self.cell_count:
            raise ValueError(
                "The product of cols (%d) and rows (%d) must equal cells (%d)"
                % (cols, rows, self.cell_count))

        self.createDimension('TSTEP', self.time_step_count)
        self.createDimension('ROW', rows)
        self.createDimension('COL', cols)
        self.createDimension('LAY', self.nlayers)
        self.variables = PseudoNetCDFVariables(self.__var_get,
                                               ['HGHT', 'PRES'])
Esempio n. 4
0
    def __init__(self, rf, multi=False):
        """
        Initialization included reading the header and learning
        about the format.

        see __readheader and __gettimestep() for more info
        """
        self.__rffile = OpenRecordFile(rf)
        self.__readheader()
        # One big-endian record per cell/time: SPAD/EPAD (presumably the
        # Fortran record-length pads), date/time, grid indices, then one
        # float per reaction.
        irr_record_type = dtype(
            dict(names=(
                ['SPAD', 'DATE', 'TIME', 'PAGRID', 'NEST', 'I', 'J', 'K'] +
                ['RXN_%02d' % i for i in range(1, self.nrxns + 1)] + ['EPAD']),
                 formats=(['>i', '>i', '>f', '>i', '>i', '>i', '>i', '>i'] +
                          ['>f' for i in range(1, self.nrxns + 1)] + ['>i'])))

        varkeys = [i for i in irr_record_type.names[8:-1]] + ['TFLAG']
        self.groups = defaultdict(PseudoNetCDFFile)
        padatatype = []
        pavarkeys = []
        self.createDimension('TSTEP', self.time_step_count)
        self.createDimension('VAR', len(varkeys) - 1)
        self.createDimension('DATE-TIME', 2)
        for di, domain in enumerate(self._padomains):
            dk = 'PA%02d' % (di + 1)
            prefix = dk + '_'
            pavarkeys.extend([prefix + k for k in varkeys])
            grp = self.groups[dk]
            for propk, propv in domain.items():
                setattr(grp, propk, propv)
            grp.createDimension('TSTEP', self.time_step_count)
            grp.createDimension('VAR', len(varkeys) - 1)
            grp.createDimension('DATE-TIME', 2)
            grp.createDimension('COL', domain['iend'] - domain['istart'] + 1)
            grp.createDimension('ROW', domain['jend'] - domain['jstart'] + 1)
            grp.createDimension('LAY', domain['tlay'] - domain['blay'] + 1)
            padatatype.append(
                (dk, irr_record_type, (len(grp.dimensions['ROW']),
                                       len(grp.dimensions['COL']),
                                       len(grp.dimensions['LAY']))))
            if len(self._padomains) == 1:
                self.createDimension('COL',
                                     domain['iend'] - domain['istart'] + 1)
                self.createDimension('ROW',
                                     domain['jend'] - domain['jstart'] + 1)
                self.createDimension('LAY',
                                     domain['tlay'] - domain['blay'] + 1)
                for propk, propv in domain.items():
                    setattr(grp, propk, propv)

            # Bind dk at definition time with a default argument.  The old
            # exec()-based code generation assigned into locals(), which
            # does not create a usable local binding in Python 3, so the
            # reference to varget below raised NameError.
            def varget(k, _dk=dk):
                return self._irr__variables(_dk, k)
            if len(self._padomains) == 1:
                self.variables = PseudoNetCDFVariables(varget, varkeys)
            else:
                grp.variables = PseudoNetCDFVariables(varget, varkeys)
        self.__memmaps = memmap(self.__rffile.infile.name, dtype(padatatype),
                                'r', self.data_start_byte)
Esempio n. 5
0
def _isMine(path):
    """Return True when path's first three record sizes are 80, 16, 28."""
    testf = OpenRecordFile(path)
    for expected_size in (80, 16):
        if testf.record_size != expected_size:
            return False
        testf.next()
    return testf.record_size == 28
Esempio n. 6
0
    def __init__(self, rf, rows, cols, mode='r'):
        """
        Open a CAMx landuse file, detect its style from the first record,
        and build the numpy dtypes used to read it.

        rf - path to the landuse file
        rows, cols - grid shape of each 2-D record
        mode - file access mode, stored for later use
        """
        self.__mode = mode
        self._rffile = OpenRecordFile(rf)
        # seek to the end to learn the total file length, then rewind
        self._rffile.infile.seek(0, 2)
        self.__rf = rf
        rflen = self._rffile.infile.tell()
        self._rffile._newrecord(0)

        self.createDimension('ROW', rows)
        self.createDimension('COL', cols)
        # New-style files start with an 8-character category tag.
        # NOTE(review): read('8s') may return bytes under Python 3, in
        # which case neither str comparison below can match and the
        # old-style branch is always taken — confirm against OpenRecordFile.
        first_line, = self._rffile.read('8s')
        if first_line == 'LUCAT11 ':
            self.createDimension('LANDUSE', 11)
            self._newstyle = True
        elif first_line == 'LUCAT26 ':
            self.createDimension('LANDUSE', 26)
            self._newstyle = True
        else:
            self.createDimension('LANDUSE', 11)
            self._newstyle = False
        nland = len(self.dimensions['LANDUSE'])
        nrows = len(self.dimensions['ROW'])
        ncols = len(self.dimensions['COL'])
        # NOTE(review): '8>S' is an unusual numpy format spelling ('>S8'
        # is the conventional 8-byte string form) — confirm it parses as
        # intended.
        if self._newstyle:
            # new style: each record is keyed (SPAD/KEY/EPAD) before the
            # padded data payload
            self.__fland_dtype = dtype(
                dict(names=['SPAD1', 'KEY', 'EPAD1', 'SPAD2', 'DATA', 'EPAD2'],
                     formats=[
                         '>i', '8>S', '>i', '>i',
                         '(%d, %d, %d)>f' % (nland, nrows, ncols), '>i'
                     ]))
            self.__other_dtype = dtype(
                dict(names=['SPAD1', 'KEY', 'EPAD1', 'SPAD2', 'DATA', 'EPAD2'],
                     formats=[
                         '>i', '8>S', '>i', '>i',
                         '(%d, %d)>f' % (nrows, ncols), '>i'
                     ]))
        else:
            # old style: unkeyed padded payloads only
            self.__fland_dtype = dtype(
                dict(names=['SPAD2', 'DATA', 'EPAD2'],
                     formats=[
                         '>i',
                         '(%d, %d, %d)>f' % (nland, nrows, ncols), '>i'
                     ]))
            self.__other_dtype = dtype(
                dict(names=['SPAD2', 'DATA', 'EPAD2'],
                     formats=['>i', '(%d, %d)>f' % (nrows, ncols), '>i']))

        self.__addvars()
        if self._newstyle:
            self.__keys = [first_line]

        else:
            self.__keys = ['LUCAT11']
Esempio n. 7
0
    def __init__(self, rf, rows=None, cols=None):
        """
        Initialization included reading the header and learning
        about the format.

        rows, cols - optional grid shape; derived from the file's cell
                     count when omitted
        Raises ValueError when rows*cols does not match the cell count.

        see __readheader and __gettimestep() for more info
        """
        self.rffile = OpenRecordFile(rf)

        self.id_size = struct.calcsize(self.id_fmt)
        self.__readheader()
        self.__gettimestep()
        # PEP 8: compare to None with `is`, not `==`
        if rows is None and cols is None:
            rows = self.cell_count
            cols = 1
        elif rows is None:
            # floor division keeps dimension sizes integral under Python 3
            rows = self.cell_count // cols
        elif cols is None:
            cols = self.cell_count // rows
        elif cols * rows != self.cell_count:
            raise ValueError(
                "The product of cols (%d) and rows (%d) must equal cells (%d)"
                % (cols, rows, self.cell_count))

        self.createDimension('TSTEP', self.time_step_count)
        self.createDimension('COL', cols)
        self.createDimension('ROW', rows)
        self.createDimension('LAY', self.nlayers)

        self.variables = PseudoNetCDFVariables(self.__var_get,
                                               [self.var_name])
Esempio n. 8
0
 def __init__(self, rf):
     """
     Initialization included reading the header and learning
     about the format.

     see __readheader and __gettimestep() for more info
     """
     self.rffile = OpenRecordFile(rf)
     self.padded_time_hdr_size = struct.calcsize(self.time_hdr_fmt + "ii")
     self.__readheader()
     self.__gettimestep()
     self.__gettimeprops()
     self.createDimension('TSTEP', self.time_step_count)
     self.createDimension('STK', self.nstk)
     # fixed per-stack properties first, then the file's species
     stack_props = ['XSTK', 'YSTK', 'HSTK', 'DSTK', 'TSTK', 'VSTK',
                    'KCELL', 'FLOW', 'PLMHT']
     species = [spc.strip() for spc in self.spcnames]
     self.variables = PseudoNetCDFVariables(self.__var_get,
                                            stack_props + species)
Esempio n. 9
0
    def __init__(self, rf):
        """
        Initialization included reading the header and learning
        about the format.

        see __readheader and __gettimestep() for more info
        """

        self.rffile = OpenRecordFile(rf)

        self.padded_time_hdr_size = struct.calcsize(self.time_hdr_fmt + "ii")
        self.__readheader()
        self.__gettimestep()
        self.dimensions = {}
        self.createDimension('LAY', self.nlayers)
        self.createDimension('COL', self.nx)
        self.createDimension('ROW', self.ny)
        self.createDimension('TSTEP', self.time_step_count)
        self.createDimension('DATE-TIME', 2)

        # A list comprehension rather than map(): map() is a one-shot
        # lazy iterator under Python 3, and the sibling reader already
        # passes a list here.
        self.variables = PseudoNetCDFVariables(
            self.__var_get, [sn.strip() for sn in self.spcnames])
Esempio n. 10
0
 def __init__(self, rf, multi=False):
     """
     Initialization included reading the header and learning
     about the format.

     see __readheader and __gettimestep() for more info
     """
     self.__rffile = OpenRecordFile(rf)
     self.__readheader()
     # One big-endian record per cell/time: pads, date/time, grid
     # indices, then one float per reaction.
     irr_record_type = dtype(
         dict(names=(
             ['SPAD', 'DATE', 'TIME', 'PAGRID', 'NEST', 'I', 'J', 'K'] +
             ['RXN_%02d' % i for i in range(1, self.nrxns + 1)] +
             ['EPAD']),
              formats=(['>i', '>i', '>f', '>i', '>i', '>i', '>i', '>i'] +
                       ['>f' for i in range(1, self.nrxns + 1)] +
                       ['>i'])))

     varkeys = [i for i in irr_record_type.names[8:-1]] + ['TFLAG']
     self.groups = defaultdict(PseudoNetCDFFile)
     padatatype = []
     pavarkeys = []
     self.createDimension('TSTEP', self.time_step_count)
     self.createDimension('VAR', len(varkeys) - 1)
     self.createDimension('DATE-TIME', 2)
     for di, domain in enumerate(self._padomains):
         dk = 'PA%02d' % (di + 1)
         prefix = dk + '_'
         pavarkeys.extend([prefix + k for k in varkeys])
         grp = self.groups[dk]
         for propk, propv in domain.items():
             setattr(grp, propk, propv)
         grp.createDimension('TSTEP', self.time_step_count)
         grp.createDimension('VAR', len(varkeys) - 1)
         grp.createDimension('DATE-TIME', 2)
         grp.createDimension('COL', domain['iend'] - domain['istart'] + 1)
         grp.createDimension('ROW', domain['jend'] - domain['jstart'] + 1)
         grp.createDimension('LAY', domain['tlay'] - domain['blay'] + 1)
         padatatype.append(
             (dk, irr_record_type, (len(grp.dimensions['ROW']),
                                    len(grp.dimensions['COL']),
                                    len(grp.dimensions['LAY']))))
         if len(self._padomains) == 1:
             self.createDimension('COL',
                                  domain['iend'] - domain['istart'] + 1)
             self.createDimension('ROW',
                                  domain['jend'] - domain['jstart'] + 1)
             self.createDimension('LAY',
                                  domain['tlay'] - domain['blay'] + 1)
             for propk, propv in domain.items():
                 setattr(grp, propk, propv)

         # Bind dk via a default argument instead of exec(); exec into
         # locals() does not create a usable local binding in Python 3,
         # so the reference to varget below raised NameError.
         def varget(k, _dk=dk):
             return self._irr__variables(_dk, k)
         if len(self._padomains) == 1:
             self.variables = PseudoNetCDFVariables(varget, varkeys)
         else:
             grp.variables = PseudoNetCDFVariables(varget, varkeys)
     self.__memmaps = memmap(self.__rffile.infile.name, dtype(padatatype),
                             'r', self.data_start_byte)
Esempio n. 11
0
def _isMine(path):
    """Return True when path's first three record sizes are 80, 16, 28."""
    testf = OpenRecordFile(path)
    if testf.record_size != 80:
        return False
    testf.next()
    if testf.record_size != 16:
        return False
    testf.next()
    return testf.record_size == 28
Esempio n. 12
0
 def __init__(self, rf):
     """
     Initialization included reading the header and learning
     about the format.

     see __readheader and __gettimestep() for more info
     """
     self.rffile = OpenRecordFile(rf)
     self.padded_time_hdr_size = struct.calcsize(self.time_hdr_fmt + "ii")
     self.__readheader()
     self.__gettimestep()
     self.__gettimeprops()
     self.createDimension('TSTEP', self.time_step_count)
     self.createDimension('STK', self.nstk)
     # fixed stack properties followed by the file's species names
     base_keys = ['XSTK', 'YSTK', 'HSTK', 'DSTK', 'TSTK', 'VSTK',
                  'KCELL', 'FLOW', 'PLMHT']
     spc_keys = [spc.strip() for spc in self.spcnames]
     self.variables = PseudoNetCDFVariables(self.__var_get,
                                            base_keys + spc_keys)
Esempio n. 13
0
    def __init__(self, rffile, rows, cols):
        """
        Derive a CAMx wind file's structure (layers, times) from its
        record sizes and set up IOAPI-like dimensions and variables.

        rffile - path to the wind file
        rows, cols - grid shape of each 2-D wind record
        """
        rf = OpenRecordFile(rffile)
        # 12-byte time headers carry an extra staggering flag
        self.__time_hdr_fmts = {12: "fii", 8: "fi"}[rf.record_size]
        self.__time_hdr_fmts_size = rf.record_size
        self.STIME, self.SDATE = rf.unpack("fi")

        rf.next()
        lays = 1
        record_size = rf.record_size
        while rf.record_size == record_size:
            lays += 1
            rf.next()
        # floor division: this is a word count, not a ratio (plain /
        # produced a float under Python 3)
        self.__dummy_length = (rf.record_size + 8) // 4
        lays //= 2
        record = rows * cols * 4 + 8
        total_size = self.__dummy_length
        times = 0
        while total_size < rf.length:
            times += 1
            total_size += record * 2 * lays + self.__time_hdr_fmts_size + 8
        times -= 1

        # placeholder instance (the original assigned the OrderedDict
        # class object itself); replaced by the real mapping below
        self.variables = OrderedDict()
        del rf
        self.createDimension('TSTEP', times)
        self.createDimension('DATE-TIME', 2)
        self.createDimension('LAY', lays)
        self.createDimension('ROW', rows)
        self.createDimension('COL', cols)
        self.createDimension('VAR', 2)

        self.NVARS = len(self.dimensions['VAR'])
        self.NLAYS = len(self.dimensions['LAY'])
        self.NROWS = len(self.dimensions['ROW'])
        self.NCOLS = len(self.dimensions['COL'])
        self.FTYPE = 1

        self.__memmap = memmap(rffile, '>f', 'r', offset=0)

        if self.__time_hdr_fmts_size == 12:
            # reinterpret the float word as an int to recover the flag
            self.LSTAGGER = self.__memmap[3].view('i')
        else:
            self.LSTAGGER = nan
Esempio n. 14
0
 def __init__(self, rf):
     """
     Initialization included reading the header and learning
     about the format.

     see __readheader and __gettimestep() for more info
     """
     self.rffile = OpenRecordFile(rf)

     self.padded_time_hdr_size = struct.calcsize(self.time_hdr_fmt + "ii")
     self.__readheader()
     self.__gettimestep()
     self.dimensions = {}
     self.createDimension('LAY', self.nlayers)
     self.createDimension('COL', self.nx)
     self.createDimension('ROW', self.ny)
     self.createDimension('TSTEP', self.time_step_count)
     self.createDimension('DATE-TIME', 2)

     # A list comprehension rather than map(): map() is a one-shot lazy
     # iterator under Python 3, and the sibling reader passes a list.
     self.variables = PseudoNetCDFVariables(
         self.__var_get, [sn.strip() for sn in self.spcnames])
Esempio n. 15
0
    def __init__(self, rf, rows=None, cols=None):
        """
        Read the header/time step info and set up dimensions and the
        AIRTEMP/SURFTEMP variables.

        rows, cols - optional grid shape; derived from the file's cell
                     count when omitted
        Raises ValueError when rows*cols does not match the cell count.
        """
        self.rffile = OpenRecordFile(rf)
        self.id_size = struct.calcsize(self.id_fmt)
        self.__readheader()
        self.__gettimestep()
        # PEP 8: compare to None with `is`, not `==`
        if rows is None and cols is None:
            rows = self.cell_count
            cols = 1
        elif rows is None:
            # floor division keeps dimension sizes integral under Python 3
            rows = self.cell_count // cols
        elif cols is None:
            cols = self.cell_count // rows
        elif cols * rows != self.cell_count:
            raise ValueError(
                "The product of cols (%d) and rows (%d) must equal cells (%d)"
                % (cols, rows, self.cell_count))

        self.createDimension('TSTEP', self.time_step_count)
        self.createDimension('COL', cols)
        self.createDimension('ROW', rows)
        self.createDimension('LAY', self.nlayers)
        self.createDimension('SURF', 1)
        self.variables = PseudoNetCDFVariables(self.__var_get,
                                               ['AIRTEMP', 'SURFTEMP'])
Esempio n. 16
0
    def __init__(self, rffile, rows, cols):
        """
        Derive a CAMx wind file's structure (layers, times) from its
        record sizes and set up IOAPI-like dimensions and variables.

        rffile - path to the wind file
        rows, cols - grid shape of each 2-D wind record
        """
        rf = OpenRecordFile(rffile)
        # 12-byte time headers carry an extra staggering flag
        self.__time_hdr_fmts = {12: "fii", 8: "fi"}[rf.record_size]
        self.__time_hdr_fmts_size = rf.record_size
        self.STIME, self.SDATE = rf.unpack("fi")

        rf.next()
        lays = 1
        record_size = rf.record_size
        while rf.record_size == record_size:
            lays += 1
            rf.next()
        # floor division: this is a word count, not a ratio (plain /
        # produced a float under Python 3)
        self.__dummy_length = (rf.record_size + 8) // 4
        lays //= 2
        record = rows * cols * 4 + 8
        total_size = self.__dummy_length
        times = 0
        while total_size < rf.length:
            times += 1
            total_size += record * 2 * lays + self.__time_hdr_fmts_size + 8
        times -= 1

        # placeholder instance (the original assigned the OrderedDict
        # class object itself); replaced by the real mapping below
        self.variables = OrderedDict()
        del rf
        self.createDimension('TSTEP', times)
        self.createDimension('DATE-TIME', 2)
        self.createDimension('LAY', lays)
        self.createDimension('ROW', rows)
        self.createDimension('COL', cols)
        self.createDimension('VAR', 2)

        self.NVARS = len(self.dimensions['VAR'])
        self.NLAYS = len(self.dimensions['LAY'])
        self.NROWS = len(self.dimensions['ROW'])
        self.NCOLS = len(self.dimensions['COL'])
        self.FTYPE = 1

        self.__memmap = memmap(rffile, '>f', 'r', offset=0)

        if self.__time_hdr_fmts_size == 12:
            # reinterpret the float word as an int to recover the flag
            self.LSTAGGER = self.__memmap[3].view('i')
        else:
            self.LSTAGGER = nan

        self.variables = PseudoNetCDFVariables(self.__variables,
                                               ['TFLAG', 'U', 'V'])
Esempio n. 17
0
 def __init__(self, rf, rows, cols, mode='r'):
     """
     Open a CAMx landuse file, detect its style from the first record,
     and build the numpy dtypes used to read it.
     """
     self.__mode = mode
     self._rffile = OpenRecordFile(rf)
     # seek to the end to learn the file length, then rewind
     self._rffile.infile.seek(0, 2)
     self.__rf = rf
     rflen = self._rffile.infile.tell()
     self._rffile._newrecord(0)

     self.createDimension('ROW', rows)
     self.createDimension('COL', cols)
     first_line, = self._rffile.read('8s')
     if first_line == 'LUCAT11 ':
         nland, self._newstyle = 11, True
     elif first_line == 'LUCAT26 ':
         nland, self._newstyle = 26, True
     else:
         nland, self._newstyle = 11, False
     self.createDimension('LANDUSE', nland)
     nland = len(self.dimensions['LANDUSE'])
     nrows = len(self.dimensions['ROW'])
     ncols = len(self.dimensions['COL'])
     shape3d = '(%d, %d, %d)>f' % (nland, nrows, ncols)
     shape2d = '(%d, %d)>f' % (nrows, ncols)
     if self._newstyle:
         keyed_names = ['SPAD1', 'KEY', 'EPAD1', 'SPAD2', 'DATA', 'EPAD2']
         self.__fland_dtype = dtype(dict(
             names=keyed_names,
             formats=['>i', '8>S', '>i', '>i', shape3d, '>i']))
         self.__other_dtype = dtype(dict(
             names=keyed_names,
             formats=['>i', '8>S', '>i', '>i', shape2d, '>i']))
     else:
         bare_names = ['SPAD2', 'DATA', 'EPAD2']
         self.__fland_dtype = dtype(dict(
             names=bare_names, formats=['>i', shape3d, '>i']))
         self.__other_dtype = dtype(dict(
             names=bare_names, formats=['>i', shape2d, '>i']))

     self.__addvars()
     self.__keys = [first_line] if self._newstyle else ['LUCAT11']
Esempio n. 18
0
class irr(PseudoNetCDFFile):
    """
    irr provides a PseudoNetCDF interface for CAMx
    irr files.  Where possible, the inteface follows
    IOAPI conventions (see www.baronams.com).

    ex:
        >>> irr_path = 'camx_irr.bin'
        >>> rows,cols = 65,83
        >>> irrfile = irr(irr_path,rows,cols)
        >>> irrfile.variables.keys()
        ['TFLAG', 'RXN_01', 'RXN_02', 'RXN_03', ...]
        >>> v = irrfile.variables['RXN_01']
        >>> tflag = irrfile.variables['TFLAG']
        >>> tflag.dimensions
        ('TSTEP', 'VAR', 'DATE-TIME')
        >>> tflag[0,0,:]
        array([2005185,       0])
        >>> tflag[-1,0,:]
        array([2005185,  240000])
        >>> v.dimensions
        ('TSTEP', 'LAY', 'ROW', 'COL')
        >>> v.shape
        (25, 28, 65, 83)
        >>> irrfile.dimensions
        {'TSTEP': 25, 'LAY': 28, 'ROW': 65, 'COL': 83}
    """

    # struct formats: record id section and per-value data payload
    id_fmt = "if5i"
    data_fmt = "f"

    @classmethod
    def isMine(self, path):
        # Delegates to the module-level record-size probe.
        return _isMine(path)

    def __init__(self, rf, multi=False):
        """
        Initialization included reading the header and learning
        about the format.

        see __readheader and __gettimestep() for more info
        """
        self.__rffile = OpenRecordFile(rf)
        self.__readheader()
        # One big-endian record per cell/time: SPAD/EPAD (presumably the
        # Fortran record-length pads), date/time, grid indices, then one
        # float per reaction.
        irr_record_type = dtype(
            dict(names=(
                ['SPAD', 'DATE', 'TIME', 'PAGRID', 'NEST', 'I', 'J', 'K'] +
                ['RXN_%02d' % i for i in range(1, self.nrxns + 1)] + ['EPAD']),
                 formats=(['>i', '>i', '>f', '>i', '>i', '>i', '>i', '>i'] +
                          ['>f' for i in range(1, self.nrxns + 1)] + ['>i'])))

        # all reaction keys (names[8:-1] skips the pads/ids) plus TFLAG
        varkeys = [i for i in irr_record_type.names[8:-1]] + ['TFLAG']
        self.groups = defaultdict(PseudoNetCDFFile)
        padatatype = []
        pavarkeys = []
        self.createDimension('TSTEP', self.time_step_count)
        self.createDimension('VAR', len(varkeys) - 1)
        self.createDimension('DATE-TIME', 2)
        # one group (PA01, PA02, ...) per process-analysis domain
        for di, domain in enumerate(self._padomains):
            dk = 'PA%02d' % (di + 1)
            prefix = dk + '_'
            pavarkeys.extend([prefix + k for k in varkeys])
            grp = self.groups[dk]
            for propk, propv in domain.items():
                setattr(grp, propk, propv)
            grp.createDimension('TSTEP', self.time_step_count)
            grp.createDimension('VAR', len(varkeys) - 1)
            grp.createDimension('DATE-TIME', 2)
            grp.createDimension('COL', domain['iend'] - domain['istart'] + 1)
            grp.createDimension('ROW', domain['jend'] - domain['jstart'] + 1)
            grp.createDimension('LAY', domain['tlay'] - domain['blay'] + 1)
            padatatype.append(
                (dk, irr_record_type, (len(grp.dimensions['ROW']),
                                       len(grp.dimensions['COL']),
                                       len(grp.dimensions['LAY']))))
            # single-domain files also expose dimensions at file level
            if len(self._padomains) == 1:
                pncols = domain['iend'] - domain['istart'] + 1
                pnrows = domain['jend'] - domain['jstart'] + 1
                pnlays = domain['tlay'] - domain['blay'] + 1
                self.createDimension('COL', pncols)
                self.createDimension('ROW', pnrows)
                self.createDimension('LAY', pnlays)
                for propk, propv in domain.items():
                    setattr(grp, propk, propv)

            # bind this domain's key so lazy lookups read the right group
            varget = partial(self.__variables, dk)
            if len(self._padomains) == 1:
                self.variables = PseudoNetCDFVariables(varget, varkeys)
            else:
                grp.variables = PseudoNetCDFVariables(varget, varkeys)
        self.__memmaps = memmap(self.__rffile.infile.name, dtype(padatatype),
                                'r', self.data_start_byte)

    def __del__(self):
        # Drop the memmap reference; ignore the case where it was never set.
        try:
            del self.__memmaps
        except Exception:
            pass

    def __decorator(self, name, pncfv):
        """Attach units/var_desc/long_name metadata to pncfv; return it."""
        def decor(k):
            return dict(units='ppm/hr',
                        var_desc=k.ljust(16),
                        long_name=k.ljust(16))

        for k, v in decor(name).items():
            setattr(pncfv, k, v)
        return pncfv

    def __variables(self, pk, rxn):
        """
        Build variable rxn ('TFLAG' or a reaction key) for process
        analysis group pk from the memmapped records.
        """
        pncvar = PseudoNetCDFVariable
        if rxn == 'TFLAG':
            # DATE/TIME are taken from cell (0, 0, 0) of each time step
            return ConvertCAMxTime(self.__memmaps[pk][:, 0, 0, 0]['DATE'],
                                   self.__memmaps[pk][:, 0, 0, 0]['TIME'],
                                   len(self.groups[pk].dimensions['VAR']))
        # stored (TSTEP, ROW, COL, LAY) -> exposed (TSTEP, LAY, ROW, COL)
        tmpvals = self.__memmaps[pk][rxn].swapaxes(1, 3).swapaxes(2, 3)
        return self.__decorator(
            rxn,
            pncvar(self,
                   rxn,
                   'f', ('TSTEP', 'LAY', 'ROW', 'COL'),
                   values=tmpvals))

    def __readheader(self):
        """
        __readheader reads the header section of the ipr file
        it initializes each header field (see CAMx Users Manual for a list)
        as properties of the ipr class
        """
        assert (self.__rffile.record_size == 80)
        self.runmessage = self.__rffile.read("80s")
        self.start_date, self.start_time, self.end_date, self.end_time = \
            self.__rffile.read("ifif")
        self.time_step = 100.
        self.time_step_count = len([i for i in self.timerange()])
        self._grids = []
        # one descriptor record per grid
        for grid in range(self.__rffile.read("i")[-1]):
            self._grids.append(
                dict(
                    zip([
                        'orgx', 'orgy', 'ncol', 'nrow', 'xsize', 'ysize',
                        'iutm'
                    ], self.__rffile.read("iiiiiii"))))

        self._padomains = []
        # one descriptor record per process-analysis domain
        for padomain in range(self.__rffile.read("i")[-1]):
            self._padomains.append(
                dict(
                    zip([
                        'grid', 'istart', 'iend', 'jstart', 'jend', 'blay',
                        'tlay'
                    ], self.__rffile.read("iiiiiii"))))
        self.nrxns = self.__rffile.read('i')[-1]

        self.data_start_byte = self.__rffile.record_start
        self.record_fmt = self.id_fmt + str(self.nrxns) + self.data_fmt
        self.record_size = self.__rffile.record_size
        self.padded_size = self.record_size + 8
        domain = self._padomains[0]
        self.records_per_time = (domain['iend'] - domain['istart'] + 1) *\
                                (domain['jend'] - domain['jstart'] + 1) *\
                                (domain['tlay'] - domain['blay'] + 1)
        self.time_data_block = self.padded_size * self.records_per_time
        # NOTE(review): time_step was already set to 100. above; this
        # reassignment looks redundant — confirm before removing.
        self.time_step = 100.

    def timerange(self):
        """Iterate (date, time) pairs from start+step through end+step."""
        return timerange((self.start_date, self.start_time + self.time_step),
                         timeadd((self.end_date, self.end_time),
                                 (0, self.time_step)), self.time_step)
Esempio n. 19
0
class landuse(PseudoNetCDFFile):
    """
    landuse provides a PseudoNetCDF interface for CAMx
    landuse files.  Where possible, the inteface follows
    IOAPI conventions (see www.baronams.com).

    ex:
        >>> landuse_path =  'camx_landuse.bin'
        >>> rows, cols =  65, 83
        >>> landusefile =  landuse(landuse_path, rows, cols)
        >>> landusefile.variables.keys()
        ['TFLAG', 'FLAND', 'TOPO']
        >>> tflag =  landusefile.variables['TFLAG']
        >>> tflag.dimensions
        ('TSTEP', 'VAR', 'DATE-TIME')
        >>> tflag[0, 0, :]
        array([2005185,      0])
        >>> tflag[-1, 0, :]
        array([2005185, 240000])
        >>> v =  landusefile.variables['FLAND']
        >>> v.dimensions
        ('LANDUSE', 'ROW', 'COL')
        >>> v.shape
        (25, 28, 65, 83)
        >>> landusefile.dimensions
        {'LANDUSE': 11, 'ROW': 65, 'COL': 83, 'VAR': 2}
    """

    def __init__(self, rf, rows, cols, mode = 'r'):
        """
        Open a CAMx landuse file, detect its style from the first
        record, and build the numpy dtypes used by __addvars.

        rf - path to the landuse file
        rows, cols - grid shape of each 2-D record
        mode - memmap access mode used when reading the data
        """
        self.__mode = mode
        self._rffile = OpenRecordFile(rf)
        # seek to the end to learn the file length, then rewind
        self._rffile.infile.seek(0, 2)
        self.__rf = rf
        rflen = self._rffile.infile.tell()
        self._rffile._newrecord(0)

        self.createDimension('ROW', rows)
        self.createDimension('COL', cols)
        # New-style files begin with an 8-character category tag.
        # NOTE(review): read('8s') may return bytes under Python 3, in
        # which case neither str comparison below matches and the
        # old-style branch is always taken — confirm against OpenRecordFile.
        first_line, =  self._rffile.read('8s')
        if first_line ==  'LUCAT11 ':
            self.createDimension('LANDUSE', 11)
            self._newstyle =  True
        elif first_line ==  'LUCAT26 ':
            self.createDimension('LANDUSE', 26)
            self._newstyle =  True
        else:
            self.createDimension('LANDUSE', 11)
            self._newstyle =  False
        nland = len(self.dimensions['LANDUSE'])
        nrows = len(self.dimensions['ROW'])
        ncols = len(self.dimensions['COL'])
        # NOTE(review): '8>S' is an unusual numpy format spelling ('>S8'
        # is the conventional 8-byte string form) — confirm it parses as
        # intended.
        if self._newstyle:
            # new style: records are keyed (SPAD/KEY/EPAD) before the data
            self.__fland_dtype = dtype(dict(names = ['SPAD1', 'KEY', 'EPAD1', 'SPAD2', 'DATA', 'EPAD2'], formats = ['>i', '8>S', '>i', '>i', '(%d, %d, %d)>f' % (nland, nrows, ncols), '>i']))
            self.__other_dtype = dtype(dict(names = ['SPAD1', 'KEY', 'EPAD1', 'SPAD2', 'DATA', 'EPAD2'], formats = ['>i', '8>S', '>i', '>i', '(%d, %d)>f' % (nrows, ncols), '>i']))
        else:
            # old style: unkeyed padded payloads only
            self.__fland_dtype = dtype(dict(names = ['SPAD2', 'DATA', 'EPAD2'], formats = ['>i', '(%d, %d, %d)>f' % (nland, nrows, ncols), '>i']))
            self.__other_dtype = dtype(dict(names = ['SPAD2', 'DATA', 'EPAD2'], formats = ['>i', '(%d, %d)>f' % (nrows, ncols), '>i']))

        self.__addvars()
        if self._newstyle:
            self.__keys = [first_line]

        else:
            self.__keys = ['LUCAT11']

    def __addvars(self):
        """
        Infer which optional records the file holds from its total
        length, memmap it, and create one variable per record.
        """
        nrows =  len(self.dimensions['ROW'])
        ncols =  len(self.dimensions['COL'])
        nland =  len(self.dimensions['LANDUSE'])
        self._rffile.infile.seek(0, 2)
        rflen = self._rffile.infile.tell()
        fland_dtype = self.__fland_dtype
        other_dtype = self.__other_dtype
        # candidate totals: FLAND alone, plus one or two extra 2-D records
        nfland = fland_dtype.itemsize
        nfland1opt = nfland + other_dtype.itemsize
        nfland2opt = nfland + other_dtype.itemsize * 2
        if rflen == nfland:
            file_dtype = dtype(dict(names = ['FLAND'], formats = [fland_dtype]))
        elif rflen == nfland1opt:
            file_dtype = dtype(dict(names = ['FLAND', 'VAR1'], formats = [fland_dtype, other_dtype]))
        elif rflen == nfland2opt:
            file_dtype = dtype(dict(names = ['FLAND', 'LAI', 'TOPO'], formats = [fland_dtype, other_dtype, other_dtype]))
        else:
            raise IOError('File size is expected to be %d, %d, or %d; was %d' % (nfland, nfland1opt, nfland2opt, rflen))
        
        data = memmap(self.__rf, mode = self.__mode, dtype = file_dtype, offset = 0)
        if not self._newstyle:
            # old-style records carry no KEY field; names are fixed
            varkeys = ['FLAND', 'TOPO']
        else:
            varkeys = [data[k]['KEY'][0].strip() for k in file_dtype.names]
            # keys may be bytes when read from disk; normalize to str
            varkeys = [k.decode() if hasattr(k, 'decode') else k for k in varkeys]
        
        for varkey, dkey in zip(varkeys, file_dtype.names):
            # FLAND is 3-D (LANDUSE, ROW, COL); everything else is 2-D
            var = self.createVariable(varkey, 'f', {'FLAND': ('LANDUSE', 'ROW', 'COL')}.get(dkey, ('ROW', 'COL')))
            var[:] = data[dkey]['DATA']
            var.var_desc = varkey.ljust(16)
            var.units = {'FLAND': 'Fraction'}.get(dkey, '')
            var.long_name = varkey.ljust(16)
Esempio n. 20
0
class one3d(PseudoNetCDFFile):
    """
    one3d provides a PseudoNetCDF interface for CAMx
    one3d files.  Where possible, the interface follows
    IOAPI conventions (see www.baronams.com).

    ex:
        >>> one3d_path = 'camx_one3d.bin'
        >>> rows,cols = 65,83
        >>> one3dfile = one3d(one3d_path,rows,cols)
        >>> one3dfile.variables.keys()
        ['TFLAG', 'UNKNOWN']
        >>> tflag = one3dfile.variables['TFLAG']
        >>> tflag.dimensions
        ('TSTEP', 'VAR', 'DATE-TIME')
        >>> tflag[0,0,:]
        array([2005185,       0])
        >>> tflag[-1,0,:]
        array([2005185,  240000])
        >>> v = one3dfile.variables['UNKNOWN']
        >>> v.dimensions
        ('TSTEP', 'LAY', 'ROW', 'COL')
        >>> v.shape
        (25, 28, 65, 83)
        >>> one3dfile.dimensions
        {'TSTEP': 25, 'LAY': 28, 'ROW': 65, 'COL': 83}
    """

    # Fortran record id is (time, date); each data cell is one float.
    id_fmt = "fi"
    data_fmt = "f"
    var_name = "UNKNOWN"
    units = "UNKNOWN"

    def __init__(self, rf, rows=None, cols=None):
        """
        Initialization included reading the header and learning
        about the format.

        rf   - path to the CAMx one3d record file
        rows - number of grid rows; inferred when omitted
        cols - number of grid columns; inferred when omitted

        see __readheader and __gettimestep() for more info
        """

        self.rffile = OpenRecordFile(rf)

        self.id_size = struct.calcsize(self.id_fmt)
        self.__readheader()
        self.__gettimestep()
        # Derive the grid shape from the per-record cell count when rows
        # and/or cols are not given.  Floor division (//) keeps dimension
        # sizes as ints under Python 3 (true division yields floats).
        if rows is None and cols is None:
            rows = self.cell_count
            cols = 1
        elif rows is None:
            rows = self.cell_count // cols
        elif cols is None:
            cols = self.cell_count // rows
        else:
            if cols * rows != self.cell_count:
                raise ValueError(
                    ("The product of cols (%d) and rows (%d) " +
                     "must equal cells (%d)") % (cols, rows, self.cell_count))

        self.createDimension('TSTEP', self.time_step_count)
        self.createDimension('COL', cols)
        self.createDimension('ROW', rows)
        self.createDimension('LAY', self.nlayers)

        self.variables = PseudoNetCDFVariables(self.__var_get, [self.var_name])

    def __var_get(self, key):
        """Lazily construct the (single) data variable on first access."""
        props = dict(units=self.units,
                     var_desc=self.var_name.ljust(16),
                     long_name=self.var_name.ljust(16))
        values = self.getArray()

        var = self.createVariable(key, 'f', ('TSTEP', 'LAY', 'ROW', 'COL'))
        var[:] = values
        for k, v in props.items():
            setattr(var, k, v)
        return var

    def __readheader(self):
        """
        __readheader reads the header section of the vertical diffusivity file
        it initializes each header field (see CAMx Users Manual for a list)
        as properties of the ipr class
        """
        self.data_start_byte = 0
        self.start_time, self.start_date = self.rffile.read(self.id_fmt)
        self.record_size = self.rffile.record_size
        # +8 accounts for the leading/trailing Fortran record pads.
        self.padded_size = self.record_size + 8
        self.cell_count = (self.record_size - self.id_size) // struct.calcsize(
            self.data_fmt)
        self.record_fmt = self.id_fmt + self.data_fmt * (self.cell_count)

    def __gettimestep(self):
        """
        Header information provides start and end date, but does not
        indicate the increment between.  This routine reads the first
        and second date/time and initializes variables indicating the
        timestep length and the anticipated number.
        """
        self.rffile._newrecord(self.padded_size)
        d, t = self.start_date, self.start_time
        # Count layer records until the date/time changes.
        self.nlayers = 0
        while timediff((self.start_date, self.start_time), (d, t)) == 0:
            t, d = self.rffile.read(self.id_fmt)
            self.nlayers += 1
        self.time_step = timediff((self.start_date, self.start_time), (d, t))

        # Walk forward until seeking past the end fails, to find the
        # last available time.
        while True:
            try:
                self.seek(d, t, 1, False)
                d, t = timeadd((d, t), (0, self.time_step))
            except Exception:
                break
        self.end_date, self.end_time = timeadd((d, t), (0, -self.time_step))
        self.time_step_count = int(
            timediff((self.start_date, self.start_time),
                     (self.end_date, self.end_time)) / self.time_step) + 1

    def __timerecords(self, dt):
        """
        routine returns the number of records to increment from the
        data start byte to find the first time
        """
        d, t = dt
        nsteps = int(
            timediff((self.start_date, self.start_time),
                     (d, t)) / self.time_step)
        nk = self.__layerrecords(self.nlayers + 1)
        return nsteps * nk

    def __layerrecords(self, k):
        """
        routine returns the number of records to increment from the
        data start byte to find the first klayer
        """
        return k - 1

    def __recordposition(self, date, time, k):
        """
        routine uses timerecords and layerrecords multiplied
        by the fortran padded size to return the byte position
        of the specified record

        date - integer
        time - float
        k - integer
        """
        ntime = self.__timerecords((date, time))
        nk = self.__layerrecords(k)
        return (nk + ntime) * self.padded_size + self.data_start_byte

    def seek(self, date=None, time=None, k=1, chkvar=True):
        """
        Move file cursor to beginning of specified record
        see __recordposition for a definition of variables
        """
        if date is None:
            date = self.start_date
        if time is None:
            time = self.start_time

        if chkvar:
            chkt1 = timediff((self.end_date, self.end_time), (date, time)) > 0
            chkt2 = timediff((self.start_date, self.start_time),
                             (date, time)) < 0
            if chkt1 or chkt2:
                raise KeyError(("Vertical Diffusivity file includes " +
                                "(%i,%6.1f) thru (%i,%6.1f); you requested " +
                                "(%i,%6.1f)") %
                               (self.start_date, self.start_time,
                                self.end_date, self.end_time, date, time))
            if k < 1 or k > self.nlayers:
                raise KeyError(
                    ("Vertical Diffusivity file include layers 1" +
                     "thru %i; you requested %i") % (self.nlayers, k))
        self.rffile._newrecord(self.__recordposition(date, time, k))

    def read(self):
        """
        provide direct access to the underlying RecordFile read
        method
        """
        return self.rffile.read(self.record_fmt)

    def read_into(self, dest):
        """
        put values from rffile read into dest
        dest - numpy or numeric array
        """
        return read_into(self.rffile, dest, self.id_fmt, self.data_fmt)

    def seekandreadinto(self, dest, date=None, time=None, k=1):
        """
        see seek and read_into
        """
        self.seek(date, time, k)
        return self.read_into(dest)

    def seekandread(self, date=None, time=None, k=1):
        """
        see seek and read
        """
        self.seek(date, time, k)
        return self.read()

    def values(self):
        """Yield the raw record for every (date, time, layer)."""
        for d, t, k in self.__iter__():
            yield self.seekandread(d, t, k)

    def items(self):
        """Yield (date, time, layer, record) for every record."""
        for d, t, k in self.__iter__():
            yield d, t, k, self.seekandread(d, t, k)

    def keys(self):
        """Yield every (date, time, layer) key in file order."""
        for d, t in self.timerange():
            for k in range(1, self.nlayers + 1):
                yield d, t, k

    __iter__ = keys

    def getArray(self):
        """Read the whole file into a (TSTEP, LAY, ROW, COL) float array."""
        a = zeros(
            (self.time_step_count, self.nlayers, len(
                self.dimensions['ROW']), len(self.dimensions['COL'])), 'f')
        for ti, (d, t) in enumerate(self.timerange()):
            for ki, k in enumerate(range(1, self.nlayers + 1)):
                self.seekandreadinto(a[ti, ki, ...], d, t, k)
        return a

    def timerange(self):
        """Iterate (date, time) pairs from start through end inclusive."""
        return timerange((self.start_date, self.start_time),
                         (self.end_date, self.end_time + self.time_step),
                         self.time_step)
# Esempio n. 21
# 0
class point_source(PseudoNetCDFFile):
    """
    point_source provides a PseudoNetCDF interface for CAMx
    point_source files.  Where possible, the interface follows
    IOAPI conventions (see www.baronams.com).

    ex:
        >>> point_source_path = 'camx_point_source.bin'
        >>> rows,cols = 65,83
        >>> point_sourcefile = point_source(point_source_path,rows,cols)
        >>> point_sourcefile.variables.keys()
        ['TFLAG', 'ETFLAG', 'TFLAG', 'XSTK', 'YSTK', 'HSTK', 'DSTK', 'TSTK',
         'VSTK', 'KCELL', 'FLOW', 'PLMHT', 'NSTKS', 'NO', 'NO2', ...]
        >>> tflag = point_sourcefile.variables['TFLAG']
        >>> tflag.dimensions
        ('TSTEP', 'VAR', 'DATE-TIME')
        >>> tflag[0,0,:]
        array([2005185,       0])
        >>> tflag[-1,0,:]
        array([2005185,  240000])
        >>> v = point_sourcefile.variables['XSTK']
        >>> v.dimensions
        ('NSTK',)
        >>> v.shape
        (38452,)
        >>> v = point_sourcefile.variables['NO2']
        >>> v.dimensions
        ('TSTEP', 'NSTK')
        >>> v.shape
        (25, 38452)
        >>> point_sourcefile.dimensions
        {'TSTEP': 25, 'NSTK': 38452}
    """

    # Header record formats (see CAMx Users Manual).
    emiss_hdr_fmt = "10i60i3ifif"
    grid_hdr_fmt = "ffiffffiiiiifff"
    cell_hdr_fmt = "iiii"
    time_hdr_fmt = "ifif"
    spc_fmt = "10i"
    nstk_hdr_fmt = "ii"
    # "ii" prefixes are the Fortran record pads.
    padded_nstk_hdr_size = struct.calcsize("ii" + nstk_hdr_fmt)
    padded_time_hdr_size = struct.calcsize("ii" + time_hdr_fmt)
    stk_hdr_fmt = "ffffff"
    id_fmt = "i" + spc_fmt
    id_size = struct.calcsize(id_fmt)
    data_fmt = "f"
    # Time-invariant and time-varying per-stack property names.
    stkprops = ['XSTK', 'YSTK', 'HSTK', 'DSTK', 'TSTK', 'VSTK']
    stktimeprops = ['KCELL', 'FLOW', 'PLMHT']

    def __init__(self, rf):
        """
        Initialization included reading the header and learning
        about the format.

        rf - path to the CAMx point source record file

        see __readheader and __gettimestep() for more info
        """
        self.rffile = OpenRecordFile(rf)
        self.padded_time_hdr_size = struct.calcsize(self.time_hdr_fmt + "ii")
        self.__readheader()
        self.__gettimestep()
        self.__gettimeprops()
        self.createDimension('TSTEP', self.time_step_count)
        self.createDimension('STK', self.nstk)
        varkeys = (['XSTK', 'YSTK', 'HSTK', 'DSTK', 'TSTK', 'VSTK',
                    'KCELL', 'FLOW', 'PLMHT'] +
                   [i.strip() for i in self.spcnames])
        self.variables = PseudoNetCDFVariables(self.__var_get, varkeys)

    def __var_get(self, key):
        """Lazily construct a variable from stack props or emissions."""
        constr = self.__variables
        decor = lambda *args, **kwds: {'notread': 1}

        values = constr(key)
        if key in self.stkprops:
            # Time-invariant stack property.
            var = self.createVariable(key, 'f', ('STK',))
        else:
            var = self.createVariable(key, 'f', ('TSTEP', 'STK'))
        var[:] = values
        for k, v in decor(key).items():
            setattr(var, k, v)
        return var

    def __variables(self, k):
        """Return values for stack property or species name k."""
        if k in self.stkprops:
            return array(self.stk_props)[:, self.stkprops.index(k)]
        elif k in self.stktimeprops:
            # Columns 0-1 are the (i, j) ids; data begins at column 2.
            return array(self.stk_time_props)[:, :, 2:][
                :, :, self.stktimeprops.index(k)]
        else:
            return self.getArray()[:, self.spcnames.index(k.ljust(10)), :]

    def header(self):
        """Return the header values as a list of records."""
        rdum = 0.
        idum = 0
        ione = 1
        return [
            [self.name, self.note, ione, self.nspec, self.start_date,
             self.start_time, self.end_date, self.end_time],
            [rdum, rdum, self.iutm, self.xorg, self.yorg, self.delx,
             self.dely, self.nx, self.ny, self.nz, idum, idum, rdum,
             rdum, rdum],
            [ione, ione, self.nx, self.ny],
            self.spcnames,
            [ione, self.nstk],
            self.stk_props,
            self.stk_time_props
        ]

    def __readheader(self):
        """
        __readheader reads the header section of the point source file
        it initializes each header field (see CAMx Users Manual for a list)
        as properties of the point_source class
        """
        vals = self.rffile.read(self.emiss_hdr_fmt)
        (self.name, self.note, ione, self.nspec, self.start_date,
         self.start_time, self.end_date, self.end_time) = (
            vals[0:10], vals[10:70], vals[70], vals[71], vals[72],
            vals[73], vals[74], vals[75])
        (rdum, rdum, self.iutm, self.xorg, self.yorg, self.delx, self.dely,
         self.nx, self.ny, self.nz, idum, idum, rdum, rdum,
         rdum) = self.rffile.read(self.grid_hdr_fmt)
        if self.nz == 0:
            # Special case of gridded emissions
            # Seems to be same as avrg
            self.nlayers = 1
        else:
            self.nlayers = self.nz
        ione, ione, nx, ny = self.rffile.read(self.cell_hdr_fmt)
        if not (self.nx, self.ny) == (nx, ny):
            raise ValueError(
                "nx, ny defined first as %i, %i and then as %i, %i" %
                (self.nx, self.ny, nx, ny))
        species_temp = self.rffile.read(self.nspec * self.spc_fmt)
        self.spcnames = []
        for i in range(0, self.nspec * 10, 10):
            self.spcnames.append(Int2Asc(species_temp[i:i + 10]))

        ione, self.nstk = self.rffile.read(self.nstk_hdr_fmt)

        # Time-invariant stack parameters (x, y, h, d, t, v per stack).
        stkprms = zeros((self.nstk * len(self.stk_hdr_fmt),), 'f')
        read_into(self.rffile, stkprms, '')
        self.rffile.next()

        stkprms = stkprms.reshape((self.nstk, len(self.stk_hdr_fmt)))
        # Replace the sentinel used for missing velocities with -nan.
        for i in range(stkprms.shape[0]):
            if stkprms[i, -1] == array_nan:
                stkprms[i, -1] = float('-nan')
        self.stk_props = stkprms.tolist()
        self.data_start_byte = self.rffile.record_start
        (self.start_date, self.start_time, end_date,
         end_time) = self.rffile.read(self.time_hdr_fmt)

        self.time_step = timediff((self.start_date, self.start_time),
                                  (end_date, end_time))
        self.end_time += self.time_step
        # End-of-day convention depends on whether time is HHMM or hours.
        self.time_step_count = int(
            timediff((self.start_date, self.start_time),
                     (self.end_date, self.end_time),
                     (2400, 24)[int(self.time_step % 2)]) / self.time_step)

        self.stk_time_prop_fmt = "" + ("iiiff" * self.nstk)
        self.padded_stk_time_prop_size = struct.calcsize(
            "ii" + self.stk_time_prop_fmt)

        self.record_fmt = ("i10i") + self.data_fmt * (self.nstk)
        self.record_size = struct.calcsize(self.record_fmt)
        self.padded_size = self.record_size + 8

    def __gettimestep(self):
        """
        this is taken care of in the readheader routine
        record format provides start and end for each hour,
        which translates to t1 and t2
        """
        pass

    def __gettimeprops(self):
        """Read the time-varying stack properties for every time step."""
        self.stk_time_props = []
        eod = (2400, 24)[int(self.time_step % 2)]
        for ti, (d, t) in enumerate(
                timerange((self.start_date, self.start_time),
                          (self.end_date, self.end_time),
                          self.time_step, eod)):
            tmpprop = zeros((len(self.stk_time_prop_fmt)), 'f')
            tmpprop[...] = self.seekandread(d, t, 1, True,
                                            self.stk_time_prop_fmt)
            tmpprop = tmpprop.reshape(self.nstk, 5)
            # Replace the sentinel used for missing flow with -nan.
            for i in range(tmpprop.shape[0]):
                if tmpprop[i, -2] == array_nan:
                    tmpprop[i, -2] = float('-nan')

            self.stk_time_props.append(tmpprop.tolist())

    def __timerecords(self, dt):
        """
        Calculate the number of records to increment to reach time (d,t)
        """
        d, t = dt
        nsteps = timediff((self.start_date, self.start_time), (d, t),
                          (2400, 24)[int(self.time_step % 2)])
        nspec = self.__spcrecords(self.nspec + 1)
        return nsteps * (nspec)

    def __spcrecords(self, spc):
        """
        Calculated number of records before spc
        """
        return spc - 1

    def __recordposition(self, date, time, spc, offset=False):
        """
        Use time (d,t), spc, and k to calculate number of records before
        desired record

        date - integer julian
        time - float
        spc - integer
        offset - when True, back up one stack-time-property record
        """
        ntime = self.__timerecords((date, time))
        # Floor division: one header group per time step; true division
        # would make the byte offset a float under Python 3.
        nhdr = (ntime // self.__spcrecords(self.nspec + 1)) + 1
        nspc = self.__spcrecords(spc)
        noffset = -abs(int(offset))
        byte = self.data_start_byte
        byte += nhdr * (self.padded_time_hdr_size +
                        self.padded_nstk_hdr_size +
                        self.padded_stk_time_prop_size)
        byte += (ntime + nspc) * self.padded_size
        byte += noffset * self.padded_stk_time_prop_size
        return byte

    def seek(self, date=None, time=None, spc=-1, offset=False):
        """
        Move file cursor to the beginning of the specified record
        see __recordposition for parameter definitions
        """
        self.rffile._newrecord(self.__recordposition(date, time, spc, offset))

    def read(self, fmt=None):
        """
        Provide direct access to record file read
        """
        if fmt is None:
            fmt = self.record_fmt
        return self.rffile.read(fmt)

    def read_into(self, dest):
        """
        Transfer values from current record to dest
        dest - numeric or numpy array
        """
        return read_into(self.rffile, dest, self.id_fmt, self.data_fmt)

    def seekandreadinto(self, dest, date=None, time=None, spc=1):
        """
        see seek and read_into
        """
        self.seek(date, time, spc)
        self.read_into(dest)

    def seekandread(self, date=None, time=None, spc=1, offset=False,
                    fmt=None):
        """
        see seek and read
        """
        self.seek(date, time, spc, offset)
        return self.read(fmt)

    def values(self):
        """Yield the raw record for every (date, time, species)."""
        for d, t, spc in self.__iter__():
            yield self.seekandread(d, t, spc)

    def items(self):
        """Yield (date, time, species, record) for every record."""
        for d, t, spc in self.__iter__():
            yield d, t, spc, self.seekandread(d, t, spc)

    def keys(self):
        """Yield every (date, time, species index) key in file order."""
        for ti, (d, t) in enumerate(self.timerange()):
            for spc in range(1, len(self.spcnames) + 1):
                yield d, t, spc

    __iter__ = keys

    def getArray(self):
        """Read all emissions into a (TSTEP, SPC, STK) float array."""
        a = zeros((self.time_step_count, self.nspec, self.nstk), 'f')
        for ti, (d, t) in enumerate(self.timerange()):
            for spc in range(1, len(self.spcnames) + 1):
                self.seekandreadinto(a[ti, spc - 1, ...], d, t, spc)
        return a.copy()

    def timerange(self):
        """Iterate (date, time) pairs from start through end."""
        return timerange((self.start_date, self.start_time),
                         (self.end_date, self.end_time),
                         self.time_step, eod=24)
# Esempio n. 22
# 0
    def __init__(self, rf, multi=False, **props):
        """
        Initialization included reading the header and learning
        about the format.

        see __readheader and __gettimestep() for more info

        Keywords (i.e., props) for projection: P_ALP, P_BET, P_GAM,
        XCENT, YCENT, XORIG, YORIG, XCELL, YCELL
        """
        self.__rffile = OpenRecordFile(rf)
        self.__readheader()
        # Choose the process-analysis record layout by process count:
        # 24 processes -> single AERCHEM term; 26 -> INORGACHEM,
        # ORGACHEM and AQACHEM terms.
        self.__ipr_record_type = {
            24: dtype(
                dict(
                    names=['SPAD', 'DATE', 'TIME', 'SPC', 'PAGRID', 'NEST',
                           'I', 'J', 'K',
                           'INIT', 'CHEM', 'EMIS', 'PTEMIS', 'PIG', 'WADV',
                           'EADV', 'SADV',
                           'NADV', 'BADV', 'TADV', 'DIL', 'WDIF', 'EDIF',
                           'SDIF', 'NDIF',
                           'BDIF', 'TDIF', 'DDEP', 'WDEP', 'AERCHEM',
                           'FCONC', 'UCNV', 'AVOL',
                           'EPAD'],
                    formats=['>i', '>i', '>f', '>S10', '>i', '>i', '>i',
                             '>i', '>i',
                             '>f', '>f', '>f', '>f', '>f', '>f', '>f',
                             '>f', '>f',
                             '>f', '>f', '>f', '>f', '>f', '>f', '>f',
                             '>f', '>f',
                             '>f', '>f', '>f', '>f', '>f', '>f', '>i'])),
            26: dtype(
                dict(
                    names=['SPAD', 'DATE', 'TIME', 'SPC', 'PAGRID', 'NEST',
                           'I', 'J', 'K',
                           'INIT', 'CHEM', 'EMIS', 'PTEMIS', 'PIG', 'WADV',
                           'EADV', 'SADV',
                           'NADV', 'BADV', 'TADV', 'DIL', 'WDIF', 'EDIF',
                           'SDIF', 'NDIF',
                           'BDIF', 'TDIF', 'DDEP', 'WDEP', 'INORGACHEM',
                           'ORGACHEM', 'AQACHEM', 'FCONC', 'UCNV', 'AVOL',
                           'EPAD'],
                    formats=['>i', '>i', '>f', '>S10', '>i', '>i', '>i',
                             '>i', '>i',
                             '>f', '>f', '>f', '>f', '>f', '>f', '>f',
                             '>f', '>f',
                             '>f', '>f', '>f', '>f', '>f', '>f', '>f',
                             '>f', '>f',
                             '>f', '>f', '>f', '>f', '>f', '>f', '>f',
                             '>f', '>i']))
        }[len(self.prcnames)]

        prcs = (['SPAD', 'DATE', 'TIME', 'PAGRID', 'NEST', 'I', 'J', 'K',
                 'INIT', 'CHEM', 'EMIS', 'PTEMIS', 'PIG', 'WADV', 'EADV',
                 'SADV', 'NADV', 'BADV', 'TADV', 'DIL', 'WDIF', 'EDIF',
                 'SDIF', 'NDIF', 'BDIF', 'TDIF', 'DDEP', 'WDEP'] +
                {24: ['AERCHEM'],
                 26: ['INORGACHEM', 'ORGACHEM', 'AQACHEM']}[
                    len(self.prcnames)] +
                ['FCONC', 'UCNV', 'AVOL', 'EPAD'])
        varkeys = ['_'.join(i) for i in cartesian(prcs, self.spcnames)]
        varkeys += ['SPAD', 'DATE', 'TIME', 'PAGRID', 'NEST', 'I', 'J', 'K',
                    'TFLAG']
        self.groups = {}
        NSTEPS = len([i_ for i_ in self.timerange()])
        NVARS = len(varkeys)
        self.createDimension('VAR', NVARS)
        self.createDimension('DATE-TIME', 2)
        self.createDimension('TSTEP', NSTEPS)
        padatatype = []
        pavarkeys = []

        def _make_varget(dk):
            # Bind dk at definition time for each domain.  Replaces the
            # previous exec-based definition, which wrote into a locals()
            # snapshot and so left `varget` undefined under Python 3.
            def varget(k):
                return self._ipr__variables(dk, k)
            return varget

        for di, domain in enumerate(self.padomains):
            dk = 'PA%02d' % di
            prefix = dk + '_'
            grp = self.groups[dk] = PseudoNetCDFFile()
            pavarkeys.extend([prefix + k for k in varkeys])
            grp.createDimension('VAR', NVARS)
            grp.createDimension('DATE-TIME', 2)
            grp.createDimension('TSTEP', NSTEPS)
            grp.createDimension('COL', domain['iend'] - domain['istart'] + 1)
            grp.createDimension('ROW', domain['jend'] - domain['jstart'] + 1)
            grp.createDimension('LAY', domain['tlay'] - domain['blay'] + 1)
            padatatype.append(
                (dk, self.__ipr_record_type,
                 (len(grp.dimensions['ROW']), len(grp.dimensions['COL']),
                  len(grp.dimensions['LAY']))))
            if len(self.padomains) == 1:
                # Single-domain files also expose dimensions at the root.
                self.createDimension('COL',
                                     domain['iend'] - domain['istart'] + 1)
                self.createDimension('ROW',
                                     domain['jend'] - domain['jstart'] + 1)
                self.createDimension('LAY',
                                     domain['tlay'] - domain['blay'] + 1)
            varget = _make_varget(dk)
            if len(self.padomains) == 1:
                self.variables = PseudoNetCDFVariables(varget, varkeys)
            else:
                grp.variables = PseudoNetCDFVariables(varget, varkeys)

        self.__memmaps = memmap(self.__rffile.infile.name, dtype(padatatype),
                                'r', self.data_start_byte).reshape(
            NSTEPS, len(self.spcnames))
        for k, v in props.items():
            setattr(self, k, v)
        try:
            # Best-effort CF metadata; failure is non-fatal.
            add_cf_from_ioapi(self)
        except Exception:
            pass
# Esempio n. 23
# 0
class height_pressure(PseudoNetCDFFile):
    """
    height_pressure provides a PseudoNetCDF interface for CAMx
    height_pressure files.  Where possible, the interface follows
    IOAPI conventions (see www.baronams.com).

    ex:
        >>> height_pressure_path = 'camx_height_pressure.bin'
        >>> rows,cols = 65,83
        >>> height_pressurefile = height_pressure(height_pressure_path,rows,cols)
        >>> height_pressurefile.variables.keys()
        ['TFLAG', 'HGHT', 'PRES']
        >>> v = height_pressurefile.variables['V']
        >>> tflag = height_pressurefile.variables['TFLAG']
        >>> tflag.dimensions
        ('TSTEP', 'VAR', 'DATE-TIME')
        >>> tflag[0,0,:]
        array([2005185,       0])
        >>> tflag[-1,0,:]
        array([2005185,  240000])
        >>> v.dimensions
        ('TSTEP', 'LAY', 'ROW', 'COL')
        >>> v.shape
        (25, 28, 65, 83)
        >>> height_pressurefile.dimensions
        {'TSTEP': 25, 'LAY': 28, 'ROW': 65, 'COL': 83}
    """

    # Fortran record id is (time, date); each data cell is one float.
    id_fmt = "fi"
    data_fmt = "f"

    def __init__(self, rf, rows=None, cols=None):
        """
        Initialization included reading the header and learning
        about the format.

        rf   - path to the CAMx height/pressure record file
        rows - number of grid rows; inferred when omitted
        cols - number of grid columns; inferred when omitted

        see __readheader and __gettimestep() for more info
        """
        self.rffile = OpenRecordFile(rf)

        self.id_size = struct.calcsize(self.id_fmt)
        self.__readheader()
        self.__gettimestep()
        # Derive the grid shape from the per-record cell count when rows
        # and/or cols are not given.  Floor division keeps the dimension
        # sizes as ints under Python 3.
        if rows is None and cols is None:
            rows = self.cell_count
            cols = 1
        elif rows is None:
            rows = self.cell_count // cols
        elif cols is None:
            cols = self.cell_count // rows
        else:
            if cols * rows != self.cell_count:
                raise ValueError(
                    "The product of cols (%d) and rows (%d) must equal cells (%d)"
                    % (cols, rows, self.cell_count))

        self.createDimension('TSTEP', self.time_step_count)
        self.createDimension('ROW', rows)
        self.createDimension('COL', cols)
        self.createDimension('LAY', self.nlayers)
        self.variables = PseudoNetCDFVariables(self.__var_get,
                                               ['HGHT', 'PRES'])

    def __var_get(self, key):
        """Lazily construct HGHT or PRES on first access."""
        constr = lambda hp: self.getArray({'HGHT': 0, 'PRES': 1}[hp])
        decor = lambda hp: {
            'HGHT':
            dict(units='m',
                 var_desc='HGHT'.ljust(16),
                 long_name='HGHT'.ljust(16)),
            'PRES':
            dict(units='hPa',
                 var_desc='PRES'.ljust(16),
                 long_name='PRES'.ljust(16))
        }[hp]
        values = constr(key)
        var = self.createVariable(key, 'f', ('TSTEP', 'LAY', 'ROW', 'COL'))
        var[:] = values
        for k, v in decor(key).items():
            setattr(var, k, v)
        return var

    def __readheader(self):
        """
        __readheader reads the header section of the height/pressure file
        it initializes each header field (see CAMx Users Manual for a list)
        as properties of the height_pressure class
        """
        self.data_start_byte = 0
        self.start_time, self.start_date = self.rffile.read(self.id_fmt)
        self.record_size = self.rffile.record_size
        # +8 accounts for the leading/trailing Fortran record pads.
        self.padded_size = self.record_size + 8
        # Floor division: cell_count must be an int so record_fmt can be
        # built by string repetition (float would raise under Python 3).
        self.cell_count = (self.record_size - self.id_size) // struct.calcsize(
            self.data_fmt)
        self.record_fmt = self.id_fmt + self.data_fmt * (self.cell_count)

    def __gettimestep(self):
        """
        Header information provides start and end date, but does not
        indicate the increment between.  This routine reads the first
        and second date/time and initializes variables indicating the
        timestep length and the anticipated number.
        """
        # Skip the first height/pressure record pair.
        self.rffile._newrecord(self.padded_size * 2)
        d, t = self.start_date, self.start_time
        self.nlayers = 0
        while timediff((self.start_date, self.start_time), (d, t)) == 0:
            t, d = self.rffile.read(self.id_fmt)
            self.rffile.next()
            self.nlayers += 1
        self.time_step = timediff((self.start_date, self.start_time), (d, t))

        # Walk forward until seeking past the end fails, to find the
        # last available time.
        while True:
            try:
                self.seek(d, t, self.nlayers, 1, False)
                d, t = timeadd((d, t), (0, self.time_step))
            except Exception:
                break
        self.end_date, self.end_time = timeadd((d, t), (0, -self.time_step))
        self.time_step_count = int(
            timediff((self.start_date, self.start_time),
                     (self.end_date, self.end_time)) / self.time_step) + 1

    def __timerecords(self, dt):
        """
        routine returns the number of records to increment from the
        data start byte to find the first time
        """
        d, t = dt
        nsteps = int(
            timediff((self.start_date, self.start_time),
                     (d, t)) / self.time_step)
        nk = self.__layerrecords(self.nlayers + 1)
        return nsteps * nk

    def __layerrecords(self, k):
        """
        routine returns the number of records to increment from the
        data start byte to find the first klayer; each layer holds a
        height record and a pressure record (hence the factor of 2)
        """
        return (k - 1) * 2

    def __recordposition(self, date, time, k, hp):
        """
        routine uses timerecords and layerrecords multiplied plus hp
        by the fortran padded size to return the byte position of the
        specified record

        date - integer
        time - float
        k - integer
        hp - integer (0=h,1=p)
        """
        ntime = self.__timerecords((date, time))
        nk = self.__layerrecords(k)
        return (nk + ntime + hp) * self.padded_size + self.data_start_byte

    def seek(self, date=None, time=None, k=1, hp=0, chkvar=True):
        """
        Move file cursor to specified record
        see __recordposition for a definition of variables
        """
        if date is None:
            date = self.start_date
        if time is None:
            time = self.start_time

        # Validation is grouped under chkvar so chkvar=False really
        # skips every check (the prior and/or chain did not).
        if chkvar:
            chkt1 = timediff((self.end_date, self.end_time), (date, time)) > 0
            chkt2 = timediff((self.start_date, self.start_time),
                             (date, time)) < 0
            if chkt1 or chkt2:
                raise KeyError(
                    "Height/pressure file includes (%i,%6.1f) thru (%i,%6.1f); you requested (%i,%6.1f)"
                    % (self.start_date, self.start_time, self.end_date,
                       self.end_time, date, time))
            if k < 1 or k > self.nlayers:
                raise KeyError(
                    "Height/pressure file include layers 1 thru %i; you requested %i"
                    % (self.nlayers, k))
            if hp < 0 or hp > 1:
                raise KeyError(
                    "Height pressure or indexed 0 and 1; you requested %i"
                    % (hp))

        self.rffile._newrecord(self.__recordposition(date, time, k, hp))

    def read(self):
        """
        Call recordfile read method directly
        """
        return self.rffile.read(self.record_fmt)

    def read_into(self, dest):
        """
        put values from rffile read into dest
        dest - numpy or numeric array
        """
        return read_into(self.rffile, dest, self.id_fmt, self.data_fmt)

    def seekandreadinto(self, dest, date=None, time=None, k=1, hp=0):
        """
        see seek and read_into
        """
        self.seek(date, time, k, hp)
        return self.read_into(dest)

    def seekandread(self, date=None, time=None, k=1, hp=0):
        """
        see seek and read
        """
        self.seek(date, time, k, hp)
        return self.read()

    def values(self):
        """Yield (height record, pressure record) for each key."""
        for d, t, k in self.__iter__():
            yield self.seekandread(d, t, k, 0), self.seekandread(d, t, k, 1)

    def items(self):
        """Yield (date, time, layer, height record, pressure record)."""
        for d, t, k in self.__iter__():
            yield d, t, k, self.seekandread(d, t, k,
                                            0), self.seekandread(d, t, k, 1)

    def keys(self):
        """Yield every (date, time, layer) key in file order."""
        for d, t in self.timerange():
            for k in range(1, self.nlayers + 1):
                yield d, t, k

    __iter__ = keys

    def getArray(self, hp):
        """
        Read all records for height (hp=0) or pressure (hp=1) into a
        (TSTEP, LAY, ROW, COL) float array.
        """
        a = zeros((self.time_step_count, len(self.dimensions['LAY']),
                   len(self.dimensions['ROW']), len(self.dimensions['COL'])),
                  'f')

        for ti, (d, t) in enumerate(self.timerange()):
            # range, not Python 2's xrange; a single Ellipsis replaces
            # the invalid double-ellipsis index.
            for ki, k in enumerate(range(1, self.nlayers + 1)):
                self.seekandreadinto(a[ti, ki, ...], d, t, k, hp)
        return a

    def timerange(self):
        """Iterate (date, time) pairs from start through end inclusive."""
        return timerange(
            (self.start_date, self.start_time),
            timeadd((self.end_date, self.end_time), (0, self.time_step),
                    (2400, 24)[int(self.time_step % 2)]), self.time_step,
            (2400, 24)[int(self.time_step % 2)])
# Esempio n. 24
# 0
class temperature(PseudoNetCDFFile):
    """
    temperature provides a PseudoNetCDF interface for CAMx
    temperature files.  Where possible, the interface follows
    IOAPI conventions (see www.baronams.com).

    ex:
        >>> temperature_path = 'camx_temperature.bin'
        >>> rows, cols = 65, 83
        >>> temperaturefile = temperature(temperature_path, rows, cols)
        >>> temperaturefile.variables.keys()
        ['TFLAG', 'AIRTEMP', 'SURFTEMP']
        >>> tflag = temperaturefile.variables['TFLAG']
        >>> tflag.dimensions
        ('TSTEP', 'VAR', 'DATE-TIME')
        >>> tflag[0,0,:]
        array([2005185,       0])
        >>> tflag[-1,0,:]
        array([2005185,  240000])
        >>> v = temperaturefile.variables['SURFTEMP']
        >>> v.dimensions
        ('TSTEP', 'ROW', 'COL')
        >>> v.shape
        (25, 65, 83)
        >>> v = temperaturefile.variables['AIRTEMP']
        >>> v.dimensions
        ('TSTEP', 'LAY', 'ROW', 'COL')
        >>> v.shape
        (25, 28, 65, 83)
        >>> temperaturefile.dimensions
        {'TSTEP': 25, 'LAY': 28, 'ROW': 65, 'COL': 83}
    """

    id_fmt = 'fi'
    data_fmt = 'f'

    def __init__(self, rf, rows=None, cols=None):
        """
        rf - path to (or open) CAMx temperature file
        rows, cols - grid dimensions; if only one is supplied the other
                     is derived from the per-record cell count
        """
        self.rffile = OpenRecordFile(rf)
        self.id_size = struct.calcsize(self.id_fmt)
        self.__readheader()
        self.__gettimestep()
        if rows is None and cols is None:
            rows = self.cell_count
            cols = 1
        elif rows is None:
            # floor division: dimension sizes must be ints (true division
            # returns a float on Python 3)
            rows = self.cell_count // cols
        elif cols is None:
            cols = self.cell_count // rows
        else:
            if cols * rows != self.cell_count:
                raise ValueError(
                    ("The product of cols (%d) and rows (%d) " +
                     "must equal cells (%d)") % (cols, rows, self.cell_count))

        self.createDimension('TSTEP', self.time_step_count)
        self.createDimension('COL', cols)
        self.createDimension('ROW', rows)
        self.createDimension('LAY', self.nlayers)
        self.createDimension('SURF', 1)
        self.variables = PseudoNetCDFVariables(self.__var_get,
                                               ['AIRTEMP', 'SURFTEMP'])

    def __var_get(self, key):
        """Lazily build, populate, and decorate the variable for key."""
        def decor(k):
            return dict(units='K', var_desc=k.ljust(16),
                        long_name=k.ljust(16))
        values = self.__variables(key)
        dims = {'AIRTEMP': ('TSTEP', 'LAY', 'ROW', 'COL'),
                'SURFTEMP': ('TSTEP', 'SURF', 'ROW', 'COL')}[key]
        var = self.createVariable(key, 'f', dims)
        var[:] = values
        for k, v in decor(key).items():
            setattr(var, k, v)
        return var

    def __readheader(self):
        """
        Learn the record layout (sizes, formats, start date/time) from the
        first records of the file.
        """
        self.data_start_byte = 0
        self.rffile._newrecord(0)

        # First record is the surface (area) record
        self.area_size = self.rffile.record_size
        # floor division keeps counts as ints for use as format multipliers
        self.area_count = ((self.area_size - self.id_size) //
                           struct.calcsize(self.data_fmt))
        self.area_padded_size = self.area_size + 8
        self.area_fmt = self.id_fmt + self.data_fmt * (self.area_count)

        self.start_time, self.start_date = self.rffile.read(self.id_fmt)

        # Subsequent records are per-layer air temperature records
        self.record_size = self.rffile.record_size
        self.padded_size = self.record_size + 8
        self.cell_count = ((self.record_size - self.id_size) //
                           struct.calcsize(self.data_fmt))

        self.record_fmt = self.id_fmt + self.data_fmt * (self.cell_count)

    def __gettimestep(self):
        """
        Count layers (records sharing the first timestamp) and derive the
        time step and total step count from the first and last records.
        """
        d, t = date, time = self.start_date, self.start_time
        self.nlayers = -1
        while (d, t) == (date, time):
            self.nlayers += 1
            t, d = self.rffile.read(self.id_fmt)
        self.time_step = timediff((self.start_date, self.start_time), (d, t))
        self.rffile.infile.seek(0, 2)
        self.rffile.previous()
        self.end_time, self.end_date = self.rffile.read(self.id_fmt)
        self.time_step_count = int(
            timediff((self.start_date, self.start_time),
                     (self.end_date, self.end_time)) / self.time_step) + 1

    def __variables(self, k):
        """Assemble all time steps of SURFTEMP or AIRTEMP into one array."""
        if k == 'SURFTEMP':
            out = zeros((len(self.dimensions['TSTEP']), 1,
                         len(self.dimensions['ROW']),
                         len(self.dimensions['COL'])), 'f')
            valmaps = self.__surfmaps()
        elif k == 'AIRTEMP':
            out = zeros((len(self.dimensions['TSTEP']),
                         len(self.dimensions['LAY']),
                         len(self.dimensions['ROW']),
                         len(self.dimensions['COL'])), 'f')
            valmaps = self.__airmaps()
        for i, (d, t) in enumerate(self.timerange()):
            # next(), not .next(): Python 3 generator protocol
            out[i, ...] = next(valmaps)
        return out

    def __surfpos(self):
        """Yield the byte offset of each surface record."""
        pos = self.data_start_byte + 12
        inc = self.area_padded_size + self.padded_size * self.nlayers
        self.rffile.infile.seek(0, 2)
        rflen = self.rffile.tell()
        while pos < rflen:
            yield pos
            pos += inc
        # Plain fall-through ends the generator; raising StopIteration here
        # becomes a RuntimeError under PEP 479 (Python 3.7+).

    def __surfmaps(self):
        """Yield a memory-mapped (ROW, COL) view of each surface record."""
        nrows = len(self.dimensions['ROW'])
        ncols = len(self.dimensions['COL'])
        for pos in self.__surfpos():
            yield memmap(self.rffile.infile.name, '>f', 'r', pos,
                         (self.area_count,)).reshape(nrows, ncols)

    def __airpos(self):
        """Yield the byte offset of each block of per-layer air records."""
        pos = self.area_padded_size + self.data_start_byte
        inc = self.area_padded_size + self.padded_size * self.nlayers
        self.rffile.infile.seek(0, 2)
        rflen = self.rffile.tell()
        while pos < rflen:
            yield pos
            pos += inc
        # See __surfpos: no explicit StopIteration (PEP 479).

    def __airmaps(self):
        """Yield a memory-mapped (LAY, ROW, COL) view for each time step."""
        nlays = len(self.dimensions['LAY'])
        nrows = len(self.dimensions['ROW'])
        ncols = len(self.dimensions['COL'])
        for pos in self.__airpos():
            # Each per-layer record carries 3 leading id values and 1
            # trailing pad value; slice them away before reshaping.
            raw = memmap(self.rffile.infile.name, '>f', 'r', pos,
                         ((self.cell_count + 4) * self.nlayers,))
            vals = raw.reshape(self.nlayers, self.cell_count + 4)[:, 3:-1]
            yield vals.reshape(nlays, nrows, ncols)

    def timerange(self):
        """Return an iterator of the (date, time) pairs in the file."""
        eod = (2400, 24)[int(self.time_step % 2)]
        return timerange((self.start_date, self.start_time),
                         timeadd((self.end_date, self.end_time),
                                 (0, self.time_step), eod),
                         self.time_step, eod)
# --- Esempio n. 25 (listing-site example separator; commented out so the file parses) ---
class point_source(PseudoNetCDFFile):
    """
    point_source provides a PseudoNetCDF interface for CAMx
    point_source files.  Where possible, the interface follows
    IOAPI conventions (see www.baronams.com).

    ex:
        >>> point_source_path = 'camx_point_source.bin'
        >>> point_sourcefile = point_source(point_source_path)
        >>> point_sourcefile.variables.keys()
        ['TFLAG', 'ETFLAG', 'TFLAG', 'XSTK', 'YSTK', 'HSTK', 'DSTK', 'TSTK',
         'VSTK', 'KCELL', 'FLOW', 'PLMHT', 'NSTKS', 'NO', 'NO2', ...]
        >>> tflag = point_sourcefile.variables['TFLAG']
        >>> tflag.dimensions
        ('TSTEP', 'VAR', 'DATE-TIME')
        >>> tflag[0,0,:]
        array([2005185,       0])
        >>> tflag[-1,0,:]
        array([2005185,  240000])
        >>> v = point_sourcefile.variables['XSTK']
        >>> v.dimensions
        ('STK',)
        >>> v.shape
        (38452,)
        >>> v = point_sourcefile.variables['NO2']
        >>> v.dimensions
        ('TSTEP', 'STK')
        >>> v.shape
        (25, 38452)
        >>> point_sourcefile.dimensions
        {'TSTEP': 25, 'STK': 38452}
    """

    # struct-module record formats for each header section
    emiss_hdr_fmt = "10i60i3ifif"
    grid_hdr_fmt = "ffiffffiiiiifff"
    cell_hdr_fmt = "iiii"
    time_hdr_fmt = "ifif"
    spc_fmt = "10i"
    nstk_hdr_fmt = "ii"
    padded_nstk_hdr_size = struct.calcsize("ii" + nstk_hdr_fmt)
    padded_time_hdr_size = struct.calcsize("ii" + time_hdr_fmt)
    stk_hdr_fmt = "ffffff"
    id_fmt = "i" + spc_fmt
    id_size = struct.calcsize(id_fmt)
    data_fmt = "f"
    # time-invariant vs. time-varying stack properties
    stkprops = ['XSTK', 'YSTK', 'HSTK', 'DSTK', 'TSTK', 'VSTK']
    stktimeprops = ['KCELL', 'FLOW', 'PLMHT']

    def __init__(self, rf):
        """
        Initialization included reading the header and learning
        about the format.

        see __readheader and __gettimestep() for more info

        rf - path to (or open) CAMx point source file
        """
        self.rffile = OpenRecordFile(rf)
        self.padded_time_hdr_size = struct.calcsize(self.time_hdr_fmt + "ii")
        self.__readheader()
        self.__gettimestep()
        self.__gettimeprops()
        self.createDimension('TSTEP', self.time_step_count)
        self.createDimension('STK', self.nstk)
        varkeys = [
            'XSTK', 'YSTK', 'HSTK', 'DSTK', 'TSTK', 'VSTK', 'KCELL', 'FLOW',
            'PLMHT'
        ] + [i.strip() for i in self.spcnames]
        self.variables = PseudoNetCDFVariables(self.__var_get, varkeys)

    def __var_get(self, key):
        """Lazily build and populate the variable for key."""
        def decor(*args, **kwds):
            return {'notread': 1}

        values = self.__variables(key)
        if key in self.stkprops:
            # time-invariant stack property: one value per stack
            var = self.createVariable(key, 'f', ('STK', ))
        else:
            var = self.createVariable(key, 'f', ('TSTEP', 'STK'))
        var[:] = values
        for k, v in decor(key).items():
            setattr(var, k, v)
        return var

    def __variables(self, k):
        """Return the values for stack property or species k."""
        if k in self.stkprops:
            return array(self.stk_props)[:, self.stkprops.index(k)]
        elif k in self.stktimeprops:
            # first two columns of each time property record are date/time
            return array(self.stk_time_props)[:, :,
                                              2:][:, :,
                                                  self.stktimeprops.index(k)]
        else:
            return self.getArray()[:, self.spcnames.index(k.ljust(10)), :]

    def header(self):
        """Return the header values as nested lists (for rewriting)."""
        rdum = 0.
        idum = 0
        ione = 1
        return [[
            self.name, self.note, ione, self.nspec, self.start_date,
            self.start_time, self.end_date, self.end_time
        ],
                [
                    rdum, rdum, self.iutm, self.xorg, self.yorg, self.delx,
                    self.dely, self.nx, self.ny, self.nz, idum, idum, rdum,
                    rdum, rdum
                ], [ione, ione, self.nx, self.ny], self.spcnames,
                [ione, self.nstk], self.stk_props, self.stk_time_props]

    def __readheader(self):
        """
        __readheader reads the header section of the point source file
        it initializes each header field (see CAMx Users Manual for a list)
        as properties of the class
        """
        vals = self.rffile.read(self.emiss_hdr_fmt)
        (self.name, self.note, ione, self.nspec, self.start_date,
         self.start_time, self.end_date, self.end_time) = (
            vals[0:10], vals[10:70], vals[70], vals[71], vals[72], vals[73],
            vals[74], vals[75])
        (rdum, rdum, self.iutm, self.xorg, self.yorg, self.delx, self.dely,
         self.nx, self.ny, self.nz, idum, idum, rdum, rdum,
         rdum) = self.rffile.read(self.grid_hdr_fmt)
        if self.nz == 0:
            # Special case of gridded emissions
            # Seems to be same as avrg
            self.nlayers = 1
        else:
            self.nlayers = self.nz
        ione, ione, nx, ny = self.rffile.read(self.cell_hdr_fmt)
        if not (self.nx, self.ny) == (nx, ny):
            raise ValueError(
                "nx, ny defined first as %i, %i and then as %i, %i" %
                (self.nx, self.ny, nx, ny))
        species_temp = self.rffile.read(self.nspec * self.spc_fmt)
        self.spcnames = []
        for i in range(0, self.nspec * 10, 10):
            self.spcnames.append(Int2Asc(species_temp[i:i + 10]))

        ione, self.nstk = self.rffile.read(self.nstk_hdr_fmt)

        stkprms = zeros((self.nstk * len(self.stk_hdr_fmt), ), 'f')
        read_into(self.rffile, stkprms, '')
        self.rffile.next()

        stkprms = stkprms.reshape((self.nstk, len(self.stk_hdr_fmt)))
        # replace the sentinel fill value in the last column with NaN
        for i in range(stkprms.shape[0]):
            if stkprms[i, -1] == array_nan:
                stkprms[i, -1] = float('-nan')
        self.stk_props = stkprms.tolist()
        self.data_start_byte = self.rffile.record_start
        self.start_date, self.start_time, end_date, end_time = \
            self.rffile.read(self.time_hdr_fmt)

        self.time_step = timediff((self.start_date, self.start_time),
                                  (end_date, end_time))
        self.end_time += self.time_step
        self.time_step_count = int(
            timediff((self.start_date, self.start_time),
                     (self.end_date, self.end_time),
                     (2400, 24)[int(self.time_step % 2)]) / self.time_step)

        self.stk_time_prop_fmt = "" + ("iiiff" * self.nstk)
        self.padded_stk_time_prop_size = struct.calcsize(
            "ii" + self.stk_time_prop_fmt)

        self.record_fmt = ("i10i") + self.data_fmt * (self.nstk)
        self.record_size = struct.calcsize(self.record_fmt)
        self.padded_size = self.record_size + 8

    def __gettimestep(self):
        """
        this is taken care of in the readheader routine
        record format provides start and end for each hour,
        which translates to t1 and t2
        """
        pass

    def __gettimeprops(self):
        """Read the time-varying stack properties for every time step."""
        self.stk_time_props = []
        for ti, (d, t) in enumerate(
                timerange((self.start_date, self.start_time),
                          (self.end_date, self.end_time), self.time_step,
                          (2400, 24)[int(self.time_step % 2)])):
            tmpprop = zeros((len(self.stk_time_prop_fmt)), 'f')
            tmpprop[...] = self.seekandread(d, t, 1, True,
                                            self.stk_time_prop_fmt)
            tmpprop = tmpprop.reshape(self.nstk, 5)
            # replace the sentinel fill value in the FLOW column with NaN
            for i in range(tmpprop.shape[0]):
                if tmpprop[i, -2] == array_nan:
                    tmpprop[i, -2] = float('-nan')

            self.stk_time_props.append(tmpprop.tolist())

    def __timerecords(self, dt):
        """
        Calculate the number of records to increment to reach time (d,t)
        """
        d, t = dt
        nsteps = timediff((self.start_date, self.start_time), (d, t),
                          (2400, 24)[int(self.time_step % 2)])
        nspec = self.__spcrecords(self.nspec + 1)
        return nsteps * (nspec)

    def __spcrecords(self, spc):
        """
        Calculated number of records before spc
        """

        return spc - 1

    def __recordposition(self, date, time, spc, offset=False):
        """
        Use time (d,t) and spc to calculate the byte position of the
        desired record

        date - integer julian
        time - float
        spc - integer
        offset - when truthy, back up one stack-time-property block
        """
        ntime = self.__timerecords((date, time))
        # floor division: ntime is nsteps * nspec, so this recovers nsteps
        # exactly while keeping the arithmetic integral
        nhdr = int(ntime // self.__spcrecords(self.nspec + 1)) + 1
        nspc = self.__spcrecords(spc)
        noffset = -abs(int(offset))
        byte = self.data_start_byte
        byte += nhdr * (self.padded_time_hdr_size + self.padded_nstk_hdr_size +
                        self.padded_stk_time_prop_size)
        byte += (ntime + nspc) * self.padded_size
        byte += noffset * self.padded_stk_time_prop_size
        # file offsets must be ints (timediff may yield a float step count)
        return int(byte)

    def seek(self, date=None, time=None, spc=-1, offset=False):
        """
        Move file cursor to the beginning of the specified record
        see __recordposition for parameter definitions
        """
        self.rffile._newrecord(self.__recordposition(date, time, spc, offset))

    def read(self, fmt=None):
        """
        Provide direct access to record file read
        """
        if fmt is None:
            fmt = self.record_fmt
        return self.rffile.read(fmt)

    def read_into(self, dest):
        """
        Transfer values from current record to dest
        dest - numeric or numpy array
        """

        return read_into(self.rffile, dest, self.id_fmt, self.data_fmt)

    def seekandreadinto(self, dest, date=None, time=None, spc=1):
        """
        see seek and read_into
        """

        self.seek(date, time, spc)
        self.read_into(dest)

    def seekandread(self, date=None, time=None, spc=1, offset=False, fmt=None):
        """
        see seek and read
        """
        self.seek(date, time, spc, offset)
        return self.read(fmt)

    def values(self):
        """Yield the record for each (date, time, species) key."""
        for d, t, spc in self.__iter__():
            yield self.seekandread(d, t, spc)

    def items(self):
        """Yield (date, time, species, record) tuples."""
        for d, t, spc in self.__iter__():
            yield d, t, spc, self.seekandread(d, t, spc)

    def keys(self):
        """Yield every (date, time, species) key; species is 1-based."""
        for ti, (d, t) in enumerate(self.timerange()):
            for spc in range(1, len(self.spcnames) + 1):
                yield d, t, spc

    __iter__ = keys

    def getArray(self):
        """Read all emissions into a (TSTEP, SPC, STK) float32 array."""
        a = zeros((self.time_step_count, self.nspec, self.nstk), 'f')
        for ti, (d, t) in enumerate(self.timerange()):
            for spc in range(1, len(self.spcnames) + 1):
                self.seekandreadinto(a[ti, spc - 1, ...], d, t, spc)
        # a is freshly allocated here, so no defensive copy is needed
        return a

    def timerange(self):
        """Return an iterator of the (date, time) pairs in the file."""
        return timerange((self.start_date, self.start_time),
                         (self.end_date, self.end_time),
                         self.time_step,
                         eod=24)
# --- Esempio n. 26 (listing-site example separator; commented out so the file parses) ---
class landuse(PseudoNetCDFFile):
    """
    landuse provides a PseudoNetCDF interface for CAMx
    landuse files.  Where possible, the inteface follows
    IOAPI conventions (see www.baronams.com).
    
    ex:
        >>> landuse_path =  'camx_landuse.bin'
        >>> rows, cols =  65, 83
        >>> landusefile =  landuse(landuse_path, rows, cols)
        >>> landusefile.variables.keys()
        ['TFLAG', 'FLAND', 'TOPO']
        >>> v =  landusefile.variables['FLAND']
        >>> v.dimensions
        ('LANDUSE', 'ROW', 'COL')
        >>> v.shape
        (11, 65, 83)
        >>> landusefile.dimensions
        {'LANDUSE': 11, 'ROW': 65, 'COL': 83}
    """
    def __init__(self, rf, rows, cols, mode='r'):
        """
        rf - path to a CAMx landuse file
        rows, cols - grid dimensions (not stored in the file itself)
        mode - memmap mode ('r' read-only, 'r+' read/write)
        """
        self.__mode = mode
        self._rffile = OpenRecordFile(rf)
        self._rffile.infile.seek(0, 2)
        self.__rf = rf
        rflen = self._rffile.infile.tell()
        self._rffile._newrecord(0)

        self.createDimension('ROW', rows)
        self.createDimension('COL', cols)
        # New-style files begin with a landuse-category keyword record;
        # old-style files have no keyword and always use 11 categories.
        # NOTE(review): on Python 3 read('8s') likely returns bytes, so
        # these comparisons against str may never match — confirm against
        # OpenRecordFile's decoding behavior.
        first_line, = self._rffile.read('8s')
        if first_line == 'LUCAT11 ':
            self.createDimension('LANDUSE', 11)
            self._newstyle = True
        elif first_line == 'LUCAT26 ':
            self.createDimension('LANDUSE', 26)
            self._newstyle = True
        else:
            self.createDimension('LANDUSE', 11)
            self._newstyle = False
        nland = len(self.dimensions['LANDUSE'])
        nrows = len(self.dimensions['ROW'])
        ncols = len(self.dimensions['COL'])
        # Record dtypes model the Fortran unformatted layout: 4-byte pad
        # words (SPAD/EPAD) bracketing each record, plus an 8-char keyword
        # for new-style files.  Big-endian ('>') floats throughout.
        if self._newstyle:
            self.__fland_dtype = dtype(
                dict(names=['SPAD1', 'KEY', 'EPAD1', 'SPAD2', 'DATA', 'EPAD2'],
                     formats=[
                         '>i', '8>S', '>i', '>i',
                         '(%d, %d, %d)>f' % (nland, nrows, ncols), '>i'
                     ]))
            self.__other_dtype = dtype(
                dict(names=['SPAD1', 'KEY', 'EPAD1', 'SPAD2', 'DATA', 'EPAD2'],
                     formats=[
                         '>i', '8>S', '>i', '>i',
                         '(%d, %d)>f' % (nrows, ncols), '>i'
                     ]))
        else:
            self.__fland_dtype = dtype(
                dict(names=['SPAD2', 'DATA', 'EPAD2'],
                     formats=[
                         '>i',
                         '(%d, %d, %d)>f' % (nland, nrows, ncols), '>i'
                     ]))
            self.__other_dtype = dtype(
                dict(names=['SPAD2', 'DATA', 'EPAD2'],
                     formats=['>i', '(%d, %d)>f' % (nrows, ncols), '>i']))

        self.__addvars()
        if self._newstyle:
            self.__keys = [first_line]

        else:
            self.__keys = ['LUCAT11']

    def __addvars(self):
        """
        Infer which optional records the file contains from its total
        size, memory-map it, and expose each record as a variable.
        """
        nrows = len(self.dimensions['ROW'])
        ncols = len(self.dimensions['COL'])
        nland = len(self.dimensions['LANDUSE'])
        self._rffile.infile.seek(0, 2)
        rflen = self._rffile.infile.tell()
        fland_dtype = self.__fland_dtype
        other_dtype = self.__other_dtype
        # Candidate layouts: FLAND only, FLAND + 1 optional record,
        # FLAND + LAI + TOPO.  The file length picks the layout.
        nfland = fland_dtype.itemsize
        nfland1opt = nfland + other_dtype.itemsize
        nfland2opt = nfland + other_dtype.itemsize * 2
        if rflen == nfland:
            file_dtype = dtype(dict(names=['FLAND'], formats=[fland_dtype]))
        elif rflen == nfland1opt:
            file_dtype = dtype(
                dict(names=['FLAND', 'VAR1'],
                     formats=[fland_dtype, other_dtype]))
        elif rflen == nfland2opt:
            file_dtype = dtype(
                dict(names=['FLAND', 'LAI', 'TOPO'],
                     formats=[fland_dtype, other_dtype, other_dtype]))
        else:
            raise IOError('File size is expected to be %d, %d, or %d; was %d' %
                          (nfland, nfland1opt, nfland2opt, rflen))

        data = memmap(self.__rf, mode=self.__mode, dtype=file_dtype, offset=0)
        if not self._newstyle:
            varkeys = ['FLAND', 'TOPO']
        else:
            # new-style records name themselves via the KEY field
            varkeys = [data[k]['KEY'][0].strip() for k in file_dtype.names]
            varkeys = [
                k.decode() if hasattr(k, 'decode') else k for k in varkeys
            ]

        for varkey, dkey in zip(varkeys, file_dtype.names):
            # FLAND is 3-D (per landuse category); all others are 2-D
            var = self.createVariable(varkey, 'f',
                                      {'FLAND': ('LANDUSE', 'ROW', 'COL')
                                       }.get(dkey, ('ROW', 'COL')))
            var[:] = data[dkey]['DATA']
            var.var_desc = varkey.ljust(16)
            var.units = {'FLAND': 'Fraction'}.get(dkey, '')
            var.long_name = varkey.ljust(16)
# --- Esempio n. 27 (listing-site example separator; commented out so the file parses) ---
class irr(PseudoNetCDFFile):
    """
    irr provides a PseudoNetCDF interface for CAMx
    irr files.  Where possible, the interface follows
    IOAPI conventions (see www.baronams.com).

    ex:
        >>> irr_path = 'camx_irr.bin'
        >>> irrfile = irr(irr_path)
        >>> irrfile.variables.keys()
        ['TFLAG', 'RXN_01', 'RXN_02', 'RXN_03', ...]
        >>> v = irrfile.variables['RXN_01']
        >>> tflag = irrfile.variables['TFLAG']
        >>> tflag.dimensions
        ('TSTEP', 'VAR', 'DATE-TIME')
        >>> tflag[0,0,:]
        array([2005185,       0])
        >>> tflag[-1,0,:]
        array([2005185,  240000])
        >>> v.dimensions
        ('TSTEP', 'LAY', 'ROW', 'COL')
        >>> v.shape
        (25, 28, 65, 83)
        >>> irrfile.dimensions
        {'TSTEP': 25, 'LAY': 28, 'ROW': 65, 'COL': 83}
    """

    id_fmt = "if5i"
    data_fmt = "f"

    @classmethod
    def isMine(cls, path):
        """Return True if path appears to be an irr file."""
        return _isMine(path)

    def __init__(self, rf, multi=False):
        """
        Initialization included reading the header and learning
        about the format.

        see __readheader and __gettimestep() for more info
        """
        self.__rffile = OpenRecordFile(rf)
        self.__readheader()
        # One record per (date, time, pagrid, nest, i, j, k) with a float
        # per reaction, bracketed by Fortran pad words SPAD/EPAD.
        irr_record_type = dtype(
            dict(names=(['SPAD', 'DATE', 'TIME', 'PAGRID', 'NEST',
                         'I', 'J', 'K'] +
                        ['RXN_%02d' % i for i in range(1, self.nrxns + 1)] +
                        ['EPAD']),
                 formats=(['>i', '>i', '>f', '>i', '>i', '>i', '>i', '>i'] +
                          ['>f' for i in range(1, self.nrxns + 1)] +
                          ['>i'])))

        varkeys = [i for i in irr_record_type.names[8:-1]] + ['TFLAG']
        self.groups = defaultdict(PseudoNetCDFFile)
        padatatype = []
        pavarkeys = []
        self.createDimension('TSTEP', self.time_step_count)
        self.createDimension('VAR', len(varkeys) - 1)
        self.createDimension('DATE-TIME', 2)
        for di, domain in enumerate(self._padomains):
            dk = 'PA%02d' % (di + 1)
            prefix = dk + '_'
            pavarkeys.extend([prefix + k for k in varkeys])
            grp = self.groups[dk]
            for propk, propv in domain.items():
                setattr(grp, propk, propv)
            grp.createDimension('TSTEP', self.time_step_count)
            grp.createDimension('VAR', len(varkeys) - 1)
            grp.createDimension('DATE-TIME', 2)
            grp.createDimension('COL', domain['iend'] - domain['istart'] + 1)
            grp.createDimension('ROW', domain['jend'] - domain['jstart'] + 1)
            grp.createDimension('LAY', domain['tlay'] - domain['blay'] + 1)
            padatatype.append(
                (dk, irr_record_type,
                 (len(grp.dimensions['ROW']), len(grp.dimensions['COL']),
                  len(grp.dimensions['LAY']))))
            if len(self._padomains) == 1:
                # single-domain files expose the grid dims on the file too
                self.createDimension(
                    'COL', domain['iend'] - domain['istart'] + 1)
                self.createDimension(
                    'ROW', domain['jend'] - domain['jstart'] + 1)
                self.createDimension(
                    'LAY', domain['tlay'] - domain['blay'] + 1)
                for propk, propv in domain.items():
                    setattr(grp, propk, propv)
            # exec builds a per-domain getter closed over dk; the mangled
            # name _irr__variables is required because the string is
            # compiled outside the class body
            exec("""def varget(k):
                return self._irr__variables('%s', k)""" % dk,
                 dict(self=self), locals())
            if len(self._padomains) == 1:
                self.variables = PseudoNetCDFVariables(varget, varkeys)
            else:
                grp.variables = PseudoNetCDFVariables(varget, varkeys)
        self.__memmaps = memmap(self.__rffile.infile.name, dtype(padatatype),
                                'r', self.data_start_byte)

    def __del__(self):
        # best-effort cleanup; newer numpy memmaps may lack close()
        try:
            self.__memmaps.close()
            del self.__memmaps
        except Exception:
            pass

    def __decorator(self, name, pncfv):
        """Attach units/description attributes to a variable."""
        def decor(k):
            return dict(units='ppm/hr', var_desc=k.ljust(16),
                        long_name=k.ljust(16))
        for k, v in decor(name).items():
            setattr(pncfv, k, v)
        return pncfv

    def __variables(self, pk, rxn):
        """Build the TFLAG or reaction-rate variable for domain pk."""
        if rxn == 'TFLAG':
            return ConvertCAMxTime(
                self.__memmaps[pk][:, 0, 0, 0]['DATE'],
                self.__memmaps[pk][:, 0, 0, 0]['TIME'],
                len(self.groups[pk].dimensions['VAR']))
        # stored as (ROW, COL, LAY); swap to (LAY, ROW, COL)
        return self.__decorator(
            rxn,
            PseudoNetCDFVariable(
                self, rxn, 'f', ('TSTEP', 'LAY', 'ROW', 'COL'),
                values=self.__memmaps[pk][rxn].swapaxes(1, 3).swapaxes(2, 3)))

    def __readheader(self):
        """
        __readheader reads the header section of the irr file
        it initializes each header field (see CAMx Users Manual for a list)
        as properties of the irr class
        """
        assert (self.__rffile.record_size == 80)
        self.runmessage = self.__rffile.read("80s")
        (self.start_date, self.start_time, self.end_date,
         self.end_time) = self.__rffile.read("ifif")
        self.time_step = 100.
        self.time_step_count = len([i for i in self.timerange()])
        self._grids = []
        for grid in range(self.__rffile.read("i")[-1]):
            self._grids.append(
                dict(
                    zip(['orgx', 'orgy', 'ncol', 'nrow', 'xsize', 'ysize',
                         'iutm'],
                        self.__rffile.read("iiiiiii"))))

        self._padomains = []
        for padomain in range(self.__rffile.read("i")[-1]):
            self._padomains.append(
                dict(
                    zip(['grid', 'istart', 'iend', 'jstart', 'jend', 'blay',
                         'tlay'],
                        self.__rffile.read("iiiiiii"))))
        self.nrxns = self.__rffile.read('i')[-1]

        self.data_start_byte = self.__rffile.record_start
        self.record_fmt = self.id_fmt + str(self.nrxns) + self.data_fmt
        self.record_size = self.__rffile.record_size
        self.padded_size = self.record_size + 8
        domain = self._padomains[0]
        self.records_per_time = (
            (domain['iend'] - domain['istart'] + 1) *
            (domain['jend'] - domain['jstart'] + 1) *
            (domain['tlay'] - domain['blay'] + 1))
        self.time_data_block = self.padded_size * self.records_per_time
        self.time_step = 100.

    def timerange(self):
        """Return an iterator of the (date, time) pairs in the file."""
        return timerange((self.start_date, self.start_time + self.time_step),
                         timeadd((self.end_date, self.end_time),
                                 (0, self.time_step)), self.time_step)
# --- Esempio n. 28 (listing-site example separator; commented out so the file parses) ---
class ipr(PseudoNetCDFFile):
    """
    ipr provides a PseudoNetCDF interface for CAMx
    ipr files.  Where possible, the interface follows
    IOAPI conventions (see www.baronams.com).

    ex:
        >>> ipr_path = 'camx_ipr.bin'
        >>> iprfile = ipr(ipr_path)
        >>> iprfile.variables.keys()
        ['TFLAG', 'SPAD_O3', 'DATE_O3', 'TIME_O3', 'SPC_O3',
         'PAGRID_O3', 'NEST_O3', 'I_O3', 'J_O3', 'K_O3',
         'INIT_O3', 'CHEM_O3', 'EMIS_O3', 'PTEMIS_O3',
         'PIG_O3', 'WADV_O3', 'EADV_O3', 'SADV_O3', 'NADV_O3',
         'BADV_O3', 'TADV_O3', 'DIL_O3', 'WDIF_O3', 'EDIF_O3',
         'SDIF_O3', 'NDIF_O3', 'BDIF_O3', 'TDIF_O3', 'DDEP_O3',
         'WDEP_O3', 'INORGACHEM_O3', 'ORGACHEM_O3', 'AQACHEM_O3',
         'FCONC_O3', 'UCNV_O3', 'AVOL_O3', 'EPAD_O3']
        >>> v = iprfile.variables['CHEM_O3']
        >>> tflag = iprfile.variables['TFLAG']
        >>> tflag.dimensions
        ('TSTEP', 'VAR', 'DATE-TIME')
        >>> tflag[0,0,:]
        array([2005185,       0])
        >>> tflag[-1,0,:]
        array([2005185,  240000])
        >>> v.dimensions
        ('TSTEP', 'LAY', 'ROW', 'COL')
        >>> v.shape
        (25, 28, 65, 83)
        >>> iprfile.dimensions
        {'TSTEP': 25, 'LAY': 28, 'ROW': 65, 'COL': 83}
    """

    # Fortran record id: date (i), time (f), species name (10s),
    # pagrid, nest, i, j, k (5i); data records are floats
    id_fmt = "if10s5i"
    dt_fmt = "if"
    data_fmt = "f"

    def __init__(self, rf, multi=False, **props):
        """
        Initialization includes reading the header and learning
        about the format.

        rf - path to (or open) CAMx ipr file
        multi - unused; retained for backward compatibility
        props - keywords for projection: P_ALP, P_BET, P_GAM, XCENT,
                YCENT, XORIG, YORIG, XCELL, YCELL

        see __readheader and __gettimestep() for more info
        """
        self.__rffile = OpenRecordFile(rf)
        self.__readheader()
        # The number of process names identifies the record layout:
        # 24 -> single aerosol chemistry term (AERCHEM);
        # 26 -> split aerosol chemistry (INORGACHEM/ORGACHEM/AQACHEM).
        # Formats are big-endian to match the Fortran unformatted file.
        self.__ipr_record_type = {
            24:
            dtype(
                dict(names=[
                    'SPAD', 'DATE', 'TIME', 'SPC', 'PAGRID', 'NEST', 'I', 'J',
                    'K', 'INIT', 'CHEM', 'EMIS', 'PTEMIS', 'PIG', 'WADV',
                    'EADV', 'SADV', 'NADV', 'BADV', 'TADV', 'DIL', 'WDIF',
                    'EDIF', 'SDIF', 'NDIF', 'BDIF', 'TDIF', 'DDEP', 'WDEP',
                    'AERCHEM', 'FCONC', 'UCNV', 'AVOL', 'EPAD'
                ],
                     formats=[
                         '>i', '>i', '>f', '>S10', '>i', '>i', '>i', '>i',
                         '>i', '>f', '>f', '>f', '>f', '>f', '>f', '>f', '>f',
                         '>f', '>f', '>f', '>f', '>f', '>f', '>f', '>f', '>f',
                         '>f', '>f', '>f', '>f', '>f', '>f', '>f', '>i'
                     ])),
            26:
            dtype(
                dict(names=[
                    'SPAD', 'DATE', 'TIME', 'SPC', 'PAGRID', 'NEST', 'I', 'J',
                    'K', 'INIT', 'CHEM', 'EMIS', 'PTEMIS', 'PIG', 'WADV',
                    'EADV', 'SADV', 'NADV', 'BADV', 'TADV', 'DIL', 'WDIF',
                    'EDIF', 'SDIF', 'NDIF', 'BDIF', 'TDIF', 'DDEP', 'WDEP',
                    'INORGACHEM', 'ORGACHEM', 'AQACHEM', 'FCONC', 'UCNV',
                    'AVOL', 'EPAD'
                ],
                     formats=[
                         '>i', '>i', '>f', '>S10', '>i', '>i', '>i', '>i',
                         '>i', '>f', '>f', '>f', '>f', '>f', '>f', '>f', '>f',
                         '>f', '>f', '>f', '>f', '>f', '>f', '>f', '>f', '>f',
                         '>f', '>f', '>f', '>f', '>f', '>f', '>f', '>f', '>f',
                         '>i'
                     ]))
        }[len(self.prcnames)]

        # One variable per (process, species) pair plus id fields and TFLAG
        prcs = [
            'SPAD', 'DATE', 'TIME', 'PAGRID', 'NEST', 'I', 'J', 'K', 'INIT',
            'CHEM', 'EMIS', 'PTEMIS', 'PIG', 'WADV', 'EADV', 'SADV', 'NADV',
            'BADV', 'TADV', 'DIL', 'WDIF', 'EDIF', 'SDIF', 'NDIF', 'BDIF',
            'TDIF', 'DDEP', 'WDEP'
        ] + {
            24: ['AERCHEM'],
            26: ['INORGACHEM', 'ORGACHEM', 'AQACHEM']
        }[len(self.prcnames)] + ['FCONC', 'UCNV', 'AVOL', 'EPAD']
        varkeys = ['_'.join(i) for i in cartesian(prcs, self.spcnames)]
        varkeys += [
            'SPAD', 'DATE', 'TIME', 'PAGRID', 'NEST', 'I', 'J', 'K', 'TFLAG'
        ]
        self.groups = {}
        NSTEPS = len([i_ for i_ in self.timerange()])
        NVARS = len(varkeys)
        self.createDimension('VAR', NVARS)
        self.createDimension('DATE-TIME', 2)
        self.createDimension('TSTEP', NSTEPS)
        padatatype = []
        for di, domain in enumerate(self.padomains):
            dk = 'PA%02d' % di
            grp = self.groups[dk] = PseudoNetCDFFile()
            grp.createDimension('VAR', NVARS)
            grp.createDimension('DATE-TIME', 2)
            grp.createDimension('TSTEP', NSTEPS)
            grp.createDimension('COL', domain['iend'] - domain['istart'] + 1)
            grp.createDimension('ROW', domain['jend'] - domain['jstart'] + 1)
            grp.createDimension('LAY', domain['tlay'] - domain['blay'] + 1)
            padatatype.append(
                (dk, self.__ipr_record_type, (len(grp.dimensions['ROW']),
                                              len(grp.dimensions['COL']),
                                              len(grp.dimensions['LAY']))))
            if len(self.padomains) == 1:
                self.createDimension('COL',
                                     domain['iend'] - domain['istart'] + 1)
                self.createDimension('ROW',
                                     domain['jend'] - domain['jstart'] + 1)
                self.createDimension('LAY',
                                     domain['tlay'] - domain['blay'] + 1)

            # Bind dk at definition time (dk=dk).  The previous
            # exec-into-locals() approach never defines a local name on
            # Python 3 (writes to locals() do not create variables), and a
            # plain closure would late-bind to the final loop value of dk.
            def varget(k, dk=dk):
                return self.__variables(dk, k)

            if len(self.padomains) == 1:
                self.variables = PseudoNetCDFVariables(varget, varkeys)
            else:
                grp.variables = PseudoNetCDFVariables(varget, varkeys)

        # Memory-map the whole file as (TSTEP, SPC) records per domain
        self.__memmaps = memmap(self.__rffile.infile.name, dtype(padatatype),
                                'r', self.data_start_byte).reshape(
                                    NSTEPS, len(self.spcnames))
        for k, v in props.items():
            setattr(self, k, v)
        try:
            add_cf_from_ioapi(self)
        except Exception:
            # projection metadata is optional; proceed without CF attributes
            pass

    def __del__(self):
        try:
            # NOTE(review): numpy.memmap has no public close(); any failure
            # here is deliberately ignored so deletion never raises
            self.__memmaps.close()
            del self.__memmaps
        except Exception:
            pass

    def __decorator(self, name, pncfv):
        """Attach units/var_desc/long_name to pncfv and return it."""
        spc = name.split('_')[-1]
        prc = name.split('_')[0]
        # IPR units are consistent with 'IPR'
        if prc == 'UCNV':
            units = 'm**3/mol'
        elif prc == 'AVOL':
            units = 'm**3'
        else:
            units = get_uamiv_units('IPR', spc)
        for k, v in dict(units=units, var_desc=name.ljust(16),
                         long_name=name.ljust(16)).items():
            setattr(pncfv, k, v)
        return pncfv

    def __variables(self, pk, proc_spc):
        """
        Return a variable for key proc_spc (e.g., 'CHEM_O3' or 'TFLAG')
        from process-analysis domain group pk (e.g., 'PA00').
        """
        if proc_spc in self.__ipr_record_type.names:
            # a bare field name: return that field for the first species
            proc = proc_spc
            proc_spc = proc_spc + '_' + self.spcnames[0]
            return PseudoNetCDFVariable(
                self,
                proc_spc,
                'f', ('TSTEP', 'LAY', 'ROW', 'COL'),
                values=self.__memmaps[pk][:, 0, :, :, :][proc].swapaxes(
                    1, 3).swapaxes(2, 3))
        if proc_spc == 'TFLAG':
            # date/time are constant across space; take the first cell
            thisdate = self.__memmaps[pk][:, 0, :, :, :]['DATE'].swapaxes(
                1, 3).swapaxes(2, 3)[..., 0, 0, 0]
            thistime = self.__memmaps[pk][:, 0, :, :, :]['TIME'].swapaxes(
                1, 3).swapaxes(2, 3)[..., 0, 0, 0]
            return ConvertCAMxTime(thisdate, thistime,
                                   len(self.groups[pk].dimensions['VAR']))
        for k in self.__ipr_record_type.names:
            proc = proc_spc[:len(k)]
            spc = proc_spc[len(k) + 1:]
            if proc == k and spc in self.spcnames:
                spc = self.spcnames.index(spc)
                # stored as (ROW, COL, LAY); swap to (LAY, ROW, COL)
                dvals = self.__memmaps[pk][:, spc][proc].swapaxes(1,
                                                                  3).swapaxes(
                                                                      2, 3)
                return self.__decorator(
                    proc_spc,
                    PseudoNetCDFVariable(self,
                                         proc_spc,
                                         'f', ('TSTEP', 'LAY', 'ROW', 'COL'),
                                         values=dvals))
        raise KeyError('%s is not a valid ipr variable' % proc_spc)

    def __readheader(self):
        """
        __readheader reads the header section of the ipr file
        it initializes each header field (see CAMx Users Manual for a list)
        as properties of the ipr class
        """

        self.runmessage = self.__rffile.read("80s")
        self.start_date, self.start_time, self.end_date, self.end_time = self.__rffile.read(
            "ifif")

        self.grids = []
        for grid in range(self.__rffile.read("i")[-1]):
            self.grids.append(
                dict(
                    zip(['orgx', 'orgy', 'ncol', 'nrow', 'xsize', 'ysize'],
                        self.__rffile.read("iiiiii"))))

        self.spcnames = []
        for spc in range(self.__rffile.read("i")[-1]):
            self.spcnames.append(self.__rffile.read("10s")[-1].strip())

        self.nspec = len(self.spcnames)
        self.padomains = []

        for padomain in range(self.__rffile.read("i")[-1]):
            self.padomains.append(
                dict(
                    zip([
                        'grid', 'istart', 'iend', 'jstart', 'jend', 'blay',
                        'tlay'
                    ], self.__rffile.read("iiiiiii"))))
        self.activedomain = self.padomains[0]
        self.prcnames = []

        for i in range(self.__rffile.read('i')[-1]):
            self.prcnames.append(self.__rffile.read('25s')[-1].strip())

        self.data_start_byte = self.__rffile.record_start
        self.record_fmt = self.id_fmt + str(len(self.prcnames)) + self.data_fmt
        self.record_size = self.__rffile.record_size
        # peek the first data record for SDATE/STIME, then rewind
        self.SDATE, self.STIME, dummy, dummy, dummy, dummy, dummy, dummy = self.__rffile.read(
            self.id_fmt)
        self.__rffile.previous()
        self.TSTEP = 100.
        # each Fortran record is padded by two 4-byte length markers
        self.padded_size = self.record_size + 8
        domain = self.padomains[0]
        self.records_per_time = self.nspec * (
            domain['iend'] - domain['istart'] +
            1) * (domain['jend'] - domain['jstart'] + 1) * (domain['tlay'] -
                                                            domain['blay'] + 1)
        self.time_data_block = self.padded_size * self.records_per_time
        self.time_step = 100.

    def timerange(self):
        """Return an iterator of (date, time) pairs for each output step."""
        return timerange((self.start_date, self.start_time + self.time_step),
                         timeadd((self.end_date, self.end_time),
                                 (0, self.time_step)), self.time_step)
Esempio n. 29
0
class uamiv(PseudoNetCDFFile):
    """
    uamiv provides a PseudoNetCDF interface for CAMx
    uamiv files.  Where possible, the interface follows
    IOAPI conventions (see www.baronams.com).

    ex:
        >>> uamiv_path = 'camx_uamiv.bin'
        >>> rows,cols = 65,83
        >>> uamivfile = uamiv(uamiv_path,rows,cols)
        >>> uamivfile.variables.keys()
        ['TFLAG', 'O3', 'NO', 'NO2', ...]
        >>> tflag = uamivfile.variables['TFLAG']
        >>> tflag.dimensions
        ('TSTEP', 'VAR', 'DATE-TIME')
        >>> tflag[0,0,:]
        array([2005185,       0])
        >>> tflag[-1,0,:]
        array([2005185,  240000])
        >>> v = uamivfile.variables['O3']
        >>> v.dimensions
        ('TSTEP', 'LAY', 'ROW', 'COL')
        >>> v.shape
        (25, 28, 65, 83)
        >>> uamivfile.dimensions
        {'TSTEP': 25, 'LAY': 28, 'ROW': 65, 'COL': 83}
    """

    # Fortran record formats for the uamiv header sections
    emiss_hdr_fmt = "10i60i3ifif"
    grid_hdr_fmt = "ffiffffiiiiifff"
    cell_hdr_fmt = "iiii"
    time_hdr_fmt = "ifif"
    spc_fmt = "10i"
    id_fmt = "i" + spc_fmt
    id_size = struct.calcsize(id_fmt)
    data_fmt = "f"
    ione = 1
    idum = 0
    rdum = 0.

    def __init__(self, rf, chemparam=None):
        """
        Initialization included reading the header and learning
        about the format.

        rf - path to (or open) CAMx uamiv file
        chemparam - optional chemparam file used to identify aerosol
                    species when assigning units

        see __readheader and __gettimestep() for more info
        """

        self.rffile = OpenRecordFile(rf)
        if chemparam is None:
            self._aerosol_names = None
        else:
            self._aerosol_names = get_chemparam_names(chemparam)

        # time header plus the two Fortran record-length markers
        self.padded_time_hdr_size = struct.calcsize(self.time_hdr_fmt + "ii")
        self.__readheader()
        self.__gettimestep()
        self.dimensions = {}
        self.createDimension('LAY', self.nlayers)
        self.createDimension('COL', self.nx)
        self.createDimension('ROW', self.ny)
        self.createDimension('TSTEP', self.time_step_count)
        self.createDimension('DATE-TIME', 2)

        self.variables = PseudoNetCDFVariables(
            self.__var_get, [sn.strip() for sn in self.spcnames])

    def __var_get(self, key):
        """Build, cache, and return the variable named key on demand."""
        units = get_uamiv_units(self.name, key, self._aerosol_names)
        spcnames = [sn.strip() for sn in self.spcnames]
        if self.name == 'EMISSIONS ':
            # emissions are 2-D; add a singleton layer axis
            def constr(spc):
                return self.getArray(
                    nspec=spcnames.index(spc)).squeeze()[:, newaxis, :, :]

            def decor(spc):
                return dict(units=units, var_desc=spc,
                            long_name=spc.ljust(16))
        else:
            ntimes = len(self.dimensions['TSTEP'])
            nlays = len(self.dimensions['LAY'])
            nrows = len(self.dimensions['ROW'])
            ncols = len(self.dimensions['COL'])

            def constr(spc):
                return self.getArray(nspec=spcnames.index(
                    spc)).squeeze().reshape(ntimes, nlays, nrows, ncols)

            def decor(spc):
                return dict(units=units, var_desc=spc.ljust(16),
                            long_name=spc.ljust(16))

        values = constr(key)
        var = self.createVariable(key, 'f', ('TSTEP', 'LAY', 'ROW', 'COL'))
        var[:] = values
        for k, v in decor(key).items():
            setattr(var, k, v)
        return var

    def header(self):
        """Return the four header sections as a nested list."""
        rdum = self.rdum
        idum = self.idum
        ione = self.ione
        return [
            [self.name, self.note, ione, self.nspec, self.start_date,
                self.start_time, self.end_date, self.end_time],
            [rdum, rdum, self.iutm, self.xorg, self.yorg, self.delx, self.dely,
                self.nx, self.ny, self.nz, idum, idum, rdum, rdum, rdum],
            [ione, ione, self.nx, self.ny],
            self.spcnames
        ]

    def __readheader(self):
        """
        __readheader reads the header section of the ipr file
        it initializes each header field (see CAMx Users Manual for a list)
        as properties of the ipr class
        """
        vals = self.rffile.read(self.emiss_hdr_fmt)
        # name and note are stored as one int per character
        self.name = vals[0:10]
        self.note = vals[10:70]
        ione = vals[70]
        self.nspec = vals[71]
        self.start_date, self.start_time = vals[72], vals[73]
        self.end_date, self.end_time = vals[74], vals[75]

        self.name = Int2Asc(self.name)
        self.note = Int2Asc(self.note)

        vals = self.rffile.read(self.grid_hdr_fmt)
        self.rdum, rdum, self.iutm = vals[0:3]
        self.xorg, self.yorg, self.delx, self.dely = vals[3:7]
        self.nx, self.ny, self.nz = vals[7:10]
        idum, self.idum, rdum, rdum, rdum = vals[10:]

        if self.name == 'EMISSIONS ':
            # Special case of gridded emissions
            # Seems to be same as avrg
            self.nlayers = 1
        else:
            self.nlayers = self.nz
        self.ione, ione, nx, ny = self.rffile.read(self.cell_hdr_fmt)
        if not (self.nx, self.ny) == (nx, ny):
            raise ValueError(("nx, ny defined first as %i, %i and then " +
                              "as %i, %i") % (self.nx, self.ny, nx, ny))
        species_temp = self.rffile.read(self.nspec * self.spc_fmt)
        self.spcnames = []
        for i in range(0, self.nspec * 10, 10):
            self.spcnames.append(Int2Asc(species_temp[i:i + 10]))

        self.data_start_byte = self.rffile.record_start
        start_date, start_time, end_date, end_time = self.rffile.read(
            self.time_hdr_fmt)

        self.time_step = timediff(
            (start_date, start_time), (end_date, end_time))
        # odd steps are in HHMM (2400/day); even steps in hours (24/day)
        mystep = (2400, 24)[int(self.time_step % 2)]
        self.time_step_count = int(timediff((self.start_date, self.start_time),
                                            (self.end_date, self.end_time),
                                            mystep) // self.time_step)
        if self.name == 'AIRQUALITY':
            # instantaneous restart files hold a single time
            self.time_step_count = 1
            self.start_date = self.end_date
        self.record_size = self.rffile.record_size
        self.padded_size = self.record_size + 8
        # cells per record = (record bytes - id bytes) / bytes per value
        self.cell_count = (self.record_size - struct.calcsize("i10i")
                           ) // struct.calcsize(self.data_fmt)
        self.record_fmt = ("i10i") + self.data_fmt * (self.cell_count)

    def __gettimestep(self):
        """
        this is taken care of in the readheader routine
        record format provides start and end for each hour,
        which translates to t1 and t2
        """
        pass

    def __timerecords(self, dt):
        """
        Calculate the number of records to increment to reach time (d,t)
        """
        d, t = dt
        nsteps = int(
            timediff((self.start_date, self.start_time), (d, t)) /
            self.time_step)
        nspec = self.__spcrecords(self.nspec + 1)
        return nsteps * nspec

    def __layerrecords(self, k):
        """Calculate the number of records to increment to reach layer k
        """
        return k - 1

    def __spcrecords(self, spc):
        """
        Calculated number of records before spc
        """
        return (spc - 1) * self.__layerrecords(self.nlayers + 1)

    def __recordposition(self, date, time, spc, k):
        """
        Use time (d,t), spc, and k to calculate number of records before
        desired record

        date - integer julian
        time - float
        spc - integer
        k - integer
        """
        ntime = self.__timerecords((date, time))
        nk = self.__layerrecords(k)
        # one time header precedes each time block
        nid = ntime // self.nspec // self.nlayers
        nspec = 0
        if spc != 0:
            nid += 1
            nspec = self.__spcrecords(spc)

        out = (self.data_start_byte + (nspec + nk + ntime) *
               self.padded_size + nid * self.padded_time_hdr_size)
        return out

    def seek(self, date=None, time=None, spc=-1, k=0, chkvar=True):
        """
        Move file cursor to the beginning of the specified record
        see __recordposition for parameter definitions

        chkvar=False disables the time/species range checks.
        """
        spc += 1
        if date is None:
            date = self.start_date
        if time is None:
            time = self.start_time

        # Parenthesized so that chkvar=False really disables both checks;
        # previously `chkvar and A or B` still raised on the B arm.
        out_of_window = (
            timediff((self.end_date, self.end_time), (date, time), 24) > 0 or
            timediff((self.start_date, self.start_time), (date, time), 24) < 0)
        if chkvar and out_of_window:
            raise KeyError(("Gridded emission file includes (%i,%6.1f) " +
                            "thru (%i,%6.1f); you requested (%i,%6.1f)") %
                           (self.start_date, self.start_time,
                            self.end_date, self.end_time, date, time))
        if chkvar and (spc < 1 or spc > self.nspec):
            raise KeyError(("Gridded emission file include species 1 thru " +
                            "%i; you requested %i") % (self.nspec, spc))

        self.rffile._newrecord(self.__recordposition(date, time, spc, k))

    def read(self):
        """
        Provide direct access to record file read
        """
        return self.rffile.read(self.record_fmt)

    def read_into(self, dest):
        """
        Transfer values from current record to dest
        dest - numeric or numpy array
        """
        return read_into(self.rffile, dest, self.id_fmt, self.data_fmt)

    def seekandreadinto(self, dest, date=None, time=None, spc=1, k=1):
        """
        see seek and read_into
        """
        self.seek(date, time, spc, k)
        self.read_into(dest)

    def seekandread(self, date=None, time=None, spc=1, k=1):
        """
        see seek and read
        """
        self.seek(date, time, spc, k)
        return self.read()

    def values(self):
        """Yield the record values for every (date, time, spc, k) key."""
        for d, t, spc, k in self.__iter__():
            yield self.seekandread(d, t, spc, k)

    def items(self):
        """Yield (date, time, spc, k, values) tuples for every record."""
        for d, t, spc, k in self.__iter__():
            yield d, t, spc, k, self.seekandread(d, t, spc, k)

    def keys(self):
        """Yield every (date, time, spc, k) record key in file order."""
        for d, t in self.timerange():
            for spc in range(len(self.spcnames)):
                for k in range(1, self.nlayers + 1):
                    yield d, t, spc, k
    __iter__ = keys

    def close(self):
        """Close the underlying record file."""
        self.rffile.infile.close()

    def getArray(self, krange=slice(1, None), nspec=slice(None),
                 nx=slice(None), ny=slice(None)):
        """Method takes slice arguments. Alternatively, takes a hashable object
        with 2 values (e.g., the list: [0,3]).
        Arguments:
        krange    vertical slice (1 indexed)
        nspec     species  slice (0 indexed)
        nx        column   slice (0 indexed)
        ny        row      slice (0 indexed)
        """
        krange = sliceit(krange)
        nspec = sliceit(nspec)
        nx = sliceit(nx)
        ny = sliceit(ny)

        a = zeros(
            (
                self.time_step_count,
                len(range(*nspec.indices(self.nspec))),
                len(range(*krange.indices(self.nlayers + 1))),
                self.ny,
                self.nx), 'f')
        nlay = self.nlayers
        for ti, (d, t) in enumerate(self.timerange()):
            for sidx, spc in enumerate(range(*nspec.indices(self.nspec))):
                for kidx, k in enumerate(range(*krange.indices(nlay + 1))):
                    self.seekandreadinto(a[ti, sidx, kidx, ...], d, t, spc, k)

        return a[..., ny, nx]

    def timerange(self):
        """Return an iterator of (date, time) pairs for each output step."""
        return timerange((self.start_date, self.start_time),
                         (self.end_date, self.end_time), self.time_step, 24)
Esempio n. 30
0
class temperature(PseudoNetCDFFile):
    """
    temperature provides a PseudoNetCDF interface for CAMx
    temperature files.  Where possible, the interface follows
    IOAPI conventions (see www.baronams.com).

    ex:
        >>> temperature_path = 'camx_temperature.bin'
        >>> rows,cols = 65,83
        >>> temperaturefile = temperature(temperature_path,rows,cols)
        >>> temperaturefile.variables.keys()
        ['TFLAG', 'AIRTEMP', 'SURFTEMP']
        >>> tflag = temperaturefile.variables['TFLAG']
        >>> tflag.dimensions
        ('TSTEP', 'VAR', 'DATE-TIME')
        >>> tflag[0,0,:]
        array([2005185,       0])
        >>> tflag[-1,0,:]
        array([2005185,  240000])
        >>> v = temperaturefile.variables['SURFTEMP']
        >>> v.dimensions
        ('TSTEP', 'ROW', 'COL')
        >>> v.shape
        (25, 65, 83)
        >>> v = temperaturefile.variables['AIRTEMP']
        >>> v.dimensions
        ('TSTEP', 'LAY', 'ROW', 'COL')
        >>> v.shape
        (25, 28, 65, 83)
        >>> temperaturefile.dimensions
        {'TSTEP': 25, 'LAY': 28, 'ROW': 65, 'COL': 83}
    """

    # record id is time (f) then date (i); data values are floats
    id_fmt = 'fi'
    data_fmt = 'f'

    def __init__(self, rf, rows=None, cols=None):
        """
        rf - path to (or open) CAMx temperature file
        rows, cols - grid shape; when both are omitted, the data are
                     treated as one column of cell_count rows
        """
        self.rffile = OpenRecordFile(rf)
        self.id_size = struct.calcsize(self.id_fmt)
        self.__readheader()
        self.__gettimestep()
        if rows is None and cols is None:
            rows = self.cell_count
            cols = 1
        elif rows is None:
            # floor division: dimension sizes must be integers
            rows = self.cell_count // cols
        elif cols is None:
            cols = self.cell_count // rows
        else:
            if cols * rows != self.cell_count:
                raise ValueError(
                    ("The product of cols (%d) and rows (%d) " +
                     "must equal cells (%d)") % (cols, rows, self.cell_count))

        self.createDimension('TSTEP', self.time_step_count)
        self.createDimension('COL', cols)
        self.createDimension('ROW', rows)
        self.createDimension('LAY', self.nlayers)
        self.createDimension('SURF', 1)
        self.variables = PseudoNetCDFVariables(self.__var_get,
                                               ['AIRTEMP', 'SURFTEMP'])

    def __var_get(self, key):
        """Build, cache, and return the variable named key on demand."""
        def decor(k):
            return dict(units='K', var_desc=k.ljust(16), long_name=k.ljust(16))

        def constr(k):
            return self.__variables(k)

        values = constr(key)
        dims = {
            'AIRTEMP': ('TSTEP', 'LAY', 'ROW', 'COL'),
            'SURFTEMP': ('TSTEP', 'SURF', 'ROW', 'COL')
        }[key]
        var = self.createVariable(key, 'f', dims)
        var[:] = values
        for k, v in decor(key).items():
            setattr(var, k, v)
        return var

    def __readheader(self):
        """Derive record sizes and formats from the first two records."""
        self.data_start_byte = 0
        self.rffile._newrecord(0)

        # first record: surface (2-D) temperature field
        self.area_size = self.rffile.record_size
        self.area_count = (self.area_size - self.id_size) // struct.calcsize(
            self.data_fmt)
        self.area_padded_size = self.area_size + 8
        self.area_fmt = self.id_fmt + self.data_fmt * (self.area_count)

        self.start_time, self.start_date = self.rffile.read(self.id_fmt)

        # second record: one air-temperature layer
        self.record_size = self.rffile.record_size
        self.padded_size = self.record_size + 8
        self.cell_count = (self.record_size - self.id_size) // struct.calcsize(
            self.data_fmt)

        self.record_fmt = self.id_fmt + self.data_fmt * (self.cell_count)

    def __gettimestep(self):
        """Count layers per time and derive time step and step count."""
        d, t = date, time = self.start_date, self.start_time
        # advance until the id changes; that many records share one time
        self.nlayers = -1
        while (d, t) == (date, time):
            self.nlayers += 1
            t, d = self.rffile.read(self.id_fmt)
        self.time_step = timediff((self.start_date, self.start_time), (d, t))
        self.rffile.infile.seek(0, 2)
        self.rffile.previous()
        self.end_time, self.end_date = self.rffile.read(self.id_fmt)
        self.time_step_count = int(
            timediff((self.start_date, self.start_time),
                     (self.end_date, self.end_time)) // self.time_step) + 1

    def __variables(self, k):
        """Assemble the full (TSTEP, ...) array for AIRTEMP or SURFTEMP."""
        if k == 'SURFTEMP':
            out = zeros(
                (len(self.dimensions['TSTEP']), 1, len(
                    self.dimensions['ROW']), len(self.dimensions['COL'])), 'f')
            timemaps = self.__surfmaps()
        elif k == 'AIRTEMP':
            out = zeros(
                (len(self.dimensions['TSTEP']), len(self.dimensions['LAY']),
                 len(self.dimensions['ROW']), len(self.dimensions['COL'])),
                'f')
            timemaps = self.__airmaps()
        for i, v in enumerate(timemaps):
            out[i, ...] = v
        return out

    def __surfpos(self):
        """Yield the byte offset of each surface record's data."""
        # 12 = record-length marker (4) + id (time f + date i = 8)
        pos = self.data_start_byte + 12
        inc = self.area_padded_size + self.padded_size * self.nlayers
        self.rffile.infile.seek(0, 2)
        # NOTE(review): relies on OpenRecordFile exposing tell(); confirm
        # this matches infile.tell()
        rflen = self.rffile.tell()
        while pos < rflen:
            yield pos
            pos += inc
        # fall off the loop to finish; raising StopIteration in a
        # generator is a RuntimeError under PEP 479 (Python 3.7+)

    def __surfmaps(self):
        """Yield a (ROW, COL) memmap of each surface temperature record."""
        for pos in self.__surfpos():
            tmpmm = memmap(self.rffile.infile.name, '>f', 'r', pos,
                           (self.area_count, ))
            newshape = [
                len(self.dimensions['ROW']),
                len(self.dimensions['COL'])
            ]
            yield tmpmm.reshape(*newshape)

    def __airpos(self):
        """Yield the byte offset of each time's first air-temp record."""
        pos = self.area_padded_size + self.data_start_byte
        inc = self.area_padded_size + self.padded_size * self.nlayers
        self.rffile.infile.seek(0, 2)
        rflen = self.rffile.tell()
        while pos < rflen:
            yield pos
            pos += inc
        # fall off the loop to finish (see __surfpos)

    def __airmaps(self):
        """Yield a (LAY, ROW, COL) memmap of each air temperature block."""
        for pos in self.__airpos():
            # +4 floats per layer: length markers and id surround the data
            firstshape = ((self.cell_count + 4) * self.nlayers, )
            tmpmm = memmap(self.rffile.infile.name, '>f', 'r', pos, firstshape)
            newshape1 = [self.nlayers, self.cell_count + 4]
            tmpmm = tmpmm.reshape(*newshape1)[:, 3:-1]
            newshape2 = [
                len(self.dimensions['LAY']),
                len(self.dimensions['ROW']),
                len(self.dimensions['COL'])
            ]
            yield tmpmm.reshape(*newshape2)

    def timerange(self):
        """Return an iterator of (date, time) pairs for each output step."""
        return timerange(
            (self.start_date, self.start_time),
            timeadd((self.end_date, self.end_time), (0, self.time_step),
                    (2400, 24)[int(self.time_step % 2)]), self.time_step,
            (2400, 24)[int(self.time_step % 2)])
Esempio n. 31
0
class uamiv(PseudoNetCDFFile):
    """
    uamiv provides a PseudoNetCDF interface for CAMx
    uamiv files.  Where possible, the interface follows
    IOAPI conventions (see www.baronams.com).

    ex:
        >>> uamiv_path = 'camx_uamiv.bin'
        >>> uamivfile = uamiv(uamiv_path)
        >>> uamivfile.variables.keys()
        ['TFLAG', 'O3', 'NO', 'NO2', ...]
        >>> tflag = uamivfile.variables['TFLAG']
        >>> tflag.dimensions
        ('TSTEP', 'VAR', 'DATE-TIME')
        >>> tflag[0,0,:]
        array([2005185,       0])
        >>> tflag[-1,0,:]
        array([2005185,  240000])
        >>> v = uamivfile.variables['O3']
        >>> v.dimensions
        ('TSTEP', 'LAY', 'ROW', 'COL')
        >>> v.shape
        (25, 28, 65, 83)
        >>> uamivfile.dimensions
        {'TSTEP': 25, 'LAY': 28, 'ROW': 65, 'COL': 83}
    """

    # Fortran record formats (see CAMx Users Manual)
    emiss_hdr_fmt = "10i60i3ifif"
    grid_hdr_fmt = "ffiffffiiiiifff"
    cell_hdr_fmt = "iiii"
    time_hdr_fmt = "ifif"
    spc_fmt = "10i"
    id_fmt = "i" + spc_fmt
    id_size = struct.calcsize(id_fmt)
    data_fmt = "f"
    ione = 1
    idum = 0
    rdum = 0.

    def __init__(self, rf):
        """
        Initialization included reading the header and learning
        about the format.

        rf - file path or file-like object of a CAMx uamiv file

        see __readheader and __gettimestep() for more info
        """
        self.rffile = OpenRecordFile(rf)

        self.padded_time_hdr_size = struct.calcsize(self.time_hdr_fmt + "ii")
        self.__readheader()
        self.__gettimestep()
        self.dimensions = {}
        self.createDimension('LAY', self.nlayers)
        self.createDimension('COL', self.nx)
        self.createDimension('ROW', self.ny)
        self.createDimension('TSTEP', self.time_step_count)
        self.createDimension('DATE-TIME', 2)

        # use a list comprehension (not map) so the name sequence is
        # reusable on Python 3
        self.variables = PseudoNetCDFVariables(
            self.__var_get, [sn.strip() for sn in self.spcnames])

    def __var_get(self, key):
        """
        Lazily construct the variable named key from disk values.
        Called by PseudoNetCDFVariables on first access.
        """
        units = get_uamiv_units(self.name, key)
        # materialize as a list so .index works on Python 3
        spcnames = [sn.strip() for sn in self.spcnames]
        if self.name == 'EMISSIONS ':
            # emissions are 2-D; add a singleton LAY axis
            def constr(spc):
                vals = self.getArray(nspec=spcnames.index(spc)).squeeze()
                return vals[:, newaxis, :, :]

            def decor(spc):
                return dict(units=units, var_desc=spc,
                            long_name=spc.ljust(16))
        else:
            def constr(spc):
                newshape = tuple(len(self.dimensions[dk])
                                 for dk in ('TSTEP', 'LAY', 'ROW', 'COL'))
                vals = self.getArray(nspec=spcnames.index(spc)).squeeze()
                return vals.reshape(newshape)

            def decor(spc):
                return dict(units=units, var_desc=spc.ljust(16),
                            long_name=spc.ljust(16))

        values = constr(key)
        var = self.createVariable(key, 'f', ('TSTEP', 'LAY', 'ROW', 'COL'))
        var[:] = values
        for k, v in decor(key).items():
            setattr(var, k, v)
        return var

    def header(self):
        """
        Return the header records as a nested list in file order.
        """
        rdum = self.rdum
        idum = self.idum
        ione = self.ione
        return [
            [self.name, self.note, ione, self.nspec, self.start_date,
             self.start_time, self.end_date, self.end_time],
            [rdum, rdum, self.iutm, self.xorg, self.yorg, self.delx,
             self.dely, self.nx, self.ny, self.nz, idum, idum,
             rdum, rdum, rdum],
            [ione, ione, self.nx, self.ny],
            self.spcnames
        ]

    def __readheader(self):
        """
        __readheader reads the header section of the uamiv file
        it initializes each header field (see CAMx Users Manual for a list)
        as properties of the class
        """
        vals = self.rffile.read(self.emiss_hdr_fmt)
        (self.name, self.note, ione, self.nspec, self.start_date,
         self.start_time, self.end_date, self.end_time) = (
            vals[0:10], vals[10:70], vals[70], vals[71], vals[72],
            vals[73], vals[74], vals[75])

        self.name = Int2Asc(self.name)
        self.note = Int2Asc(self.note)
        (self.rdum, rdum, self.iutm, self.xorg, self.yorg, self.delx,
         self.dely, self.nx, self.ny, self.nz, idum, self.idum,
         rdum, rdum, rdum) = self.rffile.read(self.grid_hdr_fmt)

        if self.name == 'EMISSIONS ':
            # Special case of gridded emissions
            # Seems to be same as avrg
            self.nlayers = 1
        else:
            self.nlayers = self.nz
        self.ione, ione, nx, ny = self.rffile.read(self.cell_hdr_fmt)
        if not (self.nx, self.ny) == (nx, ny):
            raise ValueError(
                "nx, ny defined first as %i, %i and then as %i, %i" %
                (self.nx, self.ny, nx, ny))
        species_temp = self.rffile.read(self.nspec * self.spc_fmt)
        self.spcnames = []
        for i in range(0, self.nspec * 10, 10):
            self.spcnames.append(Int2Asc(species_temp[i:i + 10]))

        self.data_start_byte = self.rffile.record_start
        start_date, start_time, end_date, end_time = self.rffile.read(
            self.time_hdr_fmt)
        self.time_step = timediff((start_date, start_time),
                                  (end_date, end_time))
        self.time_step_count = int(
            timediff((self.start_date, self.start_time),
                     (self.end_date, self.end_time),
                     (2400, 24)[int(self.time_step % 2)]) // self.time_step)
        if self.name == 'AIRQUALITY':
            # instantaneous files hold a single time
            self.time_step_count = 1
            self.start_date = self.end_date
        self.record_size = self.rffile.record_size
        # Fortran records carry an 8-byte (two int) marker overhead
        self.padded_size = self.record_size + 8
        # integer division: cell_count is a count, not a ratio
        self.cell_count = ((self.record_size - struct.calcsize("i10i")) //
                           struct.calcsize(self.data_fmt))
        self.record_fmt = ("i10i") + self.data_fmt * (self.cell_count)

    def __gettimestep(self):
        """
        this is taken care of in the readheader routine
        record format provides start and end for each hour,
        which translates to t1 and t2
        """
        pass

    def __timerecords(self, dt):
        """
        Calculate the number of records to increment to reach time (d,t)
        """
        # unpack the (date, time) tuple argument
        d, t = dt
        nsteps = int(timediff((self.start_date, self.start_time),
                              (d, t)) / self.time_step)
        nspec = self.__spcrecords(self.nspec + 1)
        return nsteps * nspec

    def __layerrecords(self, k):
        """Calculate the number of records to increment to reach layer k
        """
        return k - 1

    def __spcrecords(self, spc):
        """
        Calculated number of records before spc
        """
        return (spc - 1) * self.__layerrecords(self.nlayers + 1)

    def __recordposition(self, date, time, spc, k):
        """
        Use time (d,t), spc, and k to calculate number of records before
        desired record

        date - integer julian
        time - float
        spc - integer
        k - integer
        """
        ntime = self.__timerecords((date, time))
        nk = self.__layerrecords(k)
        # integer division: one time header per time step
        nid = ntime // self.nspec // self.nlayers
        nspec = 0
        if spc != 0:
            nid += 1
            nspec = self.__spcrecords(spc)

        return (self.data_start_byte + (nspec + nk + ntime) *
                self.padded_size + nid * self.padded_time_hdr_size)

    def seek(self, date=None, time=None, spc=-1, k=0, chkvar=True):
        """
        Move file cursor to the beginning of the specified record
        see __recordposition for parameter definitions
        """
        spc += 1
        if date is None:
            date = self.start_date
        if time is None:
            time = self.start_time

        if chkvar and timediff(
                (self.end_date, self.end_time),
                (date, time), 24) > 0 or timediff(
                (self.start_date, self.start_time), (date, time), 24) < 0:
            raise KeyError(
                ("Gridded emission file includes (%i,%6.1f) thru " +
                 "(%i,%6.1f); you requested (%i,%6.1f)") %
                (self.start_date, self.start_time, self.end_date,
                 self.end_time, date, time))
        if chkvar and spc < 1 or spc > self.nspec:
            raise KeyError(
                "Gridded emission file include species 1 thru %i; " +
                "you requested %i" % (self.nspec, spc))

        self.rffile._newrecord(self.__recordposition(date, time, spc, k))

    def read(self):
        """
        Provide direct access to record file read
        """
        return self.rffile.read(self.record_fmt)

    def read_into(self, dest):
        """
        Transfer values from current record to dest
        dest - numeric or numpy array
        """
        return read_into(self.rffile, dest, self.id_fmt, self.data_fmt)

    def seekandreadinto(self, dest, date=None, time=None, spc=1, k=1):
        """
        see seek and read_into
        """
        self.seek(date, time, spc, k)
        self.read_into(dest)

    def seekandread(self, date=None, time=None, spc=1, k=1):
        """
        see seek and read
        """
        self.seek(date, time, spc, k)
        return self.read()

    def values(self):
        for d, t, spc, k in self.__iter__():
            yield self.seekandread(d, t, spc, k)

    def items(self):
        for d, t, spc, k in self.__iter__():
            yield d, t, spc, k, self.seekandread(d, t, spc, k)

    def keys(self):
        for d, t in self.timerange():
            for spc in range(len(self.spcnames)):
                for k in range(1, self.nlayers + 1):
                    yield d, t, spc, k
    __iter__ = keys

    def close(self):
        self.rffile.infile.close()

    def getArray(self, krange=slice(1, None), nspec=slice(None),
                 nx=slice(None), ny=slice(None)):
        """Method takes slice arguments. Alternatively, takes a hashable
        object with 2 values (e.g., the list: [0,3]).
        Arguments:
        krange    vertical slice (1 indexed)
        nspec     species  slice (0 indexed)
        nx        column   slice (0 indexed)
        ny        row      slice (0 indexed)
        """
        krange = sliceit(krange)
        nspec = sliceit(nspec)
        nx = sliceit(nx)
        ny = sliceit(ny)

        a = zeros(
            (self.time_step_count,
             len(range(*nspec.indices(self.nspec))),
             len(range(*krange.indices(self.nlayers + 1))),
             self.ny,
             self.nx), 'f')

        for ti, (d, t) in enumerate(self.timerange()):
            for sidx, spc in enumerate(range(*nspec.indices(self.nspec))):
                for kidx, k in enumerate(
                        range(*krange.indices(self.nlayers + 1))):
                    # numpy allows a single ellipsis per index; use
                    # explicit slices for the trailing axes
                    self.seekandreadinto(a[ti, sidx, kidx, :, :],
                                         d, t, spc, k)

        return a[..., ny, nx]

    def timerange(self):
        return timerange((self.start_date, self.start_time),
                         (self.end_date, self.end_time), self.time_step, 24)
# Esempio n. 32
class wind(PseudoNetCDFFile):
    """
    wind provides a PseudoNetCDF interface for CAMx
    wind files.  Where possible, the interface follows
    IOAPI conventions (see www.baronams.com).

    ex:
        >>> wind_path = 'camx_wind.bin'
        >>> rows,cols = 65,83
        >>> windfile = wind(wind_path,rows,cols)
        >>> windfile.variables.keys()
        ['TFLAG', 'U', 'V']
        >>> v = windfile.variables['V']
        >>> tflag = windfile.variables['TFLAG']
        >>> tflag.dimensions
        ('TSTEP', 'VAR', 'DATE-TIME')
        >>> tflag[0,0,:]
        array([2005185,       0])
        >>> tflag[-1,0,:]
        array([2005185,  240000])
        >>> v.dimensions
        ('TSTEP', 'LAY', 'ROW', 'COL')
        >>> v.shape
        (25, 28, 65, 83)
        >>> windfile.dimensions
        {'TSTEP': 25, 'LAY': 28, 'ROW': 65, 'COL': 83}
    """

    # 12-byte time headers carry an lstagger flag; 8-byte ones do not
    time_hdr_fmts = {12: "fii", 8: "fi"}
    data_fmt = "f"

    def __init__(self, rf, rows=None, cols=None):
        """
        Initialization included reading the header and learning
        about the format.

        rf - file path or file-like object of a CAMx wind file
        rows, cols - grid shape; if omitted, inferred from the record
                     cell count (rows=cells, cols=1 when both omitted)

        see __readheader and __gettimestep() for more info
        """
        self.rffile = OpenRecordFile(rf)
        self.time_hdr_fmt = self.time_hdr_fmts[self.rffile.record_size]

        self.time_hdr_size = struct.calcsize(self.time_hdr_fmt)
        self.padded_time_hdr_size = struct.calcsize("ii" + self.time_hdr_fmt)
        self.__readheader()
        self.__gettimestep()
        if rows is None and cols is None:
            rows = self.cell_count
            cols = 1
        elif rows is None:
            # integer division: a dimension length must be an int
            rows = self.cell_count // cols
        elif cols is None:
            cols = self.cell_count // rows
        else:
            if cols * rows != self.cell_count:
                raise ValueError(
                    ("The product of cols (%d) and rows (%d) " +
                     "must equal cells (%d)") % (cols, rows, self.cell_count))
        self.createDimension('TSTEP', self.time_step_count)
        self.createDimension('COL', cols)
        self.createDimension('ROW', rows)
        self.createDimension('LAY', self.nlayers)

        self.variables = PseudoNetCDFVariables(self.__var_get, ['U', 'V'])

    def __var_get(self, key):
        """
        Lazily construct the U or V variable from disk values.
        """
        def constr(uv):
            return self.getArray()[:, {'U': 0, 'V': 1}[uv], :, :, :].copy()

        def decor(uv):
            return dict(units='m/s',
                        var_desc=uv.ljust(16),
                        long_name=uv.ljust(16))

        values = constr(key)

        var = self.createVariable(key, 'f', ('TSTEP', 'LAY', 'ROW', 'COL'))
        var[:] = values
        for k, v in decor(key).items():
            setattr(var, k, v)
        return var

    def __readheader(self):
        """
        __readheader reads the header section of the wind file
        it initializes each header field (see CAMx Users Manual for a list)
        as properties of the class
        """
        self.data_start_byte = 0
        if self.time_hdr_fmt == 'fii':
            self.start_time, self.start_date, self.lstagger = self.rffile.read(
                self.time_hdr_fmt)
        elif self.time_hdr_fmt == 'fi':
            self.start_time, self.start_date = self.rffile.read(
                self.time_hdr_fmt)
            self.lstagger = None
        else:
            raise NotImplementedError("Header format is unknown")

        self.record_size = self.rffile.record_size
        # Fortran records carry an 8-byte (two int) marker overhead
        self.padded_size = self.record_size + 8
        self.cell_count = self.record_size // struct.calcsize(self.data_fmt)
        self.record_fmt = self.data_fmt * self.cell_count

    def __gettimestep(self):
        """
        Header information provides start and end date, but does not
        indicate the increment between.  This routine reads the first
        and second date/time and initializes variables indicating the
        timestep length and the anticipated number.
        """
        # This is a bit of a hack, but should work:
        # Search for the next record that is the same
        # length as self.padded_time_hdr_size
        #
        # This should be the next date record
        # 1) date - startdate = timestep
        # 2) (record_start - self.padded_time_hdr_size)/self.padded_size
        #     = klayers
        self.rffile._newrecord(0)
        self.rffile.next()
        nlayers = 0
        while not self.rffile.record_size == self.time_hdr_size:
            self.rffile.next()
            nlayers += 1

        # each layer holds a U and a V record; the extra record is skipped
        self.nlayers = (nlayers - 1) // 2

        if self.time_hdr_fmt == "fi":
            time, date = self.rffile.read(self.time_hdr_fmt)
        elif self.time_hdr_fmt == "fii":
            time, date, lstagger = self.rffile.read(self.time_hdr_fmt)

        self.end_time, self.end_date = time, date

        self.time_step = timediff((self.start_date, self.start_time),
                                  (date, time))

        # walk forward until the file ends to find the last time header
        while True:
            try:
                for i in range(self.nlayers * 2 + 1):
                    self.rffile.next()
                if self.rffile.record_size == 8:
                    self.end_time, self.end_date = self.rffile.read("fi")
                elif self.rffile.record_size == 12:
                    self.end_time, self.end_date, lstagger = self.rffile.read(
                        "fii")
                else:
                    # unexpected record size terminates the scan
                    raise KeyError()
            except Exception:
                break

        self.time_step_count = int(
            timediff((self.start_date, self.start_time),
                     (self.end_date, self.end_time)) / self.time_step) + 1

    def __layerrecords(self, k):
        """Number of records to increment to reach layer k."""
        return k - 1

    def __timerecords(self, dt):
        """
        routine returns the number of records to increment from the
        data start byte to find the first time
        """
        d, t = dt
        nsteps = int(
            timediff((self.start_date, self.start_time),
                     (d, t)) / self.time_step)
        nlays = self.__layerrecords(self.nlayers + 1)
        return nsteps * nlays

    def __recordposition(self, date, time, k, duv):
        """
        routine uses pagridrecords, timerecords,irecords,
        jrecords, and krecords multiplied by the fortran padded size
        to return the byte position of the specified record

        date - integer
        time - float
        duv - integer (0=date,1=uwind,2=vwind)
        """
        # avoid shadowing the builtin 'bytes'
        nbytes = self.data_start_byte
        nsteps = self.__timerecords((date, time))
        nbytes += int(nsteps / self.nlayers) * self.padded_time_hdr_size
        nbytes += int(nsteps / self.nlayers) * 12
        # two records (U and V) per layer per time
        nbytes += nsteps * self.padded_size * 2
        if not duv == 0:
            nbytes += self.padded_time_hdr_size
            nbytes += self.__layerrecords(k) * 2 * self.padded_size
        if duv == 2:
            nbytes += self.padded_size
        return nbytes

    def seek(self, date=None, time=None, k=1, uv=1):
        """
        Move file cursor to beginning of specified record
        see __recordposition for a definition of variables
        """
        if date is None:
            date = self.start_date
        if time is None:
            time = self.start_time
        chkvar = True
        if (chkvar and timediff(
            (self.end_date, self.end_time), (date, time)) > 0 or timediff(
                (self.start_date, self.start_time), (date, time)) < 0):
            raise KeyError(("Wind file includes (%i,%6.1f) thru (%i,%6.1f); " +
                            "you requested (%i,%6.1f)") %
                           (self.start_date, self.start_time, self.end_date,
                            self.end_time, date, time))
        if chkvar and uv < 0 or uv > 2:
            raise KeyError("Wind file includes Date (uv: 0), u velocity " +
                           "(uv: 1) and v velocity (uv: 2); you requested %i" %
                           (uv))

        self.rffile._newrecord(self.__recordposition(date, time, k, uv))

    def read(self):
        """
        provide direct access to the underlying RecordFile read
        method
        """
        return self.rffile.read(self.record_fmt)

    def read_into(self, dest):
        """
        put values from rffile read into dest
        dest - numpy or numeric array
        """
        return read_into(self.rffile, dest, "", self.data_fmt)

    def seekandreadinto(self, dest, date=None, time=None, k=1, duv=1):
        """
        see seek and read_into
        """
        self.seek(date, time, k, duv)
        self.read_into(dest)

    def seekandread(self, date=None, time=None, k=1, duv=1):
        """
        see seek and read
        """
        self.seek(date, time, k, duv)
        return self.read()

    def keys(self):
        for d, t in timerange((self.start_date, self.start_time),
                              timeadd((self.end_date, self.end_time),
                                      (0, self.time_step)), self.time_step):
            for k in range(1, self.nlayers + 1):
                yield d, t, k

    def values(self):
        for d, t, k in self.keys():
            yield self.seekandread(d, t, k, 1), self.seekandread(d, t, k, 2)

    def items(self):
        for d, t, k in self.keys():
            y1, y2 = self.seekandread(d, t, k, 1), self.seekandread(d, t, k, 2)
            yield d, t, k, y1, y2

    __iter__ = keys

    def getArray(self, krange=slice(1, None)):
        """
        Return a (TSTEP, UV, LAY, ROW, COL) float array; krange is a
        1-indexed vertical slice (int or tuple also accepted).
        """
        if type(krange) != slice:
            if type(krange) == tuple:
                krange = slice(*krange)
            if type(krange) == int:
                krange = slice(krange, krange + 1)
        a = zeros((
            self.time_step_count,
            2,
            len(range(*krange.indices(self.nlayers + 1))),
            len(self.dimensions['ROW']),
            len(self.dimensions['COL']),
        ), 'f')
        nlay = self.nlayers
        for i, (d, t) in enumerate(self.timerange()):
            for uv in range(1, 3):
                # index the output with the enumerate counter; the old
                # ki = k - 1 override overran the array for krange.start > 1
                for ki, k in enumerate(range(*krange.indices(nlay + 1))):
                    self.seekandreadinto(a[i, uv - 1, ki, :, :], d, t, k, uv)
        return a

    def timerange(self):
        return timerange((self.start_date, self.start_time),
                         timeadd((self.end_date, self.end_time),
                                 (0, self.time_step)), self.time_step)
# Esempio n. 33
class one3d(PseudoNetCDFFile):
    """
    one3d provides a PseudoNetCDF interface for CAMx
    one3d files.  Where possible, the interface follows
    IOAPI conventions (see www.baronams.com).

    ex:
        >>> one3d_path = 'camx_one3d.bin'
        >>> rows,cols = 65,83
        >>> one3dfile = one3d(one3d_path,rows,cols)
        >>> one3dfile.variables.keys()
        ['TFLAG', 'UNKNOWN']
        >>> tflag = one3dfile.variables['TFLAG']
        >>> tflag.dimensions
        ('TSTEP', 'VAR', 'DATE-TIME')
        >>> tflag[0,0,:]
        array([2005185,       0])
        >>> tflag[-1,0,:]
        array([2005185,  240000])
        >>> v = one3dfile.variables['UNKNOWN']
        >>> v.dimensions
        ('TSTEP', 'LAY', 'ROW', 'COL')
        >>> v.shape
        (25, 28, 65, 83)
        >>> one3dfile.dimensions
        {'TSTEP': 25, 'LAY': 28, 'ROW': 65, 'COL': 83}
    """

    id_fmt = "fi"
    data_fmt = "f"
    # subclasses override these to name the single variable
    var_name = "UNKNOWN"
    units = "UNKNOWN"

    def __init__(self, rf, rows=None, cols=None):
        """
        Initialization included reading the header and learning
        about the format.

        rf - file path or file-like object of a CAMx one-variable 3-D file
        rows, cols - grid shape; if omitted, inferred from the record
                     cell count (rows=cells, cols=1 when both omitted)

        see __readheader and __gettimestep() for more info
        """
        self.rffile = OpenRecordFile(rf)

        self.id_size = struct.calcsize(self.id_fmt)
        self.__readheader()
        self.__gettimestep()
        if rows is None and cols is None:
            rows = self.cell_count
            cols = 1
        elif rows is None:
            # integer division: a dimension length must be an int
            rows = self.cell_count // cols
        elif cols is None:
            cols = self.cell_count // rows
        else:
            if cols * rows != self.cell_count:
                raise ValueError(
                    ("The product of cols (%d) and rows (%d) " +
                     "must equal cells (%d)") % (cols, rows, self.cell_count))

        self.createDimension('TSTEP', self.time_step_count)
        self.createDimension('COL', cols)
        self.createDimension('ROW', rows)
        self.createDimension('LAY', self.nlayers)

        self.variables = PseudoNetCDFVariables(self.__var_get,
                                               [self.var_name])

    def __var_get(self, key):
        """
        Lazily construct the single variable from disk values.
        """
        def constr(*args, **kwds):
            return self.getArray()

        def decor(*args):
            return dict(units=self.units,
                        var_desc=self.var_name.ljust(16),
                        long_name=self.var_name.ljust(16))

        values = constr(key)

        var = self.createVariable(key, 'f', ('TSTEP', 'LAY', 'ROW', 'COL'))
        var[:] = values
        for k, v in decor(key).items():
            setattr(var, k, v)
        return var

    def __readheader(self):
        """
        __readheader reads the header section of the vertical diffusivity
        file; it initializes each header field (see CAMx Users Manual for
        a list) as properties of the class
        """
        self.data_start_byte = 0
        self.start_time, self.start_date = self.rffile.read(self.id_fmt)
        self.record_size = self.rffile.record_size
        # Fortran records carry an 8-byte (two int) marker overhead
        self.padded_size = self.record_size + 8
        self.cell_count = ((self.record_size - self.id_size) //
                           struct.calcsize(self.data_fmt))
        self.record_fmt = self.id_fmt + self.data_fmt * (self.cell_count)

    def __gettimestep(self):
        """
        Header information provides start and end date, but does not
        indicate the increment between.  This routine reads the first
        and second date/time and initializes variables indicating the
        timestep length and the anticipated number.
        """
        self.rffile._newrecord(self.padded_size)
        d, t = self.start_date, self.start_time
        self.nlayers = 0
        # count layer records until the date/time changes
        while timediff((self.start_date, self.start_time), (d, t)) == 0:
            t, d = self.rffile.read(self.id_fmt)
            self.nlayers += 1
        self.time_step = timediff((self.start_date, self.start_time), (d, t))

        # advance until seek fails to locate the final time
        while True:
            try:
                self.seek(d, t, 1, False)
                d, t = timeadd((d, t), (0, self.time_step))
            except Exception:
                break
        self.end_date, self.end_time = timeadd((d, t), (0, -self.time_step))
        self.time_step_count = int(
            timediff((self.start_date, self.start_time),
                     (self.end_date, self.end_time)) / self.time_step) + 1

    def __timerecords(self, dt):
        """
        routine returns the number of records to increment from the
        data start byte to find the first time
        """
        d, t = dt
        nsteps = int(timediff((self.start_date, self.start_time),
                              (d, t)) / self.time_step)
        nk = self.__layerrecords(self.nlayers + 1)
        return nsteps * nk

    def __layerrecords(self, k):
        """
        routine returns the number of records to increment from the
        data start byte to find the first klayer
        """
        return k - 1

    def __recordposition(self, date, time, k):
        """
        routine uses timerecords and layerrecords multiplied
        by the fortran padded size to return the byte position of the
        specified record

        date - integer
        time - float
        k - integer
        """
        ntime = self.__timerecords((date, time))
        nk = self.__layerrecords(k)
        return (nk + ntime) * self.padded_size + self.data_start_byte

    def seek(self, date=None, time=None, k=1, chkvar=True):
        """
        Move file cursor to beginning of specified record
        see __recordposition for a definition of variables
        """
        if date is None:
            date = self.start_date
        if time is None:
            time = self.start_time

        if chkvar and timediff(
                (self.end_date, self.end_time), (date, time)) > 0 or timediff(
                (self.start_date, self.start_time), (date, time)) < 0:
            raise KeyError(
                ("Vertical Diffusivity file includes (%i,%6.1f) thru " +
                 "(%i,%6.1f); you requested (%i,%6.1f)") %
                (self.start_date, self.start_time, self.end_date,
                 self.end_time, date, time))
        if chkvar and k < 1 or k > self.nlayers:
            raise KeyError(
                "Vertical Diffusivity file include layers 1 thru %i; " +
                "you requested %i" % (self.nlayers, k))
        self.rffile._newrecord(self.__recordposition(date, time, k))

    def read(self):
        """
        provide direct access to the underlying RecordFile read
        method
        """
        return self.rffile.read(self.record_fmt)

    def read_into(self, dest):
        """
        put values from rffile read into dest
        dest - numpy or numeric array
        """
        return read_into(self.rffile, dest, self.id_fmt, self.data_fmt)

    def seekandreadinto(self, dest, date=None, time=None, k=1):
        """
        see seek and read_into
        """
        self.seek(date, time, k)
        return self.read_into(dest)

    def seekandread(self, date=None, time=None, k=1):
        """
        see seek and read
        """
        self.seek(date, time, k)
        return self.read()

    def values(self):
        for d, t, k in self.__iter__():
            yield self.seekandread(d, t, k)

    def items(self):
        for d, t, k in self.__iter__():
            yield d, t, k, self.seekandread(d, t, k)

    def keys(self):
        for d, t in self.timerange():
            for k in range(1, self.nlayers + 1):
                yield d, t, k
    __iter__ = keys

    def getArray(self):
        """
        Return the full (TSTEP, LAY, ROW, COL) float array.
        """
        a = zeros((self.time_step_count, self.nlayers,
                   len(self.dimensions['ROW']),
                   len(self.dimensions['COL'])), 'f')
        for ti, (d, t) in enumerate(self.timerange()):
            for ki, k in enumerate(range(1, self.nlayers + 1)):
                # numpy allows a single ellipsis per index; use explicit
                # slices for the trailing axes
                self.seekandreadinto(a[ti, ki, :, :], d, t, k)
        return a

    def timerange(self):
        return timerange((self.start_date, self.start_time),
                         (self.end_date, self.end_time + self.time_step),
                         self.time_step)
# Esempio n. 34
class ipr(PseudoNetCDFFile):
    """
    ipr provides a PseudoNetCDF interface for CAMx
    ipr files.  Where possible, the inteface follows
    IOAPI conventions (see www.baronams.com).
    
    ex:
        >>> ipr_path = 'camx_ipr.bin'
        >>> iprfile = ipr(ipr_path)
        >>> iprfile.variables.keys()
        ['TFLAG', 'SPAD_O3', 'DATE_O3', 'TIME_O3', 'SPC_O3', 
         'PAGRID_O3', 'NEST_O3', 'I_O3', 'J_O3', 'K_O3', 
         'INIT_O3', 'CHEM_O3', 'EMIS_O3', 'PTEMIS_O3', 
         'PIG_O3', 'WADV_O3', 'EADV_O3', 'SADV_O3', 'NADV_O3', 
         'BADV_O3', 'TADV_O3', 'DIL_O3', 'WDIF_O3', 'EDIF_O3', 
         'SDIF_O3', 'NDIF_O3', 'BDIF_O3', 'TDIF_O3', 'DDEP_O3', 
         'WDEP_O3', 'INORGACHEM_O3', 'ORGACHEM_O3', 'AQACHEM_O3', 
         'FCONC_O3', 'UCNV_O3', 'AVOL_O3', 'EPAD_O3']
        >>> v = iprfile.variables['CHEM_O3']
        >>> tflag = iprfile.variables['TFLAG']
        >>> tflag.dimensions
        ('TSTEP', 'VAR', 'DATE-TIME')
        >>> tflag[0,0,:]
        array([2005185,       0])
        >>> tflag[-1,0,:]
        array([2005185,  240000])
        >>> v.dimensions
        ('TSTEP', 'LAY', 'ROW', 'COL')
        >>> v.shape
        (25, 28, 65, 83)
        >>> iprfile.dimensions
        {'TSTEP': 25, 'LAY': 28, 'ROW': 65, 'COL': 83}
    """
    
    # Fortran record formats: id_fmt is the per-record id header
    # (date int, time float, 10-char species name, five ints);
    # data_fmt is the per-process float payload element.
    id_fmt="if10s5i"
    dt_fmt="if"
    data_fmt="f"
    
    def __init__(self,rf,multi=False, **props):
        """
        Initialization included reading the header and learning
        about the format.
        
        see __readheader and __gettimestep() for more info

        Keywords (i.e., props) for projection: P_ALP, P_BET, P_GAM, XCENT, YCENT, XORIG, YORIG, XCELL, YCELL
        """
        self.__rffile=OpenRecordFile(rf)
        self.__readheader()
        # Two IPR record layouts exist, selected by the number of
        # process names found in the header: 24 carries a single
        # AERCHEM term; 26 splits aerosol chemistry into
        # INORGACHEM/ORGACHEM/AQACHEM.  Any other count raises
        # KeyError here.
        self.__ipr_record_type={
            24: dtype(
                        dict(
                            names=['SPAD', 'DATE', 'TIME', 'SPC', 'PAGRID', 'NEST', 'I', 'J', 'K', 
                                    'INIT', 'CHEM', 'EMIS', 'PTEMIS', 'PIG', 'WADV', 'EADV', 'SADV', 
                                    'NADV', 'BADV', 'TADV', 'DIL', 'WDIF', 'EDIF', 'SDIF', 'NDIF', 
                                    'BDIF', 'TDIF', 'DDEP', 'WDEP', 'AERCHEM', 'FCONC', 'UCNV', 'AVOL', 
                                    'EPAD'], 
                            formats=['>i', '>i', '>f', '>S10', '>i', '>i', '>i', '>i', '>i', 
                                    '>f', '>f', '>f', '>f', '>f', '>f', '>f', '>f', '>f',
                                    '>f', '>f', '>f', '>f', '>f', '>f', '>f', '>f', '>f',
                                    '>f', '>f', '>f', '>f', '>f', '>f', '>i'])),
            26: dtype(
                        dict(
                            names=['SPAD', 'DATE', 'TIME', 'SPC', 'PAGRID', 'NEST', 'I', 'J', 'K', 
                                    'INIT', 'CHEM', 'EMIS', 'PTEMIS', 'PIG', 'WADV', 'EADV', 'SADV', 
                                    'NADV', 'BADV', 'TADV', 'DIL', 'WDIF', 'EDIF', 'SDIF', 'NDIF', 
                                    'BDIF', 'TDIF', 'DDEP', 'WDEP', 'INORGACHEM', 'ORGACHEM', 'AQACHEM', 'FCONC', 'UCNV', 'AVOL', 
                                    'EPAD'], 
                            formats=['>i', '>i', '>f', '>S10', '>i', '>i', '>i', '>i', '>i', 
                                    '>f', '>f', '>f', '>f', '>f', '>f', '>f', '>f', '>f',
                                    '>f', '>f', '>f', '>f', '>f', '>f', '>f', '>f', '>f',
                                    '>f', '>f', '>f', '>f', '>f', '>f', '>f', '>f', '>i']))
                                }[len(self.prcnames)]

        # Process-name prefixes for exported variables; the aerosol
        # chemistry term(s) depend on the layout chosen above.
        prcs=['SPAD', 'DATE', 'TIME', 'PAGRID', 'NEST', 'I', 'J', 'K', 
                'INIT', 'CHEM', 'EMIS', 'PTEMIS', 'PIG', 'WADV', 'EADV', 'SADV', 
                'NADV', 'BADV', 'TADV', 'DIL', 'WDIF', 'EDIF', 'SDIF', 'NDIF', 
                'BDIF', 'TDIF', 'DDEP', 'WDEP']+{24: ['AERCHEM'], 26: ['INORGACHEM', 'ORGACHEM', 'AQACHEM']}[len(self.prcnames)]+['FCONC', 'UCNV', 'AVOL', 
                'EPAD']
        # One variable per PROCESS_SPECIES pair, plus bare id fields
        # and TFLAG.
        varkeys=['_'.join(i) for i in cartesian(prcs,self.spcnames)]
        varkeys+=['SPAD','DATE','TIME','PAGRID','NEST','I','J','K','TFLAG']
        self.groups = {}
        NSTEPS = len([i_ for i_ in self.timerange()])
        NVARS = len(varkeys)
        self.createDimension('VAR', NVARS)
        self.createDimension('DATE-TIME', 2)
        self.createDimension('TSTEP', NSTEPS)
        padatatype = []
        pavarkeys = []
        # One PseudoNetCDF group per process-analysis (PA) domain;
        # with a single domain, variables are also exposed at file
        # level instead of only on the group.
        for di, domain in enumerate(self.padomains):
            dk = 'PA%02d' % di
            prefix = dk + '_'
            grp = self.groups[dk] = PseudoNetCDFFile()
            pavarkeys.extend([prefix + k for k in varkeys])
            grp.createDimension('VAR', NVARS)
            grp.createDimension('DATE-TIME', 2)
            grp.createDimension('TSTEP', NSTEPS)
            grp.createDimension('COL', domain['iend'] - domain['istart'] + 1)
            grp.createDimension('ROW', domain['jend'] - domain['jstart'] + 1)
            grp.createDimension('LAY', domain['tlay'] - domain['blay'] + 1)
            padatatype.append((dk, self.__ipr_record_type, (len(grp.dimensions['ROW']), len(grp.dimensions['COL']), len(grp.dimensions['LAY']))))
            if len(self.padomains) == 1:
                self.createDimension('COL', domain['iend']-domain['istart']+1)
                self.createDimension('ROW', domain['jend']-domain['jstart']+1)
                self.createDimension('LAY', domain['tlay']-domain['blay']+1)
            # exec builds a per-domain getter closing over this dk;
            # '_ipr__variables' is the name-mangled spelling of the
            # private __variables method.
            exec("""def varget(k):
                return self._ipr__variables('%s', k)""" % dk, dict(self = self), locals())
            if len(self.padomains) == 1:
                self.variables = PseudoNetCDFVariables(varget,varkeys)
            else:
                grp.variables = PseudoNetCDFVariables(varget,varkeys)
        
        # Memory-map the whole data section as one structured record
        # per (step, species): fields per PA domain, each shaped
        # (ROW, COL, LAY).
        self.__memmaps=memmap(self.__rffile.infile.name,dtype(padatatype),'r',self.data_start_byte).reshape(NSTEPS, len(self.spcnames))
        for k, v in props.items():
            setattr(self, k, v)
        try:
            add_cf_from_ioapi(self)
        except:
            # NOTE(review): bare except silently skips CF metadata on
            # any failure (best-effort); consider narrowing.
            pass

    def __del__(self):
        """Best-effort release of the memory map at collection time."""
        try:
            self.__memmaps.close()
            del self.__memmaps
        except:
            # NOTE(review): modern numpy.memmap has no close(); this
            # likely always lands here and the del is skipped — verify.
            pass

    def __decorator(self,name,pncfv):
        """
        Attach units/var_desc/long_name attributes to pncfv, derived
        from the PROCESS_SPECIES variable name; returns pncfv.
        """
        spc = name.split('_')[-1]
        prc = name.split('_')[0]
        # IPR units are consistent with 'IPR'
        if prc == 'UCNV':
            units = 'm**3/mol'
        elif prc == 'AVOL':
            units = 'm**3'
        else:
            units = get_uamiv_units('IPR', spc)
        decor=lambda k: dict(units=units, var_desc=k.ljust(16), long_name=k.ljust(16))
        for k,v in decor(name).items():
            setattr(pncfv,k,v)        
        return pncfv
        
    def __variables(self,pk, proc_spc):
        """
        Resolve variable proc_spc (a bare process name, 'TFLAG', or a
        PROCESS_SPECIES pair) for PA-domain group pk from the memmap.
        Raises KeyError for unknown names.
        """
        # Bare process name: return that field for the first species.
        if proc_spc in self.__ipr_record_type.names:
            proc=proc_spc
            proc_spc=proc_spc+'_'+self.spcnames[0]
            # swapaxes pair reorders (TSTEP, ROW, COL, LAY) ->
            # (TSTEP, LAY, ROW, COL)
            return PseudoNetCDFVariable(self,proc_spc,'f',('TSTEP','LAY','ROW','COL'),values=self.__memmaps[pk][:,0,:,:,:][proc].swapaxes(1, 3).swapaxes(2, 3))
        if proc_spc=='TFLAG':
            thisdate = self.__memmaps[pk][:,0,:,:,:]['DATE'].swapaxes(1, 3).swapaxes(2, 3)[..., 0, 0, 0]
            thistime = self.__memmaps[pk][:,0,:,:,:]['TIME'].swapaxes(1, 3).swapaxes(2, 3)[..., 0, 0, 0]
            return ConvertCAMxTime(thisdate, thistime, len(self.groups[pk].dimensions['VAR']))
        # PROCESS_SPECIES pair: split on the known process prefix.
        for k in self.__ipr_record_type.names:
            proc=proc_spc[:len(k)]
            spc=proc_spc[len(k)+1:]
            if proc==k and spc in self.spcnames:
                spc=self.spcnames.index(spc)
                dvals = self.__memmaps[pk][:,spc][proc].swapaxes(1, 3).swapaxes(2, 3)
                return self.__decorator(proc_spc,PseudoNetCDFVariable(self,proc_spc,'f',('TSTEP','LAY','ROW','COL'),values=dvals))
        raise KeyError("Bad!")
                
                
    def __readheader(self):
        """
        __readheader reads the header section of the ipr file
        it initializes each header field (see CAMx Users Manual for a list)
        as properties of the ipr class
        """
        
        self.runmessage=self.__rffile.read("80s")
        self.start_date,self.start_time,self.end_date,self.end_time=self.__rffile.read("ifif")
        
        # One record per grid: origin, size and cell counts.
        self.grids=[]
        for grid in range(self.__rffile.read("i")[-1]):
            self.grids.append(
                            dict(
                                zip(
                                    ['orgx','orgy','ncol','nrow','xsize','ysize'], 
                                    self.__rffile.read("iiiiii")
                                    )
                                )
                            )
        
        # Species names are 10-character Fortran strings; strip pads.
        self.spcnames = []
        for spc in range(self.__rffile.read("i")[-1]):
            self.spcnames.append(self.__rffile.read("10s")[-1].strip())
            
        self.nspec=len(self.spcnames)
        self.padomains=[]
        
        # Process-analysis sub-domain windows (i/j/layer bounds).
        for padomain in range(self.__rffile.read("i")[-1]):
            self.padomains.append(
                                dict(
                                    zip(
                                        ['grid','istart','iend','jstart','jend','blay','tlay'],
                                        self.__rffile.read("iiiiiii")
                                        )
                                    )
                                )
        self.activedomain=self.padomains[0]
        self.prcnames=[]
        
        for i in range(self.__rffile.read('i')[-1]):
            self.prcnames.append(self.__rffile.read('25s')[-1].strip())
        
        # Peek at the first data record for SDATE/STIME, then rewind
        # so data reading starts from the beginning.
        self.data_start_byte=self.__rffile.record_start
        self.record_fmt=self.id_fmt + str(len(self.prcnames)) + self.data_fmt
        self.record_size=self.__rffile.record_size
        self.SDATE,self.STIME,dummy,dummy,dummy,dummy,dummy,dummy=self.__rffile.read(self.id_fmt)
        self.__rffile.previous()
        self.TSTEP=100.
        # +8 accounts for the Fortran record length pre/post markers.
        self.padded_size=self.record_size+8
        domain=self.padomains[0]
        self.records_per_time=self.nspec*(domain['iend']-domain['istart']+1)*(domain['jend']-domain['jstart']+1)*(domain['tlay']-domain['blay']+1)
        self.time_data_block=self.padded_size*self.records_per_time
        self.time_step=100.

    def timerange(self):
        """Iterate output (date, time) stamps; IPR stamps appear to be
        period-ending (first is start + one step) — verify against data."""
        return timerange((self.start_date,self.start_time+self.time_step),timeadd((self.end_date,self.end_time),(0,self.time_step)),self.time_step)
Esempio n. 35
0
    def __init__(self, rf, multi=False, **props):
        """
        Initialization included reading the header and learning
        about the format.
        
        see __readheader and __gettimestep() for more info

        Keywords (i.e., props) for projection: P_ALP, P_BET, P_GAM, XCENT, YCENT, XORIG, YORIG, XCELL, YCELL
        """
        self.__rffile = OpenRecordFile(rf)
        self.__readheader()
        # Two IPR record layouts exist, selected by the number of
        # process names in the header: 24 carries a single AERCHEM
        # term; 26 splits aerosol chemistry into INORGACHEM/ORGACHEM/
        # AQACHEM.  Any other count raises KeyError here.
        self.__ipr_record_type = {
            24:
            dtype(
                dict(names=[
                    'SPAD', 'DATE', 'TIME', 'SPC', 'PAGRID', 'NEST', 'I', 'J',
                    'K', 'INIT', 'CHEM', 'EMIS', 'PTEMIS', 'PIG', 'WADV',
                    'EADV', 'SADV', 'NADV', 'BADV', 'TADV', 'DIL', 'WDIF',
                    'EDIF', 'SDIF', 'NDIF', 'BDIF', 'TDIF', 'DDEP', 'WDEP',
                    'AERCHEM', 'FCONC', 'UCNV', 'AVOL', 'EPAD'
                ],
                     formats=[
                         '>i', '>i', '>f', '>S10', '>i', '>i', '>i', '>i',
                         '>i', '>f', '>f', '>f', '>f', '>f', '>f', '>f', '>f',
                         '>f', '>f', '>f', '>f', '>f', '>f', '>f', '>f', '>f',
                         '>f', '>f', '>f', '>f', '>f', '>f', '>f', '>i'
                     ])),
            26:
            dtype(
                dict(names=[
                    'SPAD', 'DATE', 'TIME', 'SPC', 'PAGRID', 'NEST', 'I', 'J',
                    'K', 'INIT', 'CHEM', 'EMIS', 'PTEMIS', 'PIG', 'WADV',
                    'EADV', 'SADV', 'NADV', 'BADV', 'TADV', 'DIL', 'WDIF',
                    'EDIF', 'SDIF', 'NDIF', 'BDIF', 'TDIF', 'DDEP', 'WDEP',
                    'INORGACHEM', 'ORGACHEM', 'AQACHEM', 'FCONC', 'UCNV',
                    'AVOL', 'EPAD'
                ],
                     formats=[
                         '>i', '>i', '>f', '>S10', '>i', '>i', '>i', '>i',
                         '>i', '>f', '>f', '>f', '>f', '>f', '>f', '>f', '>f',
                         '>f', '>f', '>f', '>f', '>f', '>f', '>f', '>f', '>f',
                         '>f', '>f', '>f', '>f', '>f', '>f', '>f', '>f', '>f',
                         '>i'
                     ]))
        }[len(self.prcnames)]

        # Process-name prefixes for exported variables; the aerosol
        # chemistry term(s) depend on the layout chosen above.
        prcs = [
            'SPAD', 'DATE', 'TIME', 'PAGRID', 'NEST', 'I', 'J', 'K', 'INIT',
            'CHEM', 'EMIS', 'PTEMIS', 'PIG', 'WADV', 'EADV', 'SADV', 'NADV',
            'BADV', 'TADV', 'DIL', 'WDIF', 'EDIF', 'SDIF', 'NDIF', 'BDIF',
            'TDIF', 'DDEP', 'WDEP'
        ] + {
            24: ['AERCHEM'],
            26: ['INORGACHEM', 'ORGACHEM', 'AQACHEM']
        }[len(self.prcnames)] + ['FCONC', 'UCNV', 'AVOL', 'EPAD']
        # One variable per PROCESS_SPECIES pair, plus bare id fields
        # and TFLAG.
        varkeys = ['_'.join(i) for i in cartesian(prcs, self.spcnames)]
        varkeys += [
            'SPAD', 'DATE', 'TIME', 'PAGRID', 'NEST', 'I', 'J', 'K', 'TFLAG'
        ]
        self.groups = {}
        NSTEPS = len([i_ for i_ in self.timerange()])
        NVARS = len(varkeys)
        self.createDimension('VAR', NVARS)
        self.createDimension('DATE-TIME', 2)
        self.createDimension('TSTEP', NSTEPS)
        padatatype = []
        pavarkeys = []
        # One PseudoNetCDF group per process-analysis (PA) domain;
        # with a single domain, variables are also exposed at file
        # level instead of only on the group.
        for di, domain in enumerate(self.padomains):
            dk = 'PA%02d' % di
            prefix = dk + '_'
            grp = self.groups[dk] = PseudoNetCDFFile()
            pavarkeys.extend([prefix + k for k in varkeys])
            grp.createDimension('VAR', NVARS)
            grp.createDimension('DATE-TIME', 2)
            grp.createDimension('TSTEP', NSTEPS)
            grp.createDimension('COL', domain['iend'] - domain['istart'] + 1)
            grp.createDimension('ROW', domain['jend'] - domain['jstart'] + 1)
            grp.createDimension('LAY', domain['tlay'] - domain['blay'] + 1)
            padatatype.append(
                (dk, self.__ipr_record_type, (len(grp.dimensions['ROW']),
                                              len(grp.dimensions['COL']),
                                              len(grp.dimensions['LAY']))))
            if len(self.padomains) == 1:
                self.createDimension('COL',
                                     domain['iend'] - domain['istart'] + 1)
                self.createDimension('ROW',
                                     domain['jend'] - domain['jstart'] + 1)
                self.createDimension('LAY',
                                     domain['tlay'] - domain['blay'] + 1)
            # exec builds a per-domain getter closing over this dk;
            # '_ipr__variables' is the name-mangled spelling of the
            # private __variables method.
            exec(
                """def varget(k):
                return self._ipr__variables('%s', k)""" % dk, dict(self=self),
                locals())
            if len(self.padomains) == 1:
                self.variables = PseudoNetCDFVariables(varget, varkeys)
            else:
                grp.variables = PseudoNetCDFVariables(varget, varkeys)

        # Memory-map the whole data section as one structured record
        # per (step, species): fields per PA domain, each shaped
        # (ROW, COL, LAY).
        self.__memmaps = memmap(self.__rffile.infile.name, dtype(padatatype),
                                'r', self.data_start_byte).reshape(
                                    NSTEPS, len(self.spcnames))
        for k, v in props.items():
            setattr(self, k, v)
        try:
            add_cf_from_ioapi(self)
        except:
            # NOTE(review): bare except silently skips CF metadata on
            # any failure (best-effort); consider narrowing.
            pass
Esempio n. 36
0
class wind(PseudoNetCDFFile):
    """
    wind provides a PseudoNetCDF interface for CAMx
    wind files.  Where possible, the interface follows
    IOAPI conventions (see www.baronams.com).

    ex:
        >>> wind_path = 'camx_wind.bin'
        >>> rows,cols = 65,83
        >>> windfile = wind(wind_path,rows,cols)
        >>> windfile.variables.keys()
        ['TFLAG', 'U', 'V']
        >>> v = windfile.variables['V']
        >>> tflag = windfile.variables['TFLAG']
        >>> tflag.dimensions
        ('TSTEP', 'VAR', 'DATE-TIME')
        >>> tflag[0,0,:]
        array([2005185,       0])
        >>> tflag[-1,0,:]
        array([2005185,  240000])
        >>> v.dimensions
        ('TSTEP', 'LAY', 'ROW', 'COL')
        >>> v.shape
        (25, 28, 65, 83)
        >>> windfile.dimensions
        {'TSTEP': 25, 'LAY': 28, 'ROW': 65, 'COL': 83}
    """

    # time-header record size (bytes) -> struct format; 12-byte
    # headers carry an extra staggering flag (lstagger)
    time_hdr_fmts = {12: "fii", 8: "fi"}
    data_fmt = "f"

    def __init__(self, rf, rows=None, cols=None):
        """
        Initialization included reading the header and learning
        about the format.

        rf - path to (or object for) a CAMx wind file
        rows, cols - grid shape; if both are omitted, all cells are
                     put in one column

        see __readheader and __gettimestep() for more info
        """
        self.rffile = OpenRecordFile(rf)
        self.time_hdr_fmt = self.time_hdr_fmts[self.rffile.record_size]

        self.time_hdr_size = struct.calcsize(self.time_hdr_fmt)
        self.padded_time_hdr_size = struct.calcsize("ii" + self.time_hdr_fmt)
        self.__readheader()
        self.__gettimestep()
        # FIX: identity comparison with None (not ==) and floor
        # division so the derived dimension stays an int on Python 3
        if rows is None and cols is None:
            rows = self.cell_count
            cols = 1
        elif rows is None:
            rows = self.cell_count // cols
        elif cols is None:
            cols = self.cell_count // rows
        else:
            if cols * rows != self.cell_count:
                raise ValueError("The product of cols (%d) and rows (%d) must equal cells (%d)" % (cols, rows, self.cell_count))
        self.createDimension('TSTEP', self.time_step_count)
        self.createDimension('COL', cols)
        self.createDimension('ROW', rows)
        self.createDimension('LAY', self.nlayers)

        self.variables = PseudoNetCDFVariables(self.__var_get, ['U', 'V'])

    def __var_get(self, key):
        """Build, decorate, cache and return the U or V variable."""
        constr = lambda uv: self.getArray()[:, {'U': 0, 'V': 1}[uv], :, :, :].copy()
        decor = lambda uv: dict(units='m/s', var_desc=uv.ljust(16), long_name=uv.ljust(16))
        values = constr(key)

        var = self.createVariable(key, 'f', ('TSTEP', 'LAY', 'ROW', 'COL'))
        var[:] = values
        for k, v in decor(key).items():
            setattr(var, k, v)
        return var

    def __readheader(self):
        """
        __readheader reads the header section of the wind file
        it initializes each header field (see CAMx Users Manual for a list)
        as properties of the wind class
        """
        self.data_start_byte = 0
        if self.time_hdr_fmt == 'fii':
            self.start_time, self.start_date, self.lstagger = self.rffile.read(self.time_hdr_fmt)
        elif self.time_hdr_fmt == 'fi':
            self.start_time, self.start_date = self.rffile.read(self.time_hdr_fmt)
            self.lstagger = None
        else:
            raise NotImplementedError("Header format is unknown")

        self.record_size = self.rffile.record_size
        # +8 accounts for the Fortran record length pre/post markers.
        self.padded_size = self.record_size + 8
        # FIX: floor division keeps cell_count an int; true division
        # would make record_fmt below a str * float (TypeError on Py3)
        self.cell_count = self.record_size // struct.calcsize(self.data_fmt)
        self.record_fmt = self.data_fmt * self.cell_count

    def __gettimestep(self):
        """
        Header information provides start and end date, but does not
        indicate the increment between.  This routine reads the first
        and second date/time and initializes variables indicating the
        timestep length and the anticipated number.
        """
        # This is a bit of a hack, but should work:
        # Search for the next record that is the same
        # length as self.time_hdr_size
        #
        # This should be the next date record
        # 1) date - startdate = timestep
        # 2) (record_start - self.padded_time_hdr_size)/self.padded_size = 2*nlayers
        self.rffile.next()
        while not self.rffile.record_size == self.time_hdr_size:
            self.rffile.next()

        dist_btwn_dates = self.rffile.record_start - self.padded_time_hdr_size
        # two records per layer per time (u and v); FIX: floor division
        self.nlayers = dist_btwn_dates // self.padded_size // 2

        if self.time_hdr_fmt == "fi":
            time, date = self.rffile.read(self.time_hdr_fmt)
        elif self.time_hdr_fmt == "fii":
            time, date, lstagger = self.rffile.read(self.time_hdr_fmt)

        self.time_step = timediff((self.start_date, self.start_time), (date, time))

        # Skip forward one whole time block at a time until a read
        # fails (typically end of file), leaving end_date/end_time at
        # the last complete record.
        while True:
            try:
                self.rffile._newrecord(self.rffile.record_start + dist_btwn_dates)
                self.rffile.tell()
                if self.time_hdr_fmt == "fi":
                    self.end_time, self.end_date = self.rffile.read(self.time_hdr_fmt)
                elif self.time_hdr_fmt == "fii":
                    self.end_time, self.end_date, lstagger = self.rffile.read(self.time_hdr_fmt)
            except Exception:
                # FIX: narrowed from a bare except; the loop is
                # deliberately terminated by the failing read
                break

        self.time_step_count = int(timediff((self.start_date, self.start_time), (self.end_date, self.end_time)) / self.time_step) + 1

    def __layerrecords(self, k):
        """Number of records to skip to reach 1-based layer k."""
        return k - 1

    def __timerecords(self, dt):
        """
        routine returns the number of records to increment from the
        data start byte to find the first time

        dt - (date, time) tuple
        """
        d, t = dt
        nsteps = int(timediff((self.start_date, self.start_time), (d, t)) / self.time_step)
        nlays = self.__layerrecords(self.nlayers + 1)
        return nsteps * nlays

    def __recordposition(self, date, time, k, duv):
        """
        routine uses timerecords and layerrecords multiplied by the
        fortran padded size to return the byte position of the
        specified record

        date - integer
        time - float
        k - 1-based layer
        duv - integer (0=date,1=uwind,2=vwind)
        """
        pos = self.data_start_byte
        nsteps = self.__timerecords((date, time))
        # each elapsed time block contributes one padded time header
        # (plus 12 bytes) and 2 padded data records per layer
        pos += (nsteps // self.nlayers) * self.padded_time_hdr_size
        pos += (nsteps // self.nlayers) * 12
        pos += nsteps * self.padded_size * 2
        if not duv == 0:
            pos += self.padded_time_hdr_size
            pos += self.__layerrecords(k) * 2 * self.padded_size
        if duv == 2:
            pos += self.padded_size
        return pos

    def seek(self, date=None, time=None, k=1, uv=1):
        """
        Move file cursor to beginning of specified record
        see __recordposition for a definition of variables
        """
        if date is None:
            date = self.start_date
        if time is None:
            time = self.start_time
        chkvar = True
        # FIX: parenthesized the or-terms; `and` binds tighter than
        # `or`, so the original guard only worked because chkvar is
        # always True here
        if chkvar and (timediff((self.end_date, self.end_time), (date, time)) > 0 or timediff((self.start_date, self.start_time), (date, time)) < 0):
            raise KeyError("Wind file includes (%i,%6.1f) thru (%i,%6.1f); you requested (%i,%6.1f)" % (self.start_date, self.start_time, self.end_date, self.end_time, date, time))
        if chkvar and (uv < 0 or uv > 2):
            raise KeyError("Wind file includes Date (uv: 0), u velocity (uv: 1) and v velocity (uv: 2); you requested %i" % (uv))

        self.rffile._newrecord(self.__recordposition(date, time, k, uv))

    def read(self):
        """
        provide direct access to the underlying RecordFile read
        method
        """
        return self.rffile.read(self.record_fmt)

    def read_into(self, dest):
        """
        put values from rffile read into dest
        dest - numpy or numeric array
        """
        return read_into(self.rffile, dest, "", self.data_fmt)

    def seekandreadinto(self, dest, date=None, time=None, k=1, duv=1):
        """
        see seek and read_into
        """
        self.seek(date, time, k, duv)
        self.read_into(dest)

    def seekandread(self, date=None, time=None, k=1, duv=1):
        """
        see seek and read
        """
        self.seek(date, time, k, duv)
        return self.read()

    def keys(self):
        """Yield every (date, time, layer) key in file order."""
        for d, t in timerange((self.start_date, self.start_time), timeadd((self.end_date, self.end_time), (0, self.time_step)), self.time_step):
            for k in range(1, self.nlayers + 1):
                yield d, t, k

    def values(self):
        """Yield (u, v) record pairs per (date, time, layer) key."""
        for d, t, k in self.keys():
            yield self.seekandread(d, t, k, 1), self.seekandread(d, t, k, 2)

    def items(self):
        """Yield (date, time, layer, u, v) tuples."""
        for d, t, k in self.keys():
            yield d, t, k, self.seekandread(d, t, k, 1), self.seekandread(d, t, k, 2)

    __iter__ = keys

    def getArray(self, krange=slice(1, None)):
        """
        Read winds into a float32 array dimensioned
        (TSTEP, 2, LAY, ROW, COL); index 0 of the second dimension
        is U and index 1 is V.

        krange - slice (or int / tuple coercible to a slice) of
                 1-based layers to extract
        """
        if not isinstance(krange, slice):
            if isinstance(krange, tuple):
                krange = slice(*krange)
            if isinstance(krange, int):
                krange = slice(krange, krange + 1)
        # FIX: xrange is Python 2 only; hoist the layer list once
        klayers = list(range(*krange.indices(self.nlayers + 1)))
        a = zeros(
            (
                self.time_step_count,
                2,
                len(klayers),
                len(self.dimensions['ROW']),
                len(self.dimensions['COL']),
            ), 'f')
        for i, (d, t) in enumerate(self.timerange()):
            for uv in range(1, 3):
                for ki, k in enumerate(klayers):
                    # FIX: index with ki (position within the requested
                    # slice) rather than k-1, which was only correct
                    # when krange started at layer 1
                    self.seekandreadinto(a[i, uv - 1, ki, :, :], d, t, k, uv)
        return a

    def timerange(self):
        """Iterate (date, time) pairs from start through end inclusive."""
        return timerange((self.start_date, self.start_time), timeadd((self.end_date, self.end_time), (0, self.time_step)), self.time_step)