Example #1
def scan_timezones(outfile, attr_in, *, loutf=False):
    # Include functions
    # (get_lcc, proj_latlon_single, and tz_latlon are assumed to be defined
    # in this same module, CAMxtools.tzone.scan_timezones)
    import datetime
    import numpy as np
    from CAMxtools.write.wrt_ioapi import wrt_ioapi
    from CAMxtools.write.wrt_uamiv import wrt_uamiv
    from CAMxtools._cnvt._data2fin import _data2fin
    from tzwhere import tzwhere

    # Get nx, ny, dxy, and lcc
    nx = attr_in['NCOLS']
    ny = attr_in['NROWS']
    dxy = float(attr_in['XCELL'])  # Simply assume XCELL = YCELL
    lcc = get_lcc(attr_in)

    # Timezone-related constants
    # timezone lookup; force the nearest tz for coordinates outside of polygons
    WHERETZ = tzwhere.tzwhere()
    # Daylight saving time (DST) in the northern hemisphere starts in March and
    # ends in November; the opposite holds in the southern hemisphere.
    JAN1 = datetime.datetime(2016, 1, 1)  # date with standard time in the northern hemisphere
    JUN1 = datetime.datetime(2016, 6, 1)  # date with standard time in the southern hemisphere

    # Calculating timezones over the domain
    print("Calculating timezones over the domain")
    # If tz is set to auto, calculate a tshift array before the loop
    tzone_ji = np.zeros((ny, nx)).astype(int)  # PST:-8 MST:-7 CST:-6 EST:-5
    for i in range(nx):
        for j in range(ny):
            lat, lon = proj_latlon_single(i, j, dxy, lcc)
            # tz_latlon returns the local standard time (LST) offset, not DST.
            tzone_ji[j, i], tz_info = tz_latlon(lat, lon, WHERETZ, JAN1, JUN1)
    for itz in np.unique(-tzone_ji):
        print("time zone = {}".format(itz))

    # Data array preparation
    nspc = 1
    nz = 1
    nsteps = 1
    data2sav = np.zeros((nspc, nsteps, nz, ny, nx))
    data2sav[0, 0, 0, :, :] = -tzone_ji
    tracernames = "TZONE".split()

    # Write output to a binary file
    fout = _data2fin(data2sav, tracernames, attr_in)
    l2uam = False  # force IOAPI output format
    if loutf:  # Write output to netcdf
        if l2uam:
            wrt_uamiv(outfile, fout)
        else:
            wrt_ioapi(outfile, fout)
    else:
        return tracernames, data2sav
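
# A minimal usage sketch (not part of the source): with loutf=False the
# timezone grid is returned in memory; attr_in is assumed to come from
# CAMxtools.write.set_attr.set_attr on an existing IOAPI/UAMIV file.
#
#   tracernames, data2sav = scan_timezones(None, attr_in, loutf=False)
#   tzone_ji = data2sav[0, 0, 0, :, :].astype(int)  # hours behind UTC, e.g. EST = 5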
Example #2
def hxmdaz_naaqs(jdbeg,
                 jdend,
                 outfile,
                 csvfile,
                 comb,
                 attr_in,
                 nifiles,
                 infile_h,
                 infile_t,
                 *,
                 avg_hr=8,
                 rank=4,
                 lyyyyjjj=True,
                 tzone=None,
                 l2uam=False,
                 lnew_mda8=False,
                 tmpoutf=None):
    # Include functions
    from CAMxtools.combine.combine import combine
    import netCDF4 as ncdf4
    from PseudoNetCDF.camxfiles.Memmaps import uamiv
    from CAMxtools.write.set_attr import set_attr
    from CAMxtools.tzone.scan_timezones import scan_timezones
    from CAMxtools.tzone.get_local_hrs import get_mda1
    from CAMxtools.tzone.get_local_hrs import get_mda8
    from CAMxtools._cnvt._data2fin import _data2fin
    from CAMxtools.write.wrt_ioapi import wrt_ioapi
    from CAMxtools.write.wrt_uamiv import wrt_uamiv
    from CAMxtools.naaqs.wrt_csv_for_naaqs import wrt_csv_for_naaqs
    import datetime
    import numpy as np
    import gc

    # Check arguments
    l1tzone = False
    if tzone is not None:
        l1tzone = True  # if the user specifies a time zone, MATS MDA8 O3 is calculated based on that zone

    # Scan time zone over the domain
    tzfile = None
    dum, tzone_stlji = scan_timezones(tzfile, attr_in, loutf=False)
    tzone_ji = tzone_stlji[0, 0, 0, :, :].astype(int)
    tzones = np.unique(tzone_ji)
    if l1tzone:
        print("A SINGLE TIMEZONE, {} will be applied".format(tzone))
        if tzone in tzones:
            ny = attr_in['NROWS']
            nx = attr_in['NCOLS']
            tzone_ji = np.zeros((ny, nx)).astype(int) + tzone
            tzones = [tzone]
        else:
            exit("YOUR TIMEZONE SPECIFIED IS OUT OF DOMAIN")

    # Get attribute
    nx = attr_in['NCOLS']
    ny = attr_in['NROWS']

    # Set a variable name
    spec = "O3"

    # Daily loop
    jdate = jdbeg
    while (jdate <= jdend):
        print("Processing {}".format(jdate))
        gdate = int(
            datetime.datetime.strptime(str(jdate), "%Y%j").strftime("%Y%m%d"))
        infile = []
        for ifile in range(0, nifiles):
            if lyyyyjjj:
                infile.append(infile_h[ifile] + '.' + str(jdate) + '.' +
                              infile_t[ifile])
            else:
                infile.append(infile_h[ifile] + '.' + str(gdate) + '.' +
                              infile_t[ifile])
        print("  1. PROCESSING COMBINE")
        tracernames, indata2, ovarunits = combine(None,
                                                  comb,
                                                  nifiles,
                                                  infile,
                                                  lverbose=False,
                                                  loutf=False,
                                                  lovarunits=True)
        if (jdate > jdbeg):
            # One-time setup: check that 'O3' is the first output species and
            # build trcnames, which excludes O3 from tracernames
            if jdate == jdbeg + 1:
                s = tracernames.index(spec)
                if s != 0:
                    exit('O3 must be the first species in combine spec_def')
                ntracers = len(tracernames)
                trcnames = tracernames[1:]
            print("  2. PROCESSING METRICS FOR PREVIOUS DAY")
            concs_48_utc = np.append(indata1[:, :, 0, :, :],
                                     indata2[:, :, 0, :, :],
                                     axis=1)
            # concs_48_utc[nspc,nt,ny,nx], conc_o3_48_utc[1,nt,ny,nx]
            conc_o3_48_utc = concs_48_utc[np.newaxis, s, :, :, :]
            if avg_hr == 1:
                dmdaz_1day, dind_hr_1day = get_mda1(
                    conc_o3_48_utc, tzone_ji, nx,
                    ny)  # dmdaz_1day[1,ny,nx], dind_hr_1day[1,ny,nx]
            elif avg_hr == 8:
                dmdaz_1day, dind_hr_1day = get_mda8(
                    conc_o3_48_utc, tzone_ji, nx, ny,
                    lnew_mda8=lnew_mda8)  # dmdaz_1day[1,ny,nx], dind_hr_1day[1,ny,nx]
            else:
                exit("avg_hr must be either 1 or 8.")
            # Set trcs_dmdaz_1day: other species sampled at the O3 MDAZ hour
            trcs_dmdaz_1day = np.zeros((ntracers - 1, ny, nx))
            for i in range(nx):
                for j in range(ny):
                    if avg_hr == 1:
                        trcs_dmdaz_1day[:, j, i] = concs_48_utc[
                            1:, np.squeeze(dind_hr_1day)[j, i], j, i]
                    elif avg_hr == 8:
                        # 8-hour running means over the 24 local-time windows
                        trcs_daz_1day_1cell = np.apply_along_axis(
                            np.convolve, axis=1,
                            arr=concs_48_utc[1:, tzone_ji[j, i]:tzone_ji[j, i] + 31, j, i],
                            v=[1 / 8.] * 8,
                            mode='valid')  # trcs_daz_1day_1cell[nspc-1,24]
                        trcs_dmdaz_1day[:, j, i] = trcs_daz_1day_1cell[
                            :, np.squeeze(dind_hr_1day)[j, i]]
                    else:
                        exit("avg_hr must be either 1 or 8.")
                gc.collect()
            if jdate == jdbeg + 1:
                dmdazs = dmdaz_1day  # dmdazs[nd,ny,nx]
                dind_hrs = dind_hr_1day  # dind_hrs[nd,ny,nx]
                trcs_dmdazs = np.array([trcs_dmdaz_1day])  # trcs_dmdazs[nd,nspc-1,ny,nx]
            if jdate > jdbeg + 1:
                dmdazs = np.append(dmdazs, dmdaz_1day, axis=0)
                dind_hrs = np.append(dind_hrs, dind_hr_1day, axis=0)
                trcs_dmdazs = np.append(trcs_dmdazs,
                                        np.array([trcs_dmdaz_1day]),
                                        axis=0)
        indata1 = indata2
        jdate = int((datetime.datetime.strptime(str(jdate), "%Y%j") +
                     datetime.timedelta(days=1)).strftime("%Y%j"))
        gc.collect()
    del indata2
    del indata1
    gc.collect()

    # Prepare MDAZ if asked
    lout_mdaz = False
    if tmpoutf is not None: lout_mdaz = True
    if lout_mdaz:
        nd = jdend - jdbeg
        data2sav = np.zeros((ntracers, nd, 1, ny, nx))
        data2sav[0, :, 0, :, :] = dmdazs
        data2sav[1:, :, 0, :, :] = np.einsum('jikl->ijkl', trcs_dmdazs)
        # Write a binary file for MDAZ
        attr_in['TSTEP'] = 240000
        fout = _data2fin(data2sav, tracernames, attr_in)
        if l2uam:
            wrt_uamiv(tmpoutf, fout, lsurf=True, ounits=ovarunits)
        else:
            wrt_ioapi(tmpoutf, fout, lsurf=True, ounits=ovarunits)
        gc.collect()

    # Find X highest
    if rank == 1:
        rank_name = "FIRST"
    elif rank == 4:
        rank_name = "FOURTH"
    elif rank == 0:
        rank_name = "AVERAGE"
    else:
        exit("rank must be either 1 or 4 or 0.")
    if rank == 0:
        print("  3. PROCESSING AVERAGE")
    else:
        print("  3. PROCESSING {} HIGHEST".format(rank_name))
    data2sav = np.zeros((ntracers, 1, 1, ny, nx))
    nd = jdend - jdbeg
    if (nd < rank):
        print('Your no. of days is less than the rank you specified.')
        print('No. of days from input files = {}'.format(nd))
        print('Rank you selected = {}'.format(rank))
        exit('Either reduce your rank or increase the no. of days from input files')
    if rank == 0:
        data2sav[0, 0, 0, :, :] = np.mean(dmdazs, axis=0)  # data2sav[nspc,nt,nz,ny,nx], dmdazs[nd,ny,nx]
        data2sav[1:, 0, 0, :, :] = np.mean(trcs_dmdazs, axis=0)  # trcs_dmdazs[nd,nspc-1,ny,nx]
    else:
        # Per-cell index of the rank-th highest day: argsort ascending along
        # the day axis, then take position nd - rank.
        ind = np.argsort(dmdazs, axis=0)[nd - rank, ...]  # dmdazs[nd,ny,nx], ind[ny,nx]
        # Fancy indexing: gr is a 3-D open grid over (nd, ny, nx); replacing
        # its day axis with ind makes dmdazs[gr] broadcast to (1,ny,nx), with
        # each cell picking its own rank-th highest day.
        gr = np.ogrid[0:dmdazs.shape[0], 0:dmdazs.shape[1], 0:dmdazs.shape[2]]
        gr[0] = ind
        data2sav[0, 0, 0, :, :] = dmdazs[gr]  # data2sav[nspc,nt,nz,ny,nx]
        for i in range(nx):
            for j in range(ny):
                data2sav[1:, 0, 0, j, i] = trcs_dmdazs[ind[j, i], :, j, i]
    # Write a csv file
    if rank == 0:
        jdays = np.zeros((ny, nx)) + attr_in['SDATE']  # average of MDAZ: use the beginning date
        hours = np.zeros((ny, nx))  # average of MDAZ: set zeros
    else:
        jdays = attr_in['SDATE'] + ind[:, :]
        hours = np.zeros((ny, nx)) + dind_hrs[gr][0, :, :]  # dind_hrs[gr].shape = (1,ny,nx)
    data2csv = data2sav[:, 0, 0, :, :]
    wrt_csv_for_naaqs(csvfile, tracernames, data2csv, jdays, hours)
    gc.collect()
    # Write a binary file for HXMDAZ
    fout = _data2fin(data2sav, tracernames, attr_in)
    if l2uam:
        wrt_uamiv(outfile, fout, lsurf=True, ounits=ovarunits)
    else:
        wrt_ioapi(outfile, fout, lsurf=True, ounits=ovarunits)
    gc.collect()
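
# A minimal driver sketch (not part of the source); the file names are
# hypothetical. Daily inputs are assembled as <infile_h[i]>.<date>.<infile_t[i]>,
# and rank=4 with avg_hr=8 yields the 4th highest MDA8 O3.
#
#   comb = 'spec_def.o3'              # species definition file, O3 listed first
#   hxmdaz_naaqs(2016182, 2016213, 'h4mda8.nc', 'h4mda8.csv', comb, attr_in,
#                1, ['outputs/camx.avrg'], ['grd01.nc'], avg_hr=8, rank=4)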
Example #3
def hxmdaz(jdbeg, jdend, outfile, comb, attr_in, nifiles, infile_h, infile_t,
           *, avg_hr=8, rank=4, lyyyyjjj=True, tzone=None, l2uam=False,
           lnew_mda8=False, tmpoutf=None):
    # Include functions
    from CAMxtools.combine.combine import combine
    import netCDF4 as ncdf4
    from PseudoNetCDF.camxfiles.Memmaps import uamiv
    from CAMxtools.write.set_attr import set_attr
    from CAMxtools.tzone.scan_timezones import scan_timezones
    from CAMxtools.tzone.get_local_hrs import get_mda1s
    from CAMxtools.tzone.get_local_hrs import get_mda8s
    from CAMxtools.tzone.get_local_hrs import get_mda8s_a0
    from CAMxtools._cnvt._data2fin import _data2fin
    from CAMxtools.write.wrt_ioapi import wrt_ioapi
    from CAMxtools.write.wrt_uamiv import wrt_uamiv
    import datetime
    import numpy as np

    # Check arguments
    l1tzone = False
    if tzone is not None: l1tzone = True # if the user specifies a time zone, MATS MDA8 O3 is calculated based on that zone

    # Scan time zone over the domain
    tzfile = None
    dum, tzone_stlji = scan_timezones(tzfile, attr_in, loutf=False)
    tzone_ji = tzone_stlji[0,0,0,:,:].astype(int)
    tzones = np.unique(tzone_ji)
    if l1tzone:
      print("A SINGLE TIMEZONE, {} will be applied".format(tzone))
      if tzone in tzones:
        ny = attr_in['NROWS']
        nx = attr_in['NCOLS']
        tzone_ji = np.zeros((ny,nx)).astype(int) + tzone
        tzones = [tzone]
      else:
        exit("YOUR TIMEZONE SPECIFIED IS OUT OF DOMAIN")

    # Get attribute
    nx  = attr_in['NCOLS']
    ny  = attr_in['NROWS']

    # Set a variable name 
    spec = "O3"

    # Daily loop
    print ("  1. PROCESSING COMBINE")
    jdate = jdbeg
    while (jdate <= jdend):
      print ("Processing {}".format(jdate))
      gdate = int(datetime.datetime.strptime(str(jdate),"%Y%j").strftime("%Y%m%d"))
      infile = []
      for ifile in range(0,nifiles):
        if lyyyyjjj:
          infile.append(infile_h[ifile]+'.'+str(jdate)+'.'+infile_t[ifile])
        else:
          infile.append(infile_h[ifile]+'.'+str(gdate)+'.'+infile_t[ifile])
      tracernames, indata, ovarunits = combine(None,comb,nifiles,infile,lverbose=False,loutf=False,lovarunits=True)
      s = tracernames.index(spec)
      if jdate == jdbeg: conc_hrs_utc = indata[s,:,0,:,:]
      if jdate > jdbeg: conc_hrs_utc = np.append(conc_hrs_utc,indata[s,:,0,:,:],axis=0) # after selecting species s, time (nt) becomes axis=0
      jdate = int((datetime.datetime.strptime(str(jdate),"%Y%j") + datetime.timedelta(days=1)).strftime("%Y%j"))
    del indata
    print ("  2. PROCESSING MDA{} FOR ALL DAYS".format(avg_hr))
    nd = jdend - jdbeg
    if avg_hr == 1:
      mdaz = get_mda1s(conc_hrs_utc, tzone_ji, nd, nx, ny)
    elif avg_hr == 8:
      if lnew_mda8:
        mdaz = get_mda8s_a0(conc_hrs_utc, tzone_ji, nd, nx, ny)
      else:
        mdaz = get_mda8s(conc_hrs_utc, tzone_ji, nd, nx, ny)
    else:
      exit("avg_hr must be either 1 or 8.")

    # Prepare MDAZ if asked
    lout_mdaz = False
    if tmpoutf is not None: lout_mdaz = True
    if lout_mdaz:
      data2sav = np.zeros((1,nd,1,ny,nx))
      data2sav[0,:,0,:,:] = mdaz
      # Write a binary file for MDAZ
      attr_in['TSTEP'] = 240000
      fout = _data2fin(data2sav, tracernames, attr_in)
      if l2uam:
        wrt_uamiv(tmpoutf, fout, lsurf=True, ounits=ovarunits)
      else:
        wrt_ioapi(tmpoutf, fout, lsurf=True, ounits=ovarunits)

    # Find X highest
    if rank == 1:
      rank_name = "FIRST"
    elif rank == 4:
      rank_name = "FOURTH"
    else:
      exit("rank must be either 1 or 4.")
    print ("  3. PROCESSING {} HIGHEST".format(rank_name))
    data2sav = np.zeros((1,1,1,ny,nx))
    data2sav[0,0,0,:,:] = np.sort(mdaz,axis=0)[mdaz.shape[0]-rank,...] #data2sav[nspc,nt,nz,ny,nx]

    # Write a binary file for HXMDAZ
    fout = _data2fin(data2sav, tracernames, attr_in)
    if l2uam:
      wrt_uamiv(outfile, fout, lsurf=True, ounits=ovarunits)
    else:
      wrt_ioapi(outfile, fout, lsurf=True, ounits=ovarunits)
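
# A minimal usage sketch (not part of the source): same calling pattern as
# hxmdaz_naaqs above, but only the O3 metric is computed and no csv file is
# written; the file names are hypothetical.
#
#   hxmdaz(2016182, 2016213, 'h4mda8.nc', comb, attr_in, 1,
#          ['outputs/camx.avrg'], ['grd01.nc'], avg_hr=8, rank=4)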
Example #4
def psd_pm10_2nddavg_annavg(jdbeg,
                            jdend,
                            out_anndavg,
                            out_2nddavg,
                            csv_anndavg,
                            csv_2nddavg,
                            comb,
                            attr_in,
                            nifiles,
                            infile_h,
                            infile_t,
                            *,
                            lyyyyjjj=True,
                            tzone=None,
                            l2uam=False,
                            tmpoutf=None):
    # Include functions
    from CAMxtools.combine.combine import combine
    import netCDF4 as ncdf4
    from PseudoNetCDF.camxfiles.Memmaps import uamiv
    from CAMxtools.write.set_attr import set_attr
    from CAMxtools.tzone.scan_timezones import scan_timezones
    from CAMxtools.tzone.get_local_hrs import get_davg
    from CAMxtools._cnvt._data2fin import _data2fin
    from CAMxtools.write.wrt_ioapi import wrt_ioapi
    from CAMxtools.write.wrt_uamiv import wrt_uamiv
    from CAMxtools.psd.wrt_csv_for_psd import wrt_csv_for_psd
    import datetime
    import numpy as np

    # Check arguments
    l1tzone = False
    if tzone is not None:
        l1tzone = True  # if the user specifies a time zone, daily averages are calculated based on that zone

    # Scan time zone over the domain
    tzfile = None
    dum, tzone_stlji = scan_timezones(tzfile, attr_in, loutf=False)
    tzone_ji = tzone_stlji[0, 0, 0, :, :].astype(int)
    tzones = np.unique(tzone_ji)
    if l1tzone:
        print("A SINGLE TIMEZONE, {} will be applied".format(tzone))
        if tzone in tzones:
            ny = attr_in['NROWS']
            nx = attr_in['NCOLS']
            tzone_ji = np.zeros((ny, nx)).astype(int) + tzone
            tzones = [tzone]
        else:
            exit("YOUR TIMEZONE SPECIFIED IS OUT OF DOMAIN")

    # Get attribute
    nx = attr_in['NCOLS']
    ny = attr_in['NROWS']

    # Daily loop
    jdate = jdbeg
    while (jdate <= jdend):
        print("Processing {}".format(jdate))
        gdate = int(
            datetime.datetime.strptime(str(jdate), "%Y%j").strftime("%Y%m%d"))
        infile = []
        for ifile in range(0, nifiles):
            if lyyyyjjj:
                infile.append(infile_h[ifile] + '.' + str(jdate) + '.' +
                              infile_t[ifile])
            else:
                infile.append(infile_h[ifile] + '.' + str(gdate) + '.' +
                              infile_t[ifile])
        print("  1. PROCESSING COMBINE")
        tracernames, indata2, ovarunits = combine(None,
                                                  comb,
                                                  nifiles,
                                                  infile,
                                                  lverbose=False,
                                                  loutf=False,
                                                  lovarunits=True)
        if (jdate > jdbeg):
            print("  2. PROCESSING DAILY AVERAGE FOR PREVIOUS DAY")
            concs_48_utc = np.append(indata1[:, :, 0, :, :],
                                     indata2[:, :, 0, :, :],
                                     axis=1)
            davg_1day = get_davg(concs_48_utc, tzone_ji, nx, ny)
            if jdate == jdbeg + 1:
                davgs = np.array([davg_1day])  #davgs[nd,nspc,ny,nx]
            if jdate > jdbeg + 1:
                davgs = np.append(davgs, np.array([davg_1day]), axis=0)
        indata1 = indata2
        jdate = int((datetime.datetime.strptime(str(jdate), "%Y%j") +
                     datetime.timedelta(days=1)).strftime("%Y%j"))
    del indata2
    del indata1

    # Prepare DAVG if asked
    nspc = len(tracernames)
    lout_davg = False
    if tmpoutf is not None: lout_davg = True
    if lout_davg:
        nd = jdend - jdbeg
        data2sav = np.zeros((nspc, nd, 1, ny, nx))
        for ispc in range(nspc):
            data2sav[ispc, :, 0, :, :] = davgs[:, ispc, :, :]
        # Write a binary file for DAVG
        attr_in['TSTEP'] = 240000
        fout = _data2fin(data2sav, tracernames, attr_in)
        if l2uam:
            wrt_uamiv(tmpoutf, fout, lsurf=True, ounits=ovarunits)
        else:
            wrt_ioapi(tmpoutf, fout, lsurf=True, ounits=ovarunits)

    # Calculate 2nd highest daily average
    print("  3. PROCESSING 2ND HIGHEST DAILY AVERAGE")
    data2sav = np.zeros((nspc, 1, 1, ny, nx))
    rank = 2
    # Per-cell, per-species index of the 2nd highest day: argsort ascending
    # along the day axis, then take position nd - rank.
    ind = np.argsort(davgs, axis=0)[davgs.shape[0] - rank,
                                    ...]  # davgs[nd,nspc,ny,nx], ind[nspc,ny,nx]
    # Fancy indexing: gr is a 4-D open grid over (nd, nspc, ny, nx); replacing
    # its day axis with ind makes davgs[gr] pick each cell's own 2nd highest
    # day, independently per species.
    gr = np.ogrid[0:davgs.shape[0], 0:davgs.shape[1], 0:davgs.shape[2],
                  0:davgs.shape[3]]
    gr[0] = ind
    data2sav[:, 0, 0, :, :] = davgs[gr]
    # Write a csv file
    jdays = attr_in['SDATE'] + ind[0, :, :]  # day offset of the 2nd highest daily average (based on species 0)
    hours = np.zeros((ny, nx))  # daily average, set zeros
    data2csv = data2sav[:, 0, 0, :, :]
    wrt_csv_for_psd(csv_2nddavg, tracernames, data2csv, jdays, hours)
    # Write a binary file
    fout = _data2fin(data2sav, tracernames, attr_in)
    if l2uam:
        wrt_uamiv(out_2nddavg, fout, lsurf=True, ounits=ovarunits)
    else:
        wrt_ioapi(out_2nddavg, fout, lsurf=True, ounits=ovarunits)

    # Calculate grand average
    print("  4. PROCESSING GRAND AVERAGE")
    data2sav = np.zeros((nspc, 1, 1, ny, nx))
    data2sav[:, 0, 0, :, :] = np.mean(
        davgs, axis=0)  #data2sav[nspc,nt,nz,ny,nx], davgs[nd,nspc,ny,nx]
    # Write a csv file
    jdays = np.zeros(
        (ny, nx)) + attr_in['SDATE']  # Annual average, set the beginning date
    hours = np.zeros((ny, nx))  # Annual average, set zeros
    data2csv = data2sav[:, 0, 0, :, :]
    wrt_csv_for_psd(csv_anndavg, tracernames, data2csv, jdays, hours)
    # Write a binary file
    fout = _data2fin(data2sav, tracernames, attr_in)
    if l2uam:
        wrt_uamiv(out_anndavg, fout, lsurf=True, ounits=ovarunits)
    else:
        wrt_ioapi(out_anndavg, fout, lsurf=True, ounits=ovarunits)
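
# A minimal usage sketch (not part of the source); the file names are
# hypothetical. One call produces the 2nd highest daily average and the
# grand (annual) average, each as a binary file plus a csv file.
#
#   psd_pm10_2nddavg_annavg(2016001, 2016366, 'annavg.nc', '2nddavg.nc',
#                           'annavg.csv', '2nddavg.csv', comb, attr_in,
#                           1, ['outputs/camx.avrg'], ['grd01.nc'])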
Example #5
def combine(outfile,
            comb,
            nifiles,
            infile,
            *,
            lsurf=True,
            lverbose=False,
            loutf=True,
            l2uam=False,
            lovarunits=False):
    """
  Linearly combine multiple variables in single or multiple input files
     - Input file can be UAM or IOAPI.
     - Output file format is IOAPI.
  Arguments:
     outfile - IOAPI or UAMIV formatted output file
     comb - species definition text file which has output species name, unit, and the equation of linear combination from species in input files. Follow the same syntax of CMAQ species definition file.
     nifiles - no. of input files
     infile - python list of multiple input files
     lsurf - if True, create only surface layer regardless of no. of layers from input file.
     lverbose - if True, print more messages to screen
     loutf - if True, create an output file
     l2uam - if True, output file format is UAM. Otherwise, IOAPI.
     lovarunits - if True, return output variable units, ovarunits.
  """
    # Include functions to call
    from CAMxtools.combine._exprparse import ExpressionEvaluator
    from CAMxtools.write.wrt_ioapi import wrt_ioapi
    from CAMxtools.write.wrt_uamiv import wrt_uamiv
    from CAMxtools.write.set_attr import set_attr
    from CAMxtools._cnvt._data2fin import _data2fin
    from PseudoNetCDF.camxfiles.Memmaps import uamiv
    import netCDF4 as ncdf4
    import xarray as xr
    import numpy as np
    import csv
    import os, re

    # Handle input files
    if lverbose: print("no. of input files = {}".format(nifiles))
    fin = []
    for ifile in range(0, nifiles):
        try:
            fin.append(uamiv(infile[ifile]))
            if ifile == 0: ftype = 'uam'
        except Exception:
            try:
                fin.append(xr.open_dataset(infile[ifile]))
                if ifile == 0: ftype = 'netcdf'
            except Exception:
                print("Check whether your input file exists")
                print(infile[ifile])
                exit()

    # Used for variable names to recognize variables from equations
    NAME = r'(?P<NAME>[a-zA-Z_][a-zA-Z_0-9\[\]]*)'

    # Outmost block begins with opening species definition file, comb.
    with open(comb) as csvfile:
        # Initialize tracernames which will be used for 'VAR-LIST' of output file at the end of this function.
        tracernames = []
        ovarunits = []

        # Scan the species definition file to count the no. of output variables.
        lines = csv.reader(csvfile, delimiter=',', quotechar='|')
        nspc = sum(1 for row in lines
                   if not ((''.join(row).strip().replace(" ", "") == '') or
                           (row[0][0] == '/') or (row[0][0] == '#') or
                           (row[0][0] == '!')))  # no. of output variables
        csvfile.seek(0)  # rewind the species definition file

        # If an output file does not exist, get fin0 and nsteps
        lnew = False  # flag for the new output
        if loutf:
            if not os.path.exists(outfile): lnew = True
        if ftype == 'uam':
            fin0 = fin[0]
            nsteps = len(fin[0].dimensions['TSTEP'])
        else:
            fin0 = ncdf4.Dataset(infile[0])
            nsteps = fin[0].dims['TSTEP']
        lfin0 = True
        attr_in = set_attr(lfin0, fin0, {})

        # Set data2sav
        lgrdded = False
        if attr_in['FTYPE'] == 1: lgrdded = True
        ny = attr_in['NROWS']
        nx = attr_in['NCOLS']
        nz = attr_in['NLAYS']
        if lsurf:
            nz = 1
        if lgrdded:  # GRIDDED
            data2sav = np.zeros((nspc, nsteps, nz, ny, nx))
        else:  # BOUNDARY CONDITION
            ncells = 2 * (nx + ny) + 4
            data2sav = np.zeros((nspc, nsteps, nz, ncells))

        # Main loop - process the species definition file line by line
        lines = csv.reader(csvfile, delimiter=',', quotechar='|')
        for line in lines:

            # Skip unnecessary lines
            if ''.join(line).strip().replace(" ", "") == '':
                continue  # skip blank line
            if ((line[0][0] == '/') or (line[0][0] == '#')
                    or (line[0][0] == '!')):
                continue  # skip lines starting with /, #, or !

            # Set ovar, ovarunit, and formula. Append tracernames.
            ovar = line[0].split()[0]
            ovarunit = line[1].split()[0]
            formula = line[2].split()[0]
            tracernames.append(ovar)
            ovarunits.append(ovarunit)

            # Find vars in the formula; each is either from an input file or an
            # ovar from a previous line (file index [0]). The NAME pattern
            # matches alphanumeric blocks ending with "[ number ]", so
            # formula = 'NO_DD[1]+NO_DD[2]+NO2_DD[1]' gives
            # vars = ['NO_DD[1]', 'NO_DD[2]', 'NO2_DD[1]'].
            p = re.compile(NAME)
            vars = p.findall(formula)

            # Loop through individual var in vars
            for var in vars:
                # Delimit by [ or ] and store the first element to varname and the second element to fins. For example, varname[0] = NO_DD and fins[0] = 1. Do this for varname[1] and fins[1], and so on.
                varname = (re.findall(r"\w+", var)[0])
                findx = (int(re.findall(r"\w+", var)[1]))

                # If the var in the formula is not defined yet, sanity check the
                # file index and define it, e.g. NO_DD_1 = fin[0].variables['NO_DD'][0:nsteps,0:nz,:,:]
                var_findx = "".join([varname, "_", str(findx)])
                if not var_findx in locals():
                    if findx < 0:
                        print('File index is negative! {}'.format(findx))
                        exit()
                    elif findx == 0:  # if [0] in the var, use a variable that is already calculated.
                        s = tracernames.index(varname)
                        if lgrdded:  # GRIDDED
                            exec("".join(
                                [var_findx, " = data2sav[s,:,0:nz,:,:]"]))
                        else:  # BOUNDARY CONDITION
                            exec("".join(
                                [var_findx, " = data2sav[s,:,0:nz,:]"]))
                    elif findx <= nifiles:
                        exec("".join([
                            var_findx,
                            " = fin[int(findx)-1].variables[varname][0:nsteps,0:nz,:,:]"
                        ]))
                    else:
                        print(
                            'File index is larger than no. of input files! {}'.
                            format(findx))
                        print('no. of input files {}'.format(nifiles))
                        exit()

            # Change the formula to an easier expression to evaluate. For example,
            # NO_DD[1]+NO_DD[2]+NO2_DD[1] becomes NO_DD_1+NO_DD_2+NO2_DD_1
            for i in range(0, nifiles + 1):  # replace [i] with _i (e.g. NO_DD[1] -> NO_DD_1)
                formula = formula.replace("".join(["[", str(i), "]"]),
                                          "".join(["_", str(i)]))

            # Construct dict_vars such as {'NO_DD_1':NO_DD_1, 'NO_DD_2':NO_DD_2, 'NO2_DD_1':NO2_DD_1}
            dict_vars = {}
            vars = p.findall(formula)  # return all the variables in formula
            for var in vars:
                exec("".join(["dict_vars['", var, "']=", var]))

            # Instantiate the ExpressionEvaluator class as e; dict_vars is
            # passed so its variables are declared for evaluation.
            e = ExpressionEvaluator(dict_vars)

            # Evaluate formula
            s = tracernames.index(ovar)
            if lgrdded:  # GRIDDED
                data2sav[s, 0:nsteps, 0:nz, :, :] = e.parse(formula)
            else:  # BOUNDARY CONDITION
                data2sav[s, 0:nsteps, 0:nz, :] = e.parse(formula)

        # Del files in fin lists
        if ftype == 'netcdf':
            for ifile in range(0, nifiles):
                fin[ifile].close()
        del fin

        # Return results
        if loutf:  # if creating an output file
            fout = _data2fin(data2sav, tracernames, attr_in)
            lounit = True
            if l2uam:
                wrt_uamiv(outfile,
                          fout,
                          lsurf=lsurf,
                          lapp=not lnew,
                          ounits=ovarunits)
            else:
                wrt_ioapi(outfile,
                          fout,
                          lsurf=lsurf,
                          lapp=not lnew,
                          ounits=ovarunits)
        else:
            if lovarunits:
                return tracernames, data2sav, ovarunits
            else:
                return tracernames, data2sav
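
# A hypothetical species definition (comb) file sketch, following the
# CMAQ-style syntax the docstring describes: output name, unit, and a formula
# over input variables, where [n] is the 1-based input-file index and [0]
# refers to an output variable defined on an earlier line.
#
#   /output_var ,units ,formula
#   O3          ,ppb   ,O3[1]*1000.
#   NOX         ,ppb   ,(NO[1]+NO2[1])*1000.
#   OX          ,ppb   ,O3[0]+NOX[0]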
Example #6
def regrid(outfile,
           project,
           utmzon,
           plon,
           plat,
           tlat1,
           tlat2,
           xorg,
           yorg,
           dxy_out,
           nx_out,
           ny_out,
           comb,
           attr_in,
           nstep,
           nifiles,
           infile,
           *,
           lno_edges=True,
           l2uam=False,
           lnearest=False,
           ijfile=None):
    # Include functions
    from CAMxtools.combine.combine import combine
    import netCDF4 as ncdf4
    from PseudoNetCDF.camxfiles.Memmaps import uamiv
    from CAMxtools.write.set_attr import set_attr
    from CAMxtools.tzone.scan_timezones import get_lcc
    from CAMxtools.regrid.projection import ll2latlon
    from CAMxtools.regrid.projection import lcp2latlon
    from CAMxtools.regrid.projection import ll2lcp
    from CAMxtools._cnvt._data2fin import _data2fin
    from CAMxtools.write.wrt_ioapi import wrt_ioapi
    from CAMxtools.write.wrt_uamiv import wrt_uamiv
    import numpy as np
    import csv

    # 1. COMBINE
    print("  1. PROCESSING COMBINE")
    tracernames, indata, ovarunits = combine(None,
                                             comb,
                                             nifiles,
                                             infile,
                                             lsurf=False,
                                             lverbose=False,
                                             loutf=False,
                                             lovarunits=True)

    # 2. REGRID
    print("  2. PROCESSING REGRID")

    # 2.1 Set attributes of output file
    if l2uam:
        NA_val = 0.
    else:
        NA_val = -9.999E36
    attr_out = {}
    for key, value in attr_in.items():
        if key == 'XCELL': attr_out[key] = dxy_out
        elif key == 'YCELL': attr_out[key] = dxy_out
        elif key == 'XORIG': attr_out[key] = xorg
        elif key == 'YORIG': attr_out[key] = yorg
        elif key == 'NCOLS': attr_out[key] = nx_out
        elif key == 'NROWS': attr_out[key] = ny_out
        elif key == 'GDTYP':
            if project == 'LATLON': attr_out[key] = 1
            elif project == 'LAMBERT': attr_out[key] = 2
            else: exit()
        elif key == 'P_ALP':
            if project == 'LATLON': attr_out[key] = NA_val
            elif project == 'LAMBERT': attr_out[key] = tlat1
            else: exit()
        elif key == 'P_BET':
            if project == 'LATLON': attr_out[key] = NA_val
            elif project == 'LAMBERT': attr_out[key] = tlat2
            else: exit()
        elif key == 'P_GAM':
            if project == 'LATLON': attr_out[key] = NA_val
            elif project == 'LAMBERT': attr_out[key] = plon
            else: exit()
        elif key == 'XCENT':
            if project == 'LATLON': attr_out[key] = NA_val
            elif project == 'LAMBERT': attr_out[key] = plon
            else: exit()
        elif key == 'YCENT':
            if project == 'LATLON': attr_out[key] = NA_val
            elif project == 'LAMBERT': attr_out[key] = plat
            else: exit()
        else:
            attr_out[key] = value

    if ijfile is None:
        # 2.2a.1 Get lat and lon at center of grid cells for output
        if attr_out['GDTYP'] == 1:
            lats_out, lons_out = ll2latlon(xorg,
                                           yorg,
                                           dxy_out,
                                           nx_out,
                                           ny_out,
                                           lno_edges=False)
        elif attr_out['GDTYP'] == 2:
            lcc_out = get_lcc(attr_out)
            lats_out, lons_out = lcp2latlon(lcc_out,
                                            dxy_out,
                                            nx_out,
                                            ny_out,
                                            lno_edges=False)
        else:
            print("Your GDTYP is {}".format(attr_in['GDTYP']))
            exit(
                "This program currently supports LATLON (GDTYP = 1) and LCP (GDTYP = 2) only."
            )
        # 2.2a.2 Get x and y values of input file corresponding to each output grid cell
        if attr_in['GDTYP'] == 1:
            xpos = lons_out
            ypos = lats_out
        elif attr_in['GDTYP'] == 2:
            x0_in = attr_in['XORIG']
            y0_in = attr_in['YORIG']
            lcc_in = get_lcc(attr_in)
            xpos, ypos = ll2lcp(lons_out, lats_out, lcc_in, ny_out, nx_out,
                                x0_in, y0_in)
        else:
            print("Your GDTYP is {}".format(attr_in['GDTYP']))
            exit(
                "This program currently supports LATLON (GDTYP = 1) and LCP (GDTYP = 2) only."
            )
        # 2.2a.3 Get grid index of input file corresponding to each output grid cell
        dxy_in = attr_in['XCELL']
        x0 = attr_in['XORIG']
        y0 = attr_in['YORIG']
        nx_in = attr_in['NCOLS']
        ny_in = attr_in['NROWS']
        if lnearest:
            if attr_in['GDTYP'] == 1:
                lats_in, lons_in = ll2latlon(x0,
                                             y0,
                                             dxy_in,
                                             nx_in,
                                             ny_in,
                                             lno_edges=False)
            elif attr_in['GDTYP'] == 2:
                lats_in, lons_in = lcp2latlon(lcc_in,
                                              dxy_in,
                                              nx_in,
                                              ny_in,
                                              lno_edges=False)
            else:
                print("Your GDTYP is {}".format(attr_in['GDTYP']))
                exit(
                    "This program currently supports LATLON (GDTYP = 1) and LCP (GDTYP = 2) only."
                )
            iloc, jloc = find_ij(dxy_in,
                                 x0,
                                 y0,
                                 nx_in,
                                 ny_in,
                                 nx_out,
                                 ny_out,
                                 xpos,
                                 ypos,
                                 lno_edges=True,
                                 lnearest=lnearest,
                                 lats_in=lats_in,
                                 lons_in=lons_in,
                                 lats_out=lats_out,
                                 lons_out=lons_out)
        else:
            iloc, jloc = find_ij(dxy_in,
                                 x0,
                                 y0,
                                 nx_in,
                                 ny_in,
                                 nx_out,
                                 ny_out,
                                 xpos,
                                 ypos,
                                 lno_edges=True)
    else:
        # 2.2b Read index mapping from ijfile
        iloc = np.zeros((ny_out, nx_out), dtype=int)
        jloc = np.zeros((ny_out, nx_out), dtype=int)
        with open(ijfile) as csvfile:
            lines = csv.reader(csvfile, delimiter=',', quotechar='|')
            for line in lines:
                if line[0][0] == 'i': continue  # skip the header line
                i = int(line[0].split()[0])
                j = int(line[1].split()[0])
                iloc[j, i] = int(line[2].split()[0])
                jloc[j, i] = int(line[3].split()[0])

    # 2.3 Map each output cell to its input cell and prepare data to save
    nspc = len(tracernames)
    nz = attr_out['NLAYS']
    data2sav = np.zeros((nspc, nstep, nz, ny_out, nx_out))
    for j in range(ny_out):
        for i in range(nx_out):
            data2sav[:, :, :, j, i] = indata[:, :, :, jloc[j, i], iloc[j, i]]
    del indata

    # 3. WRITE A BINARY FILE
    fout = _data2fin(data2sav, tracernames, attr_out)
    if l2uam:
        wrt_uamiv(outfile, fout, lsurf=False, ounits=ovarunits)
    else:
        wrt_ioapi(outfile, fout, lsurf=False, ounits=ovarunits)
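
# A hypothetical ijfile sketch (not part of the source), matching the reader
# above: a header line starting with 'i', then one row per output grid cell
# giving the output (i, j) and the input-grid (iloc, jloc) it maps from.
#
#   i, j, iloc, jloc
#   0, 0, 12, 34
#   1, 0, 13, 34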
Example #7
def calc_W126(outfile,
              fileh,
              filet,
              year,
              smo,
              emo,
              tz,
              *,
              lyyyyjjj=True,
              loutf=True,
              lverbose=True):
    # Include functions
    from CAMxtools.tzone.get_local_hrs import get_ozone_12hr
    from CAMxtools.write.wrt_ioapi import wrt_ioapi
    from CAMxtools.write.wrt_uamiv import wrt_uamiv
    from CAMxtools.write.set_attr import set_attr
    from CAMxtools._cnvt._data2fin import _data2fin
    from PseudoNetCDF.camxfiles.Memmaps import uamiv
    import netCDF4 as ncdf4
    import numpy as np
    import datetime
    import calendar
    import os

    # Check the first day file to figure out nx and ny
    jdate = datetime.date(year, smo, 1).strftime("%Y%j")
    gdate = year * 10000 + smo * 100 + 1
    if lyyyyjjj:
        infile = fileh + '.' + str(jdate) + '.' + filet
    else:
        infile = fileh + '.' + str(gdate) + '.' + filet
    if not os.path.exists(infile):
        print("{} does not exist!".format(infile))
        print('Program exits from calc_W126 at point 1')
        exit()
    try:
        fin = uamiv(infile)
        ftype = 'avg'
    except Exception:
        try:
            fin = ncdf4.Dataset(infile)
            ftype = 'netcdf'
        except Exception:
            print("Unrecognized file type")
            print("infile = {}".format(infile))
            exit()
    nx = len(fin.dimensions['COL'])
    ny = len(fin.dimensions['ROW'])

    # Loop to get monthly sums of W126
    nmo = emo - smo + 1
    monsum = np.zeros((nmo, ny, nx))
    imo = 0
    mo = smo
    print("Calculating monthly sum of W126")
    while imo < nmo:
        print("Processing month :{}".format(calendar.month_name[mo]))
        jbdate = int(datetime.date(year, mo, 1).strftime("%Y%j"))
        edd = calendar.monthrange(year, mo)[1]
        jedate = int(datetime.date(year, mo, edd).strftime("%Y%j"))
        print("jedate={}".format(str(jedate)))
        jdate = jbdate
        while (jdate <= jedate):
            jdatep1 = jdate + 1
            if lyyyyjjj:
                infile1 = fileh + '.' + str(jdate) + '.' + filet
                infile2 = fileh + '.' + str(jdatep1) + '.' + filet
            else:
                gdate = year * 10000 + mo * 100 + (jdate - jbdate + 1)
                infile1 = fileh + '.' + str(gdate) + '.' + filet
                if (jdate < jedate):
                    gdatep1 = gdate + 1
                else:
                    gdatep1 = year * 10000 + (mo + 1) * 100 + 1
                infile2 = fileh + '.' + str(gdatep1) + '.' + filet

            for infile in [infile1, infile2]:
                if not os.path.exists(infile):
                    print("{} does not exist!".format(infile))
                    print('Program exits from calc_W126 at point 2')
                    exit()

            if ftype == 'avg':
                fin1 = uamiv(infile1)
                fin2 = uamiv(infile2)
            else:
                fin1 = ncdf4.Dataset(infile1)
                fin2 = ncdf4.Dataset(infile2)

            # get ozone conc. from 8am-8pm local time
            if (lverbose):
                print("  Processing Julian day :{}".format(str(jdate)))
            o3_48_utc = np.append(fin1.variables["O3"],
                                  fin2.variables["O3"],
                                  axis=0)
            o3 = get_ozone_12hr(o3_48_utc, tz, nx, ny)

            # calculate the daily W126 contribution
            W126 = calc_daily_W126(o3, nx, ny)

            # calculate monthly sum of W126
            for i in range(nx):
                for j in range(ny):
                    monsum[imo, j, i] += W126[j, i]

            # increase day by one
            date = datetime.datetime.strptime(str(jdate), "%Y%j")
            date += datetime.timedelta(days=1)
            jdate = int(date.strftime("%Y%j"))

        # increase month by one
        imo += 1  # increase month counter
        mo += 1  # increase the order of month in the year

    # Loop to calculate three-month sums
    nmo = emo - smo - 1  # two fewer than the total number of months
    mon3sum = np.zeros((nmo, ny, nx))
    imo = 0
    mo = smo
    print("Calculating three monthly sum of W126")
    while imo < nmo:
        print("Processing month :{}".format(calendar.month_name[mo]))
        for i in range(nx):
            for j in range(ny):
                mon3sum[imo, j,
                        i] = monsum[imo, j, i] + monsum[imo + 1, j,
                                                        i] + monsum[imo + 2, j,
                                                                    i]
        # increase month by one
        imo += 1  # increase month counter
        mo += 1  # increase the order of month in the year

    # Find the highest three-month sum
    h1mon3sum = np.zeros((ny, nx))
    imo = 0
    print("Finding the highest three monthly sum of W126")
    while imo < nmo:
        for i in range(nx):
            for j in range(ny):
                if h1mon3sum[j, i] < mon3sum[imo, j, i]:
                    h1mon3sum[j, i] = mon3sum[imo, j, i]
        # increase month by one
        imo += 1  # increase month counter
        mo += 1  # increase the order of month in the year
    # write output to netcdf
    fin0 = fin  # to copy file header
    nspc = 1
    nz = 1
    nsteps = 1
    data2sav = np.zeros((nspc, nsteps, nz, ny, nx))
    data2sav[0, 0, 0, :, :] = h1mon3sum
    tracernames = "O3".split()
    beghr = 8

    # File attributes copied from the first input file's header; beghr records
    # the 8am local start hour
    lfin0 = True
    attr_fed = {}
    attr_in = set_attr(lfin0, fin0, attr_fed, beghr=beghr)

    # Write output to a binary file
    fout = _data2fin(data2sav, tracernames, attr_in)
    l2uam = False  # force IOAPI output format
    if loutf:
        if l2uam:
            wrt_uamiv(outfile, fout)
        else:
            wrt_ioapi(outfile, fout)
    else:
        return tracernames, data2sav
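
# calc_daily_W126 is referenced above but not shown here. Below is a minimal
# sketch under the standard EPA W126 weighting (an assumption, not the source
# code): each daylight-hour ozone value C, in ppm, contributes
# C / (1 + 4403*exp(-126*C)), summed over the 12 hours from 8am to 8pm.

def calc_daily_W126_sketch(o3, nx, ny):
    import numpy as np
    # o3[12, ny, nx]: hourly ozone in ppm for 8am-8pm local time;
    # nx and ny are kept only to mirror the call signature above.
    weighted = o3 / (1.0 + 4403.0 * np.exp(-126.0 * o3))
    return weighted.sum(axis=0)  # daily W126 contribution, shape (ny, nx)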