def arg_parse(varname=None,domain=None,input_file=None,list_var_names=False):
    # Fall back to WRF domain 3 when no domain is given
    wrf_dom = domain if domain is not None else 3
    if input_file is not None:
        ncfile = Nio.open_file(input_file,format='grib')
    else:
        # Let PyNIO infer the format from the file's extension
        ncfile = Nio.open_file(find_inputfile(wrf_dom))

    if list_var_names:
        print_var_summary(ncfile)

    # varname defaults to None, so guard before iterating
    if varname:
        for var in varname:
            print_var_summary(ncfile,varname=var)

    return
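A call sketch (find_inputfile and print_var_summary are helpers defined elsewhere in the source module; the file name here is hypothetical):

arg_parse(varname=['T2', 'PSFC'], input_file='wrfout_d03.grib', list_var_names=True)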
Example #2
    def split(self, num):

        if type(num) is not int:
            raise TypeError("number to split must be an integer")

        if self._open_file is False:

            if self.trange is not None:
                time_len = self.trange[1]-self.trange[0]
                tstart = self.trange[0]
            else:
                f = Nio.open_file(self.pathname)
                time_len = f.dimensions['time']
                f.close()
                tstart = 0

            chunk_size = time_len//num
            remainder = time_len%num

            tranges = [(tstart+i*chunk_size+remainder*i//num, \
                    tstart+(i+1)*chunk_size+remainder*(i+1)//num) \
                    for i in range(num)]

            return [RectGrid(self.pathname, self.varname, trange=it) \
                    for it in tranges]

        else:
            raise RuntimeError("RectGrid must not be initialized before running split()")
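The trange arithmetic above spreads the remainder of an uneven division across the later chunks. A standalone sketch of the same formula with illustrative values:

# For time_len=10 split 3 ways: chunk_size=3, remainder=1,
# and the chunks come out as (0, 3), (3, 6), (6, 10).
time_len, num, tstart = 10, 3, 0
chunk_size, remainder = time_len // num, time_len % num
tranges = [(tstart + i*chunk_size + remainder*i//num,
            tstart + (i+1)*chunk_size + remainder*(i+1)//num)
           for i in range(num)]
print(tranges)  # [(0, 3), (3, 6), (6, 10)]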
Example #3
def do_setup(filename):
    if os.path.exists(filename): os.remove(filename)
    f = Nio.open_file(filename, 'c')
    (nx, ny, nz, nt,ns) = (21, 21, 12, 10,1)
    (dx, dy, dz, dt) = (1000., 1000., 400., 3600.)
    f.create_dimension('xc', nx)
    f.create_dimension('yc', ny)
    f.create_dimension('zc', nz)
    f.create_dimension('time', nt)
    f.create_dimension('single', ns)
    f.Conventions = 'CF-1.0'
    f.source = 'ARPS'

    var = f.create_variable('xc', 'f', ('xc',))
    setattr(var, 'axis', 'X')
    var = f.create_variable('yc', 'f', ('yc',))
    setattr(var, 'axis', 'Y')
    var = f.create_variable('zc', 'f', ('zc',))
    setattr(var, 'axis', 'Z')
    var = f.create_variable('time', 'f', ('time',))
    setattr(var, 'axis', 'T')
    setattr(var, 'units', 'seconds since 2007-03-21 06:00:00')
    var = f.create_variable('PT', 'f', ('time', 'zc', 'yc', 'xc'))
    var = f.create_variable('PTS', 'f', ('single','time', 'zc', 'yc', 'xc'))
    var = f.create_variable('ZP', 'f', ('zc', 'yc', 'xc'))
    var = f.create_variable('TOPO', 'f', ('yc', 'xc'))
    var = f.create_variable('lon', 'f', ('yc','xc'))
    var = f.create_variable('lat', 'f', ('yc','xc'))

    xc = N.arange(nx, dtype='float32')*dx
    yc = N.arange(ny, dtype='float32')*dy
    zc = N.arange(nz, dtype='float32')*dz
    f.variables['xc'][:] = xc
    f.variables['yc'][:] = yc
    f.variables['zc'][:] = zc
    f.variables['time'][:] = N.arange(nt, dtype='float32')*dt
    a = N.arange(nt*nz*ny*nx,dtype = 'float32')
    #a = N.array(N.random.randn(nt,nz,ny,nx), dtype='float32')
    a = a.reshape(nt,nz,ny,nx)
    #print a.shape
    mask = N.zeros(a.shape,N.bool_)
    mask[:,3,:,:] = 1
    # tests adding a fill value

    am = ma.array(a,mask=mask)
    f.variables['PT'][:] = am[:]
    f.variables['PTS'][:] = am[:]
    #if verbose: print f.variables['PT']
    H = 5000.
    topo = 1000*N.cos(2*N.pi*(xc-10000.)/20000.)+1000.
    zp = zc[:,N.newaxis]*(1-topo[N.newaxis,:]/H) + topo[N.newaxis,:]
    topof = N.zeros((ny, nx), dtype='float32')
    topof[:,:] = topo[N.newaxis,:]
    zpf = N.zeros((nz,ny,nx), dtype='float32')
    zpf[:] = zp[:,N.newaxis,:]
    f.variables['ZP'][:] = zpf
    f.variables['TOPO'][:] = topof
    f.variables['lon'][:] = N.cos(0.1)*xc[N.newaxis,:] - N.sin(0.1)*yc[:,N.newaxis]
    f.variables['lat'][:] = N.sin(0.1)*xc[N.newaxis,:] + N.cos(0.1)*yc[:,N.newaxis]
    f.close()
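A quick usage sketch for the fixture above (the output file name is arbitrary):

do_setup("nio_demo.nc")
f = Nio.open_file("nio_demo.nc")
print(f.variables["PT"].shape)  # (10, 12, 21, 21): (time, zc, yc, xc)
f.close()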
Example #4
def setup_nc_file(filename,lat,lon,nt,ny,nx):
    """setup a netcdf file for output"""
    
    ncfile=Nio.open_file(filename,mode="w",format="nc")
    ncfile.create_dimension("time",nt)
    ncfile.create_dimension("lat",ny)
    ncfile.create_dimension("lon",nx)

    timev=ncfile.create_variable("time","f",("time",))
    times=np.arange(nt)/48.0
    timev[:]=times.astype("f")
    timev.longname="Time"
    timev.units="Days from "+start_date

    latv=ncfile.create_variable("lat","f",("lat","lon"))
    latv[:]=lat.astype("f")
    latv.longname="Latitude"
    latv.units="degrees"

    lonv=ncfile.create_variable("lon","f",("lat","lon"))
    lonv[:]=lon.astype("f")
    lonv.longname="Longitude"
    lonv.units="degrees"
    
    return ncfile
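Note that setup_nc_file reads the module-level name start_date, which this excerpt never defines. A minimal usage sketch under that assumption:

import numpy as np

start_date = "2000-01-01 00:00:00"  # assumed global; not defined in the excerpt
ny, nx, nt = 4, 5, 48
lat = np.linspace(35.0, 38.0, ny * nx).reshape(ny, nx)
lon = np.linspace(-105.0, -100.0, ny * nx).reshape(ny, nx)
ncfile = setup_nc_file("demo.nc", lat, lon, nt, ny, nx)
ncfile.close()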
Example #5
def do_setup_nocrd(filename):
    if os.path.exists(filename): os.remove(filename)
    f = Nio.open_file(filename, 'c')
    (nx, ny, nz, nt) = (20, 25, 5, 10)
    (dx, dy, dz, dt) = (1000., 1000., 800., 3600.)
    f.create_dimension('xc', nx)
    f.create_dimension('yc', ny)
    f.create_dimension('zc', nz)
    f.create_dimension('time', nt)
    f.Conventions = 'CF-1.0'
    f.source = 'ARPS'

    var = f.create_variable('time', 'f', ('time',))
    setattr(var, 'axis', 'T')
    setattr(var, 'units', 'seconds since 2007-03-21 06:00:00')
    var = f.create_variable('PT', 'f', ('time', 'zc', 'yc', 'xc'))
    var = f.create_variable('ZP', 'f', ('zc', 'yc', 'xc'))

    xc = N.arange(nx, dtype='float32')*dx
    yc = N.arange(ny, dtype='float32')*dy
    f.variables['time'][:] = N.arange(nt, dtype='float32')*dt
    #a = N.array(N.random.randn(nt,nz,ny,nx), dtype='float32')
    a = N.arange(nt*nz*ny*nx,dtype = 'float32')
    a = a.reshape(nt,nz,ny,nx)
    f.variables['PT'][:] = a
    a = N.zeros((nz,ny,nx))
    a[:] = N.arange(nz)[:,N.newaxis,N.newaxis]
    f.variables['ZP'][:] = N.array(a, dtype='float32')
    f.close()
Example #6
def main():
    base_path = "/caps2/tsupinie/1kmf-control/"
    temp = goshen_1km_temporal(start=14400, end=14400)
    grid = goshen_1km_grid()
    n_ens_members = 40

    np.seterr(all='ignore')

    ens = loadEnsemble(base_path, [ 11 ], temp.getTimes(), ([ 'pt', 'p' ], computeDensity))
    ens = ens[0, 0]

    zs = decompressVariable(nio.open_file("%s/ena001.hdfgrdbas" % base_path, mode='r', format='hdf').variables['zp'])
    xs, ys = grid.getXY()
    xs = xs[np.newaxis, ...].repeat(zs.shape[0], axis=0)
    ys = ys[np.newaxis, ...].repeat(zs.shape[0], axis=0)

    eff_buoy = effectiveBuoyancy(ens, (zs, ys, xs), plane={'z':10})
    print(eff_buoy)

    pylab.figure()
    pylab.contourf(xs[0], ys[0], eff_buoy[0], cmap=matplotlib.cm.get_cmap('RdBu_r'))
    pylab.colorbar()

    grid.drawPolitical()

    pylab.suptitle("Effective Buoyancy")
    pylab.savefig("eff_buoy.png")
    pylab.close()
    return
Example #7
def main():
    files = sorted(glob.glob("3kmgoshen.hdf[0123456789]?????"))

    trajectory_points = [ [ (1, 0, 0) ],
                          [ (10, 0, 0) ],
                          [ (20, 0, 0) ],
                          [ (30, 0, 0) ],
                          [ (40, 0, 0) ] ]

    grid_spacing = 1000
    time_spacing = 300

    for file_name in files:
        hdf = nio.open_file(file_name, mode='r', format='hdf')

        u_grid = hdf.variables['u'][:]
        v_grid = hdf.variables['v'][:]

        hdf.close()

        for trajectory in trajectory_points:
            last_z, last_x, last_y = trajectory[-1]
            point_u = interpolate(u_grid, last_x, last_y, last_z)
            point_v = interpolate(v_grid, last_x, last_y, last_z)

            new_x = last_x + time_spacing * point_u / grid_spacing
            new_y = last_y + time_spacing * point_v / grid_spacing

            if new_x > u_grid.shape[1] - 1 or new_x < 0 or new_y > u_grid.shape[2] - 1 or new_y < 0:
                print "Parcel out of bounds ..."
            else:
                trajectory.append((last_z, new_x, new_y))

    print(trajectory_points)
    return
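interpolate() is called but not shown in this example. A minimal nearest-grid-point stand-in with the same (grid, x, y, z) signature (the real helper presumably interpolates between neighboring points):

def interpolate(grid, x, y, z):
    # Nearest-grid-point lookup; the bounds check in main() treats the
    # grid as indexed (z, x, y), so index it the same way here.
    return grid[int(round(z)), int(round(x)), int(round(y))]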
Example #8
 def makeNioFile(self, variableName):
     filename = '/tmp/good_%s.nc' % variableName
     f = nio.open_file(filename, 'w')
     f.create_dimension('test_dimension', 1)
     f.create_variable(variableName,'l',('test_dimension',))
     f.close()
     return filename
Example #9
def write_2d_output(fname,lat,lon,p):
    
    Ny,Nx=p.shape
    ncf=Nio.open_file(fname,mode="w",format="nc")
    ncf.create_dimension("lat",Ny)
    ncf.create_dimension("lon",Nx)

    varname="XLAT"
    ncf.create_variable(varname,'f',('lat','lon'))
    ncf.variables[varname][:]=lat.astype('f')
    ncf.variables[varname].units="degrees"
    ncf.variables[varname].description="Latitude"

    varname="XLONG"
    ncf.create_variable(varname,'f',('lat','lon'))
    ncf.variables[varname][:]=lon.astype('f')
    ncf.variables[varname].units="degrees"
    ncf.variables[varname].description="Longitude"

    varname="precipitation"
    ncf.create_variable(varname,'f',('lat','lon'))
    ncf.variables[varname][:]=p.astype('f')
    ncf.variables[varname].units="kg/m^2/s"
    ncf.variables[varname].description="precipitation rate"
    
    ncf.close()
Example #10
def read_nc(filename,var="data",proj=None,returnNCvar=False):
    '''read a netCDF file and return the specified variable

    output is a structure:
        data:raw data as an array
        proj:string representation of the projection information
        atts:data attribute dictionary (if any)
    if (returnNCvar==True) then the Nio file is not closed and the Nio
        representation of the variable is returned instead of being read into
        memory immediately.
    '''
    d=Nio.open_file(filename, mode='r',format="nc")
    outputdata=None
    if var is not None:
        data=d.variables[var]
        attributes=d.variables[var].__dict__
        if returnNCvar:
            outputdata=data
        else:
            outputdata=data[:]
    outputproj=None
    if proj is not None:
        projection=d.variables[proj]
        outputproj=str(projection)
    
    
    if returnNCvar:
        return Bunch(data=outputdata,proj=outputproj,ncfile=d,atts=attributes)
    d.close()
    return Bunch(data=outputdata,proj=outputproj,atts=attributes)
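A usage sketch for both modes (Bunch is a simple attribute-container class defined elsewhere; file and variable names are hypothetical):

res = read_nc("precip.nc", var="precipitation")
print(res.data.shape, res.atts)

# Deferred-read mode: the file stays open, so close it when finished.
res = read_nc("precip.nc", var="precipitation", returnNCvar=True)
subset = res.data[0:10]
res.ncfile.close()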
Example #11
def findHeights(grdbas, bounds):
    hdf = nio.open_file(grdbas, mode='r', format='hdf')

    bounds_x, bounds_y = bounds
    # Integer division so the midpoint can be used as an array index
    column_x = (bounds_x.start + bounds_x.stop) // 2
    column_y = (bounds_y.start + bounds_y.stop) // 2

    return hdf.variables['zp'][:, column_y, column_x]
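A call sketch, assuming the bounds are slice objects as the .start/.stop access implies (values are hypothetical):

heights = findHeights("goshen.hdfgrdbas", (slice(100, 130), slice(100, 130)))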
Example #12
def do_setup(filename):
    if os.path.exists(filename):
        os.remove(filename)
    f = Nio.open_file(filename, "c")
    (nx, ny, nz, nt) = (21, 21, 12, 10)
    (dx, dy, dz, dt) = (1000.0, 1000.0, 400.0, 3600.0)
    f.create_dimension("xc", nx)
    f.create_dimension("yc", ny)
    f.create_dimension("zc", nz)
    f.create_dimension("time", nt)
    f.Conventions = "CF-1.0"
    f.source = "ARPS"

    var = f.create_variable("xc", "f", ("xc",))
    setattr(var, "axis", "X")
    var = f.create_variable("yc", "f", ("yc",))
    setattr(var, "axis", "Y")
    var = f.create_variable("zc", "f", ("zc",))
    setattr(var, "axis", "Z")
    var = f.create_variable("time", "f", ("time",))
    setattr(var, "axis", "T")
    setattr(var, "units", "seconds since 2007-03-21 06:00:00")
    var = f.create_variable("PT", "f", ("time", "zc", "yc", "xc"))
    var = f.create_variable("ZP", "f", ("zc", "yc", "xc"))
    var = f.create_variable("TOPO", "f", ("yc", "xc"))
    var = f.create_variable("lon", "f", ("yc", "xc"))
    var = f.create_variable("lat", "f", ("yc", "xc"))

    xc = N.arange(nx, dtype="float32") * dx
    yc = N.arange(ny, dtype="float32") * dy
    zc = N.arange(nz, dtype="float32") * dz
    f.variables["xc"][:] = xc
    f.variables["yc"][:] = yc
    f.variables["zc"][:] = zc
    f.variables["time"][:] = N.arange(nt, dtype="float32") * dt
    a = N.arange(nt * nz * ny * nx, dtype="float32")
    # a = N.array(N.random.randn(nt,nz,ny,nx), dtype='float32')
    a = a.reshape(nt, nz, ny, nx)
    print(a.shape)
    mask = N.zeros(a.shape, N.bool_)
    mask[:, 3, :, :] = 1
    # tests adding a fill value

    am = ma.array(a, mask=mask)
    f.variables["PT"][:] = am[:]
    # if verbose: print f.variables['PT']
    H = 5000.0
    topo = 1000 * N.cos(2 * N.pi * (xc - 10000.0) / 20000.0) + 1000.0
    zp = zc[:, N.newaxis] * (1 - topo[N.newaxis, :] / H) + topo[N.newaxis, :]
    topof = N.zeros((ny, nx), dtype="float32")
    topof[:, :] = topo[N.newaxis, :]
    zpf = N.zeros((nz, ny, nx), dtype="float32")
    zpf[:] = zp[:, N.newaxis, :]
    f.variables["ZP"][:] = zpf
    f.variables["TOPO"][:] = topof
    f.variables["lon"][:] = N.cos(0.1) * xc[N.newaxis, :] - N.sin(0.1) * yc[:, N.newaxis]
    f.variables["lat"][:] = N.sin(0.1) * xc[N.newaxis, :] + N.cos(0.1) * yc[:, N.newaxis]
    f.close()
Example #13
def merge(ts):
    """
    Process an hour's worth of stage4 data into the hourly RE
    """

    # Load up the 12z 24h total, this is what we base our deltas on
    fp = "/mesonet/ARCHIVE/data/%s/stage4/ST4.%s.24h.grib" % (
      ts.strftime("%Y/%m/%d"), ts.strftime("%Y%m%d%H") )

    grib = Nio.open_file(fp, 'r')
    # Rough subsample, since the whole enchilada is too much
    lats = numpy.ravel( grib.variables["g5_lat_0"][200:-100:5,300:900:5] )
    lons = numpy.ravel( grib.variables["g5_lon_1"][200:-100:5,300:900:5] )
    vals = numpy.ravel( grib.variables["A_PCP_GDS5_SFC_acc24h"][200:-100:5,300:900:5] )
    res = Ngl.natgrid(lons, lats, vals, iemre.XAXIS, iemre.YAXIS)
    stage4 = res.transpose()
    # Prevent Large numbers, negative numbers
    stage4 = numpy.where( stage4 < 10000., stage4, 0.)
    stage4 = numpy.where( stage4 < 0., 0., stage4)

    # Open up our RE file
    nc = netCDF4.Dataset("/mesonet/data/iemre/%s_mw_hourly.nc" % (ts.year,),'a')
    ts0 = ts + mx.DateTime.RelativeDateTime(days=-1)
    jan1 = mx.DateTime.DateTime(ts.year, 1, 1, 0, 0)
    offset0 = int(( ts0 - jan1).hours)
    offset1 = int(( ts -  jan1).hours)
    if offset0 < 0:
        offset0 = 0
    iemre2 = numpy.sum(nc.variables["p01m"][offset0:offset1,:,:], axis=0)
    
    iemre2 = numpy.where( iemre2 > 0., iemre2, 0.00024)
    iemre2 = numpy.where( iemre2 < 10000., iemre2, 0.00024)
    print "Stage IV 24h [Avg %5.2f Max %5.2f]  IEMRE Hourly [Avg %5.2f Max: %5.2f]" % (
                    numpy.average(stage4), numpy.max(stage4), 
                    numpy.average(iemre2), numpy.max(iemre2) )
    multiplier = stage4 / iemre2
    print "Multiplier MIN: %5.2f  AVG: %5.2f  MAX: %5.2f" % (
                    numpy.min(multiplier), numpy.average(multiplier),numpy.max(multiplier))
    for offset in range(offset0, offset1):
        data  = nc.variables["p01m"][offset,:,:]
        
        # Keep data within reason
        data = numpy.where( data > 10000., 0., data)
        adjust = numpy.where( data > 0, data, 0.00001) * multiplier
        adjust = numpy.where( adjust > 250.0, 0, adjust)
        nc.variables["p01m"][offset,:,:] = numpy.where( adjust < 0.01, 0, adjust)
        ts = jan1 + mx.DateTime.RelativeDateTime(hours=offset)
        print "%s IEMRE %5.2f %5.2f Adjusted %5.2f %5.2f" % (ts.strftime("%Y-%m-%d %H"), 
                                    numpy.average(data), numpy.max(data),
                                    numpy.average(nc.variables["p01m"][offset]),
                                    numpy.max(nc.variables["p01m"][offset]))
    nc.sync()
    iemre2 = numpy.sum(nc.variables["p01m"][offset0:offset1,:,:], axis=0)
    print "Stage IV 24h [Avg %5.2f Max %5.2f]  IEMRE Hourly [Avg %5.2f Max: %5.2f]" % (
                    numpy.average(stage4), numpy.max(stage4), 
                    numpy.average(iemre2), numpy.max(iemre2) )
    nc.close()
Example #14
def main():
    files = glob("1kmgoshen/1kmgoshen.hdf0*")
    hdf_grdbas = nio.open_file("1kmgoshen/1kmgoshen.hdfgrdbas", mode='r', format='hdf')

#   topo, topo_lats, topo_lons = load_topo("e10g", (6000, 10800), ((0., 50.), (-180., -90.)), ((36., 46.), (-114., -101.)))

    for file in files:
        time_sec = file[-6:]
        hdf_data = nio.open_file(file, mode='r', format='hdf')
        hydrostatic, mass_cons, thermal_wind_u, thermal_wind_v = computeBalances(hdf_data, hdf_grdbas)
        plot_map(hydrostatic[1], (1000, 1000), "xy", r"Hydrostatic Imbalance (Pa m$^{-1}$)", "hydrostatic_t%s.png" % time_sec) #, topo=(topo, topo_lats, topo_lons))
        plot_map(mass_cons[1], (1000, 1000), "xy", r"Mass Conservation Imbalance (m s$^{-2}$)", "mass_cons_t%s.png" % time_sec) #, topo=(topo, topo_lats, topo_lons))
        plot_map(thermal_wind_u[1], (1000, 1000), "xy", r"Thermal Wind $u$ Imbalance (m s$^{-2}$)", "thermal_wind_u_t%s.png" % time_sec) #, topo=(topo, topo_lats, topo_lons))
        plot_map(thermal_wind_v[1], (1000, 1000), "xy", r"Thermal Wind $v$ Imbalance (m s$^{-2}$)", "thermal_wind_v_t%s.png" % time_sec) #, topo=(topo, topo_lats, topo_lons))
        hdf_data.close()

    hdf_grdbas.close()
    return
Example #15
def main():
    print "Testing"
    import Nio as nio
    data = nio.open_file('/home/wrfuser/hootpy/data/wrfout_d01_PLEV.nc')
#   print mslp(data.variables['PSFC'][:],data.variables['HGT'][:],data.variables['T2'][:],data.variables['Q2'][:])
    dewp = dewpoint(data.variables['T'][0,:],data.variables['QVAPOR'][0,:],data.variables['P'][:]*100)
    print(dewp.max())
    print(dewp.min())
    print(dewp.mean())
Example #16
def save_graham_data_to_netcdf(
    netcdf_file_path, resolution_min=5, shape=(4320, 2160), lower_left_point=GeoPoint(-180.0, -90.0)
):
    opt = "c"
    if os.path.isfile(netcdf_file_path):
        os.remove(netcdf_file_path)

    file = Nio.open_file(netcdf_file_path, opt)
    save_graham_data_to_obj(file, resolution_min=resolution_min, the_shape=shape, lower_left_point=lower_left_point)
    file.close()
Example #17
def getAxes(base_path, agl=True, z_coord_type=""):
    grdbas_file = _buildEnsGrdbas(0)
    hdf_grdbas = nio.open_file("%s/%s" % (base_path, grdbas_file), mode='r', format='hdf')
    axes = dict([ (ax[:1], decompressVariable(hdf_grdbas.variables[ax])) for ax in ['x', 'y', "zp%s" % z_coord_type] ])
    if agl:
        axes['z'], axes['z_MSL'] = _makeZCoordsAGL(axes['z'])
    else:
        axes['z_AGL'], axes['z'] = _makeZCoordsAGL(axes['z'])

    return axes
Example #18
def main():
    _epoch_time = datetime(1970, 1, 1, 0, 0, 0)
    _initial_time = datetime(2009, 6, 5, 18, 0, 0) - _epoch_time
    _initial_time = (_initial_time.microseconds + (_initial_time.seconds + _initial_time.days * 24 * 3600) * 1e6) / 1e6
    _target_times = [ 1800, 3600, 5400, 7200, 9000, 10800, 11100, 11400, 11700, 12000, 12300, 12600, 12900, 13200, 13500, 13800, 14100, 14400,
        14700, 15000, 15300, 15600, 15900, 16200, 16500, 16800, 17100, 17400, 17700, 18000 ]

    inflow_wd_lbound, inflow_wd_ubound = (100, 240)

#   bounds = (0, slice(90, 210), slice(40, 160))
#   bounds = (0, slice(100, 180), slice(90, 170))
    bounds = (0, slice(115, 140), slice(120, 145))
    rev_bounds = [ 0 ]
    rev_bounds.extend(bounds[2:0:-1])
    rev_bounds = tuple(rev_bounds)

    refl_base = "hdf/KCYS/1km/goshen.hdfrefl2d"
    refl_times = np.array([ int(f[-6:]) for f in glob.glob("%s??????" % refl_base) ])
    refl_keep_times = []
    refl_data = {}

    for tt in _target_times:
        idx = np.argmin(np.abs(refl_times - tt))
        if refl_times[idx] > tt and idx > 0:
            idx -= 1

        file_name = "%s%06d" % (refl_base, refl_times[idx])
        hdf = nio.open_file(file_name, mode='r', format='hdf')
        refl_keep_times.append(refl_times[idx])
        refl_data[refl_times[idx]] = hdf.variables['refl2d'][rev_bounds]

    _proj = setupMapProjection(goshen_1km_proj, goshen_1km_gs, bounds=bounds[1:])
#   _proj['resolution'] = 'h' 
    map = Basemap(**_proj)

    ttu_sticknet_obs = cPickle.load(open("ttu_sticknet.pkl", 'r'))
    psu_straka_obs = cPickle.load(open("psu_straka_mesonet.pkl", 'r'))

    all_obs = loadObs(['ttu_sticknet.pkl', 'psu_straka_mesonet.pkl'], [ _epoch_time + timedelta(seconds=(_initial_time + t)) for t in _target_times ],  map, (goshen_1km_proj['width'], goshen_1km_proj['height']), round_time=True)
    print(all_obs)

#   partitioned_obs = gatherObservations(all_obs, [ _initial_time + t for t in _target_times ])
    for time, refl_time in zip([ _initial_time + t for t in _target_times], refl_keep_times):
        time_str = (_epoch_time + timedelta(seconds=time)).strftime("%d %B %Y %H%M UTC")

        plot_obs = all_obs[np.where(all_obs['time'] == time)]

        inflow_idxs = np.where((plot_obs['wind_dir'] >= inflow_wd_lbound) & (plot_obs['wind_dir'] <= inflow_wd_ubound))[0]
        outflow_idxs = np.array([ idx for idx in range(plot_obs['id'].shape[0]) if idx not in inflow_idxs ])

        title = "All MM observations at %s" % time_str
        file_name = "mm_obs_%06d.png" % (time - _initial_time)

        plotObservations(plot_obs, map, title, file_name, refl=refl_data[refl_time])
    return
Example #19
 def make5VariableNioFile(self):
     filename = '/tmp/5_variables.nc'
     f = nio.open_file(filename, 'w')
     f.create_dimension('dimension_one', 1)
     f.create_variable('one', 'l', ('dimension_one',))
     f.create_variable('two', 'l', ('dimension_one',))
     f.create_variable('three', 'l', ('dimension_one',))
     f.create_variable('four', 'l', ('dimension_one',))
     f.create_variable('five', 'l', ('dimension_one',))
     f.close()
     return filename
Example #20
def load_reflectivity_vars(file_name, vars, ens_member):
    hdf = nio.open_file(file_name, mode='r', format='hdf')

    vars['pt'][ens_member] = hdf.variables['pt'][12]
    vars['p'][ens_member] = hdf.variables['p'][12]
    vars['qr'][ens_member] = np.maximum(hdf.variables['qr'][12], np.zeros(hdf.variables['qr'][12].shape))
    vars['qs'][ens_member] = np.maximum(hdf.variables['qs'][12], np.zeros(hdf.variables['qs'][12].shape))
    vars['qh'][ens_member] = np.maximum(hdf.variables['qh'][12], np.zeros(hdf.variables['qh'][12].shape))

    hdf.close()
    return
Example #21
def loadGrdbas(grdbas_file, agl):
    grdbas = nio.open_file(grdbas_file, mode='r', format='hdf')
    z_coords = decompressVariable(grdbas.variables['zp'])

    x_coords = decompressVariable(grdbas.variables['x'])
    y_coords = decompressVariable(grdbas.variables['y'])

    if agl:
        z_coords = _makeZCoordsAGL(z_coords)

    return z_coords, y_coords, x_coords
Example #22
 def load_data(self):
     """
     Loads data from MRMS GRIB2 files and handles compression duties if files are compressed.
     """
     data = []
     loaded_dates = []
     loaded_indices = []
     for t, timestamp in enumerate(self.all_dates):
         date_str = timestamp.date().strftime("%Y%m%d")
         full_path = self.path_start + date_str + "/"
         if self.variable in os.listdir(full_path):
             full_path += self.variable + "/"
             data_files = sorted(os.listdir(full_path))
             file_dates = pd.to_datetime([d.split("_")[-1][0:13] for d in data_files])
             if timestamp in file_dates:
                 data_file = data_files[np.where(timestamp==file_dates)[0][0]]
                 print(full_path + data_file)
                 if data_file[-2:] == "gz":
                     subprocess.call(["gunzip", full_path + data_file])
                     file_obj = Nio.open_file(full_path + data_file[:-3])
                 else:
                     file_obj = Nio.open_file(full_path + data_file)
                 var_name = sorted(file_obj.variables.keys())[0]
                 data.append(file_obj.variables[var_name][:])
                 if self.lon is None:
                     self.lon = file_obj.variables["lon_0"][:]
                     # Translates longitude values from 0:360 to -180:180
                     if np.count_nonzero(self.lon > 180) > 0:
                         self.lon -= 360
                     self.lat = file_obj.variables["lat_0"][:]
                 file_obj.close()
                 if data_file[-2:] == "gz":
                     subprocess.call(["gzip", full_path + data_file[:-3]])
                 else:
                     subprocess.call(["gzip", full_path + data_file])
                 loaded_dates.append(timestamp)
                 loaded_indices.append(t)
     if len(loaded_dates) > 0:
         self.loaded_dates = pd.DatetimeIndex(loaded_dates)
         self.data = np.ones((self.all_dates.shape[0], data[0].shape[0], data[0].shape[1])) * -9999
         self.data[loaded_indices] = np.array(data)
Example #23
 def __init__(self, filename,nx,ny,nz=None,var=None,dtype='f'):
     history = 'Created : ' + time.ctime() + '\nby:'+os.environ['USER']+" using NC_writer Class"
     self.NCfile=Nio.open_file(filename,mode='w',format="nc",history=history)
     self.NCfile.create_dimension('time', 0)
     self.NCfile.create_dimension('lat', ny)
     self.ny=ny
     self.NCfile.create_dimension('lon', nx)
     self.nx=nx
     if nz:
         self.NCfile.create_dimension('level',nz)
         self.nz=nz
     self.NCfile.create_variable('time','l',('time',))
     if var: self.addVar(var,dtype=dtype)
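A usage sketch for this writer (assuming the class is named NC_writer, as its history string suggests, and that addVar is defined elsewhere in the class):

writer = NC_writer("out.nc", nx=10, ny=8, nz=4, var="T")
writer.NCfile.close()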
Example #24
def doit(ts):
    """
    Generate hourly plot of stage4 data
    """
    gmtnow = mx.DateTime.gmt()
    routes = "a"
    if (gmtnow - ts).hours < 2:
        routes = "ac"

    fp = "/mesonet/ARCHIVE/data/%s/stage4/ST4.%s.01h.grib" % (
                        ts.strftime("%Y/%m/%d"), ts.strftime("%Y%m%d%H") )
    if not os.path.isfile(fp):
        print('Missing stage4 %s' % (fp,))
        return

    grib = Nio.open_file(fp)
    lats = grib.variables["g5_lat_0"][:]
    lons = grib.variables["g5_lon_1"][:]
    vals = grib.variables["A_PCP_GDS5_SFC_acc1h"][:] / 25.4

    cfg = {
     'wkColorMap': 'BlAqGrYeOrRe',
     'nglSpreadColorStart': -1,
     'nglSpreadColorEnd'  : 2,
     '_MaskZero'          : True,
     'lbTitleString'      : "[inch]",
     '_valid'    : 'Hour Ending %s' % (ts.localtime().strftime("%d %B %Y %I %p %Z"),),
     '_title'    : "StageIV 1 Hour Precipitation [inch]",
     }

    tmpfp = iemplot.simple_grid_fill(lons, lats, vals, cfg)
    pqstr = "plot %s %s00 iowa_stage4_1h.png iowa_stage4_1h_%s.png png" % (
                            routes, ts.strftime("%Y%m%d%H"), ts.strftime("%H"))
    iemplot.postprocess(tmpfp, pqstr)

    # Plot Midwest
    cfg = {
     'wkColorMap': 'BlAqGrYeOrRe',
     'nglSpreadColorStart': -1,
     'nglSpreadColorEnd'  : 2,
     '_MaskZero'          : True,
     '_midwest'           : True,
     'lbTitleString'      : "[inch]",
     '_valid'    : 'Hour Ending %s' % (ts.localtime().strftime("%d %B %Y %I %p %Z"),),
     '_title'    : "StageIV 1 Hour Precipitation [inch]",
     }

    tmpfp = iemplot.simple_grid_fill(lons, lats, vals, cfg)
    pqstr = "plot %s %s00 midwest_stage4_1h.png midwest_stage4_1h_%s.png png" % (
      routes, ts.strftime("%Y%m%d%H"),ts.strftime("%H") )
    iemplot.postprocess(tmpfp, pqstr)
Example #25
 def test_nc4_NCFile_create_variable_ndim(self):
     iobackend.set_backend('netCDF4')
     ncf = iobackend.NCFile(self.ncfaname, mode='a')
     v2 = ncf.create_variable('v2', np.dtype('f'), ('t', 'x'))
     v2[:] = self.v2
     ncf.close()
     ncfr = Nio.open_file(self.ncfaname)
     actual = ncfr.variables['v2'][:]
     expected = self.v2
     ncfr.close()
     print_test_msg('NCFile.create_variable()',
                    actual=actual, expected=expected)
     npt.assert_array_equal(
         actual, expected, 'NCFile 2d-variable incorrect')
Example #26
def do_simulated_plot(file_name, valid_time, sec_string, obs=None, date_tag=True):
    path_name, file_base = os.path.split(file_name)
    ena_string = file_base.split(".")[0]

    if date_tag:
        tag = path_name.split("/")[-1].split("-")[-1]
        id_string = "-%s" % tag
    else:
        id_string = ""

    hdf = nio.open_file(file_name, mode='r', format='hdf')

    vars = {}
    for var in ['pt', 'p', 'qr', 'qs', 'qh', 'w']:
        vars[var] = hdf.variables[var][12]

        if vars[var].min() == -32768 or vars[var].max() == 32767:
            dindex = (12, slice(None), slice(None))
            vars[var] = decompressVariable(hdf.variables[var], dindex=dindex)

        if var in ['qr', 'qs', 'qh']:
            vars[var] = np.maximum(vars[var], np.zeros(vars[var].shape))

    reflectivity = computeReflectivity(**vars)

    reflectivity_thresh = np.maximum(reflectivity, np.zeros(reflectivity.shape))

    w_levels = list(range(-20, 0, 2))
    w_levels.extend(range(2, 22, 2))

    refl_title = "Base Reflectivity Valid %s" % valid_time.strftime("%d %b %Y %H%M UTC")
    refl_img_file_name = "%s%s.bref.%s.png" % (ena_string, id_string, sec_string)
    ptprt_title = r"$\theta$ Valid %s" % valid_time.strftime("%d %b %Y %H%M UTC")
    ptprt_img_file_name = "%s%s.pt.%s.png" % (ena_string, id_string, sec_string)

#   vars = {}
    for var in ['pt', 'u', 'v']:
        vars[var] = hdf.variables[var][2]

        if vars[var].min() == -32768 or vars[var].max() == 32767:
            dindex = (2, slice(None), slice(None))
            vars[var] = decompressVariable(hdf.variables[var], dindex=dindex)

#   print pt.min(), pt.max()

    bounds = (slice(100, 130), slice(100, 130))

    plot_map(vars['pt'], 1000, ptprt_title, ptprt_img_file_name, color_bar='pt', vectors=(vars['u'], vars['v']), obs=obs)
    plot_map(reflectivity, 1000, refl_title, refl_img_file_name, color_bar='refl', aux_field=(w_levels, vars['w']))
    return
Example #27
def main():
    base_path = "/caps1/tsupinie/1km-control-no-ua"
    files = glob.glob("%s/ena???.hdf0*" % base_path)

    for file in files:
        hdf = nio.open_file(file, mode='r', format='hdf')

        for var in ['u', 'v', 'w', 'pt', 'p', 'qv']:
            if var not in hdf.variables:
                print "%s incomplete ... " % file
                break

        hdf.close()
    return
Example #28
    def _init(self):

        if self._open_file is False:
            self._open_file = True
            f = Nio.open_file(self.pathname)

            # Dimension of var is time, lat, lon
            self._var = f.variables[self.varname]
            self._time = f.variables['time']
            self._lat = f.variables['lat']
            self._lon = f.variables['lon']
            self.time = None
            self.lat = None
            self.lon = None
Example #29
def calculate_maxnormens(opts_dict,var_list):
  ifiles=[]
  Maxnormens={}
  threshold=1e-12
  # input file directory
  inputdir=opts_dict['indir']
  
  # the timeslice that we want to process
  tstart=opts_dict['tslice']
  
  # open all files
  for frun_file in os.listdir(inputdir):
    if (os.path.isfile(inputdir+frun_file)):
      ifiles.append(Nio.open_file(inputdir+frun_file,"r"))
    else:
      print "COULD NOT LOCATE FILE "+inputdir+frun_file+" EXISTING"
      sys.exit() 
  comparision={}
  # loop through each variable
  for k in var_list:
    output=[]
    # read all data of variable k from all files
    for f in ifiles:
      v=f.variables
      output.append(v[k][tstart])
    max_val=0
    # open an output file
    outmaxnormens=k+"_ens_maxnorm.txt"
    fout=open(outmaxnormens,"w")
    Maxnormens[k]=[]
   
    # For each member n: Maxnormens[k][n] = max over members m of max|field_n - field_m|,
    # normalized by the range of member n's field
    for n in range(len(ifiles)):
      Maxnormens[k].append(0)
      comparision[k]=ifiles[n].variables[k][tstart]
      for m in range(len(ifiles)):
        max_val=np.max(np.abs(comparision[k]-output[m]))
        if Maxnormens[k][n] < max_val:
          Maxnormens[k][n]=max_val
      range_max=np.max((comparision[k]))
      range_min=np.min((comparision[k]))
      if range_max-range_min < threshold:
        Maxnormens[k][n]=0.
      else:
        Maxnormens[k][n]=Maxnormens[k][n]/(range_max-range_min)
      fout.write(str(Maxnormens[k][n])+'\n')
    strtmp = k + ' : '  + 'ensmax min max' + ' : ' + '{0:9.2e}'.format(min(Maxnormens[k]))+' '+'{0:9.2e}'.format(max(Maxnormens[k]))
    print(strtmp)
    fout.close()
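A standalone sketch of the normalized max-norm computed above, for two toy ensemble-member fields:

import numpy as np
a = np.array([1.0, 2.0, 4.0])  # comparison member
b = np.array([1.5, 2.0, 3.0])  # another member
maxnorm = np.max(np.abs(a - b)) / (a.max() - a.min())
print(maxnorm)  # 1.0 / 3.0 ~= 0.333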
Example #30
def main():
    files = [ 'hdf/KCYS/3km/manual/goshen.hdfrefl2d010691', 'hdf/KRIW/3km/goshen.hdfrefl2d010785', 'hdf/KFTG/3km/goshen.hdfrefl2d010638' ]
    radar_data = [ nio.open_file(f, format='hdf', mode='r').variables['refl2d'][0] for f in files ]
    radars = { 'KCYS':(41.15194, -104.80611), 'KFTG':(39.78667, -104.54583), 'KRIW':(43.06611, -108.47722) }

    composite = makeComposite(radar_data)

    proj = setupMapProjection(goshen_3km_proj, goshen_3km_gs)
    map = Basemap(**proj)

    makePlot(composite, goshen_3km_gs, map, "Radar Obs composite (2100 UTC)", "radar_composite_010800.png", radars=radars)
#   for fn, rd in zip(files, radar_data):
#       rid = fn[4:8]
#       makePlot(rd, goshen_3km_gs, map, "Radar Obs composite component (%s, 2100 UTC)" % rid, "radar_composite_comp_%s_010800.png" % rid)
    return
Example #31
import Ngl, Nio
import sys
import numpy as np

nc_file = sys.argv[1]
grid_file = sys.argv[2]
output_dir = sys.argv[3]

f = Nio.open_file(nc_file, "r")
g = Nio.open_file(grid_file, "r")

lon = g.variables["xc"][1, :]                   #-- read clon
lat = g.variables["yc"][:, 1]                   #-- read clat

print(lon)
print(lat)

print("Getting Data")
data = f.variables["SSTAYYC"][11, :, :]
print(data.shape)
print("DONE")

data[:] = 0

#---Start the graphics
print("Start plotting...")

wks_type = "png"
wks = Ngl.open_wks(wks_type, "%s/SSTAYYC" % (output_dir,))

#---Read in desired color map so we can subset it later
Example #32
#  Notes: The data for this example can be downloaded from
#    http://www.ncl.ucar.edu/Document/Manuals/NCL_User_Guide/Data/
#
'''
  Transition Guide PyNGL Example: TRANS_streamline.py

  -  Read netCDF data
  -  Drawing a streamline plot
  
  18-09-04  kmf
'''
from __future__ import print_function
import Ngl, Nio

#-- open a file and read variables
f = Nio.open_file("../read_data/rectilinear_grid_2D.nc", "r")

u = f.variables["u10"]
v = f.variables["v10"]
ua = f.variables["u10"][0, :, :]
va = f.variables["v10"][0, :, :]

lat = f.variables["lat"]
lon = f.variables["lon"]
nlon = len(lon)
nlat = len(lat)

#-- open a workstation
wks = Ngl.open_wks("png", "plot_TRANS_streamline_py")

#-- resource settings
Example #33
def openWRF(main_directory, file_name):
    try:
        wrf = Nio.open_file(main_directory + file_name, format='nc')
        return wrf
    except Exception:
        print('Failure when opening WRF file')
        return None
Example #34
    def test_types(self):
        #
        #  Specify a global history attribute and open a NetCDF file
        #  for writing.
        #
        hatt = "Created " + time.ctime(time.time()) + " by " + getUserName()
        f = Nio.open_file(self.filename, "w", None, hatt)

        #
        #  Create some global attributes.
        #
        f.title = "Nio test NetCDF file"
        f.series = [1, 2, 3, 4, 5, 6]
        f.version = 45

        file_attributes.update({'history': hatt})

        #
        #  Create some dimensions.
        #
        f.create_dimension("array", 3)
        #f.create_dimension("strlen",    6)
        f.create_dimension("strlen", 10)
        f.create_dimension("dim1", 2)
        f.create_dimension("dim2", 1)
        f.create_dimension("dim3", 4)

        #
        #  Create some variables.
        #
        #print("creating and assigning scalar double")
        v1 = f.create_variable("v1", 'd', ())
        v1.assign_value(42.0)

        #print("creating and assigning scalar float")
        v2 = f.create_variable("v2", 'f', ())
        v2.assign_value(52.0)

        #print("creating and assigning scalar integer")
        v3 = f.create_variable("v3", 'i', ())
        v3.assign_value(42)

        #print("creating and assigning scalar long")
        v4 = f.create_variable("v4", 'l', ())
        v4.assign_value(42)

        #print("creating and assigning scalar short")
        v5 = f.create_variable("v5", 'h', ())
        v5.assign_value(42)

        #print("creating and assigning scalar byte")
        v6 = f.create_variable("v6", 'b', ())
        v6.assign_value(42)

        #print("creating and assigning scalar char")
        v7 = f.create_variable("v7", 'S1', ())
        v7.assign_value('x')

        #print("creating and assigning array double")
        v11 = f.create_variable("v11", 'd', ('array', ))
        v11.assign_value([42.0, 43.0, 44.0])

        #print("creating and assigning array float")
        v22 = f.create_variable("v22", 'f', ('array', ))
        v22.assign_value([52.0, 53.0, 54.0])

        #print("creating and assigning array integer")
        v33 = f.create_variable("v33", 'i', ('array', ))
        v33.assign_value([42, 43, 44])

        #print("creating and assigning array long")
        v44 = f.create_variable("v44", 'l', ('array', ))
        a = np.array([42, 43, 44], 'l')
        v44.assign_value(a)

        #print("creating and assigning array short")
        v55 = f.create_variable("v55", 'h', ('array', ))
        v55.assign_value([42, 43, 44])

        #print("creating and assigning array byte")
        v66 = f.create_variable("v66", 'b', ('array', ))
        v66.assign_value([42, 43, 44])

        #print("creating and assigning array char")
        v77 = f.create_variable("v77", 'S1', ('array', 'strlen'))
        v77.assign_value(['bcdef', 'uvwxyz', 'ijklmnopqr'])
        #v77.assign_value(['ab','uv','ij'])
        #v77.assign_value(['a','u','i'])

        #v77[1] = v77[1,::-1]

        #print(v77[:])

        v_single = f.create_variable("v_single", 'f', ("dim1", "dim2", "dim3"))
        #print(v_single)
        # type mismatch (double created then assigned to float variable)
        a = np.array([1.0, 2, 3, 4, 5, 6, 7, 8], dtype=np.float64)
        a.shape = (2, 1, 4)
        #print(a)
        with nt.assert_raises(Nio.NIOError):
            v_single.assign_value(a)

        # now do it right
        a = np.array([1.0, 2, 3, 4, 5, 6, 7, 8], dtype=np.float32)
        a.shape = (2, 1, 4)
        #print(a)
        v_single.assign_value(a)
        #print(v_single[:])
        v_single[1, 0, 2] = 11.0
        v_single[:, 0, 2] = [11.0, 12.0]

        var_names = list(f.variables.keys())

        nt.assert_equal(set(var_names), file_variables)

        for var in var_names:
            v = f.variables[var]
            nt.assert_equal(v.dimensions, var_dimensions[var])
            nt.assert_equal(v.attributes, {})
            nt.assert_equal(v.get_value(), var_values[var])

        f.close()

        #
        #  Read the file we just created.
        #
        f = Nio.open_file(self.filename, "r")

        nt.assert_equal(f.attributes, file_attributes)
        nt.assert_equal(f.dimensions, file_dimensions)
        nt.assert_equal(set(f.variables.keys()), file_variables)

        for var in var_names:
            v = f.variables[var]
            nt.assert_equal(v.dimensions, var_dimensions[var])
            nt.assert_equal(v.attributes, {})
            nt.assert_equal(v.get_value(), var_values[var])

        f.close()
Example #35
#
#  To use the ScientificPython module to read in the netCDF file,
#  comment out the above "import" command, and uncomment the
#  import line below.
#
# from Scientific.IO.NetCDF import NetCDFFile

#
#  Import Ngl support functions.
#
import Ngl
#
#  Open three netCDF files and get variables.
#
data_dir = Ngl.pynglpath("data")
cdf_file1 = Nio.open_file(os.path.join(data_dir, "cdf", "941110_P.cdf"), "r")
cdf_file2 = Nio.open_file(os.path.join(data_dir, "cdf", "sstdata_netcdf.nc"),
                          "r")
cdf_file3 = Nio.open_file(os.path.join(data_dir, "cdf", "Pstorm.cdf"), "r")

#
#  This is the ScientificPython method for opening netCDF files.
#
# cdf_file1 = NetCDFFile(os.path.join(data_dir,"cdf","941110_P.cdf"),"r")
# cdf_file2 = NetCDFFile(os.path.join(data_dir,"cdf","sstdata_netcdf.nc"),"r")
# cdf_file3 = NetCDFFile(os.path.join(data_dir,"cdf","Pstorm.cdf"),"r")

psl = cdf_file1.variables["Psl"][:]
sst = cdf_file2.variables["sst"]
pf = cdf_file3.variables["p"]
Example #36
        break
#print stationlist
allpath = '/Users/yetao.lu/Documents/testdata'
# Walk the input files
inputfile = ''
inputfile2 = ''
#filesdict={}
for rootpath, dirs, files in os.walk(allpath):
    for file in files:
        if file[:3] == 'sfc' and file[-5:] == '.grib' and (string.find(
                file, '2014') == -1):
            inputfile = os.path.join(rootpath, file)
            inputfile2 = inputfile.replace('sfc', 'pl')
            #filesdict[inputfile]=inputfile2
            #print inputfile,inputfile2
            sfcfile = Nio.open_file(inputfile, 'r')
            plfile = Nio.open_file(inputfile2, 'r')
            # The argument 0 selects the 0th forecast time step
            GetStationsAndOnetimesFromEC(0, sfc_varinames, sfcfile,
                                         pl_varinames, plfile, inputfile)
            csvfile1 = '/Users/yetao.lu/Desktop/mos/data.csv'
            filewrite = open(csvfile1, 'w')
            #print len(alllist)
            for i in range(len(stationsVlist)):
                for j in range(len(stationsVlist[i])):
                    if j == len(stationsVlist[i]) - 1:
                        filewrite.write(str((stationsVlist[i])[j]))
                    else:
                        filewrite.write(str((stationsVlist[i])[j]) + ',')
                filewrite.write('\n')
Example #37
# file = Nio.open_file(inputfile, 'r')
    if o == "-e":
        export_flag = 1
    if o == "-r":
        restart_time = int(a)

# Skip is the length between outputs
# skip = 1
if dom == 'd02':
    skip = 1
    DP_CLEVS = range(55, 90, 5)
else:
    skip = 3
    DP_CLEVS = range(55, 90, 5)

filename = '../wrfout_' + dom + '_PLEV.nc'
nc = netcdf.open_file(filename)

# Grab three variables for now
#temps_ua = nc.variables['TT']
temps_ua = nc.variables['T']
#temps_base_ua = nc.variables['TB']
temps_base_ua = 300
u_wind_ms_ua = nc.variables['UU'][:, :, :, :-1]
v_wind_ms_ua = nc.variables['VV'][:, :, :-1, :]
w_wind_ms_ua = nc.variables['W']
#rhum_ua = nc.variables['RH']
qvap = nc.variables['QVAPOR']
#ght = nc.variables['GHT']
phb = nc.variables['PHB']
ph = nc.variables['PH']
w_wind_ua = nc.variables['W']
Example #38
import Ngl, Nio
import math
import numpy as np
import json
import pandas as pd
import datetime
import time

st = time.time()
fname = "/home/alley/work/Dong/mongo/seasonal_analysis/data/data/1981-2019_gh_500.grib"
f = Nio.open_file(fname, 'r')
# print(f.variables)
lat = np.array(f.variables['g0_lat_1'])
lon = np.array(f.variables['g0_lon_2'])
Time = list(np.array(f.variables['initial_time0_encoded']))
gh = np.array(f.variables['Z_GDS0_ISBL_S123'])
dims = (gh.shape)
print(dims)
n = 0
with open(
        '/home/alley/work/Dong/mongo/seasonal_analysis/data/data/1981-2019_gh_500_by_only_latlon.json',
        'w',
        encoding='utf-8') as fout:
    for lat_i in range(len(lat)):
        for lon_i in range(len(lon)):
            tmp = (gh[:, lat_i, lon_i])
            tmp = (np.array(tmp).reshape(int(dims[0] / 12),
                                         12)).tolist()  # shape: (years, months)
            lat_ = float(lat[lat_i])
            lon_ = float(lon[lon_i])
            dic = {
Example #39
"""
  -  Reading netCDF file
  -  Converting data from Kelvin to degC
  -  Writing ASCII data to new file
   
  2018-08-30  kmf
"""
from __future__ import print_function
import os, sys
import numpy as np
import Ngl, Nio

#--  data file name
fname = "../read_data/rectilinear_grid_3D.nc"

#--  open file
f = Nio.open_file(fname, "r")

#-- read variable, first time step, first level
var = f.variables["t"][0, 0, :, :]

#--  convert var from Kelvin to degC while retaining the missing values
var = var - 273.15
print(var)

# -- write var to an ASCII file
os.system("/bin/rm -f data_py.asc")  #-- delete file
sys.stdout = open("data_py.asc", "w")  #-- redirect stdout to file
for i in range(0, 10):
    for j in range(0, 10):
        print "%10.6f" % (var[i, j])  #-- write to file
Example #40
#  Description:
#    This example shows how to draw partially opaque text on a plot
#
#  Effects illustrated:
#    o Using the new txFontOpacityF resource
#    o Subscripting a variable using coordinate values
# 
#  Output:
#     A single visualization with two XY curves 
#
from __future__ import print_function
import numpy,os
import Ngl,Nio

dirc = Ngl.pynglpath("data")
f    = Nio.open_file(os.path.join(dirc,"cdf","uv300.nc"))
u    = f.variables["U"][0,:,8]
lat  = f.variables["lat"][:]

#---Start the graphics section
wks_type = "png"
wks = Ngl.open_wks (wks_type,"newcolor2")     # Open "newcolor2.png" for graphics

#---Set some resources.
res          = Ngl.Resources()
res.nglDraw  = False
res.nglFrame = False
res.xyLineThicknessF = 4.0

plot  = Ngl.xy (wks,lat,u,res)              # Create plot
Example #41
import Ngl
import Nio
import numpy
import os
dirc=Ngl.pynglpath("data")
file=Nio.open_file(os.path.join(dirc,"cdf","pop.nc"))

wks_type="png"
wks=Ngl.open_wks(wks_type,"map2")

urot=file.variables["urot"]
t=file.variables["t"]
lat2d=file.variables["lat2d"]
lon2d=file.variables["lon2d"]


u=Ngl.add_cyclic(urot[:])
temp=Ngl.add_cyclic(t[0:])
lon=Ngl.add_cyclic(lon2d[0:])
lat=Ngl.add_cyclic(lat2d[0:])


resource=Ngl.Resources()

resource.vfXArray=lon
resource.vfYArray=lat

resource.mpProjection="Stereographic"
resource.mpFillOn=True
resource.mpInlandWaterFillColor="SkyBlue"
resource.mpProjection          = "Stereographic"
Example #42
#
#  Import Nio for reading netCDF files.
#
import Nio
import os

#
#  Import Ngl support functions.
#
import Ngl

#
#  Open the netCDF file containing the climate divisions polygons.
#
dirc = Ngl.pynglpath("data")
ncdf = Nio.open_file(os.path.join(dirc, "cdf", "climdiv_polygons.nc"))

#
#  State names for the contiguous U.S. states.
#
statenames = ["AL","AR","AZ","CA","CO","CT","DE","FL","GA","IA","ID","IL", \
              "IN","KS","KY","LA","MA","MD","ME","MI","MN","MO","MS","MT", \
              "NC","ND","NE","NH","NJ","NM","NV","NY","OH","OK","OR","PA", \
              "RI","SC","SD","TN","TX","UT","VA","VT","WA","WI","WV","WY"]

#
#  Climate divisions in each state.
#
ncds = [8,9,7,7,5,3,2,6,9,9,10,9,9,9,4,9,3,8,3,10,9,6,10,7, \
        8,9,8,2,3,8,4,10,10,9,9,10,1,7,9,4,10,7,7,3,10,9,6,10]
Example #43
def modelprocess(ll):
    sys.stdout=open(os.path.join(outpath,'ml'+str(ll)+'.out'),'w')
    # Station list data
    stationlist = []
    csvfile = '/Users/yetao.lu/Desktop/mos/t_p_station_cod.csv'
    fileread = open(csvfile, 'r')
    firstline = fileread.readline()
    while True:
        line = fileread.readline()
        perlist = line.split(',')
        if len(perlist) > 4:
            stationlist.append(perlist)
        if not line or line == '':
            break
    allpath = '/Users/yetao.lu/Documents/testdata'
    sfc_varinames = ['2T_GDS0_SFC', '2D_GDS0_SFC', '10U_GDS0_SFC','10V_GDS0_SFC','TCC_GDS0_SFC', 'LCC_GDS0_SFC']
    pl_varinames = ['R_GDS0_ISBL']
    print('a')
    # Walk the input files
    dict={}
    stationsVlist = []
    trainlebellist=[]
    def GetOnetimeFromEC(n, sfc_varinames, sfc_file, indexlat, indexlon,
                         pl_varinames, pl_file):
        vstring = []
        levelArray = pl_file.variables['lv_ISBL1']
        for i in range(len(sfc_varinames)):
            variArray = sfc_file.variables[sfc_varinames[i]]
            latlonArray = variArray[n]
            vstring.append(latlonArray[indexlat][indexlon])
            vstring.append(latlonArray[indexlat][indexlon + 1])
            vstring.append(latlonArray[indexlat + 1][indexlon + 1])
            vstring.append(latlonArray[indexlat + 1][indexlon])
            vstring.append(latlonArray[indexlat - 1][indexlon - 1])
            vstring.append(latlonArray[indexlat - 1][indexlon])
            vstring.append(latlonArray[indexlat - 1][indexlon + 1])
            vstring.append(latlonArray[indexlat - 1][indexlon + 2])
            vstring.append(latlonArray[indexlat][indexlon + 2])
            vstring.append(latlonArray[indexlat + 1][indexlon + 2])
            vstring.append(latlonArray[indexlat + 2][indexlon + 2])
            vstring.append(latlonArray[indexlat + 2][indexlon + 1])
            vstring.append(latlonArray[indexlat + 2][indexlon])
            vstring.append(latlonArray[indexlat + 2][indexlon - 1])
            vstring.append(latlonArray[indexlat + 1][indexlon - 1])
            vstring.append(latlonArray[indexlat][indexlon - 1])
        for p in range(len(pl_varinames)):
            pl_variArray = pl_file.variables[pl_varinames[p]]
            phaArray = pl_variArray[n]
            for k in range(len(phaArray)):
                llArray = phaArray[k]
                pha = levelArray[k]
                # print pha
                if pha == 500 or pha == 850:
                    # vstring.append(str(pha)+'hpa')
                    vstring.append(llArray[indexlat][indexlon])
                    vstring.append(llArray[indexlat][indexlon + 1])
                    vstring.append(llArray[indexlat + 1][indexlon + 1])
                    vstring.append(llArray[indexlat + 1][indexlon])
                    vstring.append(llArray[indexlat - 1][indexlon - 1])
                    vstring.append(llArray[indexlat - 1][indexlon])
                    vstring.append(llArray[indexlat - 1][indexlon + 1])
                    vstring.append(llArray[indexlat - 1][indexlon + 2])
                    vstring.append(llArray[indexlat][indexlon + 2])
                    vstring.append(llArray[indexlat + 1][indexlon + 2])
                    vstring.append(llArray[indexlat + 2][indexlon + 2])
                    vstring.append(llArray[indexlat + 2][indexlon + 1])
                    vstring.append(llArray[indexlat + 2][indexlon])
                    vstring.append(llArray[indexlat + 2][indexlon - 1])
                    vstring.append(llArray[indexlat + 1][indexlon - 1])
                    vstring.append(llArray[indexlat][indexlon - 1])
        return vstring

    def GetStationsAndOnetimesFromEC(i, sfc_varinames, sfc_file, pl_varinames,
                                     pl_file, inputfile):
        # print 'stationlist:',len(stationlist)
        for j in range(len(stationlist)):
            # Build a lookup key from the file name to fetch the observed temperature
            strarray = inputfile.split('_')
            odatetime = datetime.datetime.strptime(
                (strarray[1] + strarray[2][:2]),
                '%Y%m%d%H')
            fdatetime = odatetime + datetime.timedelta(hours=i * 3)
            fdateStr = datetime.datetime.strftime(fdatetime, '%Y%m%d%H%M%S')
            # Key on initialization time plus lead time, not on valid time, because valid times repeat
            dictid = stationlist[j][0] + '_' + strarray[1] + strarray[2][:2] + '_' + str(i)
            # Fetch the observation using station ID + observation-time key
            kid = stationlist[j][0] + '_' + fdateStr
            trainlebel = stationdict.get(kid)
            # If the observation is valid (not 999999 and not None), compute the 16
            # surrounding grid-point values and add them to the training sample
            if trainlebel is not None and trainlebel != 999999:
                latitude = float(stationlist[j][4])
                longitude = float(stationlist[j][5])
                # First compute the grid indices corresponding to the station's latitude/longitude
                indexlat = int((60 - latitude) / 0.125)
                indexlon = int((longitude - 60) / 0.125)
                # print latitude,longitude,indexlat,indexlon
                # Then take the indices of the 16 surrounding points in order: [indexlat,indexlon+1], [indexlat+1,indexlon+1], [indexlat+1,indexlon], ... (clockwise)
                perstationlist = GetOnetimeFromEC(i, sfc_varinames, sfc_file,
                                                  indexlat, indexlon,
                                                  pl_varinames, pl_file)
                # print dictid,perstalist,kid,trainlebel
                dict[dictid] = perstationlist
                stationsVlist.append(perstationlist)
                trainlebellist.append(trainlebel)
        # print 'stationsVlist',len(stationsVlist),'trainlebellist',len(trainlebellist),'stationdict',len(stationdict)
    # Walk all of the input files
    for rootpath, dirs, files in os.walk(allpath):
        for file in files:
            if file[:3] == 'sfc' and file[-5:] == '.grib' and (string.find(file,'2014')==-1):
                inputfile = os.path.join(rootpath, file)
                inputfile2=inputfile.replace('sfc','pl')
                sfcfile=Nio.open_file(inputfile,'r')
                plfile=Nio.open_file(inputfile2,'r')
                # The first argument selects the forecast time step; this builds one file's list for the ~2000 stations
                GetStationsAndOnetimesFromEC(ll,sfc_varinames,sfcfile,pl_varinames,plfile,inputfile)
    stationArray=numpy.array(stationsVlist)
    trainlebelArray=numpy.array(trainlebellist)
    a_train,a_test=train_test_split(stationArray,test_size=0.33,random_state=7)
    #print len(a_train),len(a_test),len(a_train)+len(a_test)
    # Standardize the data before training
    x_scaled=preprocessing.scale(stationArray)
    stationArray=x_scaled
    #xgboost
    x_train,x_test,y_train,y_test=train_test_split(stationArray,trainlebelArray,test_size=0.33,random_state=7)
    xgbtrain=xgboost.DMatrix(x_train,label=y_train)
    xgbtest=xgboost.DMatrix(x_test,label=y_test)
    #xgbtrain.save_binary('train.buffer')
    #print len(x_train),len(x_test),len(y_train),len(y_test)
    #print xgbtest
    # Track training and validation error rates
    watchlist=[(xgbtrain,'xgbtrain'),(xgbtest,'xgbeval')]
    params={
    'booster':'gbtree',
    'objective': 'reg:linear', # linear regression
    'gamma':0.2,  # post-pruning control; larger is more conservative, typically 0.1 or 0.2
    'max_depth':12, # tree depth; deeper trees overfit more easily
    'lambda':2,  # L2 regularization weight; larger values make the model less prone to overfitting
    'subsample':0.7, # randomly subsample training rows
    'colsample_bytree':0.7, # column subsampling when growing each tree
    'min_child_weight':3,
    # Defaults to 1: the minimum sum of instance weights (Hessian) in a leaf.
    # For imbalanced 0-1 classification with h near 0.01, a value of 1 means a
    # leaf needs roughly 100 samples. This strongly affects the result: it bounds
    # the second-order sum in each leaf, and smaller values overfit more easily.
    'silent':0 ,# 1 suppresses run-time output; 0 is recommended
    'eta': 0.01, # acts like a learning rate
    'seed':1000,
    'nthread':3,# number of CPU threads
    #'eval_metric': 'auc'
    'scale_pos_weight':1
    }
    plst=list(params.items())
    num_rounds=20000
    # With a large iteration count, early_stopping_rounds stops training once accuracy has not improved for that many rounds
    model=xgboost.train(plst,xgbtrain,num_rounds,watchlist,early_stopping_rounds=800)
    #print model,watchlist
    preds=model.predict(xgbtest,ntree_limit=model.best_iteration)
    # Write the predictions to a file; any convenient method works
    # numpy.savetxt('submission_xgb_MultiSoftmax.csv',numpy.c_[range(1,len(test)+1),preds],
    #                 delimiter=',',header='ImageId,Label',comments='',fmt='%d')
    #print preds
    #print y_test.dtype,preds.dtype
    y_test=y_test.astype('float32')
    mse=mean_squared_error(y_test,preds)
    rmse=math.sqrt(mse)
    mae=mean_absolute_error(y_test,preds,multioutput='uniform_average')
    print("训练后MSE: %.4f" % mse)
    print("训练后RMSE: %.4f" % rmse)
    print("训练后MAE: %.4f" % mae)
    # Accuracy within 2 degrees of temperature
    n=0
    for x,y in zip(y_test,preds):
        if abs(x-y)<2:
            n=n+1
    accuracy=float(n)/float(len(y_test))
    print ("训练后2度的accuracy: %.4f" % accuracy)
    n=0
    for x,y in zip(y_test,preds):
        if abs(x-y)<3:
            n=n+1
    accuracy=float(n)/float(len(y_test))
    print ("训练后3度的accuracy: %.4f" % accuracy)
    # Compare against the raw EC values to get the pre-training error
    y_origin=a_test[:,0]
    #print y_origin
    y_origin=y_origin-273.15
    #print y_origin
    mse0=mean_squared_error(y_test,y_origin)
    rmse0=math.sqrt(mse0)
    mae0=mean_absolute_error(y_test,y_origin,multioutput='uniform_average')
    print("训练前MSE: %.4f" % mse0)
    print("训练前RMSE: %.4f" % rmse0)
    print("训练前MAE: %.4f" % mae0)
    n=0
    for x,y in zip(y_test,y_origin):
        if abs(x-y)<2:
            n=n+1
    accuracy=float(n)/float(len(y_test))
    print ("训练前2度的accuracy: %.4f" % accuracy)
    n=0
    for x,y in zip(y_test,y_origin):
        if abs(x-y)<3:
            n=n+1
    accuracy=float(n)/float(len(y_test))
    print ("训练前3度的accuracy: %.4f" % accuracy)
    model.save_model(os.path.join(outpath,'ectemp'+str(ll)+'.model'))
    testfile=os.path.join(outpath,'test'+str(ll)+'.csv')
    predsfile=os.path.join(outpath,'preds'+str(ll)+'.csv')
    originfile=os.path.join(outpath,'origin'+str(ll)+'.csv')
    testfw=open(testfile,'w')
    predsfw=open(predsfile,'w')
    originfw=open(originfile,'w')
    for u in y_test:
        testfw.write(str(u)+',')
    testfw.close()
    for o in preds:
        predsfw.write(str(o)+',')
    predsfw.close()
    for q in y_origin:
        originfw.write(str(q)+',')
    originfw.close()
    del stationArray
    del trainlebelArray
    del a_test
    del a_train
    del x_test
    del x_train
    del y_origin
    del y_test
    del y_train
    del xgbtest
    del xgbtrain
    del x_scaled
    del plfile
    del sfcfile
Example #44
from __future__ import print_function
import numpy as np
import Nio, Ngl, os, sys
from wrf import getvar, latlon_coords, to_np


# Read data
filename = "wrfout_d03_2012-04-22_23_00_00"
if(not os.path.exists(filename)):
  print("You do not have the necessary '{}' file to run this example.".format(filename))
  print("You need to supply your own WRF output file")
  print("WRF output files usually have names like '{}'".format(filename))
  sys.exit()

# Read some WRF data
a  = Nio.open_file(filename+".nc")  # Must add ".nc" suffix for Nio.open_file
ua = getvar(a,"ua")
va = getvar(a,"va")

# First timestep, lowest (bottommost) level, every 5th lat/lon
nl    = 0
nt    = 0
nstep = 5     # a stride to cull some of the streamlines
u     = ua[nl,::nstep,::nstep]
v     = va[nl,::nstep,::nstep]
spd   = np.sqrt(u**2+v**2)

# Get the latitude and longitude points
lat, lon = latlon_coords(ua)
lat = to_np(lat)
lon = to_np(lon)
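# If these full-resolution coordinates feed the culled u/v in the (omitted)
# plotting code, the same stride presumably has to be applied first, e.g.
#   lat = lat[::nstep, ::nstep]; lon = lon[::nstep, ::nstep]
# (an assumption about the code cut off below).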
Beispiel #45
0
#
#     http://mailman.ucar.edu/mailman/listinfo/pyngl-talk
#
from __future__ import print_function
import numpy, Nio, Ngl, sys, os

#---Read data
filename = "b.e12.B1850C5CN.ne30_g16.init.ch.027.cam.h0.0001-01.nc"
if (not os.path.exists(filename)):
    print(
        "You do not have the necessary '{}' file to run this example.".format(
            filename))
    print("See the comments at the top of this script for more information.")
    sys.exit()

a = Nio.open_file(filename)
vname = "TS"
data = a.variables[vname]
lat = a.variables["lat"][:]  # 1D array (48602 cells)
lon = a.variables["lon"][:]  # ditto

ncells = data.shape[1]
print("There are {} cells in the {} variable".format(ncells, vname))

wks_type = "png"
wks = Ngl.open_wks(wks_type, "camse1")

#---Set some plot options
res = Ngl.Resources()

# Contour options
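# For this unstructured CAM-SE grid the cell-center coordinates are typically
# attached via the scalar-field resources before contouring (a sketch of the
# usual PyNGL idiom; the rest of this example is cut off):
#   res.sfXArray = lon
#   res.sfYArray = lat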
Beispiel #46
0
    #target.write(header1)
    #target.write('\n')
    #target.write(header2)
    #target.write('\n')
    #print var[0,0]
    #target.write(var[:,:])
    #target.write('\n')
    #target.close()


for i in range(1, len(sys.argv)):
    infilename = sys.argv[i]
    print(infilename)

    #file = Nio.open_file(os.path.join(Ngl.pynglpath("data"),"grb",infilename),"r")
    infile = Nio.open_file(infilename, "r")

    names = list(infile.variables.keys())  #  Get the variable names
    print("\nVariable names:")  #  and print them out.
    print(names)

    for j in range(0, len(names)):
        if names[j] == 'lat_0' or names[j] == 'lon_0':
            continue

        #
        #  For variable in names[1], retrieve and print all attributes
        #  and their values.
        #
        print "\nThe attributes and their values for variable " + names[j] + ":"
        for attrib in infile.variables[names[j]].attributes.keys():
Beispiel #47
0
def modelprocess(stationdict, stationlist, ll, allpath, dempath, forehours):
    max_varinames = ['MX2T6_GDS0_SFC_1']
    max_varinames001 = ['MX2T6_GDS0_SFC']
    min_varinames = ['MN2T6_GDS0_SFC_1']
    min_varinames001 = ['MN2T6_GDS0_SFC']
    sfc_varinames1 = [
        '2T_GDS0_SFC', '2D_GDS0_SFC', '10U_GDS0_SFC', '10V_GDS0_SFC',
        'TCC_GDS0_SFC', 'LCC_GDS0_SFC', 'Z_GDS0_SFC'
    ]
    pl_varinames = ['R_GDS0_ISBL']
    # print max_varinames
    dict01 = {}
    # iterate over the input files
    maxecvaluelist = []
    minecvaluelist = []
    maxtemplist = []
    mintemplist = []
    # walk all files under allpath
    for rootpath, dirs, files in os.walk(allpath):
        for file in files:
            # print file
            if (file[:3] == 'sfc' and file[-5:] == '.grib'
                    and '2017' not in file):
                inputfile = os.path.join(rootpath, file)
                inputfile2 = inputfile.replace('sfc', 'pl')
                inputfile3 = inputfile.replace('sfc', '6h')
                inputfile4 = inputfile.replace('sfc', '3h')
                sfcfile = Nio.open_file(inputfile, 'r')
                plfile = Nio.open_file(inputfile2, 'r')
                h6file = Nio.open_file(inputfile3, 'r')
                h3file = Nio.open_file(inputfile4, 'r')
                # The 0 argument refers to the 0th forecast time; this is just
                # one file's list of 2000 stations.
                GetStationsAndOnetimesFromEC(
                    ll, max_varinames, max_varinames001, min_varinames,
                    min_varinames001, h6file, h3file, sfc_varinames1, sfcfile,
                    pl_varinames, plfile, inputfile, maxecvaluelist,
                    minecvaluelist, stationdict, stationlist, dict01,
                    maxtemplist, mintemplist, dempath)
    #ecvaluelist = numpy.array(ecvaluelist)
    maxtemplist = numpy.array(maxtemplist)
    mintemplist = numpy.array(mintemplist)
    # class_train, class_test = train_test_split(ecvaluelist, test_size=0.33,random_state=7)
    # standardize the data before training
    #ecvaluelist = ecvaluelist.astype('float32')
    maxtemplist = maxtemplist.astype('float32')
    mintemplist = mintemplist.astype('float32')
    # print ecvaluelist.shape, maxtemplist.shape
    # x_scaled = preprocessing.scale(ecvaluelist)
    # ecvaluelist = x_scaled
    # handle max and min separately: convert the feature lists to arrays
    maxecvaluelist = numpy.array(maxecvaluelist)
    minecvaluelist = numpy.array(minecvaluelist)
    # print minecvaluelist.shape
    # Split up front to measure accuracy: this split is only used for the
    # pre-training accuracy; the post-training accuracy does not rely on it.
    max_train, max_test, min_train, min_test = train_test_split(maxtemplist,
                                                                mintemplist,
                                                                test_size=0.33,
                                                                random_state=7)
    # standardize the dataset
    x_scaler = preprocessing.StandardScaler().fit(maxecvaluelist)
    max_scaler = x_scaler.transform(maxecvaluelist)
    maxecvaluelist = max_scaler
    maxscaler_file = os.path.join(outpath,
                                  'dem_maxscale' + str(forehours) + '.save')
    joblib.dump(x_scaler, maxscaler_file)
    # y_scaler = preprocessing.StandardScaler().fit(minecvaluelist)
    # min_scaler = y_scaler.transform(minecvaluelist)
    # minecvaluelist = min_scaler
    # minscaler_file = os.path.join(outpath, 'dem_minscale' + str(forehours) + '.save')
    # joblib.dump(y_scaler, minscaler_file)
    # xgboost train/test split; the matrices here are already standardized
    x_train, x_test, y_train, y_test = train_test_split(maxecvaluelist,
                                                        maxtemplist,
                                                        test_size=0.33,
                                                        random_state=7)
    u_train, u_test, v_train, v_test = train_test_split(minecvaluelist,
                                                        mintemplist,
                                                        test_size=0.33,
                                                        random_state=7)
    xgbtrain = xgboost.DMatrix(x_train, label=y_train)
    xgbtest = xgboost.DMatrix(x_test, label=y_test)
    xgbtrain01 = xgboost.DMatrix(u_train, label=v_train)
    xgbtest01 = xgboost.DMatrix(u_test, label=v_test)
    # xgbtrain.save_binary('train.buffer')
    # feature selection
    # print x_train.shape, x_test.shape
    ff, pp = f_regression(x_train, y_train)
    # print ff, pp
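    # (f_regression returns per-feature F-statistics and p-values; they are
    # computed here for inspection but not actually used to prune features.)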
    # track the train/validation error
    watchlist = [(xgbtrain, 'xgbtrain'), (xgbtest, 'xgbeval')]
    watchlist01 = [(xgbtrain01, 'xgbtrain01'), (xgbtest01, 'xgbeval01')]
    params = {
        'booster': 'gbtree',
        'objective': 'reg:linear',  # linear regression
        'gamma': 0.2,  # post-pruning control; larger is more conservative, typically 0.1-0.2
        'max_depth': 12,  # tree depth; deeper trees overfit more easily
        'lambda': 2,  # L2 regularization on leaf weights; larger values resist overfitting
        'subsample': 0.7,  # row subsampling of the training instances
        'colsample_bytree': 0.7,  # column subsampling when building each tree
        'min_child_weight': 3,
        # Defaults to 1: the minimum sum of instance Hessians (h) required in a
        # leaf. For unbalanced 0-1 classification with h near 0.01, a value of 1
        # means a leaf needs at least 100 samples. This parameter strongly
        # affects the result; smaller values overfit more easily.
        'silent': 0,  # set to 1 to suppress runtime output; best left at 0
        'eta': 0.1,  # acts like a learning rate
        'seed': 1000,
        # 'nthread': 3,  # number of CPU threads
        # 'eval_metric': 'auc'
        'scale_pos_weight': 1
    }
    # params01 is identical to params; reuse the same settings for the
    # minimum-temperature model instead of repeating the literal.
    params01 = dict(params)
    plst = list(params.items())
    plst01 = list(params01.items())
    num_rounds = 99999
    # With a large num_rounds, early_stopping_rounds stops training once the
    # evaluation metric has not improved within that many rounds.
    model = xgboost.train(plst,
                          xgbtrain,
                          num_rounds,
                          watchlist,
                          early_stopping_rounds=500)
    model01 = xgboost.train(plst01,
                            xgbtrain01,
                            num_rounds,
                            watchlist01,
                            early_stopping_rounds=500)
    # print model,watchlist
    preds = model.predict(xgbtest, ntree_limit=model.best_iteration)
    preds01 = model01.predict(xgbtest01, ntree_limit=model01.best_iteration)
    model.save_model(
        os.path.join(outpath, 'dem_maxtemp' + str(forehours) + '.model'))
    #model01.save_model(os.path.join(outpath, 'dem_mintemp' + str(forehours) + '.model'))
    # print preds, preds01
    # RMSE/MAE of the maximum temperature after training
    mse = mean_squared_error(y_test, preds)
    rmse = math.sqrt(mse)
    mae = mean_absolute_error(y_test, preds, multioutput='uniform_average')
    bias = numpy.mean(y_test - preds)
    print("训练后最高气温MSE: %.4f" % mse)
    print("训练后最高气温RMSE: %.4f" % rmse)
    print("训练后最高气温MAE: %.4f" % mae)
    print("训练后最高气温bias: %.4f" % bias)
    # 训练后最高气温2度以内的准确率
    n = 0
    for x, y in zip(y_test, preds):
        if abs(x - y) < 2:
            n = n + 1
    accuracy2_after = float(n) / float(len(y_test))
    print("训练后最高气温2度的accuracy: %.4f" % accuracy2_after)
    n = 0
    for x, y in zip(y_test, preds):
        if abs(x - y) < 3:
            n = n + 1
    accuracy3_after = float(n) / float(len(y_test))
    print("训练后最高气温3度的accuracy: %.4f" % accuracy3_after)
    # 训练前高温的RMSE,MAE
    class_test = max_test.astype('float64')
    max_ec = (class_test[:, 0]) - 273.15
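    # (Column 0 of the test split is assumed to hold the raw EC 2 m maximum
    # temperature in Kelvin, hence the 273.15 offset to Celsius.)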
    mse0 = mean_squared_error(max_ec, y_test)
    rmse0 = math.sqrt(mse0)
    mae0 = mean_absolute_error(max_ec, y_test, multioutput='uniform_average')
    bias0 = numpy.mean(y_test - max_ec)
    print("训练前最高气温MSE: %.4f" % mse0)
    print("训练前最高气温RMSE: %.4f" % rmse0)
    print("训练前最高气温MAE: %.4f" % mae0)
    print("训练前最高气温BIAS: %.4f" % bias0)
    n = 0
    for x, y in zip(y_test, max_ec):
        if abs(x - y) < 2:
            n = n + 1
    accuracy2_before = float(n) / float(len(y_test))
    print("训练前最高气温2度的accuracy: %.4f" % accuracy2_before)
    n = 0
    for x, y in zip(y_test, max_ec):
        if abs(x - y) < 3:
            n = n + 1
    accuracy3_before = float(n) / float(len(y_test))
    print("训练前最高气温3度的accuracy: %.4f" % accuracy3_before)
    # 训练后低温的RMSE|MAE
    min_mse = mean_squared_error(v_test, preds01)
    min_rmse = math.sqrt(min_mse)
    min_mae = mean_absolute_error(v_test,
                                  preds01,
                                  multioutput='uniform_average')
    min_bias = numpy.mean(v_test - preds01)
    print("训练后最低气温MSE: %.4f" % min_mse)
    print("训练后最低气温RMSE: %.4f" % min_rmse)
    print("训练后最低气温MAE: %.4f" % min_mae)
    print("训练后最低气温BIAS: %.4f" % min_bias)
    # 训练后最低气温2度以内的准确率
    n = 0
    for x, y in zip(v_test, preds01):
        if abs(x - y) < 2:
            n = n + 1
    min_accuracy2_after = float(n) / float(len(v_test))
    print("训练后最低气温2度的accuracy: %.4f" % min_accuracy2_after)
    n = 0
    for x, y in zip(v_test, preds01):
        if abs(x - y) < 3:
            n = n + 1
    min_accuracy3_after = float(n) / float(len(v_test))
    print("训练后最低气温3度的accuracy: %.4f" % min_accuracy3_after)
    # 训练前低温的RMSE,MAE
    min_ec = min_test[:, 0] - 273.15
    min_mse0 = mean_squared_error(min_ec, v_test)
    min_rmse0 = math.sqrt(min_mse0)
    min_mae0 = mean_absolute_error(min_ec,
                                   v_test,
                                   multioutput='uniform_average')
    min_bias0 = numpy.mean(v_test - min_ec)
    print("训练前最低气温MSE: %.4f" % min_mse0)
    print("训练前最低气温RMSE: %.4f" % min_rmse0)
    print("训练前最低气温MAE: %.4f" % min_mae0)
    print("训练前最低气温BIAS: %.4f" % min_bias0)
    n = 0
    for x, y in zip(v_test, min_ec):
        if abs(x - y) < 2:
            n = n + 1
    min_accuracy2_before = float(n) / float(len(v_test))
    print("训练前最低气温2度的accuracy: %.4f" % min_accuracy2_before)
    n = 0
    for x, y in zip(v_test, min_ec):
        if abs(x - y) < 3:
            n = n + 1
    min_accuracy3_before = float(n) / float(len(v_test))
    print("训练前最低气温3度的accuracy: %.4f" % min_accuracy3_before)
    print(','.join(str(v) for v in [
        rmse, mae, accuracy2_after, accuracy3_after, rmse0, mae0,
        accuracy2_before, accuracy3_before, bias, bias0, min_rmse, min_mae,
        min_accuracy2_after, min_accuracy3_after, min_rmse0, min_mae0,
        min_accuracy2_before, min_accuracy3_before, min_bias, min_bias0]))
    maxfile = os.path.join(outpath, 'demmaxt' + str(ll) + '.csv')
    maxtfw = open(maxfile, 'w')
    for pp in range(len(y_test)):
        maxtfw.write(
            str(max_ec[pp]) + ',' + str(preds[pp]) + ',' + str(y_test[pp]))
        maxtfw.write('\n')
    maxtfw.close()
    # write out the minimum temperatures
    minfile = os.path.join(outpath, 'demmint' + str(ll) + '.csv')
    mintfw = open(minfile, 'w')
    for qq in range(len(v_test)):
        mintfw.write(
            str(min_ec[qq]) + ',' + str(preds01[qq]) + ',' + str(v_test[qq]))
        mintfw.write('\n')
    mintfw.close()
Beispiel #48
0
def main():
    ap = argparse.ArgumentParser()
    ap.add_argument('--exp', dest='exp_name', required=True)
    ap.add_argument('--threshold', dest='threshold', type=int, default=20)

    args = ap.parse_args()

    bounds = (slice(100, 180), slice(90, 170))
    radar_elev, radar_lat, radar_lon = 1883, 41.151944, -104.806111
    proj = setupMapProjection(goshen_1km_proj, goshen_1km_gs)
    threshold = args.threshold
    exp_name = args.exp_name
    img_dir = "images-%s/ets_%ddBZ" % (exp_name, threshold)

    map = Basemap(**proj)
    radar_x, radar_y = map(radar_lon, radar_lat)

    obs_base = "hdf/KCYS/1km/goshen.hdfrefl2d"
    obs_times = np.array([int(f[-6:]) for f in glob.glob("%s*" % obs_base)])
    fcst_files = glob.glob(
        "/caps1/tsupinie/1km-control-%s/ena???.hdf014[47]00" % exp_name)
    fcst_files.extend(
        glob.glob("/caps1/tsupinie/1km-control-%s/ena???.hdf01[5678]?00" %
                  exp_name))

    ens_refl, ens_members, ens_times = loadAndInterpolateEnsemble(
        fcst_files, ['pt', 'p', 'qr', 'qs', 'qh'],
        computeReflectivity,
        "/caps1/tsupinie/1km-control-20120712/ena001.hdfgrdbas", {
            'z_base': radar_elev,
            'y_base': radar_y,
            'x_base': radar_x,
            'elev_angle': 0.5
        },
        agl=False,
        wrap=True)  #, aggregator=lambda x: np.mean(x, axis=0))

    #   ens_refl, ens_members, ens_times = loadAndInterpolateEnsemble(fcst_files, ['pt', 'p', 'qr', 'qs', 'qh'], computeReflectivity, "/caps1/tsupinie/1km-control-20120712/ena001.hdfgrdbas",
    #       {'z_base':radar_elev, 'y_base':radar_y, 'x_base':radar_x, 'elev_angle':0.5}, agl=False, wrap=True)

    #   ens_refl_mean = ens_refl.mean(axis=0)

    refl_ens_mean = probMatchMean(ens_refl)

    bounds_rev = [slice(None), slice(None)]
    bounds_rev.extend(bounds[::-1])
    bounds_rev = tuple(bounds_rev)

    #   refl_ens_mean = refl_ens_mean[bounds_rev[1:]]
    #   ens_refl = ens_refl[bounds_rev]

    all_ets = np.empty((len(ens_members), len(ens_times)), dtype=np.float32)
    all_ets_mean = np.empty((len(ens_times), ), dtype=np.float32)

    all_confusion = np.empty(ens_refl.shape, dtype=np.int32)
    all_confusion_mean = np.empty(refl_ens_mean.shape, dtype=np.int32)

    for wdt, time in enumerate(ens_times):
        idx = np.argmin(np.abs(obs_times - time))
        if obs_times[idx] > time and idx > 0:
            idx -= 1

        bounds_obs = [0]
        bounds_obs.extend(bounds[::-1])
        bounds_obs = tuple(bounds_obs)

        obs_file_name = "%s%06d" % (obs_base, obs_times[idx])
        obs_hdf = nio.open_file(obs_file_name, mode='r', format='hdf')
        obs_refl = obs_hdf.variables['refl2d'][0]  #[bounds_obs]

        all_ets_mean[wdt], all_confusion_mean[wdt] = ETS(
            refl_ens_mean[wdt], obs_refl, threshold)
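        # ETS (equitable threat score) is assumed here to be the standard
        # contingency-table score:
        #   ETS = (hits - hits_rand) / (hits + misses + false_alarms - hits_rand),
        #   with hits_rand = (hits + misses) * (hits + false_alarms) / total.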

        gs_x, gs_y = goshen_1km_gs
        for lde, member in enumerate(ens_members):
            all_ets[lde,
                    wdt], all_confusion[lde,
                                        wdt] = ETS(ens_refl[lde, wdt],
                                                   obs_refl, threshold)


            # nx, ny = ens_refl[lde, wdt].shape
            # xs, ys = np.meshgrid( gs_x * np.arange(nx), gs_y * np.arange(ny) )
            # pylab.clf()
            # pylab.contourf(xs, ys, ens_refl[lde, wdt], levels=np.arange(10, 80, 10))
            # pylab.colorbar()
            # pylab.savefig("sweep_interp_%s_%06d.png" % (member, time))

        nx, ny = refl_ens_mean[wdt].shape
        xs, ys = np.meshgrid(gs_x * np.arange(nx), gs_y * np.arange(ny))
        pylab.clf()
        pylab.contourf(xs,
                       ys,
                       refl_ens_mean[wdt],
                       levels=np.arange(10, 80, 10))
        pylab.colorbar()
        pylab.savefig("%s/sweep_interp_mean_%06d.png" % (img_dir, time))

    cPickle.dump(all_ets_mean, open("%s_%ddBZ.pkl" % (exp_name, threshold),
                                    'wb'), -1)  # binary mode for protocol -1

    time_mean_ets = all_ets.mean(axis=1)
    sort_mean_idxs = np.argsort(time_mean_ets)

    pylab.clf()
    for lde, member in enumerate(ens_members):
        print("%d %f" % (sort_mean_idxs[lde] + 1, time_mean_ets[sort_mean_idxs[lde]]))
        pylab.plot(ens_times, all_ets[lde], 'r-', lw=0.75)

    pylab.plot(ens_times, all_ets_mean, 'k-', lw=1.5)

    y_lb, y_ub = pylab.ylim()
    pylab.plot([14400, 14400], [y_lb, y_ub], 'k--', lw=0.5)

    pylab.ylim([y_lb, y_ub])
    pylab.xlim([10800, 18000])

    pylab.xlabel("Time (s)")
    pylab.ylabel("ETS")

    pylab.savefig("%s/ets_swath_mm.png" % img_dir)
    pylab.close()

    for wdt, time in enumerate(ens_times):
        fudge = 16
        if threshold == 20:
            fudge = 32

        plotConfusion(
            all_confusion_mean[wdt],
            map,
            goshen_1km_gs,
            "Confusion for Reflectivity of the Ensemble Mean at time %06d" %
            time,
            "%s/confusion_mean_%06d.png" % (img_dir, time),
            inset=flux_boxes[exp_name][wdt],
            fudge=fudge)

        #      for lde, member in enumerate(ens_members):
        #          plotConfusion(all_confusion[lde, wdt], map, goshen_1km_gs, "Confusion for Reflectivity of Member %s at time %06d" % (member, time), "%s/confusion_ena%s_zoom_%06d.png" % (img_dir, member, time))

        gc.collect()

    return
Beispiel #49
0
if latbl == lattr or lonbl == lontr:
    sys.exit('lat and lon values must be different')
else:
    if latbl < lattr:
        latbl, lattr = lattr, latbl
    if lonbl > lontr:
        lonbl, lontr = lontr, lonbl
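# (Assumed corner convention: 'bl' = bottom-left, 'tr' = top-right; the swaps
# above make latbl the northern latitude and lonbl the western longitude.)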

# read in analysis files

a_fili = "analysis_gfs_4_%s_%s00_000.nc" % (init_dt[:8], init_dt[8:10])

# read pressure levels from analysis file

analysis = nio.open_file(diri + a_fili)

level_dim = analysis.variables["UGRD_P0_L100_GLL0"].dimensions[0]

levs_p1 = analysis.variables[level_dim]
levs_p = ['{:.0f}'.format(x) for x in levs_p1[:] / 100.0]
del levs_p1

# identify level index

lev1_index = levs_p.index(lev1)
lev2_index = levs_p.index(lev2)

# read in lat

lat1 = analysis.variables["lat_0"]
Beispiel #50
0
import argparse
import numpy as np
import Nio

parser = argparse.ArgumentParser()
parser.add_argument('--data-file-SSTAYYC', dest='SSTAYYC_file')
parser.add_argument('--data-file-SSTAVAR', dest='SSTAVAR_file')
parser.add_argument('--domain-file', dest='domain_file')
parser.add_argument('--output-dir', dest='output_dir')
parser.add_argument('--casename', dest='casename')
parser.add_argument('--selected-month', type=int)

args = parser.parse_args()

print(str(args))

selected_month = args.selected_month

g = Nio.open_file(args.domain_file, "r")

lon = g.variables["xc"][1, :]                   #-- read clon
lat = g.variables["yc"][:, 1]                   #-- read clat

lon = ext_axis(lon)
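# (ext_axis and ext are assumed to be user helpers that append a cyclic point
# to the longitude axis and to the data array, respectively.)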

print(lon)
f = Nio.open_file(args.SSTAYYC_file, "r")
data = f.variables["SSTAYYC"][selected_month-1, :, :]
missing_value = f.variables["SSTAYYC"]._FillValue[0]
data[np.isnan(data)] = missing_value

data = ext(data)
f.close()
Beispiel #51
0
import Ngl, Nio
import numpy as np

p0mb = 101325.
p0mb1 = 1013.25
interp = 1
extrap = False
pnew = [800., 750.]
lev = 0
f = Nio.open_file("/home/yumeng/realthesis/LowRes/LowRes_200610.01_tracer.nc")
# interpolation from sigma coordinate to pressure coordinate
# get parameters and variables for interpolation
hyam = f.variables["hyam"][:] / p0mb
hybm = f.variables["hybm"][:]
PS = f.variables["aps"][:, :, :]

f = Nio.open_file("/home/yumeng/realthesis/Uniform/AMRDUST.nc")
DU_CI = f.variables["CI"][:, :, :, :] * 1e6
DU_AI = f.variables["AI"][:, :, :, :] * 1e6
lon_Uni = f.variables["lon"][:]
lat_Uni = f.variables["lat"][:]
# start the interpolation
NewTracer_CI_Uni = Ngl.vinth2p(DU_CI[:, :, :, :], hyam, hybm, pnew,
                               PS[:, :, :], interp, p0mb1, 1, extrap)
NewTracer_AI_Uni = Ngl.vinth2p(DU_AI[:, :, :, :], hyam, hybm, pnew,
                               PS[:, :, :], interp, p0mb1, 1, extrap)
NewTracer_AI_Uni = np.ma.masked_where(NewTracer_AI_Uni == 1.e30,
                                      NewTracer_AI_Uni)
print(np.max(NewTracer_AI_Uni))

# day 10 to 20
Beispiel #52
0
import Ngl
import Nio

#
# Open a netCDF file containing the geodesic grid and data on that grid.
#
# This grid came to us via Dave Randall, Todd Ringler, and Ross Heikes of
# CSU. The data for this mesh were originally downloaded from:
#
#   http://kiwi.atmos.colostate.edu:16080/BUGS/projects/geodesic/
#
# The above URL doesn't seem to be active anymore. Here's a new URL:
#
#   http://kiwi.atmos.colostate.edu/BUGS/geodesic/interpolate.html
#
dirc = Ngl.pynglpath("data")
cfile = Nio.open_file(dirc + "/cdf/hswm_d000000p000.g2.nc", "r")

#
#  Read the grid centers and the kinetic energy into local variables.
#
r2d = 57.2957795  # radians to degrees
x = cfile.variables["grid_center_lon"][:] * r2d
y = cfile.variables["grid_center_lat"][:] * r2d
cx = cfile.variables["grid_corner_lon"][:] * r2d
cy = cfile.variables["grid_corner_lat"][:] * r2d
ke = cfile.variables["kinetic_energy"][2, :]

#
#  Open a workstation.
#
wks_type = "png"
            infile = os.path.join(
                indir, '%s.%s.clim.%04d-%04d.nc' % (case, var, yrstart, yrend))
        else:
            infile = os.path.join(
                indir, case,
                '%s.%s.clim.%04d-%04d.nc' % (case, var, yrstart, yrend))

        if os.path.isfile(infile):  # is file/variable available?

            for kz in range(len(dpthsin)):
                zdpth = dpthsin[kz]

                #----------------------------------------------------------------------
                # read model data
                print('reading %s' % (os.path.basename(infile)))
                fpmod = Nio.open_file(infile, 'r')
                zt = fpmod.variables['z_t'][:] / 100.  # cm -> m
                zw = fpmod.variables['z_w'][:] / 100.  # cm -> m
                dz = fpmod.variables['dz'][:] / 100.  # cm -> m
                area = fpmod.variables['TAREA'][:] * 1.e-4  # cm^2 -> m^2
                variables[var]['fillvalue'] = fpmod.variables[var]._FillValue[
                    0]

                # read specified depth
                zind = N.searchsorted(zt, zdpth).item()
                dpthmod = int(zt[zind])
                data['mod'][var] = fpmod.variables[var][:, zind, :, :]
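                # (Above, N.searchsorted gives the first index with
                # zt[zind] >= zdpth, assuming z_t increases monotonically.)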

                fpmod.close()  # close model file

                # compute temporal mean
Beispiel #54
0
def main(argv):

    # Get command line stuff and store in a dictionary
    s = 'verbose sumfile= indir= timeslice= nPC= sigMul= minPCFail= minRunFail= numRunFile= printVarTest'
    optkeys = s.split()
    try:
        opts, args = getopt.getopt(argv, "h", optkeys)
    except getopt.GetoptError:
        pyEnsLib.CECT_usage()
        sys.exit(2)

    # Set the default value for options
    opts_dict = {}
    opts_dict['timeslice'] = 1
    opts_dict['nPC'] = 50
    opts_dict['sigMul'] = 2
    opts_dict['verbose'] = False
    opts_dict['minPCFail'] = 3
    opts_dict['minRunFail'] = 2
    opts_dict['numRunFile'] = 3
    opts_dict['printVarTest'] = False
    # Call utility library getopt_parseconfig to parse the option keys
    # and save to the dictionary
    caller = 'CECT'
    gmonly = False
    opts_dict = pyEnsLib.getopt_parseconfig(opts, optkeys, caller, opts_dict)

    # Print out timestamp, input ensemble file and new run directory
    dt = datetime.now()
    verbose = opts_dict['verbose']
    print('--------pyCECT--------')
    print(' ')
    print(dt.strftime("%A, %d. %B %Y %I:%M%p"))
    print(' ')
    print('Ensemble summary file = ' + opts_dict['sumfile'])
    print(' ')
    print('Cam output directory = ' + opts_dict['indir'])
    print(' ')
    print(' ')

    # Open all input files
    ifiles = []
    in_files_temp = os.listdir(opts_dict['indir'])
    in_files = sorted(in_files_temp)
    in_files_random = pyEnsLib.Random_pickup(in_files, opts_dict)
    for frun_file in in_files_random:
        if (os.path.isfile(opts_dict['indir'] + '/' + frun_file)):
            ifiles.append(
                Nio.open_file(opts_dict['indir'] + '/' + frun_file, "r"))
        else:
            print "COULD NOT LOCATE FILE " + opts_dict[
                'indir'] + frun_file + " EXISTING"
            sys.exit()

    # Read all variables from the ensemble summary file
    ens_var_name, ens_avg, ens_stddev, ens_rmsz, ens_gm, num_3d, mu_gm, sigma_gm, loadings_gm, sigma_scores_gm = pyEnsLib.read_ensemble_summary(
        opts_dict['sumfile'])

    if len(ens_rmsz) == 0:
        gmonly = True
    # Add ensemble rmsz and global mean to the dictionary "variables"
    variables = {}
    if not gmonly:
        for k, v in ens_rmsz.items():
            pyEnsLib.addvariables(variables, k, 'zscoreRange', v)

    for k, v in ens_gm.items():
        pyEnsLib.addvariables(variables, k, 'gmRange', v)

    # Get 3d variable name list and 2d variable name list separately
    var_name3d = []
    var_name2d = []
    for vcount, v in enumerate(ens_var_name):
        if vcount < num_3d:
            var_name3d.append(v)
        else:
            var_name2d.append(v)

    # Get ncol and nlev value
    npts3d, npts2d, is_SE = pyEnsLib.get_ncol_nlev(ifiles[0])

    # Compare the new run and the ensemble summary file to get rmsz score
    results = {}
    countzscore = np.zeros(len(ifiles), dtype=np.int32)
    countgm = np.zeros(len(ifiles), dtype=np.int32)
    if not gmonly:
        for fcount, fid in enumerate(ifiles):
            otimeSeries = fid.variables
            for var_name in ens_var_name:
                orig = otimeSeries[var_name]
                Zscore, has_zscore = pyEnsLib.calculate_raw_score(
                    var_name, orig[opts_dict['timeslice']], npts3d, npts2d,
                    ens_avg, ens_stddev, is_SE)
                if has_zscore:
                    # Add the new run rmsz zscore to the dictionary "results"
                    pyEnsLib.addresults(results, 'zscore', Zscore, var_name,
                                        'f' + str(fcount))

        # Evaluate the new run rmsz score if is in the range of the ensemble summary rmsz zscore range
        for fcount, fid in enumerate(ifiles):
            countzscore[fcount] = pyEnsLib.evaluatestatus(
                'zscore', 'zscoreRange', variables, 'ens', results,
                'f' + str(fcount))

    # Calculate the new run global mean
    mean3d, mean2d = pyEnsLib.generate_global_mean_for_summary(
        ifiles, var_name3d, var_name2d, opts_dict['timeslice'], is_SE, verbose)
    means = np.concatenate((mean3d, mean2d), axis=0)

    # Add the new run global mean to the dictionary "results"
    for i in range(means.shape[1]):
        for j in range(means.shape[0]):
            pyEnsLib.addresults(results, 'means', means[j][i], ens_var_name[j],
                                'f' + str(i))

    # Evaluate the new run global mean if it is in the range of the ensemble summary global mean range
    for fcount, fid in enumerate(ifiles):
        countgm[fcount] = pyEnsLib.evaluatestatus('means', 'gmRange',
                                                  variables, 'gm', results,
                                                  'f' + str(fcount))

    # Calculate the PCA scores of the new run
    new_scores = pyEnsLib.standardized(means, mu_gm, sigma_gm, loadings_gm)
    pyEnsLib.comparePCAscores(ifiles, new_scores, sigma_scores_gm, opts_dict)
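    # (pyEnsLib.standardized is assumed to z-score the new global means with
    # mu_gm / sigma_gm and project them onto the PCA loadings, giving per-PC
    # scores that comparePCAscores checks against sigma_scores_gm.)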

    # Print out
    if opts_dict['printVarTest']:
        print('*********************************************** ')
        print('Variable-based testing (for reference only - not used to determine pass/fail)')
        print('*********************************************** ')
        for fcount, fid in enumerate(ifiles):
            print(' ')
            print('Run ' + str(fcount + 1) + ":")
            print(' ')
            if not gmonly:
                print('***' + str(countzscore[fcount]),
                      " of " + str(len(ens_var_name)) +
                      ' variables are outside of ensemble RMSZ distribution***')
                pyEnsLib.printsummary(results, 'ens', 'zscore', 'zscoreRange',
                                      (fcount), variables, 'RMSZ')
                print(' ')
            print('***' + str(countgm[fcount]),
                  " of " + str(len(ens_var_name)) +
                  ' variables are outside of ensemble global mean distribution***')
            pyEnsLib.printsummary(results, 'gm', 'means', 'gmRange', fcount,
                                  variables, 'global mean')
            print(' ')
            print('----------------------------------------------------------------------------')
Beispiel #55
0
import os
import sys
import numpy as np
import Ngl
import Nio

# Test if file exists
filename = "3B-MO.MS.MRG.3IMERG.20140701-S000000-E235959.07.V03D.HDF5"
if (not os.path.exists(filename)):
    print("You do not have the necessary '{}' HDF5 file to run this example.".
          format(filename))
    print(
        "You need to supply your own HDF5 data or download the file from http://www.ncl.ucar.edu/Applications/Data/"
    )
    sys.exit()

# Be sure to read this file using the advanced file structure.
opt = Nio.options()
opt.FileStructure = 'advanced'
f = Nio.open_file(filename, "r", options=opt)

# Open group "Grid" which will now look like a regular NioFile
g = f.groups['Grid']

# Read data from this group
precip = g.variables['precipitation']
lat = g.variables['lat'][:]
lon = g.variables['lon'][:]

# Print the metadata of precip, and min/max values
print(precip)
print("min/max = {:g} / {:g}".format(precip[:].min(), precip[:].max()))

wks_type = "png"
wks = Ngl.open_wks(wks_type, "mask1")
Beispiel #56
0
import Ngl
import Nio
import numpy
import os.path
#This script takes Grib files from NAM simulations and converts them
#into .dat format for Newmanv3.1
t=0
Dates = ["20160609","20160610","20160611"]
Initials = ["0000","0600","1200","1800"]
Hours = ["000","001","002","003","004","005"]

for day in Dates:
    for init in Initials:
        for hr in Hours:
            filename = "nam_218_"+day+"_"+init+"_"+hr+".grb2"
            print(filename)
            
            file = Nio.open_file(filename,"r")
            names = file.variables.keys()

            # level 30 is the 800 millibar level
            uvar = file.variables["UGRD_P0_L100_GLC0"][:,:,:]
            uvar = numpy.squeeze(uvar[30,:,:])
            dim = numpy.shape(uvar)

            vvar = file.variables["VGRD_P0_L100_GLC0"][:,:,:]
            vvar = numpy.squeeze(vvar[30,:,:])

            #hardcoded origin to save time
            iorg = 195
            jorg = 155
            #choose -102 +103, want 2000km domain, with 500km bufferzone 
            #(total velocity field defined on 2500km^2)  centered at (36.7964,-120.822), 
Beispiel #57
0
    res.cnLevels = numpy.arange(-12, 42, 2)

    res.cnFillOn = True
    res.cnFillPalette = "BlueYellowRed"

    res.cnLinesOn = False
    res.cnLineLabelsOn = False
    res.cnInfoLabelOn = False

    res.lbOrientation = "Horizontal"

    return (res)


# Read in zonal winds
f = Nio.open_file("$NCARG_ROOT/lib/ncarg/data/cdf/uv300.nc", "r")
u = f.variables["U"]
lat = f.variables["lat"]
lon = f.variables["lon"]

# Start the graphics
wks_type = "png"
wks = Ngl.open_wks(wks_type, "./output/ngl_report/newcolor6")

res = set_common_resources()

# Set resources for contour/map plot
bres = set_common_resources()

bres.mpFillOn = False
bres.tiMainString = "Use transparency to emphasize a particular area"
Beispiel #58
0
def itp_dis():
    if state.get() == 'normal':
        z_entries = [ent_agr_z, ent_ind_z, ent_pow_z, ent_res_z, ent_tra_z]
        t_entries = [ent_agr_t, ent_ind_t, ent_pow_t, ent_res_t, ent_tra_t]
        if any(fnmatch.fnmatch(e.get().replace(' ', '').replace('.', ''), '*[!0-9]*')
               for e in z_entries + t_entries):
            tk.messagebox.showerror(title='error',message='input format must be 0.23 0.34 ...')
        elif len({len(e.get().strip().split(' ')) for e in z_entries}) != 1:
            tk.messagebox.showerror(title='error',message='number of z factor must be equal')
        elif not all(len(e.get().strip().split(' ')) == 24 for e in t_entries):
            tk.messagebox.showerror(title='error',message='number of t factor must be 24')
        else: 
            agr_z=[float(i) for i in ent_agr_z.get().strip().split(' ')]
            ind_z=[float(i) for i in ent_ind_z.get().strip().split(' ')]
            pow_z=[float(i) for i in ent_pow_z.get().strip().split(' ')]
            res_z=[float(i) for i in ent_res_z.get().strip().split(' ')]
            tra_z=[float(i) for i in ent_tra_z.get().strip().split(' ')]
            sec_z=[agr_z,ind_z,pow_z,res_z,tra_z,]
            agr_t=[float(i) for i in ent_agr_t.get().strip().split(' ')]
            ind_t=[float(i) for i in ent_ind_t.get().strip().split(' ')]
            pow_t=[float(i) for i in ent_pow_t.get().strip().split(' ')]
            res_t=[float(i) for i in ent_res_t.get().strip().split(' ')]
            tra_t=[float(i) for i in ent_tra_t.get().strip().split(' ')]
            sec_t=[agr_t,ind_t,pow_t,res_t,tra_t,]
            
            f_inp=Nio.open_file(ent_inp.get(),format='nc')
            lon_inp=f_inp.variables['XLONG'][0,:]
            lat_inp=f_inp.variables['XLAT'][0,:]
            time_inp=f_inp.variables['Times'][:][0]
            time_inp=''.join([i.decode('utf-8') for i in time_inp]).split('_')[0]
            f_inp.close()

        #put all the distributed meic species into meic_spec_emis:

            meic_spec_emis=[]
        #inorganic gas: ton/(grid.month) to mole/(km2.h)
            for spec,M in zip(['CO','CO2','NH3','NOx','SO2',],[28,44,17,46,64]):
                f_post=Nio.open_file(ent_dir.get()+'/merged/'+spec+'.nc')
                lon=f_post.variables['lon'][:]
                lat=f_post.variables['lat'][:]
                section=[(f_post.variables[sec][:,:]*1e6)/(ll_area(lat,0.25)*30*24*M) for sec in ['act','idt','pwr','rdt','tpt',]]
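                # (Unit bookkeeping: ton -> g is 1e6; dividing by the molar
                # mass M gives mol, and by the ll_area grid area [km^2] and
                # 30*24 h gives mol km^-2 h^-1.)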
                f_post.close()
                sections=[meic2wrf(lon_inp,lat_inp,lon,lat,emis,) for emis in section]
                c=[sec2zt(i,j,k) for i,j,k in zip(sections,sec_z,sec_t)]
                c=sum(c)
                meic_spec_emis.append(c)
        #organic gas: million_mole/(grid.month) to mole/(km2.h)
            for spec in ['ALD','CSL','ETH','GLY','HC3','HC5','HC8','HCHO','ISO','KET','MACR','MGLY','MVK','NR','NVOL',
            'OL2','OLI','OLT','ORA1','ORA2','TOL','XYL',]:
                f_post=Nio.open_file(ent_dir.get()+'/merged/'+spec+'.nc')
            #lon=f_post.variables['lon'][:]
            #lat=f_post.variables['lat'][:]
                section=[(f_post.variables[sec][:,:]*1e6)/(ll_area(lat,0.25)*30*24) for sec in ['act','idt','pwr','rdt','tpt',]]
                f_post.close()
                sections=[meic2wrf(lon_inp,lat_inp,lon,lat,emis,) for emis in section]
                c=[sec2zt(i,j,k) for i,j,k in zip(sections,sec_z,sec_t)]
                c=sum(c)
                meic_spec_emis.append(c)
        #aerosol: ton/(grid.month) to ug/(m2.s)
            for spec in ['BC','OC','PM2.5','PMcoarse',]:
                f_post=Nio.open_file(ent_dir.get()+'/merged/'+spec+'.nc')
            #lon=f_post.variables['lon'][:]
            #lat=f_post.variables['lat'][:]
                section=[(f_post.variables[sec][:,:]*1e6)/(ll_area(lat,0.25)*30*24*3600) for sec in ['act','idt','pwr','rdt','tpt',]]
                f_post.close()
                sections=[meic2wrf(lon_inp,lat_inp,lon,lat,emis,) for emis in section]
                c=[sec2zt(i,j,k) for i,j,k in zip(sections,sec_z,sec_t)]
                c=sum(c)
                meic_spec_emis.append(c)

            #meic emission to RADM2 chemistry scheme:

            wrf_spec_emis=[np.zeros(meic_spec_emis[0][:].shape, dtype='float32')]*31

            wrf_spec_emis[0]=meic_spec_emis[0] #wrf: CO
            wrf_spec_emis[1]=meic_spec_emis[2] #wrf: NH3
            wrf_spec_emis[2]=meic_spec_emis[3]*0.9 #wrf: NO
            wrf_spec_emis[3]=meic_spec_emis[3]*0.1 #wrf: NO2
            wrf_spec_emis[4]=meic_spec_emis[4]*0.9 #wrf: SO2
            wrf_spec_emis[5]=meic_spec_emis[5] #wrf: ALD
            wrf_spec_emis[6]=meic_spec_emis[6] #wrf: CSL
            wrf_spec_emis[7]=meic_spec_emis[7] #wrf: ETH
            wrf_spec_emis[8]=meic_spec_emis[9] #wrf: HC3
            wrf_spec_emis[9]=meic_spec_emis[10] #wrf: HC5
            wrf_spec_emis[10]=meic_spec_emis[11] #wrf: HC8
            wrf_spec_emis[11]=meic_spec_emis[12] #wrf: HCHO
            wrf_spec_emis[12]=meic_spec_emis[13] #wrf: ISO
            wrf_spec_emis[13]=meic_spec_emis[14] #wrf: KET
            wrf_spec_emis[14]=meic_spec_emis[20]*1.1 #wrf: OL2
            wrf_spec_emis[15]=meic_spec_emis[21]*1.1 #wrf: OLI
            wrf_spec_emis[16]=meic_spec_emis[22]*1.1 #wrf: OLT
            wrf_spec_emis[17]=meic_spec_emis[24] #wrf: ORA2
            wrf_spec_emis[18]=meic_spec_emis[25]*1.1 #wrf: TOL
            wrf_spec_emis[19]=meic_spec_emis[26]*1.1 #wrf: XYL
            wrf_spec_emis[20]=meic_spec_emis[27]*0.2 #wrf: ECi
            wrf_spec_emis[21]=meic_spec_emis[27]*0.8 #wrf: ECj
            wrf_spec_emis[22]=meic_spec_emis[28]*0.2 #wrf: ORGi
            wrf_spec_emis[23]=meic_spec_emis[28]*0.8 #wrf: ORGj
            wrf_spec_emis[24]=meic_spec_emis[29]-meic_spec_emis[28]-meic_spec_emis[27]*0.2 #wrf: PM25i
            wrf_spec_emis[25]=meic_spec_emis[29]-meic_spec_emis[28]-meic_spec_emis[27]*0.8 #wrf: PM25j
            wrf_spec_emis[26]=meic_spec_emis[30]*0.8 #wrf: PM10
            wrf_spec_emis[27]=np.zeros(meic_spec_emis[0][:].shape, dtype='float32') #wrf: SO4i
            wrf_spec_emis[28]=np.zeros(meic_spec_emis[0][:].shape, dtype='float32') #wrf: SO4j
            wrf_spec_emis[29]=np.zeros(meic_spec_emis[0][:].shape, dtype='float32') #wrf: NO3i
            wrf_spec_emis[30]=np.zeros(meic_spec_emis[0][:].shape, dtype='float32') #wrf: NO3j
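            # (The 0.9/0.1 NO/NO2 split, the 1.1 factors on several VOC species
            # and the 0.2/0.8 i/j (Aitken/accumulation) mode splits above are
            # mapping assumptions carried in this script, not MEIC values.)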

            #generate wrfchemi_00z_d01 anthropogenic emission data for wrf-chem model run:
            if os.path.exists(ent_dir.get()+'/merged/'+'wrfchemi_00z_'+ent_inp.get().split('_')[-1]):
                os.remove(ent_dir.get()+'/merged/'+'wrfchemi_00z_'+ent_inp.get().split('_')[-1])
                f_chem=Nio.open_file(ent_dir.get()+'/merged/'+'wrfchemi_00z_'+ent_inp.get().split('_')[-1],'c',format='nc')

                f_chem.create_dimension('Time',None)
                f_chem.create_dimension('emissions_zdim',wrf_spec_emis[0].shape[1])
                f_chem.create_dimension('south_north',wrf_spec_emis[0].shape[2])
                f_chem.create_dimension('west_east',wrf_spec_emis[0].shape[3])
                f_chem.create_dimension('DateStrLen',19)

                f_chem.create_variable('Times','S1',('Time','DateStrLen'),)
                for i,time in enumerate([time_inp+'_00:00:00',time_inp+'_01:00:00',time_inp+'_02:00:00',time_inp+'_03:00:00',time_inp+'_04:00:00',
            time_inp+'_05:00:00',time_inp+'_06:00:00',time_inp+'_07:00:00',time_inp+'_08:00:00',time_inp+'_09:00:00',time_inp+'_10:00:00',
            time_inp+'_11:00:00',]):
                    f_chem.variables['Times'][i]=list(time) #split the string into characters

                for ll, LL in zip([lon_inp, lat_inp],['XLONG', 'XLAT']):
                    f_chem.create_variable(LL, 'f', ('south_north', 'west_east',),)
                    f_chem.variables[LL][:]=ll

                radm_gas=['E_CO','E_NH3','E_NO','E_NO2','E_SO2','E_ALD','E_CSL','E_ETH','E_HC3','E_HC5','E_HC8','E_HCHO','E_ISO','E_KET',
             'E_OL2','E_OLI','E_OLT','E_ORA2','E_TOL','E_XYL',]
                radm_aerosol=['E_ECI','E_ECJ','E_ORGI','E_ORGJ','E_PM25I','E_PM25J','E_PM_10','E_SO4I','E_SO4J','E_NO3I','E_NO3J',]

                for gas in radm_gas:
                    f_chem.create_variable(gas,'f',('Time','emissions_zdim','south_north','west_east',))
                    f_chem.variables[gas].FieldType = np.int16(104)
                    f_chem.variables[gas].MemoryOrder = 'XYZ'
                    f_chem.variables[gas].description = 'EMISSIONS'
                    f_chem.variables[gas].units = 'mol km^-2 hr^-1'
                    f_chem.variables[gas].stagger = 'Z'
                    #f_chem.variables[gas].ordinates = 'XLONG XLAT'
                for aerosol in radm_aerosol:
                    f_chem.create_variable(aerosol,'f',('Time','emissions_zdim','south_north','west_east',))
                    f_chem.variables[aerosol].FieldType = np.int16(104)
                    f_chem.variables[aerosol].MemoryOrder = 'XYZ'
                    f_chem.variables[aerosol].description = 'EMISSIONS'
                    f_chem.variables[aerosol].units = 'ug/m3 m/s'
                    f_chem.variables[aerosol].stagger = 'Z'
                    #f_chem.variables[aerosol].ordinates = 'XLONG XLAT'

                radm_spec=['E_CO','E_NH3','E_NO','E_NO2','E_SO2','E_ALD','E_CSL','E_ETH','E_HC3','E_HC5','E_HC8','E_HCHO','E_ISO','E_KET',
             'E_OL2','E_OLI','E_OLT','E_ORA2','E_TOL','E_XYL','E_ECI','E_ECJ','E_ORGI','E_ORGJ','E_PM25I','E_PM25J','E_PM_10','E_SO4I',
            'E_SO4J','E_NO3I','E_NO3J',]

                for i,spec in enumerate(radm_spec):
                    f_chem.variables[spec][:] = wrf_spec_emis[i][0:12,:,:,:] #dimensions must match the variable definition

                f_chem.close()
                os.rename(ent_dir.get()+'/merged/'+'wrfchemi_00z_'+ent_inp.get().split('_')[-1]+'.nc', ent_dir.get()+'/merged/'+'wrfchemi_00z_'+ent_inp.get().split('_')[-1])
            else:
                f_chem=Nio.open_file(ent_dir.get()+'/merged/'+'wrfchemi_00z_'+ent_inp.get().split('_')[-1],'c',format='nc')
                f_chem.create_dimension('Time',None)
                f_chem.create_dimension('emissions_zdim',wrf_spec_emis[0].shape[1])
                f_chem.create_dimension('south_north',wrf_spec_emis[0].shape[2])
                f_chem.create_dimension('west_east',wrf_spec_emis[0].shape[3])
                f_chem.create_dimension('DateStrLen',19)

                f_chem.create_variable('Times','S1',('Time','DateStrLen'),)
                for i,time in enumerate([time_inp+'_00:00:00',time_inp+'_01:00:00',time_inp+'_02:00:00',time_inp+'_03:00:00',time_inp+'_04:00:00',
            time_inp+'_05:00:00',time_inp+'_06:00:00',time_inp+'_07:00:00',time_inp+'_08:00:00',time_inp+'_09:00:00',time_inp+'_10:00:00',
            time_inp+'_11:00:00',]):
                    f_chem.variables['Times'][i]=list(time) #split the string to char

                for ll, LL in zip([lon_inp, lat_inp],['XLONG', 'XLAT']):
                    f_chem.create_variable(LL, 'f', ('south_north', 'west_east',),)
                    f_chem.variables[LL][:]=ll

                radm_gas=['E_CO','E_NH3','E_NO','E_NO2','E_SO2','E_ALD','E_CSL','E_ETH','E_HC3','E_HC5','E_HC8','E_HCHO','E_ISO','E_KET',
             'E_OL2','E_OLI','E_OLT','E_ORA2','E_TOL','E_XYL',]
                radm_aerosol=['E_ECI','E_ECJ','E_ORGI','E_ORGJ','E_PM25I','E_PM25J','E_PM_10','E_SO4I','E_SO4J','E_NO3I','E_NO3J',]

                for gas in radm_gas:
                    f_chem.create_variable(gas,'f',('Time','emissions_zdim','south_north','west_east',))
                    f_chem.variables[gas].FieldType = np.int16(104)
                    f_chem.variables[gas].MemoryOrder = 'XYZ'
                    f_chem.variables[gas].description = 'EMISSIONS'
                    f_chem.variables[gas].units = 'mol km^-2 hr^-1'
                    f_chem.variables[gas].stagger = 'Z'
                    #f_chem.variables[gas].ordinates = 'XLONG XLAT'
                for aerosol in radm_aerosol:
                    f_chem.create_variable(aerosol,'f',('Time','emissions_zdim','south_north','west_east',))
                    f_chem.variables[aerosol].FieldType = np.int16(104)
                    f_chem.variables[aerosol].MemoryOrder = 'XYZ'
                    f_chem.variables[aerosol].description = 'EMISSIONS'
                    f_chem.variables[aerosol].units = 'ug/m3 m/s'
                    f_chem.variables[aerosol].stagger = 'Z'
                    #f_chem.variables[aerosol].ordinates = 'XLONG XLAT'

                radm_spec=['E_CO','E_NH3','E_NO','E_NO2','E_SO2','E_ALD','E_CSL','E_ETH','E_HC3','E_HC5','E_HC8','E_HCHO','E_ISO','E_KET',
             'E_OL2','E_OLI','E_OLT','E_ORA2','E_TOL','E_XYL','E_ECI','E_ECJ','E_ORGI','E_ORGJ','E_PM25I','E_PM25J','E_PM_10','E_SO4I',
            'E_SO4J','E_NO3I','E_NO3J',]

                for i,spec in enumerate(radm_spec):
                    f_chem.variables[spec][:] = wrf_spec_emis[i][0:12,:,:,:] #dimensions must match the variable definition

                f_chem.close()
                os.rename(ent_dir.get()+'/merged/'+'wrfchemi_00z_'+ent_inp.get().split('_')[-1]+'.nc', ent_dir.get()+'/merged/'+'wrfchemi_00z_'+ent_inp.get().split('_')[-1])
            
            #generate wrfchemi_12z_d01 anthropogenic emission data for wrf-chem model run:
            if os.path.exists(ent_dir.get()+'/merged/'+'wrfchemi_12z_'+ent_inp.get().split('_')[-1]):
                os.remove(ent_dir.get()+'/merged/'+'wrfchemi_12z_'+ent_inp.get().split('_')[-1])
                f_chem=Nio.open_file(ent_dir.get()+'/merged/'+'wrfchemi_12z_'+ent_inp.get().split('_')[-1],'c',format='nc')

                f_chem.create_dimension('Time',None)
                f_chem.create_dimension('emissions_zdim',wrf_spec_emis[0].shape[1])
                f_chem.create_dimension('south_north',wrf_spec_emis[0].shape[2])
                f_chem.create_dimension('west_east',wrf_spec_emis[0].shape[3])
                f_chem.create_dimension('DateStrLen',19)

                f_chem.create_variable('Times','S1',('Time','DateStrLen'),)
                for i,time in enumerate([time_inp+'_12:00:00',time_inp+'_13:00:00',time_inp+'_14:00:00',time_inp+'_15:00:00',time_inp+'_16:00:00',
            time_inp+'_17:00:00',time_inp+'_18:00:00',time_inp+'_19:00:00',time_inp+'_20:00:00',time_inp+'_21:00:00',time_inp+'_22:00:00',
            time_inp+'_23:00:00',]):
                    f_chem.variables['Times'][i]=list(time)

                for ll, LL in zip([lon_inp, lat_inp],['XLONG', 'XLAT']):
                    f_chem.create_variable(LL, 'f', ('south_north', 'west_east',),)
                    f_chem.variables[LL][:]=ll

                radm_gas=['E_CO','E_NH3','E_NO','E_NO2','E_SO2','E_ALD','E_CSL','E_ETH','E_HC3','E_HC5','E_HC8','E_HCHO','E_ISO','E_KET',
             'E_OL2','E_OLI','E_OLT','E_ORA2','E_TOL','E_XYL',]
                radm_aerosol=['E_ECI','E_ECJ','E_ORGI','E_ORGJ','E_PM25I','E_PM25J','E_PM_10','E_SO4I','E_SO4J','E_NO3I','E_NO3J',]

                for gas in radm_gas:
                    f_chem.create_variable(gas,'f',('Time','emissions_zdim','south_north','west_east',))
                    f_chem.variables[gas].FieldType = np.int16(104)
                    f_chem.variables[gas].MemoryOrder = 'XYZ'
                    f_chem.variables[gas].description = 'EMISSIONS'
                    f_chem.variables[gas].units = 'mol km^-2 hr^-1'
                    f_chem.variables[gas].stagger = 'Z'
                    #f_chem.variables[gas].ordinates = 'XLONG XLAT'
                for aerosol in radm_aerosol:
                    f_chem.create_variable(aerosol,'f',('Time','emissions_zdim','south_north','west_east',))
                    f_chem.variables[aerosol].FieldType = np.int16(104)
                    f_chem.variables[aerosol].MemoryOrder = 'XYZ'
                    f_chem.variables[aerosol].description = 'EMISSIONS'
                    f_chem.variables[aerosol].units = 'ug/m3 m/s'
                    f_chem.variables[aerosol].stagger = 'Z'
                    #f_chem.variables[aerosol].ordinates = 'XLONG XLAT'

                radm_spec=['E_CO','E_NH3','E_NO','E_NO2','E_SO2','E_ALD','E_CSL','E_ETH','E_HC3','E_HC5','E_HC8','E_HCHO','E_ISO','E_KET',
             'E_OL2','E_OLI','E_OLT','E_ORA2','E_TOL','E_XYL','E_ECI','E_ECJ','E_ORGI','E_ORGJ','E_PM25I','E_PM25J','E_PM_10','E_SO4I',
            'E_SO4J','E_NO3I','E_NO3J',]

                for i,spec in enumerate(radm_spec):
                    f_chem.variables[spec][:] = wrf_spec_emis[i][12:24,:,:,:] #dimensions must match the variable definition

                f_chem.close()
                os.rename(ent_dir.get()+'/merged/'+'wrfchemi_12z_'+ent_inp.get().split('_')[-1]+'.nc', ent_dir.get()+'/merged/'+'wrfchemi_12z_'+ent_inp.get().split('_')[-1])
            else:
                f_chem=Nio.open_file(ent_dir.get()+'/merged/'+'wrfchemi_12z_'+ent_inp.get().split('_')[-1],'c',format='nc')
                f_chem.create_dimension('Time',None)
                f_chem.create_dimension('emissions_zdim',wrf_spec_emis[0].shape[1])
                f_chem.create_dimension('south_north',wrf_spec_emis[0].shape[2])
                f_chem.create_dimension('west_east',wrf_spec_emis[0].shape[3])
                f_chem.create_dimension('DateStrLen',19)

                f_chem.create_variable('Times','S1',('Time','DateStrLen'),)
                for i,time in enumerate([time_inp+'_12:00:00',time_inp+'_13:00:00',time_inp+'_14:00:00',time_inp+'_15:00:00',time_inp+'_16:00:00',
            time_inp+'_17:00:00',time_inp+'_18:00:00',time_inp+'_19:00:00',time_inp+'_20:00:00',time_inp+'_21:00:00',time_inp+'_22:00:00',
            time_inp+'_23:00:00',]):
                    f_chem.variables['Times'][i]=list(time)

                for ll, LL in zip([lon_inp, lat_inp],['XLONG', 'XLAT']):
                    f_chem.create_variable(LL, 'f', ('south_north', 'west_east',),)
                    f_chem.variables[LL][:]=ll

                radm_gas=['E_CO','E_NH3','E_NO','E_NO2','E_SO2','E_ALD','E_CSL','E_ETH','E_HC3','E_HC5','E_HC8','E_HCHO','E_ISO','E_KET',
             'E_OL2','E_OLI','E_OLT','E_ORA2','E_TOL','E_XYL',]
                radm_aerosol=['E_ECI','E_ECJ','E_ORGI','E_ORGJ','E_PM25I','E_PM25J','E_PM_10','E_SO4I','E_SO4J','E_NO3I','E_NO3J',]

                for gas in radm_gas:
                    f_chem.create_variable(gas,'f',('Time','emissions_zdim','south_north','west_east',))
                    f_chem.variables[gas].FieldType = np.int16(104)
                    f_chem.variables[gas].MemoryOrder = 'XYZ'
                    f_chem.variables[gas].description = 'EMISSIONS'
                    f_chem.variables[gas].units = 'mol km^-2 hr^-1'
                    f_chem.variables[gas].stagger = 'Z'
                    #f_chem.variables[gas].ordinates = 'XLONG XLAT'
                for aerosol in radm_aerosol:
                    f_chem.create_variable(aerosol,'f',('Time','emissions_zdim','south_north','west_east',))
                    f_chem.variables[aerosol].FieldType = np.int16(104)
                    f_chem.variables[aerosol].MemoryOrder = 'XYZ'
                    f_chem.variables[aerosol].description = 'EMISSIONS'
                    f_chem.variables[aerosol].units = 'ug/m3 m/s'
                    f_chem.variables[aerosol].stagger = 'Z'
                    #f_chem.variables[aerosol].ordinates = 'XLONG XLAT'

                radm_spec=['E_CO','E_NH3','E_NO','E_NO2','E_SO2','E_ALD','E_CSL','E_ETH','E_HC3','E_HC5','E_HC8','E_HCHO','E_ISO','E_KET',
             'E_OL2','E_OLI','E_OLT','E_ORA2','E_TOL','E_XYL','E_ECI','E_ECJ','E_ORGI','E_ORGJ','E_PM25I','E_PM25J','E_PM_10','E_SO4I',
            'E_SO4J','E_NO3I','E_NO3J',]

                for i,spec in enumerate(radm_spec):
                    f_chem.variables[spec][:] = wrf_spec_emis[i][12:24,:,:,:] #dimensions must match the variable definition

                f_chem.close()
                os.rename(ent_dir.get()+'/merged/'+'wrfchemi_12z_'+ent_inp.get().split('_')[-1]+'.nc', ent_dir.get()+'/merged/'+'wrfchemi_12z_'+ent_inp.get().split('_')[-1])

    elif state.get() =='disable':
        f_inp=Nio.open_file(ent_inp.get(),format='nc')
        lon_inp=f_inp.variables['XLONG'][0,:]
        lat_inp=f_inp.variables['XLAT'][0,:]
        time_inp=f_inp.variables['Times'][:][0]
        time_inp=''.join([i.decode('utf-8') for i in time_inp]).split('_')[0]
        f_inp.close()
                                     
    #put all the distributed meic species into meic_spec_emis:
        meic_spec_emis=[]
    #inorganic gas: ton/(grid.month) to mole/(km2.h)
        for spec,M in zip(['CO','CO2','NH3','NOx','SO2',],[28,44,17,46,64]):
            f_post=Nio.open_file(ent_dir.get()+'/merged/'+spec+'.nc')
            lon=f_post.variables['lon'][:]
            lat=f_post.variables['lat'][:]
            section=[(f_post.variables[sec][:,:]*1e6)/(ll_area(lat,0.25)*30*24*M) for sec in ['act','idt','pwr','rdt','tpt',]]
            f_post.close()
            sections=[meic2wrf(lon_inp,lat_inp,lon,lat,emis,) for emis in section]
            c=[sec2zt(i,j,k) for i,j,k in zip(sections,sec_z_d,sec_t_d)]
            c=sum(c)
            meic_spec_emis.append(c)
    #organic gas: million_mole/(grid.month) to mole/(km2.h)
        for spec in ['ALD','CSL','ETH','GLY','HC3','HC5','HC8','HCHO','ISO','KET','MACR','MGLY','MVK','NR','NVOL',
        'OL2','OLI','OLT','ORA1','ORA2','TOL','XYL',]:
            f_post=Nio.open_file(ent_dir.get()+'/merged/'+spec+'.nc')
        #lon=f_post.variables['lon'][:]
        #lat=f_post.variables['lat'][:]
            section=[(f_post.variables[sec][:,:]*1e6)/(ll_area(lat,0.25)*30*24) for sec in ['act','idt','pwr','rdt','tpt',]]
            f_post.close()
            sections=[meic2wrf(lon_inp,lat_inp,lon,lat,emis,) for emis in section]
            c=[sec2zt(i,j,k) for i,j,k in zip(sections,sec_z_d,sec_t_d)]
            c=sum(c)
            meic_spec_emis.append(c)
    #aerosol: ton/(grid.month) to ug/(m2.s)
        for spec in ['BC','OC','PM2.5','PMcoarse',]:
            f_post=Nio.open_file(ent_dir.get()+'/merged/'+spec+'.nc')
        #lon=f_post.variables['lon'][:]
        #lat=f_post.variables['lat'][:]
            section=[(f_post.variables[sec][:,:]*1e6)/(ll_area(lat,0.25)*30*24*3600) for sec in ['act','idt','pwr','rdt','tpt',]]
            f_post.close()
            sections=[meic2wrf(lon_inp,lat_inp,lon,lat,emis,) for emis in section]
            c=[sec2zt(i,j,k) for i,j,k in zip(sections,sec_z_d,sec_t_d)]
            c=sum(c)
            meic_spec_emis.append(c)

        #meic emission to RADM2 chemistry scheme:

        wrf_spec_emis=[np.zeros(meic_spec_emis[0][:].shape, dtype='float32')]*31

        wrf_spec_emis[0]=meic_spec_emis[0] #wrf: CO
        wrf_spec_emis[1]=meic_spec_emis[2] #wrf: NH3
        wrf_spec_emis[2]=meic_spec_emis[3]*0.9 #wrf: NO
        wrf_spec_emis[3]=meic_spec_emis[3]*0.1 #wrf: NO2
        wrf_spec_emis[4]=meic_spec_emis[4]*0.9 #wrf: SO2
        wrf_spec_emis[5]=meic_spec_emis[5] #wrf: ALD
        wrf_spec_emis[6]=meic_spec_emis[6] #wrf: CSL
        wrf_spec_emis[7]=meic_spec_emis[7] #wrf: ETH
        wrf_spec_emis[8]=meic_spec_emis[9] #wrf: HC3
        wrf_spec_emis[9]=meic_spec_emis[10] #wrf: HC5
        wrf_spec_emis[10]=meic_spec_emis[11] #wrf: HC8
        wrf_spec_emis[11]=meic_spec_emis[12] #wrf: HCHO
        wrf_spec_emis[12]=meic_spec_emis[13] #wrf: ISO
        wrf_spec_emis[13]=meic_spec_emis[14] #wrf: KET
        wrf_spec_emis[14]=meic_spec_emis[20]*1.1 #wrf: OL2
        wrf_spec_emis[15]=meic_spec_emis[21]*1.1 #wrf: OLI
        wrf_spec_emis[16]=meic_spec_emis[22]*1.1 #wrf: OLT
        wrf_spec_emis[17]=meic_spec_emis[24] #wrf: ORA2
        wrf_spec_emis[18]=meic_spec_emis[25]*1.1 #wrf: TOL
        wrf_spec_emis[19]=meic_spec_emis[26]*1.1 #wrf: XYL
        wrf_spec_emis[20]=meic_spec_emis[27]*0.2 #wrf: ECi
        wrf_spec_emis[21]=meic_spec_emis[27]*0.8 #wrf: ECj
        wrf_spec_emis[22]=meic_spec_emis[28]*0.2 #wrf: ORGi
        wrf_spec_emis[23]=meic_spec_emis[28]*0.8 #wrf: ORGj
        wrf_spec_emis[24]=(meic_spec_emis[29]-meic_spec_emis[28]-meic_spec_emis[27])*0.2 #wrf: PM25i (unspeciated PM2.5 = PM2.5 - OC - BC)
        wrf_spec_emis[25]=(meic_spec_emis[29]-meic_spec_emis[28]-meic_spec_emis[27])*0.8 #wrf: PM25j
        wrf_spec_emis[26]=meic_spec_emis[30]*0.8 #wrf: PM10
        wrf_spec_emis[27]=np.zeros(meic_spec_emis[0][:].shape, dtype='float32') #wrf: SO4i
        wrf_spec_emis[28]=np.zeros(meic_spec_emis[0][:].shape, dtype='float32') #wrf: SO4j
        wrf_spec_emis[29]=np.zeros(meic_spec_emis[0][:].shape, dtype='float32') #wrf: NO3i
        wrf_spec_emis[30]=np.zeros(meic_spec_emis[0][:].shape, dtype='float32') #wrf: NO3j
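        # Speciation assumptions used above: NOx is split 90/10 (NO/NO2) and SO2 is
        # scaled by 0.9; olefins and aromatics carry a 1.1 factor; EC and OC are split
        # 20/80 into Aitken (i) and accumulation (j) modes; unspeciated fine PM is
        # PM2.5 - OC - BC, split 20/80 into PM25I/PM25J; primary SO4/NO3 are set to zero.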

        #generate wrfchemi_00z_d01 anthropogenic emission data for the wrf-chem model run
        #(the create/write code was duplicated in both branches of the exists-check;
        # remove any stale file first, then create it once):
        chem_00z=ent_dir.get()+'/merged/'+'wrfchemi_00z_'+ent_inp.get().split('_')[-1]
        if os.path.exists(chem_00z):
            os.remove(chem_00z)
        f_chem=Nio.open_file(chem_00z,'c',format='nc')

        f_chem.create_dimension('Time',None)
        f_chem.create_dimension('emissions_zdim',wrf_spec_emis[0].shape[1])
        f_chem.create_dimension('south_north',wrf_spec_emis[0].shape[2])
        f_chem.create_dimension('west_east',wrf_spec_emis[0].shape[3])
        f_chem.create_dimension('DateStrLen',19)

        f_chem.create_variable('Times','S1',('Time','DateStrLen'),)
        for i,time in enumerate(['%s_%02d:00:00' % (time_inp,h) for h in range(12)]):
            f_chem.variables['Times'][i]=list(time) #split the string into characters

        for ll, LL in zip([lon_inp, lat_inp],['XLONG', 'XLAT']):
            f_chem.create_variable(LL, 'f', ('south_north', 'west_east',),)
            f_chem.variables[LL][:]=ll

        radm_gas=['E_CO','E_NH3','E_NO','E_NO2','E_SO2','E_ALD','E_CSL','E_ETH','E_HC3','E_HC5','E_HC8','E_HCHO','E_ISO','E_KET',
                  'E_OL2','E_OLI','E_OLT','E_ORA2','E_TOL','E_XYL',]
        radm_aerosol=['E_ECI','E_ECJ','E_ORGI','E_ORGJ','E_PM25I','E_PM25J','E_PM_10','E_SO4I','E_SO4J','E_NO3I','E_NO3J',]

        for gas in radm_gas:
            f_chem.create_variable(gas,'f',('Time','emissions_zdim','south_north','west_east',))
            f_chem.variables[gas].FieldType = np.int16(104)
            f_chem.variables[gas].MemoryOrder = 'XYZ'
            f_chem.variables[gas].description = 'EMISSIONS'
            f_chem.variables[gas].units = 'mol km^-2 hr^-1'
            f_chem.variables[gas].stagger = 'Z'
            #f_chem.variables[gas].ordinates = 'XLONG XLAT'
        for aerosol in radm_aerosol:
            f_chem.create_variable(aerosol,'f',('Time','emissions_zdim','south_north','west_east',))
            f_chem.variables[aerosol].FieldType = np.int16(104)
            f_chem.variables[aerosol].MemoryOrder = 'XYZ'
            f_chem.variables[aerosol].description = 'EMISSIONS'
            f_chem.variables[aerosol].units = 'ug/m3 m/s'
            f_chem.variables[aerosol].stagger = 'Z'
            #f_chem.variables[aerosol].ordinates = 'XLONG XLAT'

        radm_spec=radm_gas+radm_aerosol #full output order: gases first, then aerosols

        for i,spec in enumerate(radm_spec):
            f_chem.variables[spec][:] = wrf_spec_emis[i][0:12,:,:,:] #dimensions must match the variable definition

        f_chem.close()
        #PyNIO appends '.nc' when format='nc' is given, so rename to the bare WRF-Chem name:
        os.rename(chem_00z+'.nc', chem_00z)

        #generate wrfchemi_12z_d01 anthropogenic emission data (hours 12-23, same pattern as 00z):
        chem_12z=ent_dir.get()+'/merged/'+'wrfchemi_12z_'+ent_inp.get().split('_')[-1]
        if os.path.exists(chem_12z):
            os.remove(chem_12z)
        f_chem=Nio.open_file(chem_12z,'c',format='nc')

        f_chem.create_dimension('Time',None)
        f_chem.create_dimension('emissions_zdim',wrf_spec_emis[0].shape[1])
        f_chem.create_dimension('south_north',wrf_spec_emis[0].shape[2])
        f_chem.create_dimension('west_east',wrf_spec_emis[0].shape[3])
        f_chem.create_dimension('DateStrLen',19)

        f_chem.create_variable('Times','S1',('Time','DateStrLen'),)
        for i,time in enumerate(['%s_%02d:00:00' % (time_inp,h) for h in range(12,24)]):
            f_chem.variables['Times'][i]=list(time) #split the string into characters

        for ll, LL in zip([lon_inp, lat_inp],['XLONG', 'XLAT']):
            f_chem.create_variable(LL, 'f', ('south_north', 'west_east',),)
            f_chem.variables[LL][:]=ll

        #radm_gas, radm_aerosol and radm_spec are reused from the 00z block above
        for gas in radm_gas:
            f_chem.create_variable(gas,'f',('Time','emissions_zdim','south_north','west_east',))
            f_chem.variables[gas].FieldType = np.int16(104)
            f_chem.variables[gas].MemoryOrder = 'XYZ'
            f_chem.variables[gas].description = 'EMISSIONS'
            f_chem.variables[gas].units = 'mol km^-2 hr^-1'
            f_chem.variables[gas].stagger = 'Z'
        for aerosol in radm_aerosol:
            f_chem.create_variable(aerosol,'f',('Time','emissions_zdim','south_north','west_east',))
            f_chem.variables[aerosol].FieldType = np.int16(104)
            f_chem.variables[aerosol].MemoryOrder = 'XYZ'
            f_chem.variables[aerosol].description = 'EMISSIONS'
            f_chem.variables[aerosol].units = 'ug/m3 m/s'
            f_chem.variables[aerosol].stagger = 'Z'

        for i,spec in enumerate(radm_spec):
            f_chem.variables[spec][:] = wrf_spec_emis[i][12:24,:,:,:] #dimensions must match the variable definition

        f_chem.close()
        os.rename(chem_12z+'.nc', chem_12z)
Beispiel #59
"""
  -  Reading netCDF file
  -  Converting data from Kelvin to degC
  -  Writing data to new netCDF file

  2018-08-28  kmf
"""
import os
import numpy as np
import Ngl, Nio

#--  data file name
fname = "rectilinear_grid_3D.nc"

#-- open file
f = Nio.open_file(fname, "r")

#-- read temperature, time, latitude and longitude arrays
var = f.variables["t"]
time = f.variables["time"]
lat = f.variables["lat"]
lon = f.variables["lon"]

#-- convert data from units Kelvin to degC
varC = var[:, 0, :, :]  #-- select level=0 (slicing returns a plain array, not a Nio variable)
varC = varC - 273.15  #-- convert to degC

#-- open new netCDF file
os.system("rm -rf t_degC_py_short.nc")  #-- delete file if it exists
outf = Nio.open_file("t_degC_py_short.nc", "c")  #-- open new netCDF file
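#-- the snippet above ends before the write step; a minimal completion sketch
#-- (assumed, mirroring the docstring) that writes the degC field and closes:
outf.create_dimension("time", None)            #-- unlimited time dimension
outf.create_dimension("lat", lat.shape[0])
outf.create_dimension("lon", lon.shape[0])

outf.create_variable("time", time.typecode(), ("time",))
outf.create_variable("lat", lat.typecode(), ("lat",))
outf.create_variable("lon", lon.typecode(), ("lon",))
outf.create_variable("t", "f", ("time", "lat", "lon"))
setattr(outf.variables["t"], "units", "degC")  #-- data were converted above

outf.variables["time"][:] = time[:]
outf.variables["lat"][:] = lat[:]
outf.variables["lon"][:] = lon[:]
outf.variables["t"][:] = varC[:]

outf.close()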
Beispiel #60
def main(argv):

    # Get command line stuff and store in a dictionary
    s = 'tag= compset= esize= tslice= res= sumfile= indir= sumfiledir= mach= verbose jsonfile= mpi_enable maxnorm gmonly popens cumul regx= startMon= endMon= fIndex='
    optkeys = s.split()
    try:
        opts, args = getopt.getopt(argv, "h", optkeys)
    except getopt.GetoptError:
        pyEnsLib.EnsSum_usage()
        sys.exit(2)

    # Put command line options in a dictionary - also set defaults
    opts_dict = {}

    # Defaults
    opts_dict['tag'] = 'cesm2_0_beta10'
    opts_dict['compset'] = 'F2000climo'
    opts_dict['mach'] = 'cheyenne'
    opts_dict['esize'] = 350
    opts_dict['tslice'] = 1
    opts_dict['res'] = 'f19_f19'
    opts_dict['sumfile'] = 'ens.summary.nc'
    opts_dict['indir'] = './'
    opts_dict['sumfiledir'] = './'
    opts_dict['jsonfile'] = 'exclude_empty.json'
    opts_dict['verbose'] = False
    opts_dict['mpi_enable'] = True
    opts_dict['maxnorm'] = False
    opts_dict['gmonly'] = True
    opts_dict['popens'] = False
    opts_dict['cumul'] = False
    opts_dict['regx'] = 'test'
    opts_dict['startMon'] = 1
    opts_dict['endMon'] = 1
    opts_dict['fIndex'] = 151

    # This creates the dictionary of input arguments
    opts_dict = pyEnsLib.getopt_parseconfig(opts, optkeys, 'ES', opts_dict)
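    # Example invocation (hypothetical paths and values, shown for illustration):
    #   python pyEnsSum.py --esize 350 --indir /path/to/ensemble/runs \
    #       --sumfile ens.summary.nc --tslice 1 --jsonfile exclude_empty.json --verbose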

    verbose = opts_dict['verbose']

    st = opts_dict['esize']
    esize = int(st)

    if opts_dict['popens'] == True:
        print "Error: Please use pyEnsSumPop.py for a POP ensemble (not --popens)."
        sys.exit()

    if not (opts_dict['tag'] and opts_dict['compset'] and opts_dict['mach']
            and opts_dict['res']):
        print 'Please specify --tag, --compset, --mach and --res options'
        sys.exit()

    # Now find file names in indir
    input_dir = opts_dict['indir']
    # The var list that will be excluded
    ex_varlist = []
    inc_varlist = []

    # Create a mpi simplecomm object
    if opts_dict['mpi_enable']:
        me = simplecomm.create_comm()
    else:
        me = simplecomm.create_comm(not opts_dict['mpi_enable'])

    if me.get_rank() == 0:
        print 'Running pyEnsSum!'

    if me.get_rank() == 0 and (verbose == True):
        print opts_dict
        print 'Ensemble size for summary = ', esize

    exclude = False
    if me.get_rank() == 0:
        if opts_dict['jsonfile']:
            inc_varlist = []
            # Read in the excluded or included var list
            ex_varlist, exclude = pyEnsLib.read_jsonlist(
                opts_dict['jsonfile'], 'ES')
            if exclude == False:
                inc_varlist = ex_varlist
                ex_varlist = []
            # Read in the included var list
            #inc_varlist=pyEnsLib.read_jsonlist(opts_dict['jsonfile'],'ES')

    # Broadcast the excluded var list to each processor
    if opts_dict['mpi_enable']:
        exclude = me.partition(exclude, func=Duplicate(), involved=True)
        if exclude:
            ex_varlist = me.partition(ex_varlist,
                                      func=Duplicate(),
                                      involved=True)
        else:
            inc_varlist = me.partition(inc_varlist,
                                       func=Duplicate(),
                                       involved=True)

    in_files = []
    if (os.path.exists(input_dir)):
        # Get the list of files
        in_files_temp = os.listdir(input_dir)
        in_files = sorted(in_files_temp)

        # Make sure we have enough
        num_files = len(in_files)
        if me.get_rank() == 0 and (verbose == True):
            print 'Number of files in input directory = ', num_files
        if (num_files < esize):
            if me.get_rank() == 0 and (verbose == True):
                print 'Number of files in input directory (',num_files,\
                 ') is less than specified ensemble size of ', esize
            sys.exit(2)
        if (num_files > esize):
            if me.get_rank() == 0 and (verbose == True):
                print 'NOTE: Number of files in ', input_dir, \
                 'is greater than specified ensemble size of ', esize ,\
                 '\nwill just use the first ',  esize, 'files'
    else:
        if me.get_rank() == 0:
            print 'Input directory: ', input_dir, ' not found'
        sys.exit(2)

    if opts_dict['cumul']:
        if opts_dict['regx']:
            in_files_list = get_cumul_filelist(opts_dict, opts_dict['indir'],
                                               opts_dict['regx'])
        in_files = me.partition(in_files_list,
                                func=EqualLength(),
                                involved=True)
        if me.get_rank() == 0 and (verbose == True):
            print 'in_files=', in_files

    # Open the files in the input directory
    o_files = []
    if me.get_rank() == 0 and opts_dict['verbose']:
        print 'Input files are: '
        print "\n".join(in_files)
        #for i in in_files:
        #    print "in_files =",i
    for onefile in in_files[0:esize]:
        if (os.path.isfile(input_dir + '/' + onefile)):
            o_files.append(Nio.open_file(input_dir + '/' + onefile, "r"))
        else:
            if me.get_rank() == 0:
                print "COULD NOT LOCATE FILE ", input_dir + '/' + onefile, "! EXITING...."
            sys.exit()

    # Store dimensions of the input fields
    if me.get_rank() == 0 and (verbose == True):
        print "Getting spatial dimensions"
    nlev = -1
    nilev = -1
    ncol = -1
    nlat = -1
    nlon = -1
    lonkey = ''
    latkey = ''
    # Look at first file and get dims
    input_dims = o_files[0].dimensions
    ndims = len(input_dims)

    for key in input_dims:
        if key == "lev":
            nlev = input_dims["lev"]
        elif key == "ilev":
            nilev = input_dims["ilev"]
        elif key == "ncol":
            ncol = input_dims["ncol"]
        elif (key == "nlon") or (key == "lon"):
            nlon = input_dims[key]
            lonkey = key
        elif (key == "nlat") or (key == "lat"):
            nlat = input_dims[key]
            latkey = key

    if (nlev == -1):
        if me.get_rank() == 0:
            print "COULD NOT LOCATE valid dimension lev => EXITING...."
        sys.exit()

    if ((ncol == -1) and ((nlat == -1) or (nlon == -1))):
        if me.get_rank() == 0:
            print "Need either lat/lon or ncol  => EXITING...."
        sys.exit()

    # Check if this is SE or FV data
    if (ncol != -1):
        is_SE = True
    else:
        is_SE = False
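    # (SE = spectral-element output on an unstructured grid with a single 'ncol'
    #  horizontal dimension; FV = finite-volume output on a regular lat/lon grid)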

    # Make sure all files have the same dimensions
    if me.get_rank() == 0 and (verbose == True):
        print "Checking dimensions across files...."
        print 'lev = ', nlev
        if (is_SE == True):
            print 'ncol = ', ncol
        else:
            print 'nlat = ', nlat
            print 'nlon = ', nlon

    for count, this_file in enumerate(o_files):
        input_dims = this_file.dimensions
        if (is_SE == True):
            if (nlev != int(input_dims["lev"])
                    or (ncol != int(input_dims["ncol"]))):
                if me.get_rank() == 0:
                    print "Dimension mismatch between ", in_files[
                        0], 'and', in_files[0], '!!!'
                sys.exit()
        else:
            if ( nlev != int(input_dims["lev"]) or ( nlat != int(input_dims[latkey]))\
                  or ( nlon != int(input_dims[lonkey]))):
                if me.get_rank() == 0:
                    print "Dimension mismatch between ", in_files[
                        0], 'and', in_files[0], '!!!'
                sys.exit()

    # Get 2d vars, 3d vars and all vars (For now include all variables)
    vars_dict_all = o_files[0].variables
    # Remove the excluded variables (specified in json file) from variable dictionary
    #print len(vars_dict_all)
    if exclude:
        vars_dict = vars_dict_all.copy()  # copy first so deletions do not alter the open file's dict
        for i in ex_varlist:
            if i in vars_dict:
                del vars_dict[i]
    #Given an included var list, remove all float vars that are not on the list
    else:
        vars_dict = vars_dict_all.copy()
        for k, v in vars_dict_all.iteritems():
            if (k not in inc_varlist) and (vars_dict_all[k].typecode() == 'f'):
                #print vars_dict_all[k].typecode()
                #print k
                del vars_dict[k]

    num_vars = len(vars_dict)
    #print num_vars
    #if me.get_rank() == 0:
    #   for k,v in vars_dict.iteritems():
    #       print 'vars_dict',k,vars_dict[k].typecode()

    str_size = 0
    d2_var_names = []
    d3_var_names = []
    num_2d = 0
    num_3d = 0

    # Which are 2d, which are 3d and max str_size
    for k, v in vars_dict.iteritems():
        var = k
        vd = v.dimensions  # all the variable's dimensions (names)
        vr = v.rank  # number of dimensions
        vs = v.shape  # dim values
        is_2d = False
        is_3d = False
        if (is_SE == True):  # (time, lev, ncol) or (time, ncol)
            if ((vr == 2) and (vs[1] == ncol)):
                is_2d = True
                num_2d += 1
            elif ((vr == 3) and (vs[2] == ncol and vs[1] == nlev)):
                is_3d = True
                num_3d += 1
        else:  # (time, lev, nlat, nlon) or (time, nlat, nlon)
            if ((vr == 3) and (vs[1] == nlat and vs[2] == nlon)):
                is_2d = True
                num_2d += 1
            elif ((vr == 4) and (vs[2] == nlat and vs[3] == nlon and
                                 (vs[1] == nlev or vs[1] == nilev))):
                is_3d = True
                num_3d += 1

        if (is_3d == True):
            str_size = max(str_size, len(k))
            d3_var_names.append(k)
        elif (is_2d == True):
            str_size = max(str_size, len(k))
            d2_var_names.append(k)
        #else:
        #    print 'var=',k
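    # e.g. SE: TS(time, ncol)     -> 2D,  T(time, lev, ncol)     -> 3D
    #      FV: TS(time, lat, lon) -> 2D,  T(time, lev, lat, lon) -> 3D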

    if me.get_rank() == 0 and (verbose == True):
        print 'Number of variables found:  ', num_3d + num_2d
        print '3D variables: ' + str(num_3d) + ', 2D variables: ' + str(num_2d)

    # Now sort these and combine (this sorts caps first, then lower case -
    # which is what we want)
    d2_var_names.sort()
    d3_var_names.sort()

    if esize < num_2d + num_3d:
        if me.get_rank() == 0:
            print "************************************************************************************************************************************"
            print "  Error: the total number of 3D and 2D variables " + str(
                num_2d + num_3d
            ) + " is larger than the number of ensemble files " + str(esize)
            print "  Cannot generate ensemble summary file, please remove more variables from your included variable list,"
            print "  or add more varaibles in your excluded variable list!!!"
            print "************************************************************************************************************************************"
        sys.exit()
    # all_var_names holds the 3d vars first (sorted), then the 2d vars
    all_var_names = list(d3_var_names)
    all_var_names += d2_var_names
    n_all_var_names = len(all_var_names)

    #if me.get_rank() == 0 and (verbose == True):
    #    print 'num vars = ', n_all_var_names, '(3d = ', num_3d, ' and 2d = ', num_2d, ")"

    # Create new summary ensemble file
    this_sumfile = opts_dict["sumfile"]

    if me.get_rank() == 0 and (verbose == True):
        print "Creating ", this_sumfile, "  ..."
    if (me.get_rank() == 0):
        if os.path.exists(this_sumfile):
            os.unlink(this_sumfile)

        opt = Nio.options()
        opt.PreFill = False
        opt.Format = 'NetCDF4Classic'
        nc_sumfile = Nio.open_file(this_sumfile, 'w', options=opt)
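        # PreFill=False skips netCDF variable pre-filling (faster writes);
        # 'NetCDF4Classic' writes a classic-model netCDF file in an HDF5 container.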

        # Set dimensions
        if me.get_rank() == 0 and (verbose == True):
            print "Setting dimensions ....."
        if (is_SE == True):
            nc_sumfile.create_dimension('ncol', ncol)
        else:
            nc_sumfile.create_dimension('nlat', nlat)
            nc_sumfile.create_dimension('nlon', nlon)
        nc_sumfile.create_dimension('nlev', nlev)
        nc_sumfile.create_dimension('ens_size', esize)
        nc_sumfile.create_dimension('nvars', num_3d + num_2d)
        nc_sumfile.create_dimension('nvars3d', num_3d)
        nc_sumfile.create_dimension('nvars2d', num_2d)
        nc_sumfile.create_dimension('str_size', str_size)

        # Set global attributes
        now = time.strftime("%c")
        if me.get_rank() == 0 and (verbose == True):
            print "Setting global attributes ....."
        setattr(nc_sumfile, 'creation_date', now)
        setattr(nc_sumfile, 'title', 'CAM verification ensemble summary file')
        setattr(nc_sumfile, 'tag', opts_dict["tag"])
        setattr(nc_sumfile, 'compset', opts_dict["compset"])
        setattr(nc_sumfile, 'resolution', opts_dict["res"])
        setattr(nc_sumfile, 'machine', opts_dict["mach"])

        # Create variables
        if me.get_rank() == 0 and (verbose == True):
            print "Creating variables ....."
        v_lev = nc_sumfile.create_variable("lev", 'f', ('nlev', ))
        v_vars = nc_sumfile.create_variable("vars", 'S1',
                                            ('nvars', 'str_size'))
        v_var3d = nc_sumfile.create_variable("var3d", 'S1',
                                             ('nvars3d', 'str_size'))
        v_var2d = nc_sumfile.create_variable("var2d", 'S1',
                                             ('nvars2d', 'str_size'))
        if not opts_dict['gmonly']:
            if (is_SE == True):
                v_ens_avg3d = nc_sumfile.create_variable(
                    "ens_avg3d", 'f', ('nvars3d', 'nlev', 'ncol'))
                v_ens_stddev3d = nc_sumfile.create_variable(
                    "ens_stddev3d", 'f', ('nvars3d', 'nlev', 'ncol'))
                v_ens_avg2d = nc_sumfile.create_variable(
                    "ens_avg2d", 'f', ('nvars2d', 'ncol'))
                v_ens_stddev2d = nc_sumfile.create_variable(
                    "ens_stddev2d", 'f', ('nvars2d', 'ncol'))
            else:
                v_ens_avg3d = nc_sumfile.create_variable(
                    "ens_avg3d", 'f', ('nvars3d', 'nlev', 'nlat', 'nlon'))
                v_ens_stddev3d = nc_sumfile.create_variable(
                    "ens_stddev3d", 'f', ('nvars3d', 'nlev', 'nlat', 'nlon'))
                v_ens_avg2d = nc_sumfile.create_variable(
                    "ens_avg2d", 'f', ('nvars2d', 'nlat', 'nlon'))
                v_ens_stddev2d = nc_sumfile.create_variable(
                    "ens_stddev2d", 'f', ('nvars2d', 'nlat', 'nlon'))

            v_RMSZ = nc_sumfile.create_variable("RMSZ", 'f',
                                                ('nvars', 'ens_size'))
        v_gm = nc_sumfile.create_variable("global_mean", 'f',
                                          ('nvars', 'ens_size'))
        v_standardized_gm = nc_sumfile.create_variable("standardized_gm", 'f',
                                                       ('nvars', 'ens_size'))
        v_loadings_gm = nc_sumfile.create_variable('loadings_gm', 'f',
                                                   ('nvars', 'nvars'))
        v_mu_gm = nc_sumfile.create_variable('mu_gm', 'f', ('nvars', ))
        v_sigma_gm = nc_sumfile.create_variable('sigma_gm', 'f', ('nvars', ))
        v_sigma_scores_gm = nc_sumfile.create_variable('sigma_scores_gm', 'f',
                                                       ('nvars', ))

        # Assign vars, var3d and var2d
        if me.get_rank() == 0 and (verbose == True):
            print "Assigning vars, var3d, and var2d ....."

        # Pad each name with blanks to str_size so it fits the (n, str_size) char arrays
        def pad_names(names):
            return [list(n) + [' '] * (str_size - len(n)) for n in names]

        v_vars[:] = pad_names(all_var_names)
        v_var3d[:] = pad_names(d3_var_names)
        v_var2d[:] = pad_names(d2_var_names)

        # Time-invariant metadata
        if me.get_rank() == 0 and (verbose == True):
            print "Assigning time invariant metadata ....."
        lev_data = vars_dict["lev"]
        v_lev = lev_data

    # Form ensembles, each missing one member; compute RMSZs and global means
    # for each variable (the max norm is also computed, currently in pyStats)
    tslice = opts_dict['tslice']

    if not opts_dict['cumul']:
        # Partition the var list

        var3_list_loc = me.partition(d3_var_names,
                                     func=EqualStride(),
                                     involved=True)
        var2_list_loc = me.partition(d2_var_names,
                                     func=EqualStride(),
                                     involved=True)
    else:
        var3_list_loc = d3_var_names
        var2_list_loc = d2_var_names
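    # (EqualStride partitions the name lists round-robin, so rank r works on
    #  names[r::nranks]; the per-rank results are gathered back to rank 0 below)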

    # Calculate global means #
    if me.get_rank() == 0 and (verbose == True):
        print "Calculating global means ....."
    if not opts_dict['cumul']:
        gm3d, gm2d, var_list = pyEnsLib.generate_global_mean_for_summary(
            o_files, var3_list_loc, var2_list_loc, is_SE, False, opts_dict)
    if me.get_rank() == 0 and (verbose == True):
        print "Finish calculating global means ....."

    # Calculate RMSZ scores
    if (not opts_dict['gmonly']) or (opts_dict['cumul']):
        if me.get_rank() == 0 and (verbose == True):
            print "Calculating RMSZ scores ....."
        zscore3d, zscore2d, ens_avg3d, ens_stddev3d, ens_avg2d, ens_stddev2d, temp1, temp2 = pyEnsLib.calc_rmsz(
            o_files, var3_list_loc, var2_list_loc, is_SE, opts_dict)

    # Calculate max norm ensemble
    if opts_dict['maxnorm']:
        if me.get_rank() == 0 and (verbose == True):
            print "Calculating max norm of ensembles ....."
        pyEnsLib.calculate_maxnormens(opts_dict, var3_list_loc)
        pyEnsLib.calculate_maxnormens(opts_dict, var2_list_loc)

    if opts_dict['mpi_enable']:

        if not opts_dict['cumul']:
            # Gather the 3d variable results from all processors to the master processor
            slice_index = get_stride_list(len(d3_var_names), me)

            # Gather global means 3d results
            gm3d = gather_npArray(gm3d, me, slice_index,
                                  (len(d3_var_names), len(o_files)))
            if not opts_dict['gmonly']:
                # Gather zscore3d results
                zscore3d = gather_npArray(zscore3d, me, slice_index,
                                          (len(d3_var_names), len(o_files)))

                # Gather ens_avg3d and ens_stddev3d results
                shape_tuple3d = get_shape(ens_avg3d.shape, len(d3_var_names),
                                          me.get_rank())
                ens_avg3d = gather_npArray(ens_avg3d, me, slice_index,
                                           shape_tuple3d)
                ens_stddev3d = gather_npArray(ens_stddev3d, me, slice_index,
                                              shape_tuple3d)

            # Gather 2d variable results from all processors to the master processor
            slice_index = get_stride_list(len(d2_var_names), me)

            # Gather global means 2d results
            gm2d = gather_npArray(gm2d, me, slice_index,
                                  (len(d2_var_names), len(o_files)))

            var_list = gather_list(var_list, me)

            if not opts_dict['gmonly']:
                # Gather zscore2d results
                zscore2d = gather_npArray(zscore2d, me, slice_index,
                                          (len(d2_var_names), len(o_files)))

                # Gather ens_avg2d and ens_stddev2d results
                shape_tuple2d = get_shape(ens_avg2d.shape, len(d2_var_names),
                                          me.get_rank())
                ens_avg2d = gather_npArray(ens_avg2d, me, slice_index,
                                           shape_tuple2d)
                ens_stddev2d = gather_npArray(ens_stddev2d, me, slice_index,
                                              shape_tuple2d)

        else:
            gmall = np.concatenate((temp1, temp2), axis=0)
            gmall = pyEnsLib.gather_npArray_pop(
                gmall, me,
                (me.get_size(), len(d3_var_names) + len(d2_var_names)))
    # Assign to file:
    if me.get_rank() == 0:
        if not opts_dict['cumul']:
            gmall = np.concatenate((gm3d, gm2d), axis=0)
            if not opts_dict['gmonly']:
                Zscoreall = np.concatenate((zscore3d, zscore2d), axis=0)
                v_RMSZ[:, :] = Zscoreall[:, :]
            if not opts_dict['gmonly']:
                if (is_SE == True):
                    v_ens_avg3d[:, :, :] = ens_avg3d[:, :, :]
                    v_ens_stddev3d[:, :, :] = ens_stddev3d[:, :, :]
                    v_ens_avg2d[:, :] = ens_avg2d[:, :]
                    v_ens_stddev2d[:, :] = ens_stddev2d[:, :]
                else:
                    v_ens_avg3d[:, :, :, :] = ens_avg3d[:, :, :, :]
                    v_ens_stddev3d[:, :, :, :] = ens_stddev3d[:, :, :, :]
                    v_ens_avg2d[:, :, :] = ens_avg2d[:, :, :]
                    v_ens_stddev2d[:, :, :] = ens_stddev2d[:, :, :]
        else:
            gmall_temp = np.transpose(gmall[:, :])
            gmall = gmall_temp
        mu_gm, sigma_gm, standardized_global_mean, loadings_gm, scores_gm = pyEnsLib.pre_PCA(
            gmall, all_var_names, var_list, me)
        v_gm[:, :] = gmall[:, :]
        v_standardized_gm[:, :] = standardized_global_mean[:, :]
        v_mu_gm[:] = mu_gm[:]
        v_sigma_gm[:] = sigma_gm[:].astype(np.float32)
        v_loadings_gm[:, :] = loadings_gm[:, :]
        v_sigma_scores_gm[:] = scores_gm[:]

        print "All Done"