def main(self):
    """Main loop."""
    self.gribdata = pygrib.open(self.gribfile)
    while not self.finished():
        while not self.anyReady():
            time.sleep(0.1)
        if self.dataReady("inbox"):
            data = self.recv("inbox")
            lat, lon = data
            result = []
            log("Parsing grib for coords '%f:%f'" % (lat, lon))
            # start at 1: ECMWF message numbering starts at 1
            for no, grb in enumerate(self.gribdata, 1):
                glat, glon = grb.latlons()
                if (glat.min() < lat) and (glat.max() > lat) and \
                        (glon.min() < lon) and (glon.max() > lon):
                    dimension = ((glat.min(), glon.min()), (glat.max(), glon.max()))
                    result.append(str((no, dimension, grb)) + "\n")
            self.send(result, "outbox")
def __init__(self, source, target):
    def lon_lat_to_cartesian(lon, lat, R=1):
        """Calculate cartesian x, y, z for lon, lat coordinates of a point on
        a sphere with radius R."""
        lon_r = np.radians(lon)
        lat_r = np.radians(lat)
        x = R * np.cos(lat_r) * np.cos(lon_r)
        y = R * np.cos(lat_r) * np.sin(lon_r)
        z = R * np.sin(lat_r)
        return x, y, z

    # read data from files
    self.source = source
    self.target = target
    self.D1 = pygrib.open(self.source)
    self.latSOURCE, self.lonSOURCE = self.D1[8].latlons()
    self.D2 = pygrib.open(self.target)
    self.latTARGET, self.lonTARGET = self.D2[1].latlons()

    # flatten grid coordinates into cartesian x, y, z
    self.xs, self.ys, self.zs = lon_lat_to_cartesian(
        self.lonSOURCE.flatten(), self.latSOURCE.flatten())
    self.xt, self.yt, self.zt = lon_lat_to_cartesian(
        self.lonTARGET.flatten(), self.latTARGET.flatten())
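# --- Usage sketch (not from the original): cartesian coordinates like the
# ones built in the constructor above are typically fed to a KD-tree for
# nearest-neighbour regridding between the source and target grids. A minimal
# method sketch, assuming scipy is available and reusing the attribute names
# defined above:
from scipy.spatial import cKDTree

def nearest_neighbour_remap(self, message_number=8):
    # Build a KD-tree over the source grid points (x, y, z on the unit sphere)
    tree = cKDTree(np.column_stack((self.xs, self.ys, self.zs)))
    # For every target point, find the index of the nearest source point
    _, nearest = tree.query(np.column_stack((self.xt, self.yt, self.zt)), k=1)
    # Remap a source field onto the target grid by fancy-indexing
    src_values = self.D1[message_number].values.flatten()
    return src_values[nearest].reshape(self.latTARGET.shape)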
def __init__(self, center=None, radius=None, forecast_date=None,
             gfs_data_file=None, hrrr_data_file=None, keep_files=False):
    """
    Construct a wind model centered at a given latitude/longitude with a
    radius defined in miles.

    :param center: (lat, lon) center of the model domain
    :param radius: domain radius in miles
    :param forecast_date: date for which forecast data is loaded
    """
    if gfs_data_file is None and hrrr_data_file is None and \
            (center is None or radius is None or forecast_date is None):
        raise Exception("Invalid center or radius for wind model")
    self.forecast_date = forecast_date
    self.gfs_data_file = gfs_data_file
    self.gfs_height_map = None
    self.gfs_latlons = None
    self.hrrr_data_file = hrrr_data_file
    self.hrrr_height_map = None
    self.hrrr_latlons = None
    if gfs_data_file is not None:
        self.gfs_height_map, self.gfs_latlons = self._parse_grbs(
            pygrib.open(gfs_data_file))
    if hrrr_data_file is not None:
        self.hrrr_height_map, self.hrrr_latlons = self._parse_grbs(
            pygrib.open(hrrr_data_file))
    if gfs_data_file is None and hrrr_data_file is None:
        lat_radius = change_in_latitude_miles(radius)
        lon_radius = max(
            change_in_longitude_miles(center[0] - lat_radius, radius),
            change_in_longitude_miles(center[0] + lat_radius, radius))
        self.NW_bound = [center[0] + lat_radius, center[1] - lon_radius]
        self.SE_bound = [center[0] - lat_radius, center[1] + lon_radius]
        self._load_gfs(keep_files)
def plot_algorithms(file1, file2):
    file1 = pygrib.open(file1)
    temp_var_file1 = file1[8].values
    file2 = pygrib.open(file2)
    temp_var_file2 = file2[3].values
    file1.close()
    file2.close()
    plt.figure(figsize=(10, 5))
    plt.subplot(121)
    plt.pcolormesh(temp_var_file1)
    # pcolormesh puts columns (shape[1]) on the x axis and rows (shape[0]) on y
    plt.xlim([0, temp_var_file1.shape[1]])
    plt.ylim([0, temp_var_file1.shape[0]])
    plt.colorbar()
    plt.title("from file1")
    plt.subplot(122)
    plt.pcolormesh(temp_var_file2)
    plt.colorbar()
    plt.xlim([0, temp_var_file2.shape[1]])
    plt.ylim([0, temp_var_file2.shape[0]])
    plt.title("from file2")
    plt.show()
def pcpn(grids, valid, iarchive):
    """Attempt to use MRMS or Stage IV precipitation here"""
    floor = datetime.datetime(2014, 11, 1)
    floor = floor.replace(tzinfo=pytz.timezone("UTC"))
    if valid < floor:
        # Use Stage IV
        ts = (valid + datetime.timedelta(minutes=60)).replace(minute=0)
        gribfn = ts.strftime(("/mesonet/ARCHIVE/data/%Y/%m/%d/stage4/ST4."
                              "%Y%m%d%H.01h.grib"))
        if not os.path.isfile(gribfn):
            return
        grbs = pygrib.open(gribfn)
        grib = grbs[1]
        lats, lons = grib.latlons()
        vals = grib.values
        nn = NearestNDInterpolator((lons.flatten(), lats.flatten()),
                                   vals.flatten())
        grids['pcpn'] = nn(XI, YI)
        return
    fn = None
    i = 0
    while i < 10:
        ts = valid - datetime.timedelta(minutes=i)
        if ts.minute % 2 == 0:
            testfn = mrms_util.fetch('PrecipRate', ts)
            if testfn is not None:
                fn = testfn
                break
        i += 1
    if fn is None:
        print("Warning, no PrecipRate data found!")
        return
    fp = gzip.GzipFile(fn, 'rb')
    (_, tmpfn) = tempfile.mkstemp()
    tmpfp = open(tmpfn, 'wb')
    tmpfp.write(fp.read())
    tmpfp.close()
    grbs = pygrib.open(tmpfn)
    values = grbs[1]['values']
    # just set -3 (no coverage) to 0 for now
    values = np.where(values < 0, 0, values)
    for _fn in [fn, tmpfn]:  # map() is lazy in Python 3, so unlink explicitly
        os.unlink(_fn)
    # 3500 x 7000 grid, starts in the upper left
    top = int((55. - reference.IA_NORTH) * 100.)
    bottom = int((55. - reference.IA_SOUTH) * 100.)
    right = int((reference.IA_EAST - -130.) * 100.) - 1
    left = int((reference.IA_WEST - -130.) * 100.)
    # PrecipRate (a two-minute product) is in mm/hr, so a five-minute
    # accumulation is value / 60 * 5 = value / 12 (Stage IV is already mm/hr)
    grids['pcpn'] = np.flipud(values[top:bottom, left:right]) / 12.0
def load_data(self):
    for m, member in enumerate(self.members):
        for f, forecast_date in enumerate(self.forecast_dates.to_pydatetime()):
            dt = int((forecast_date - self.run_date).total_seconds() / 3600)
            filename_args = (self.ensemble_name, member, self.ml_model,
                             self.variable, forecast_date.strftime("%Y%m%d%H%M"))
            filename = self.path + self.run_date.strftime("%Y%m%d") + \
                "/{0}_{1}_{2}_{3}_{4}.grib2".format(*filename_args)
            if not exists(filename):
                filename_args = (self.ensemble_name, member, self.ml_model,
                                 self.variable,
                                 self.run_date.strftime("%Y%m%d%H") +
                                 "f{0:02d}".format(dt))
                filename = self.path + self.run_date.strftime("%Y%m%d") + \
                    "/{0}_{1}_{2}_{3}_{4}.grib2".format(*filename_args)
                if not exists(filename):
                    continue
            grbs = pygrib.open(filename)
            if self.lon is None:
                self.lat, self.lon = grbs[self.message_number].latlons()
                self.projparams = grbs[self.message_number].projparams
                self.proj = Proj(grbs[self.message_number].projparams)
                self.x, self.y = self.proj(self.lon, self.lat)
                self.x /= 1000.0
                self.y /= 1000.0
                self.dx = grbs[self.message_number]['DxInMetres'] / 1000.0
                self.i, self.j = np.indices(self.lon.shape)
            data = grbs[self.message_number].values
            data *= 1000.0
            if self.data is None:
                self.data = np.empty((len(self.members),
                                      len(self.forecast_dates),
                                      data.shape[0], data.shape[1]),
                                     dtype=float)
            self.data[m, f] = data.filled(0)
            grbs.close()
    return
def dl(now, varname):
    """Get the files"""
    uri = now.strftime(("http://www.ftp.ncep.noaa.gov/data/nccf/com/cfs/prod/"
                        "cfs/cfs.%Y%m%d/%H/time_grib_01/" + varname +
                        ".01.%Y%m%d%H.daily.grb2"))
    response = exponential_backoff(requests.get, uri, timeout=60)
    if response is None or response.status_code != 200:
        print('download_cfs.py: dl %s failed' % (uri,))
        return
    tmpfn = "/tmp/%s.cfs.grib" % (varname, )
    o = open(tmpfn, 'wb')
    o.write(response.content)
    o.close()
    # Check out this file to see how much data we actually have; it had
    # better be a big number
    grb = pygrib.open(tmpfn)
    if grb.messages < REQUIRED_MSGS:
        print(("download_cfs %s %s has only %s messages, need %s+"
               ) % (now, varname, grb.messages, REQUIRED_MSGS))
    else:
        # Inject into LDM
        cmd = ("/home/ldm/bin/pqinsert -p 'data a %s blah "
               "model/cfs/%02i/%s.01.%s.daily.grib2 grib' %s"
               ) % (now.strftime("%Y%m%d%H%M"), now.hour, varname,
                    now.strftime("%Y%m%d%H"), tmpfn)
        subprocess.call(cmd, shell=True)
    os.remove(tmpfn)
def do_hrrr(ts):
    """Convert the hourly HRRR data to IEMRE grid"""
    total = None
    xaxis = None
    yaxis = None
    for hr in range(5, 23):  # Only need 5 AM to 10 PM for solar
        utc = ts.replace(hour=hr).astimezone(pytz.timezone("UTC"))
        fn = utc.strftime(("/mesonet/ARCHIVE/data/%Y/%m/%d/model/hrrr/%H/"
                           "hrrr.t%Hz.3kmf00.grib2"))
        if not os.path.isfile(fn):
            # print 'HRRR file %s missing' % (fn,)
            continue
        grbs = pygrib.open(fn)
        try:
            if utc >= SWITCH_DATE:
                grb = grbs.select(name='Downward short-wave radiation flux')
            else:
                grb = grbs.select(parameterNumber=192)
        except ValueError:
            print 'coop/hrrr_solarrad.py %s had no solar rad' % (fn,)
            continue
        if len(grb) == 0:
            print 'Could not find SWDOWN in HRRR %s' % (fn,)
            continue
        g = grb[0]
        if total is None:
            total = g.values
            lat1 = g['latitudeOfFirstGridPointInDegrees']
            lon1 = g['longitudeOfFirstGridPointInDegrees']
            llcrnrx, llcrnry = LCC(lon1, lat1)
            nx = g['Nx']
            ny = g['Ny']
            dx = g['DxInMetres']
            dy = g['DyInMetres']
            xaxis = llcrnrx + dx * np.arange(nx)
            yaxis = llcrnry + dy * np.arange(ny)
        else:
            total += g.values

    if total is None:
        print 'coop/hrrr_solarrad.py found no HRRR data for %s' % (
            ts.strftime("%d %b %Y"), )
        return

    # We want to store W m-2, so average the hourly sum over the 24 hours of
    # the day (night-time hours contribute zero)
    total = total / 24.0

    nc = netCDF4.Dataset("/mesonet/data/iemre/%s_mw_daily.nc" % (ts.year,), 'a')
    offset = iemre.daily_offset(ts)
    data = nc.variables['rsds'][offset, :, :]
    for i, lon in enumerate(iemre.XAXIS):
        for j, lat in enumerate(iemre.YAXIS):
            (x, y) = LCC(lon, lat)
            i2 = np.digitize([x], xaxis)[0]
            j2 = np.digitize([y], yaxis)[0]
            data[j, i] = total[j2, i2]
    nc.variables['rsds'][offset] = data
    nc.close()
def _open_grib(self):
    """Open a GRIB file"""
    url = self._url
    logging.debug("opening in mode : %s", self._mode)
    logging.debug("url : %s", url)
    self._handler = pygrib.open(url)
    # build an index of the fields, keyed by 1-based message number
    self._handler.seek(0)
    for mm, msg in enumerate(self._handler):
        self._param_id[mm + 1] = msg.shortName
        self._messages[mm + 1] = msg
    # define an ordered longitude-indices array
    firstfield = list(self._messages.values())[0]
    values = firstfield.longitudes
    # check whether the USA is on the left
    if (values > 300).any():
        self._need_flip_longitudes = True
    else:
        self._need_flip_longitudes = False
    return self._handler
def processor(year, month, day):  # e.g. '2014', '9', '5'
    timeint = int(year) * 10000 + int(month) * 100 + int(day)
    if len(month) < 2:
        month = "0" + month
    if len(day) < 2:
        day = "0" + day
    timestr = year + month + day
    results = []
    # www.ftp.ncep.noaa.gov/data/nccf/com/gfs/prod/gfs.2014093000/gfs.t00z.pgrb2f00
    for k in range(8):
        if k < 4:
            fn = '/home/ubuntu/gfs/gfs_wind/gfs_rawdata2/gfs.t00z.pgrb2f0' + str(k * 3)
        else:
            fn = '/home/ubuntu/gfs/gfs_wind/gfs_rawdata2/gfs.t00z.pgrb2f' + str(k * 3)
        gr = pygrib.open(fn)
        # Select variables relevant to wind speed
        msu = gr.select(name='U component of wind',
                        typeOfLevel='heightAboveGround', level=80)
        msv = gr.select(name='V component of wind',
                        typeOfLevel='heightAboveGround', level=80)
        # Get values of UGRD and VGRD
        vu = msu[0].values
        vv = msv[0].values
        # Latitude and longitude matrices for 0.5 degree resolution
        lats, lons = msu[0].latlons()
        for i in range(361):
            for j in range(720):
                latlong = str(lats[i][j]) + '_' + str(lons[i][j])
                velocity = round((vu[i][j] ** 2 + vv[i][j] ** 2) ** 0.5, 5)
                results.append([timeint, k, latlong, velocity])
    # Write to output file
    with open('/home/ubuntu/gfs/gfs_wind/gfs_hdfs_in2/v' + timestr + '.csv', 'wb') as f:
        writer = csv.writer(f)
        writer.writerows(results)
    return '/home/ubuntu/gfs/gfs_wind/gfs_hdfs_in2/v' + timestr + '.csv'
def containsMessages(filename):
    grbs = pg.open(filename)
    count = grbs.messages
    if count:
        return grbs
    return None
def GribVals(infile):
    grbs = pygrib.open(infile)
    for grb in grbs:
        print grb
        grb_vals = pd.DataFrame(grb.values[:])
    # NOTE: as written this returns the values of the last message only
    return grb_vals
def readfile(infile):
    '''Read the GRIB file and return the regional (SCS) sst data'''
    grbs = pygrib.open(infile)
    grb = grbs.select(name='Temperature')[0]
    sst, lats, lons = grb.data(lat1=14, lat2=30, lon1=106, lon2=126)
    grbs.close()
    return sst
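# --- Usage sketch (the file name and plotting step are illustrative, not
# from the original): readfile() returns a 2-D array covering the South China
# Sea subset (14-30N, 106-126E), which can be previewed directly:
import matplotlib.pyplot as plt

sst = readfile('sst_analysis.grb')  # hypothetical GRIB with a 'Temperature' message
plt.pcolormesh(sst)
plt.colorbar(label='SST (K)')
plt.show()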
def ncepgfs(lonStart=3, lonEnd=4, latStart=41, latEnd=42,
            m=None, name='ncepgfs', contour=None):
    """
    Plot latest NCEP GFS field from
    http://nomad1.ncep.noaa.gov/pub/gfs_master/
    """
    base = '/media/SOLabNFS/SERVERS/media/hyrax/data/auxdata/model/ncep/gfs/'
    outdir = '/home/mag/'
    fn = 'gfs20101218/gfs.t18z.master.grbf00'
    # fn = 'gfs20101218/gfs.t18z.master.grbf03'
    refDate = datetime.datetime(1978, 1, 1, 0, 0, 0)
    wantedDate = datetime.datetime(2010, 12, 18, 17, 39, 0)
    # Get the file date
    grbs = pygrib.open(base + fn)
    grb = grbs.message(1)
    fileDate = grb.analDate
    filetime = fileDate.strftime("%Y%m%d_%H%M")
    print "File time: ", filetime
    print "File date: ", fileDate
    lats, lons = grb.latlons()
    u10 = grbs.message(9)['values']
    # NOTE: the original read message 9 twice, making w = sqrt(2)*|u10|;
    # the V component presumably lives in the following message
    v10 = grbs.message(10)['values']
    # u = grbs.message(3)['values']
    # v = grbs.message(4)['values']
    w = sqrt(u10 ** 2 + v10 ** 2)
def run(ts, routes):
    """ Run for a given UTC timestamp """
    fn = ts.strftime(("/mesonet/ARCHIVE/data/%Y/%m/%d/model/rtma/%H/"
                      "rtma.t%Hz.awp2p5f000.grib2"))
    if not os.path.isfile(fn):
        print 'wind_power.py missing', fn
        return
    grb = pygrib.open(fn)
    try:
        u = grb.select(name='10 metre U wind component')[0]
        v = grb.select(name='10 metre V wind component')[0]
    except Exception:
        print('Missing u/v wind for wind_power.py\nFN: %s' % (fn,))
        return
    mag = (u['values'] ** 2 + v['values'] ** 2) ** .5
    mag = (mag * 1.35) ** 3 * 0.002641
    lats, lons = u.latlons()
    lts = ts.astimezone(pytz.timezone("America/Chicago"))
    pqstr = ("plot %s %s00 midwest/rtma_wind_power.png "
             "midwest/rtma_wind_power_%s00.png png"
             ) % (routes, ts.strftime("%Y%m%d%H"), ts.strftime("%H"))
    m = MapPlot(sector='midwest',
                title=(r'Wind Power Potential :: '
                       '(speed_mps_10m * 1.35)$^3$ * 0.002641'),
                subtitle=('valid: %s based on NOAA Realtime '
                          'Mesoscale Analysis'
                          ) % (lts.strftime("%d %b %Y %I %p")))
    m.pcolormesh(lons, lats, mag, numpy.array(levels), units='MW')
    m.postprocess(pqstr=pqstr)
def create_sub(full_grib, data_dir='.'):
    '''
    Extracts a subset of GRIB records from full_grib and saves them in a file
    named the same as full_grib, but in the data_dir folder.
    '''
    # names = ['Temperature', 'Soil temperature', 'Total precipitation',
    #          'Wind speed (gust)', 'Water equivalent of accumulated snow depth']
    name, ext = os.path.splitext(full_grib)
    out_file = '{2}/{0}_sub{1}'.format(name, ext, data_dir)
    grbout = open(out_file, 'wb')
    grbs = pygrib.open(full_grib)
    surface_names = ['t', 'acpcp', 'gust', 'sdwe']
    for name in surface_names:
        print 'finding {0}'.format(name)
        grbs.seek(0)
        for grb in grbs.select(shortName=name, typeOfLevel='surface'):
            grbout.write(grb.tostring())
    agl_names = ['10u', '10v']
    grbs.seek(0)
    for name in agl_names:
        print 'finding {0}'.format(name)
        for grb in grbs.select(shortName=name, typeOfLevel='heightAboveGround'):
            grbout.write(grb.tostring())
    grbout.close()
    return out_file
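# --- Usage sketch (the input file name is hypothetical): create_sub() writes
# the subset next to the original base name under data_dir and returns the
# new path.
sub_path = create_sub('nam.t00z.awphys00.grb2', data_dir='subsets')
print('wrote %s' % sub_path)  # subsets/nam.t00z.awphys00_sub.grb2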
def soil_type_gfs(self, print_on=False):
    ne, ngq = self.ne, self.ngq
    up_size = self.up_size
    src_dir = self.src_dir
    dst_dir = self.dst_dir

    #-------------------------------------
    # Setup
    #-------------------------------------
    src_fpath = src_dir + 'clim.soiltype.grib'
    dst_fpath = dst_dir + 'stype_gfs_ne%.3dnp%d.nc' % (ne, ngq)
    src_vname = 'Snow sublimation heat flux'
    dst_vname = 'stype'
    dtype = np.int32
    ll_type = 'regular-shift_lon'
    method = 'dominant'

    if print_on: print("Source: {}".format(src_fpath))
    if print_on: print("Destination: {}".format(dst_fpath))

    src_gbf = pygrib.open(src_fpath)
    grb = src_gbf.readline()  # only 1 message in the file
    nlat, nlon = grb.values.shape
    remap_args = (nlat, nlon, ll_type, method)
    remap_obj = self.create_remap_object(remap_args)

    if print_on: print("Check latlon grid")
    lats = grb.latlons()[0][:, 0]
    lons = grb.latlons()[1][0, :]
    self.check_ll_grid(remap_obj, lats, lons, lon_shift=nlon // 2)

    #-------------------------------------
    # Remapping
    #-------------------------------------
    if print_on: print("Remapping using {}".format(method))
    if print_on: print("(nlat,nlon) -> (up_size,)")
    if print_on: print("({},{}) -> ({},)".format(nlat, nlon, up_size))
    src_var = np.zeros((nlat, nlon), dtype)
    dst_var = np.zeros(up_size, dtype)
    src_var[:] = grb.values[:]
    src_var_transform = \
        self.transform_ll_grid(src_var, lon_shift=nlon // 2)
    remap_obj.remap(src_var_transform, dst_var)

    #-------------------------------------
    # Save as NetCDF
    #-------------------------------------
    if print_on: print("Save as NetCDF")
    dst_ncf = remap_obj.create_netcdf(dst_fpath)
    dst_ncf.createDimension('ncol', up_size)  # the name 'ncol' is for PyCube
    vvar = dst_ncf.createVariable(dst_vname, dtype, ('ncol',))
    vvar[:] = dst_var[:]
    dst_ncf.close()
def main(): """Go!""" title = 'NOAA MRMS Q3: RADAR + Guage Corrected Rainfall Estimates + NWS Storm Reports' mp = MapPlot(sector='custom', north=42.3, east=-93.0, south=41.65, west=-94.1, axisbg='white', titlefontsize=14, title=title, subtitle='Valid: 14 June 2018') shp = shapefile.Reader('cities.shp') for record in shp.shapeRecords(): geo = shape(record.shape) mp.ax.add_geometries([geo], ccrs.PlateCarree(), zorder=Z_OVERLAY2, facecolor='None', edgecolor='k', lw=2) grbs = pygrib.open('MRMS_GaugeCorr_QPE_24H_00.00_20180614-200000.grib2') grb = grbs.message(1) pcpn = distance(grb['values'], 'MM').value('IN') lats, lons = grb.latlons() lons -= 360. clevs = [0.01, 0.1, 0.3, 0.5, 0.75, 1, 1.5, 2, 2.5, 3, 3.5, 4, 5, 6, 8, 10] cmap = nwsprecip() cmap.set_over('k') mp.pcolormesh(lons, lats, pcpn, clevs, cmap=cmap, latlon=True, units='inch') lons, lats, vals, labels = get_data() mp.drawcounties() mp.plot_values(lons, lats, vals, "%s", labels=labels, labelbuffer=1, labelcolor='white') mp.drawcities(labelbuffer=5, minarea=0.2) mp.postprocess(filename='test.png')
def run(ts, routes):
    """ Run for a given UTC timestamp """
    fn = ts.strftime("/mesonet/ARCHIVE/data/%Y/%m/%d/model/rtma/%H/rtma.t%Hz.awp2p5f000.grib2")
    if not os.path.isfile(fn):
        print "wind_power.py missing", fn
        return
    grb = pygrib.open(fn)
    u = grb.select(name="10 metre U wind component")[0]
    v = grb.select(name="10 metre V wind component")[0]
    mag = (u["values"] ** 2 + v["values"] ** 2) ** 0.5
    mag = (mag * 1.35) ** 3 * 0.002641
    lats, lons = u.latlons()
    lts = ts.astimezone(pytz.timezone("America/Chicago"))
    pqstr = "plot %s %s00 midwest/rtma_wind_power.png midwest/rtma_wind_power_%s00.png png" % (
        routes,
        ts.strftime("%Y%m%d%H"),
        ts.strftime("%H"),
    )
    m = MapPlot(
        sector="midwest",
        title=r"Wind Power Potential :: (speed_mps_10m * 1.35)$^3$ * 0.002641",
        subtitle="valid: %s based on NOAA Realtime Mesoscale Analysis"
        % (lts.strftime("%d %b %Y %I %p")),
    )
    m.pcolormesh(lons, lats, mag, numpy.array(levels), units="MW")
    m.postprocess(pqstr=pqstr)
def doday():
    """ Create a plot of precipitation stage4 estimates for some day """
    sts = mx.DateTime.DateTime(2013, 5, 25, 12)
    ets = mx.DateTime.DateTime(2013, 5, 31, 12)
    interval = mx.DateTime.RelativeDateTime(days=1)
    now = sts
    total = None
    while now < ets:
        fp = "/mesonet/ARCHIVE/data/%s/stage4/ST4.%s.24h.grib" % (
            now.strftime("%Y/%m/%d"), now.strftime("%Y%m%d%H"))
        if os.path.isfile(fp):
            lts = now
            grbs = pygrib.open(fp)
            if total is None:
                g = grbs[1]
                total = g["values"]
                lats, lons = g.latlons()
            else:
                total += grbs[1]["values"]
            grbs.close()
        now += interval

    m = MapPlot(sector='iowa',
                title='NOAA Stage IV & Iowa ASOS Precipitation',
                subtitle='25-30 May 2013')
    m.pcolormesh(lons, lats, total / 25.4, numpy.arange(0, 14.1, 1),
                 latlon=True, units='inch')
    m.drawcounties()
    m.plot_values(dlons, dlats, dvals, '%.02f')
    m.postprocess(filename='test.svg')
    import iemplot
    iemplot.makefeature('test')
def get(self, gmessage, key='values'):
    '''
    Returns the value for the 'key' for a given message number 'gmessage'
    or message field name 'gmessage'.
    '''
    grbs = pygrib.open(self._abspath)
    if isinstance(gmessage, int):
        mnbr = gmessage
    elif isinstance(gmessage, str):
        msg_found = False
        msgnum = 1
        while msgnum < self.nmsgs + 1:
            if grbs[msgnum]['parameterName'] == gmessage:
                msg_found = True
                break
            msgnum = msgnum + 1
        if msg_found:
            mnbr = msgnum
        else:
            print("No Grib message found with parameter name = %s" % gmessage)
            grbs.close()  # close the file on the not-found path too
            return None
    if grbs[mnbr].valid_key(key):
        arr = grbs[mnbr][key]
        grbs.close()
        return arr
    else:
        grbs.close()
        return
def main(): """Go Main""" grbs = pygrib.open('ds.snow.bin') # skip 1-off first field total = None lats = lons = None for grb in grbs[1:]: if lats is None: lats, lons = grb.latlons() total = grb['values'] continue total += grb['values'] # TODO tz-hack here analtime = grb.analDate - datetime.timedelta(hours=5) mp = MapPlot( sector='custom', west=-100, east=-92, north=45, south=41, axisbg='tan', title=("NWS Forecasted Accumulated Snowfall " "thru 7 PM 12 April 2019"), subtitle='NDFD Forecast Issued %s' % ( analtime.strftime("%-I %p %-d %B %Y"), ) ) cmap = nwssnow() cmap.set_bad('tan') mp.pcolormesh( lons, lats, total * 39.3701, [0.01, 1, 2, 3, 4, 6, 8, 12, 18, 24, 30, 36], cmap=cmap, units='inch') mp.drawcounties() mp.drawcities() mp.postprocess(filename='test.png') mp.close()
def render_map(grb_file, llclat, llclon, urclat, urclon, altitude_layer):
    """Given a grb file, renders a jpg map on disk."""
    print('processing file %s ' % grb_file)
    grbs = pygrib.open(grb_file)
    try:
        # NOTE: index 34 is hardcoded here; altitude_layer is only used in
        # the error message below
        data = grbs.select(name='V component of wind')[34]['values']
    except IndexError:
        print(grb_file + ' has no V wind component at index', altitude_layer)
        return None
    # We don't like the way NOAA aligns things. We like monotonic variations.
    data = realign_noaa_data(data)
    # Get the datetime of the file
    hour_of_the_day = int(grb_file[-8:-5])
    datetime_file = datetime.strptime(grb_file[:-8], 'gfs_4_%Y%m%d_0000_')
    datetime_file += timedelta(hours=hour_of_the_day)
    # Plot nodes
    df = pd.read_csv('LMP_locs.csv', delimiter=',')
    lat_nodes = df['latitude'].values
    lon_nodes = df['longitude'].values
    points = interp_vector(data, lat_nodes, lon_nodes)
    node_interp_one = np.zeros((len(points), 4))
    for i in range(len(points)):
        node_interp_one[i, :] = [datetime_file.strftime('%s'),
                                 lat_nodes[i], lon_nodes[i], points[i]]
    return node_interp_one
def main(): """Go Main Go.""" f0 = utc(2018, 10, 5, 0) fx = f0 + datetime.timedelta(hours=168) grbs = pygrib.open('p06m_2018100500f006.grb') grb = grbs[1] """ keys = list(grb.keys()) keys.sort() for key in keys: try: print("%s %s" % (key, getattr(grb, key, None))) except RuntimeError as exp: print("%s None" % (key, )) """ grb['dayOfEndOfOverallTimeInterval'] = fx.day grb['endStep'] = 168 grb['hourOfEndOfOverallTimeInterval'] = fx.hour grb['lengthOfTimeRange'] = 168 grb['stepRange'] = "0-168" grb = pygrib.reload(grb) # grb['validityDate'] = int(fx.strftime("%Y%m%d")) # grb['validityTime'] = int(fx.strftime("%H%M")) fp = open('test.grb', 'wb') fp.write(grb.tostring()) fp.close()
def gettigge(center, idate, var, lev, ftyp='cntl'):
    YYYYMM = idate.strftime('%Y%m')
    YYYYMMDDHH = idate.strftime('%Y%m%d%H')
    varname = var + str(lev)
    fname = FNAMEBASE.format(center=center.lower(), ftyp=ftyp,
                             varname=varname, YYYYMM=YYYYMM,
                             YYYYMMDDHH=YYYYMMDDHH)
    print fname
    shortname = var
    if var == 'z':
        shortname = 'gh'
    grbs = pygrib.open(fname)
    grb = grbs.select(shortName=shortname, level=lev)
    xn = grb[0]['Ni']
    yn = grb[0]['Nj']
    tn = len(grb)
    lat, lon = grb[0].latlons()
    out = np.empty((tn, yn, xn))
    tyme = np.empty(tn, dtype=np.object)
    for t, gr in enumerate(grb):
        out[t, ...] = gr.values
        tyme[t] = gr.validDate
    grbs.close()
    dims = ['tyme', 'lat', 'lon']
    grid = McGrid(out, lon=lon[0, :], lat=lat[::-1, 0], lev=lev,
                  tyme=tyme, dims=dims)
    out = McField(out, name=var, grid=grid)
    return out
def read_grib(filename, dataset, dt):
    '''
    Read dataset in grib file filename with date and hour specified by
    datetime object dt.
    '''
    localFilename = copyToLocal(filename)
    hour = dt.hour
    date = int(dt.strftime('%Y%m%d'))
    try:
        g = pygrib.open(localFilename)
    except IOError:
        raise Exception('Could not open grib file %s' % (localFilename))
    data = None
    grblist = []
    for grb in g:
        grblist.append(grb)
        if (grb.name == dataset) and (grb.hour == hour) and (grb.dataDate == date):
            data = grb.values
    g.close()
    if data is None:
        print 'List of datasets in grib file:'
        for grb in grblist:
            print grb
        raise Exception('Could not read "%s" at %s in file %s'
                        % (dataset, dt, localFilename))
    return data
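# --- Usage sketch (file and field names are hypothetical): read_grib()
# returns the values array for the matching message, or raises after listing
# what the file actually contains.
from datetime import datetime

temps = read_grib('gfs.0p50.2014090512.grb2', '2 metre temperature',
                  datetime(2014, 9, 5, 12))
print(temps.shape)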
def do():
    now = sts
    while now < ets:
        fn = now.strftime(("/mesonet/ARCHIVE/data/%Y/%m/%d/model/hrrr/%H/"
                           "hrrr.t%Hz.3kmf00.grib2"))
        if not os.path.isfile(fn):
            print fn
            now += interval
            continue
        grbs = pygrib.open(fn)
        try:
            gs = grbs.select(name='2 metre temperature')
        except Exception:
            print fn
            now += interval
            continue
        g = gs[0]['values']
        if now == sts:
            lats, lons = gs[0].latlons()
            maxinterval = np.zeros(np.shape(g))
            current = np.zeros(np.shape(g))
        print np.max(g), np.min(g)
        current = np.where(g < 273, current + 1, 0)
        maxinterval = np.where(current > maxinterval, current, maxinterval)
        now += interval

    np.save('maxinterval.npy', np.array(maxinterval))
    np.save('lats.npy', lats)
    np.save('lons.npy', lons)
def ww3_plot(fname, xlim=False, ylim=False, res='c'):
    '''
    ex.: f='WW3_data/2012/multi_1.glo_30m.dp.201201.grb2'
         swanu.ww3_plot(f,xlim=[-100,-80],ylim=[20,32],res='i')
    '''
    from mpl_toolkits.basemap import Basemap
    import pylab as pl

    f = pygrib.open(fname)
    lat, lon = f.message(1).latlons()
    lon[lon > 180] = lon[lon > 180] - 360.
    if not xlim:
        xlim = -180, 180
    if not ylim:
        ylim = -90, 90
    m = Basemap(projection='cyl', llcrnrlat=ylim[0], urcrnrlat=ylim[1],
                llcrnrlon=xlim[0], urcrnrlon=xlim[1], resolution=res)
    m.drawcoastlines()
    m.fillcontinents(color='#cccccc', lake_color='w')
    # draw parallels and meridians
    m.drawparallels(np.arange(-90., 91., 30.))
    m.drawmeridians(np.arange(-180., 181., 60.))
    x, y = m(lon, lat)
    pl.plot(x, y, 'b.', ms=.5)
    pl.show()
def compute(valid):
    ''' Get me files '''
    prob = None
    for hr in range(-15, 0):
        ts = valid + datetime.timedelta(hours=hr)
        fn = ts.strftime("hrrr.ref.%Y%m%d%H00.grib2")
        if not os.path.isfile(fn):
            continue
        grbs = pygrib.open(fn)
        gs = grbs.select(level=1000, forecastTime=(-1 * hr * 60))
        ref = generic_filter(gs[0]['values'], np.max, size=10)
        if prob is None:
            lats, lons = gs[0].latlons()
            prob = np.zeros(np.shape(ref))
        prob = np.where(ref > 29, prob + 1, prob)

    prob = np.ma.array(prob / 15. * 100.)
    prob.mask = np.ma.where(prob < 1, True, False)
    m = MapPlot(sector='iowa',
                title='HRRR Composite Forecast 4 PM 20 May 2014 30+ dbZ Reflectivity',
                subtitle=('frequency of previous 15 model runs all valid at %s,'
                          ' ~15km smoothed') % (
                              valid.astimezone(
                                  pytz.timezone("America/Chicago")
                              ).strftime("%-d %b %Y %I:%M %p %Z"),))
    m.pcolormesh(lons, lats, prob, np.arange(0, 101, 10), units='%',
                 clip_on=False)
    m.map.drawcounties()
    m.postprocess(filename='test.ps')
    m.close()
def get_hrrr_variable_multi(DATE, variable, next=2, fxx=0, model='hrrr',
                            field='sfc', removeFile=True):
    """
    Uses cURL to grab a range of variables from a HRRR grib2 file on the
    MesoWest HRRR archive.

    Input:
        DATE       - the datetime(year, month, day, hour) for the HRRR file you want
        variable   - a string describing the variable you are looking for.
                     Refer to the .idx files here:
                         https://api.mesowest.utah.edu/archive/HRRR/
                     You want to put the variable short name and the level
                     information. For example, for 2m temperature:
                         'TMP:2 m above ground'
        next       - number of consecutive grib messages to retrieve, starting
                     at the matched variable (inferred from the code below)
        fxx        - the forecast hour you desire. Default is the analysis hour.
        model      - the model you want. Options include ['hrrr', 'hrrrX', 'hrrrAK']
        field      - the file type your variable is in. Options include ['sfc', 'prs']
        removeFile - True will remove the grib2 file after downloaded. False will not.
    """
    # Model directory names are named differently than the model name.
    if model == 'hrrr':
        model_dir = 'oper'
    elif model == 'hrrrX':
        model_dir = 'exp'
    elif model == 'hrrrAK':
        model_dir = 'alaska'

    if removeFile is True:
        if DATE.hour % 2 == 0:
            try:
                outfile = '/scratch/local/brian_hrrr/temp_%04d%02d%02d%02d.grib2' \
                    % (DATE.year, DATE.month, DATE.day, DATE.hour)
            except:
                outfile = './temp_%04d%02d%02d%02d.grib2' \
                    % (DATE.year, DATE.month, DATE.day, DATE.hour)
        else:
            outfile = './temp_%04d%02d%02d%02d.grib2' \
                % (DATE.year, DATE.month, DATE.day, DATE.hour)
    else:
        # Save the grib2 file as a temporary file (that isn't removed)
        outfile = './temp_%04d%02d%02d%02d.grib2' \
            % (DATE.year, DATE.month, DATE.day, DATE.hour)

    print "Hour %s out file: %s" % (DATE.hour, outfile)

    # URL for the grib2 idx file
    fileidx = 'https://api.mesowest.utah.edu/archive/HRRR/%s/%s/%04d%02d%02d/%s.t%02dz.wrf%sf%02d.grib2.idx' \
        % (model_dir, field, DATE.year, DATE.month, DATE.day,
           model, DATE.hour, field, fxx)
    # URL for the grib2 file (located on PANDO S3 archive)
    pandofile = 'https://pando-rgw01.chpc.utah.edu/HRRR/%s/%s/%04d%02d%02d/%s.t%02dz.wrf%sf%02d.grib2' \
        % (model_dir, field, DATE.year, DATE.month, DATE.day,
           model, DATE.hour, field, fxx)

    try:
        try:
            # Ignore ssl certificate (else urllib2.urlopen won't work),
            # depending on your version of python. See:
            # http://stackoverflow.com/questions/19268548/python-ignore-certicate-validation-urllib2
            ctx = ssl.create_default_context()
            ctx.check_hostname = False
            ctx.verify_mode = ssl.CERT_NONE
            idxpage = urllib2.urlopen(fileidx, context=ctx)
        except:
            idxpage = urllib2.urlopen(fileidx)
        lines = idxpage.readlines()

        # 1) Find the byte range for the variable. Need to first find where
        #    the variable is located. Keep a count (gcnt) so we can get the
        #    end byte range from the next line.
        gcnt = 0
        for g in lines:
            expr = re.compile(variable)
            if expr.search(g):
                print 'matched a variable', g
                parts = g.split(':')
                rangestart = parts[1]
                parts = lines[gcnt + next].split(':')
                rangeend = int(parts[1]) - 1
                print 'range:', rangestart, rangeend
                byte_range = str(rangestart) + '-' + str(rangeend)
                # 2) When the byte range is discovered, use cURL to download.
                os.system('curl -s -o %s --range %s %s'
                          % (outfile, byte_range, pandofile))
            gcnt += 1

        return_this = {'msg': np.array([])}

        # 3) Get data from the file
        grbs = pygrib.open(outfile)
        for i in range(1, next + 1):
            # (the original read grbs[1] here for every i, repeating the
            # first field's values)
            return_this[grbs[i]['name']], return_this['lat'], \
                return_this['lon'] = grbs[i].data()
            return_this['msg'] = np.append(return_this['msg'], str(grbs[i]))
        return_this['valid'] = grbs[1].validDate
        return_this['anlys'] = grbs[1].analDate

        # 4) Remove the temporary file
        if removeFile is True:
            os.system('rm -f %s' % (outfile))

        # 5) Return some important stuff from the file
        return return_this

    except:
        print " ! Could not get the file:", pandofile
        print " ! Is the variable right?", variable
        print " ! Does the file exist?", fileidx
        return {'value': np.nan,
                'lat': np.nan,
                'lon': np.nan,
                'valid': np.nan,
                'anlys': np.nan,
                'msg': np.nan}
def powersun_check(ecfile, lon, lat, name, txtpath):
    namearray = os.path.split(ecfile)
    ecname = namearray[1]
    ectime = ecname[4:12]
    # This is the EC forecast initialization time, in UTC
    starttime = datetime.datetime(int(ecname[4:8]), int(ecname[8:10]),
                                  int(ecname[10:12]), int(ecname[13:15]), 0, 0)
    print starttime
    initialtime = starttime
    grbs = pygrib.open(ecfile)
    # Total radiation
    grb001 = grbs.select(name='Surface solar radiation downwards')
    # Diffuse radiation is computed: total minus horizontal direct radiation.
    # Direct normal radiation: total and direct normal are unrelated fields.
    grb003 = grbs.select(name='Direct solar radiation')
    # For now treat this variable as horizontal direct radiation
    grb002 = grbs.select(name='Total sky direct solar radiation at surface')
    # Mean wind speed
    grb004 = grbs.select(name='10 metre U wind component')
    grb005 = grbs.select(name='10 metre V wind component')
    # Air temperature
    grb006 = grbs.select(name='2 metre temperature')
    # Dewpoint temperature
    grb007 = grbs.select(name='2 metre dewpoint temperature')
    # Surface pressure
    grb008 = grbs.select(name='Surface pressure')

    # Lists holding the hourly series for each element
    plist_totalradiation = []
    plist_straightradiation = []
    plist_surfacedradiation = []
    plist_ws = []
    plist_wd = []
    plist_t = []
    plist_rh = []
    plist_p = []

    # Use the latest forecast run for the next three days (the reference
    # verification used day two of that three-day window)
    for i in range(12, 91, 1):
        # Total radiation
        perarray001 = grb001[i].values
        perarray001 = numpy.array(perarray001)
        pvalue001 = linearForECvalue(perarray001, lon, lat)
        plist_totalradiation.append(pvalue001)
        # Direct normal radiation
        perarray003 = grb003[i].values
        perarray003 = numpy.array(perarray003)
        pvalue003 = linearForECvalue(perarray003, lon, lat)
        plist_straightradiation.append(pvalue003)
        # Diffuse = total minus horizontal direct; this is horizontal direct
        perarray002 = grb002[i].values
        pvalue002 = linearForECvalue(perarray002, lon, lat)
        plist_surfacedradiation.append(pvalue002)
        # Mean wind speed: u component
        perarray004 = grb004[i].values
        perarray004 = numpy.array(perarray004)
        # v component
        perarray005 = grb005[i].values
        perarray005 = numpy.array(perarray005)
        pvalue004 = linearForECvalue(perarray004, lon, lat)
        pvalue005 = linearForECvalue(perarray005, lon, lat)
        ws = math.sqrt(pvalue004 * pvalue004 + pvalue005 * pvalue005)
        plist_ws.append(ws)
        wd = calculatwinddirect(pvalue004, pvalue005)
        plist_wd.append(wd)
        # Air temperature
        perarray006 = grb006[i].values
        perarray006 = numpy.array(perarray006)
        t = linearForECvalue(perarray006, lon, lat) - 273.15
        plist_t.append(t)
        # Relative humidity, computed from air and dewpoint temperature
        perarray007 = grb007[i].values
        perarray007 = numpy.array(perarray007)
        dt = linearForECvalue(perarray007, lon, lat) - 273.15
        rh = 100 * math.exp((17.625 * dt) / (243.04 + dt)) / math.exp(
            (17.625 * t) / (243.04 + t))
        plist_rh.append(rh)
        # Surface pressure
        perarray008 = grb008[i].values
        perarray008 = numpy.array(perarray008)
        p = linearForECvalue(perarray008, lon, lat) / 100
        plist_p.append(p)
    print len(plist_p), plist_totalradiation

    # The radiation values are accumulations, so convert them to instantaneous
    # values (not strictly correct), then linearly interpolate each element.
    # Total radiation
    pplist_total = totaltosiple_1h(plist_totalradiation)
    perlist_total = calculateRadiationbyInterplote_1h(pplist_total)
    print pplist_total, len(pplist_total)
    print perlist_total, len(perlist_total)
    # Direct normal radiation
    pplist_straight = totaltosiple_1h(plist_straightradiation)
    perlist_straight = calculateRadiationbyInterplote_1h(pplist_straight)
    # Diffuse = total minus horizontal direct; interpolate horizontal direct here
    pplist_surface = totaltosiple_1h(plist_surfacedradiation)
    perlist_fdir = calculateRadiationbyInterplote_1h(pplist_surface)
    # Wind speed
    perlist_ws = calculateRadiationbyInterplote_1h(plist_ws)
    # Wind direction
    perlist_wd = calculateRadiationbyInterplote_1h(plist_wd)
    # Air temperature
    perlist_t = calculateRadiationbyInterplote_1h(plist_t)
    # Relative humidity
    perlist_rh = calculateRadiationbyInterplote_1h(plist_rh)
    # Surface pressure
    perlist_p = calculateRadiationbyInterplote_1h(plist_p)

    # Smooth every element with a Savitzky-Golay filter
    perlist_total = scipy.signal.savgol_filter(perlist_total, 5, 2)
    perlist_straight = scipy.signal.savgol_filter(perlist_straight, 5, 2)
    perlist_fdir = scipy.signal.savgol_filter(perlist_fdir, 5, 2)
    perlist_ws = scipy.signal.savgol_filter(perlist_ws, 5, 2)
    # NOTE: the original filtered perlist_ws again on the next line,
    # overwriting the wind direction; perlist_wd was presumably intended
    perlist_wd = scipy.signal.savgol_filter(perlist_wd, 5, 2)
    perlist_t = scipy.signal.savgol_filter(perlist_t, 5, 2)
    perlist_rh = scipy.signal.savgol_filter(perlist_rh, 5, 2)
    perlist_p = scipy.signal.savgol_filter(perlist_p, 5, 2)

    # Write the file; first determine the file name
    txtpath001 = txtpath + '/' + ecname[4:8]
    if not os.path.exists(txtpath001):
        os.mkdir(txtpath001)
    initialtimestring = datetime.datetime.strftime(initialtime, '%Y%m%d%H')
    txtfile = os.path.join(txtpath001, name + initialtimestring + '.txt')
    print txtfile
    wfile = open(txtfile, 'w')
    L = []
    # initialtime is the initialization time in UTC, but the file's start time
    # is pushed back 12 hours, then +8 hours to convert to Beijing time
    filetime = initialtime + datetime.timedelta(hours=20)
    perlist_scattered = []
    for i in range(len(perlist_total)):
        # One record every 15 minutes
        endtime = filetime + datetime.timedelta(minutes=i * 15)
        endtimestring = datetime.datetime.strftime(endtime, '%Y%m%d%H%M')
        endtimestring001 = datetime.datetime.strftime(endtime,
                                                      '%Y-%m-%d %H:%M:%S')
        scatteredvalue = perlist_total[i] - perlist_fdir[i]
        if perlist_total[i] < 0:
            perlist_total[i] = 0
        if perlist_straight[i] < 0:
            perlist_straight[i] = 0
        if scatteredvalue < 0:
            scatteredvalue = 0
        perlist_scattered.append(scatteredvalue)
        wfile.write(endtimestring + ' ' +
                    str("%.2f" % perlist_total[i]) + ' ' +
                    str("%.2f" % perlist_straight[i]) + ' ' +
                    str("%.2f" % scatteredvalue) + ' ' +
                    str("%.2f" % perlist_ws[i]) + ' ' +
                    str("%.2f" % perlist_wd[i]) + ' ' +
                    str("%.2f" % perlist_t[i]) + ' ' +
                    str("%.2f" % perlist_rh[i]) + ' ' +
                    str("%.2f" % perlist_p[i]))
        wfile.write('\n')
        initial_txt = datetime.datetime.strftime(initialtime,
                                                 '%Y-%m-%d %H:%M:%S')
        L.append((name, initial_txt, endtimestring001, str(perlist_total[i]),
                  str(scatteredvalue), str(perlist_straight[i]),
                  str(perlist_ws[i]), str(perlist_wd[i]), str(perlist_t[i]),
                  str(perlist_rh[i]), str(perlist_p[i])))
    wfile.close()

    # Insert into the database
    db = MySQLdb.connect('172.16.8.28', 'admin', 'moji_China_123', 'moge', 3307)
    cursor = db.cursor()
    sql = ('replace into powerplant_radiation(city_id,initial_time,'
           'forecast_time,total_radiation,straight_radiation,'
           'scattered_radiation,wind_speed,wind_direction,temperature,'
           'humidity,air_pressure) '
           'VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)')
    cursor.executemany(sql, L)
    db.commit()
    db.close()
    # Verify the output file was written
    if not os.path.exists(txtfile):
        return -1
    return 0
import numpy as np
import pygrib  # pygrib interface to grib_api
import pandas as pd

year = 2018
grbs = pygrib.open(
    'data/weather_copernicus_TMA_grib_2018/copernicus_TMA_wind_010118.grib')

# with open("copernicus_TMA_wind_2018.txt", "w") as text_file:
#     for grb in grbs:
#         print("{} {} {} {} {}#\n".format(grb.typeOfLevel, grb.level,
#               grb.name, grb.shortName, grb.parameterUnits), file=text_file)
#         print("{}\n".format(grb.keys()), file=text_file)
# grbs.rewind()  # rewind the iterator

my_y1 = 59
my_y2 = 61
my_x1 = 17
my_x2 = 19
new_data = []

import time
start_time = time.time()

selected_grbs = np.array(grbs.select(month=1, day=1))
for grb in selected_grbs:
    print(grb)
def calculateStationVariable(rainvaribalelist, prevariablelist, inputfile,
                             stationlist):
    if inputfile[-4:] == 'grib':
        grbs = pygrib.open(inputfile)
        # Pull out all the data arrays
        grb = grbs.select(
            name='Maximum temperature at 2 metres in the last 6 hours')
        maxtempArray = grb[0].values
        grb = grbs.select(
            name='Minimum temperature at 2 metres in the last 6 hours')
        mintempArray = grb[0].values
        grb = grbs.select(name='2 metre temperature')
        tempArray = grb[0].values
        grb = grbs.select(name='2 metre dewpoint temperature')
        dewpointArray = grb[0].values
        grb = grbs.select(name='10 metre U wind component')
        u10Array = grb[0].values
        grb = grbs.select(name='10 metre V wind component')
        v10Array = grb[0].values
        grb = grbs.select(name='Total cloud cover')
        tccArray = grb[0].values
        grb = grbs.select(name='Low cloud cover')
        lccArray = grb[0].values
        grb = grbs.select(name='Relative humidity', level=500)
        rh500Array = grb[0].values
        grb = grbs.select(name='Relative humidity', level=850)
        rh850Array = grb[0].values
        # Iterate over the 2867 stations
        csvfile = '/Users/yetao.lu/Desktop/mos/stations.csv'
        # csvfile = '/home/wlan_dev/stations.csv'
        idlist = []
        fileread = open(csvfile, 'r')
        fileread.readline()
        iii = 0
        while True:
            iii = iii + 1
            line = fileread.readline()
            perlist = line.split(',')
            if len(perlist) >= 4:
                stationlist.append(perlist)
                latitude = float(perlist[1])
                longitude = float(perlist[2])
                idlist.append(perlist[0])
                # Lat/lon grid indices
                indexlat = int((90 - latitude) / 0.1)
                indexlon = int((longitude + 180) / 0.1)
                maxlist = []
                minlist = []
                templist = []
                # Sample every field at this station; the first two fields
                # (6-hour max/min temperature) feed only the max/min lists
                fields = [maxtempArray, mintempArray, tempArray,
                          dewpointArray, u10Array, v10Array, tccArray,
                          lccArray, rh500Array, rh850Array]
                for fieldno, array in enumerate(fields):
                    vstring = []
                    perstationvalue(vstring, array, indexlat, indexlon)
                    for value in vstring:
                        maxlist.append(value)
                        minlist.append(value)
                        if fieldno >= 2:
                            templist.append(value)
                # Append station latitude, longitude and elevation
                maxlist.append(perlist[1])
                minlist.append(perlist[1])
                maxlist.append(perlist[2])
                minlist.append(perlist[2])
                maxlist.append(perlist[3])
                minlist.append(perlist[3])
                # Add to the overall matrices
                tempvariablelist.append(templist)
                maxtempvariablelist.append(maxlist)
                mintempvariablelist.append(minlist)
            if not line:
                break
import pygrib
import csv
import pandas as pd
import numpy as np
import os

grbs = pygrib.open('/home/ecmwf/D1D03160000031718001')
# grbs = pygrib.open('/home/ecmwf/D1D03180000032103001')
for grb in grbs:
    print(grb)

grbz = pygrib.open('/home/ecmwf/D1E03270000041100001')
# for grb in grbz:
#     print(grb)
# for dataset in grbz:
#     print(dataset['parameterUnits'], dataset['timeRangeIndicator'],
#           dataset['P1'], dataset['P2'], dataset['perturbationNumber'],
#           dataset['parameterName'])
#     print(dataset['startStep'], dataset['endStep'])
# data = grbz[1]
# for i in data.keys():
#     print(i)
#     print(data[i])
# data = pd.DataFrame(grbz[1]['latLonValues'])
def __init__(self, file_name="file.anl"):
    grib_file = file_name
    grbs = pygrib.open(grib_file)
    self.lat_ar, self.lon_ar = grbs[1].latlons()
    self.lat_min = np.min(self.lat_ar)
    self.lat_max = np.max(self.lat_ar)
    self.lon_min = np.min(self.lon_ar)
    self.lon_max = np.max(self.lon_ar)
    self.res = self.lat_ar[1, 0] - self.lat_ar[0, 0]
    self.n_lon = len(self.lon_ar[0])
    self.n_lat = len(self.lat_ar)
    self.levels = []
    for g in grbs:
        level = g.level
        if level not in self.levels:
            self.levels.append(level)
    self.levels = self.levels[:-1]
    self.n_levels = len(self.levels)
    self.u_ar = np.zeros((self.n_lat, self.n_lon, self.n_levels))
    self.v_ar = np.zeros((self.n_lat, self.n_lon, self.n_levels))
    self.T_ar = np.zeros((self.n_lat, self.n_lon, self.n_levels))
    self.h_ar = np.zeros((self.n_lat, self.n_lon, self.n_levels))
    self.RH_ar = np.zeros((self.n_lat, self.n_lon, self.n_levels))
    self.g_ar = np.zeros((self.n_lat, self.n_lon))
    grbs.seek(0)
    for g in grbs:
        data = g.values
        v = g.shortName
        l = g.level
        li = np.searchsorted(self.levels, l)
        if v == "u":
            self.u_ar[:, :, li] = data
        elif v == "v":
            self.v_ar[:, :, li] = data
        elif v == "t":
            self.T_ar[:, :, li] = data
        elif v == "gh":
            self.h_ar[:, :, li] = data
        elif v == "r":
            self.RH_ar[:, :, li] = data
        elif v == "orog":
            self.g_ar[:, :] = data
    # reverse array order in the final index so the last index increases with height
    self.u_ar = self.u_ar[:, :, ::-1]
    self.v_ar = self.v_ar[:, :, ::-1]
    self.T_ar = self.T_ar[:, :, ::-1]
    self.h_ar = self.h_ar[:, :, ::-1]
    self.RH_ar = self.RH_ar[:, :, ::-1]
    self.levels = self.levels[::-1]
    # these are later defined as interpolation functions
    self.P_interp = 0
    self.u_interp = 0
    self.v_interp = 0
    self.T_interp = 0
    self.RH_interp = 0
    self.orog_interp = 0
    # most recent i, j, k bins, used to avoid recomputing interpolation functions
    self.i_cur = -1
    self.j_cur = -1
    self.k_cur = -1
def main(istate, iexp, outdir, members, seed, var='t', level=30, pert=0.1,
         force=False):
    """
    Create an ensemble of perturbed IFS initial conditions.
    Perturbs one random mode at level=level by amount=pert.

    :param istate: directory containing initial state to perturb [path]
    :type istate: string
    :param iexp: experiment name of initial state to perturb
    :type iexp: string
    :param outdir: base output directory [path]
    :type outdir: string
    :param members: number of ensemble members to create
    :type members: int
    :param seed: random seed
    :type seed: int
    :param var: variable name to perturb
    :type var: string
    :param level: model level
    :type level: int
    :param pert: amount to perturb with
    :type pert: float
    :param force: force overwriting existing perturbed conditions
    :type force: bool
    """
    inFile = "ICMSH{}INIT".format(iexp)
    file_in = os.path.join(istate, inFile)
    file_in_sha256 = sha256_checksum(file_in)
    # Open and read messages in original initial state file
    istate_in = pygrib.open(file_in)
    messages_in = istate_in.read()  # read all messages in grib file
    istate_in.close()
    # total number of modes
    nmodes = len(messages_in[0].values)
    # seed the random generators
    np.random.seed(seed)
    random.seed(seed)
    # pick random even modes
    modes = np.random.choice(range(0, nmodes, 2), members, replace=False)
    for idx, mode in enumerate(modes):
        # define new experiment name
        expName = expName_generator()
        # output directory for this experiment
        outDirExp = os.path.join(outdir, expName)
        # remove the directory if it exists and force==True
        if force:
            shutil.rmtree(outDirExp, ignore_errors=True)
        # create output directory
        os.makedirs(outDirExp)
        # create logger
        logger = open(os.path.join(outDirExp, expName + ".log"), 'w')
        log = {}
        log['input'] = [
            "[Input] sha256 {}: {}\n".format(file_in, file_in_sha256)
        ]
        log['output'] = []
        # randomize sign of perturbation
        pert = np.random.choice([-1, 1]) * pert
        # list of output files to copy that don't need to be modified
        oFiles = ["ICMGG{}INIT", "ICMGG{}INIUA"]
        for oFile in oFiles:
            outputFile = oFile.format(expName)
            outputPath = os.path.join(outDirExp, outputFile)
            inputFile = oFile.format(iexp)
            inputPath = os.path.join(istate, inputFile)
            # copy original initial state file to the new experiment
            try:
                shutil.copyfile(inputPath, outputPath)
                # add to logging dict
                sha256_in = sha256_checksum(inputPath)
                sha256_out = sha256_checksum(outputPath)
                log['input'].append("[Input] sha256 {}: {}\n".format(
                    inputPath, sha256_in))
                log['output'].append("[Output] sha256 {}: {}\n".format(
                    outputPath, sha256_out))
            except IOError:
                pass  # silently fail
        # create a new grib file in which perturbed initial conditions
        # will be saved
        outFile = "ICMSH{}INIT".format(expName)
        fileOut = os.path.join(outDirExp, outFile)
        grbOut = open(fileOut, 'wb')
        # perturb
        for msg in messages_in:
            if msg['shortName'] == var and msg['level'] == level:
                t = msg['values']
                t[mode] = t[mode] + pert
                msg['values'] = t
            # write the (possibly perturbed) message to the gribfile
            grbOut.write(msg.tostring())
        # close the perturbed gribfile
        grbOut.close()
        sha256_out = sha256_checksum(fileOut)
        # add to logging dict
        log['output'].insert(
            0, "[Output] sha256 {}: {}\n".format(fileOut, sha256_out))
        # write log
        [logger.write(msg) for msg in log['input']]
        logger.write("\n")
        [logger.write(msg) for msg in log['output']]
        # test if perturbation was a success
        if testPerturbation(file_in, fileOut, var, level, mode):
            msg = (("\nPerturbed ensemble {}, variable {}, mode {}, " +
                    "level {}, by amount {}.\n").format(expName, var, mode,
                                                        level, pert))
            print(msg)
            logger.write(msg)
            msg = (("Random seed {} was used to create the perturbed " +
                    "initial state.\n").format(seed))
            logger.write(msg)
            logger.close()
        else:
            msg = (("Perturbation failed: input file {} and output file {}" +
                    " are the same.\n").format(file_in, fileOut))
            logger.write("\n {}".format(msg))
            logger.close()
            raise IOError(msg)
def read_variable(main_folder=main_folder, file_prefix=file_prefix, run=run,
                  variable=variable, level_type=level_type,
                  date_method=False):
    """Read and concatenate a variable from a list of files created here; if
    parameters are not provided, the defaults (up top) are used.

    Since data every 15 minutes are too much to process, we first split the
    15-minute data with CDO and read only hourly data here. The date_method
    branch reads all the data and splits by date, but takes too much time."""
    if date_method:
        files = sorted(
            glob(main_folder + file_prefix + '_' + level_type + '_' + run +
                 '*' + variable + '.grib2'))
        dates = []
        temps = []
        for file in files:
            grbs = pygrib.open(file)
            for grb in grbs:
                dates.append("%d %s" % (grb['forecastTime'], grb.fcstimeunits))
                if grb.parameterName == variables_names[variable] \
                        and grb.level == variables_levels[variable]:
                    temps.append(grb.values)
        dates = np.array(dates)
        u, ind = np.unique(dates, return_index=True)
        dates_unique = u[np.argsort(ind)]
        time = pd.date_range(start=grb.analDate, freq='15min',
                             periods=dates_unique.shape[0])
        var_ens = np.empty(shape=(0, number_ensembles, nlat, nlon),
                           dtype=float)
        for time_ind in dates_unique:
            var_ens = np.append(var_ens, [temps[dates == time_ind]], axis=0)
    else:
        # New method which should be faster; for now uses the one from
        # icon_eps, which assumes one time step per file
        files = sorted(
            glob(main_folder + file_prefix + '_' + level_type + '_' + run +
                 '*_00_' + variable + '.grib2'))
        for file in files:
            temps = []
            grbs = pygrib.open(file)
            for grb in grbs:
                if grb.parameterName == variables_names[variable] \
                        and grb.level == variables_levels[variable]:
                    temps.append(grb.values)
            if file == files[0]:
                # This is the first file we read, so create the variable
                var_ens = np.empty(shape=(0, number_ensembles, nlat, nlon),
                                   dtype=float)
            var_ens = np.append(var_ens, [temps], axis=0)
    # This gives back an array shaped [time, number_ensembles, nlat, nlon]
    return var_ens
import pygrib
import matplotlib.pyplot as plt
import numpy as np
from netCDF4 import Dataset

# open the .grib file
grib_file = 'gfs.t12z.pgrb2.1p00.f000'
grbs = pygrib.open(grib_file)

# select U/V 10m components, lat and lon
U_grd10m = grbs.select(name='10 metre U wind component')[0]
V_grd10m = grbs.select(name='10 metre V wind component')[0]
lat_f, lon_f = U_grd10m.latlons()
lat_f = lat_f[::-1]  # inverse lat
lon_f = lon_f[::-1]  # inverse lon

# extract the U/V component values
U_grd10m_field = U_grd10m.values
V_grd10m_field = V_grd10m.values
U_grd10m_field = U_grd10m_field[::-1]  # inverse U array
V_grd10m_field = V_grd10m_field[::-1]  # inverse V array

# stack the variables into a 3D array
U_component = np.stack((U_grd10m_field, V_grd10m_field), axis=0)

# create the netCDF file
dataset = Dataset('wnd_gfs.nc', 'w', format='NETCDF4_CLASSIC')

# create dimensions
lat = dataset.createDimension('lat', 181)
lon = dataset.createDimension('lon', 360)
rmsnhall = []
rmsshall = []
rmstrall = []
rmsglall = []
acnhall = []
acshall = []
actrall = []
acglall = []
bias = None
ntime = None
for date in dates:
    datev = dateutils.dateshift(date, fhour)
    # read analysis
    filea = os.path.join(analpath, 'pgbanl.ecm.%s' % datev)
    grbs = pygrib.open(filea)
    grb = grbs.select(shortName=vargrb, level=level)[0]
    verif_data = grb.values[::-1, :]
    grbs.close()
    # print verif_data.shape, verif_data.min(), verif_data.max()
    # read climo
    grbsclimo = pygrib.open(
        os.path.join(climopath, 'cmean_1d.1959%s' % datev[4:8]))
    yyyy, mm, dd, hh = dateutils.splitdate(datev)
    grbclimo = grbsclimo.select(shortName=vargrb, level=level,
                                dataTime=100 * hh)[0]
    climo_data = grbclimo.values[::-1, :]
    grbsclimo.close()
    # print climo_data.shape, climo_data.min(), climo_data.max()
    # read forecast data from tiled history files.
def run(ts):
    """Process data for this timestamp"""
    pgconn = get_dbconn('coop')
    cursor = pgconn.cursor()
    cursor2 = pgconn.cursor()
    total = None
    xaxis = None
    yaxis = None
    for hr in range(5, 23):  # Only need 5 AM to 10 PM for solar
        utc = ts.replace(hour=hr).astimezone(pytz.utc)
        fn = utc.strftime(("/mesonet/ARCHIVE/data/%Y/%m/%d/model/hrrr/%H/"
                           "hrrr.t%Hz.3kmf00.grib2"))
        if not os.path.isfile(fn):
            continue
        grbs = pygrib.open(fn)
        try:
            if utc >= SWITCH_DATE:
                grb = grbs.select(name='Downward short-wave radiation flux')
            else:
                grb = grbs.select(parameterNumber=192)
        except ValueError:
            if utc.hour != 3:
                print('coop/hrrr_solarrad.py %s had no solar rad' % (fn, ))
            continue
        if len(grb) == 0:
            print('Could not find SWDOWN in HRRR %s' % (fn, ))
            continue
        g = grb[0]
        if total is None:
            total = g.values
            lat1 = g['latitudeOfFirstGridPointInDegrees']
            lon1 = g['longitudeOfFirstGridPointInDegrees']
            llcrnrx, llcrnry = LCC(lon1, lat1)
            nx = g['Nx']
            ny = g['Ny']
            dx = g['DxInMetres']
            dy = g['DyInMetres']
            xaxis = llcrnrx + dx * np.arange(nx)
            yaxis = llcrnry + dy * np.arange(ny)
        else:
            total += g.values

    if total is None:
        print(('coop/hrrr_solarrad.py found no HRRR data for %s'
               ) % (ts.strftime("%d %b %Y"), ))
        return

    # Total is the sum of the hourly W m-2 values; convert to MJ day-1 m-2
    total = (total * 3600.0) / 1000000.0

    cursor.execute(
        """
        SELECT station, ST_x(geom), ST_y(geom) from alldata a JOIN stations t
        on (a.station = t.id) where day = %s and network ~* 'CLIMATE'
        """, (ts.strftime("%Y-%m-%d"), ))
    for row in cursor:
        (x, y) = LCC(row[1], row[2])
        i = np.digitize([x], xaxis)[0]
        j = np.digitize([y], yaxis)[0]
        rad_mj = float(total[j, i])
        if rad_mj < 0:
            print('WHOA! Negative RAD: %.2f, station: %s' % (rad_mj, row[0]))
            continue
        cursor2.execute(
            """
            UPDATE alldata_""" + row[0][:2] + """ SET hrrr_srad = %s
            WHERE day = %s and station = %s
            """, (rad_mj, ts.strftime("%Y-%m-%d"), row[0]))
    cursor.close()
    cursor2.close()
    pgconn.commit()
    pgconn.close()
import pygrib
from datetime import datetime
from functions import wind_calcs, custom_domains

# Open a file
date = '20150618'
hour = '23'
subdomain = 'no'
# Model level pressure in millibars
mb = 500
DIR = ('/uufs/chpc.utah.edu/common/home/horel-group/archive/' + date +
       '/models/hrrr/')
FILE = 'hrrr.t' + hour + 'z.wrfprsf00.grib2'
grbs = pygrib.open(DIR + FILE)

# Grab geopotential height at 500 mb
# (just had to figure out that it's in the 18th index)
grb = grbs.select(name='Geopotential Height', level=mb)[0]  # for general info
Z = grb.values
Zunits = grb.units
Zname = grb.name

Ugrb = grbs.select(name='U component of wind', level=mb)[0]
U = Ugrb.values
Uunits = Ugrb.units
import sys
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import pygrib
import numpy as np
from pyiem.datatypes import temperature

grbs = pygrib.open('rtma.t23z.awp2p5f000.grib2')
for grb in grbs:
    print grb.name
    # print np.shape(grb.values)
    # print grb.projparams

g = grbs.select(name='2 metre temperature')[0]
print dir(g), g['gridType']
print g['latitudeOfFirstGridPointInDegrees']
print g['longitudeOfFirstGridPointInDegrees']
print g['Nx']
print g['Ny']
print g['DxInMetres']
print g['DyInMetres']
lats, lons = g.latlons()
data = temperature(g['values'], 'K').value('F')

llcrnrlon = lons[0, 0]
llcrnrlat = lats[0, 0]
urcrnrlon = lons[-1, -1]
urcrnrlat = lats[-1, -1]

rsphere = (g.projparams['a'], g.projparams['b'])
lat_1 = g.projparams['lat_1']
lat_2 = g.projparams['lat_2']
lon_0 = g.projparams['lon_0']
# Imports assumed by this function (not shown in the original fragment)
import os
import re
import ssl
import urllib2
from datetime import datetime, timedelta

import numpy as np
import pygrib


def get_hrrr_variable(DATE, variable, fxx=0, model='hrrr', field='sfc',
                      removeFile=True, value_only=False, verbose=True,
                      outDIR='./'):
    """
    Uses cURL to grab just one variable from a HRRR grib2 file on the
    MesoWest HRRR archive.

    Input:
        DATE       - the datetime(year, month, day, hour) for the HRRR file
                     you want. This must be in UTC, obviously.
        variable   - a string describing the variable you are looking for.
                     Refer to the .idx files here:
                     https://api.mesowest.utah.edu/archive/HRRR/
                     You want to put the variable short name and the level
                     information. For example, for 2m temperature:
                     'TMP:2 m above ground'
        fxx        - the forecast hour you desire. Default is the analysis
                     hour.
        model      - the model you want. Options include
                     ['hrrr', 'hrrrX', 'hrrrak']
        field      - the file type your variable is in. Options include
                     ['sfc', 'prs']
        removeFile - True will remove the grib2 file after it is downloaded.
                     False will not.
        value_only - Only return the values. Fastest return speed if set to
                     True, when all you need is the value. Return time is
                     roughly .75-1 second if False, .2 seconds if True.
        verbose    - prints some stuff out
    """
    # Temp file name has to be very unique, else when we use multiprocessing
    # we might accidentally delete files before we are done with them.
    outfile = '%stemp_%04d%02d%02d%02d_f%02d_%s.grib2' % (
        outDIR, DATE.year, DATE.month, DATE.day, DATE.hour, fxx,
        variable[:3])

    if verbose is True:
        print outfile

    # Dear User,
    # Only HRRR files for the previous day have been transferred to Pando.
    # That means if you are requesting data for today, you need to get it
    # from the NOMADS website. Good news, it's an easy fix. All we need to
    # do is redirect you to the NOMADS URLs. I'll check that the date you
    # are requesting is not today's date. If it is, then I'll send you to
    # NOMADS. Deal? :)
    # -Sincerely, Brian

    if DATE + timedelta(hours=fxx) < datetime.utcnow():
        # Get HRRR from Pando
        if verbose is True:
            print "Oh, good, you requested a date that should be on Pando."
        pandofile = 'https://pando-rgw01.chpc.utah.edu/%s/%s/%s/%s.t%02dz.wrf%sf%02d.grib2' \
            % (model, field, DATE.strftime('%Y%m%d'), model, DATE.hour,
               field, fxx)
        fileidx = pandofile + '.idx'
    else:
        # Get operational HRRR from NOMADS
        if model == 'hrrr':
            if verbose is True:
                print "\n-----------------------------------------------------------------------"
                print "!! Hey! You are requesting a date that is not on the Pando archive  !!"
                print "!! That's ok, I'll redirect you to the NOMADS server. :)            !!"
                print "-----------------------------------------------------------------------\n"
            # URL for the grib2 file (located on the NOMADS server)
            pandofile = 'http://nomads.ncep.noaa.gov/pub/data/nccf/com/hrrr/prod/hrrr.%s/%s.t%02dz.wrf%sf%02d.grib2' \
                % (DATE.strftime('%Y%m%d'), model, DATE.hour, field, fxx)
            fileidx = pandofile + '.idx'
        # or, get experimental HRRR from ESRL
        elif model == 'hrrrX':
            print "\n-----------------------------------------------------------------------"
            print "!! I haven't downloaded that experimental HRRR run from ESRL yet    !!"
            print "-----------------------------------------------------------------------\n"
            return None

    try:
        # 0) Read in the grib2.idx file
        try:
            # ?? Ignore the ssl certificate (else urllib2.urlopen won't
            # work). Depends on your version of python. See here:
            # http://stackoverflow.com/questions/19268548/python-ignore-certicate-validation-urllib2
            ctx = ssl.create_default_context()
            ctx.check_hostname = False
            ctx.verify_mode = ssl.CERT_NONE
            idxpage = urllib2.urlopen(fileidx, context=ctx)
        except:
            idxpage = urllib2.urlopen(fileidx)

        lines = idxpage.readlines()

        # 1) Find the byte range for the variable. Need to first find where
        #    the variable is located. Keep a count (gcnt) so we can get the
        #    end of the byte range from the next line.
        gcnt = 0
        for g in lines:
            expr = re.compile(variable)
            if expr.search(g):
                if verbose is True:
                    print 'matched a variable', g
                parts = g.split(':')
                rangestart = parts[1]
                parts = lines[gcnt + 1].split(':')
                rangeend = int(parts[1]) - 1
                if verbose is True:
                    print 'range:', rangestart, rangeend
                byte_range = str(rangestart) + '-' + str(rangeend)
                # 2) When the byte range is discovered, use cURL to download.
                os.system('curl -s -o %s --range %s %s'
                          % (outfile, byte_range, pandofile))
            gcnt += 1

        # 3) Get data from the file, using pygrib
        grbs = pygrib.open(outfile)
        if value_only is True:
            value = grbs[1].values
            # (Remove the temporary file)
            # ?? Is it possible to push the data straight from curl to  ??
            # ?? pygrib, without writing/removing a temp file? and      ??
            # ?? would that speed up this process?                      ??
            if removeFile is True:
                os.system('rm -f %s' % (outfile))
            return {'value': value}
        else:
            value, lat, lon = grbs[1].data()
            validDATE = grbs[1].validDate
            anlysDATE = grbs[1].analDate
            msg = str(grbs[1])

            # 4) Remove the temporary file
            if removeFile is True:
                os.system('rm -f %s' % (outfile))

            # 5) Return some important stuff from the file
            return {'value': value, 'lat': lat, 'lon': lon,
                    'valid': validDATE, 'anlys': anlysDATE, 'msg': msg}

    except:
        print " _______________________________________________________________"
        print " !! Run Date Requested :", DATE, "F%02d" % fxx
        print " !! Valid Date Requested :", DATE + timedelta(hours=fxx)
        print " !! Current UTC time :", datetime.utcnow()
        print " !! ------------------------------------------------------------"
        print " !! ERROR downloading from:", pandofile
        print " !! Is the variable right?", variable
        print " !! Does the .idx file exist?", fileidx
        print " ---------------------------------------------------------------"
        return {'value': np.nan, 'lat': np.nan, 'lon': np.nan,
                'valid': np.nan, 'anlys': np.nan, 'msg': np.nan}
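# Hedged usage sketch for get_hrrr_variable(); the variable string follows
# the .idx short-name convention described in the docstring, and the date
# here is purely illustrative.
from datetime import datetime

H = get_hrrr_variable(datetime(2017, 3, 10, 0), 'TMP:2 m above ground',
                      fxx=0, model='hrrr', field='sfc')
if H is not None:
    print 'valid date :', H['valid']
    print 'max 2 m T  :', H['value'].max()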
# Imports assumed by this function (not shown in the original fragment)
import datetime
import os

import pytz
import pygrib
from pyiem.datatypes import distance
from pyiem.plot import MapPlot, nwsprecip


def doday(ts, realtime):
    """Create a plot of precipitation stage4 estimates for some day

    We should total files from 1 AM to midnight local time
    """
    sts = ts.replace(hour=1)
    ets = sts + datetime.timedelta(hours=24)
    interval = datetime.timedelta(hours=1)
    now = sts
    total = None
    lts = None
    while now < ets:
        gmt = now.astimezone(pytz.utc)
        fn = gmt.strftime(("/mesonet/ARCHIVE/data/%Y/%m/%d/"
                           "stage4/ST4.%Y%m%d%H.01h.grib"))
        if os.path.isfile(fn):
            lts = now
            grbs = pygrib.open(fn)
            if total is None:
                total = grbs[1]["values"]
                lats, lons = grbs[1].latlons()
            else:
                total += grbs[1]["values"]
            grbs.close()
        now += interval

    if lts is None:
        if ts.hour > 1:
            print(("stage4_today_total.py found no data for date: %s"
                   ) % (ts, ))
        return
    lts = lts - datetime.timedelta(minutes=1)
    subtitle = "Total between 12:00 AM and %s" % (
        lts.strftime("%I:%M %p %Z"), )
    routes = "ac"
    if not realtime:
        routes = "a"
    for sector in ["iowa", "midwest", "conus"]:
        pqstr = ("plot %s %s00 %s_stage4_1d.png %s_stage4_1d.png png") % (
            routes, ts.strftime("%Y%m%d%H"), sector, sector)
        mp = MapPlot(
            sector=sector,
            title=("%s NCEP Stage IV Today's Precipitation"
                   ) % (ts.strftime("%-d %b %Y"), ),
            subtitle=subtitle,
        )
        clevs = [0.01, 0.1, 0.3, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 5, 6, 8, 10]
        mp.pcolormesh(
            lons, lats,
            distance(total, "MM").value("IN"),
            clevs, cmap=nwsprecip(), units="inch",
        )
        # map.drawstates(zorder=2)
        if sector == "iowa":
            mp.drawcounties()
        mp.postprocess(pqstr=pqstr)
        mp.close()
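# Hedged usage sketch for doday(); the function expects a timezone-aware
# local timestamp, since it totals from 1 AM local time and converts to UTC.
import datetime
import pytz

ts = datetime.datetime.now(pytz.timezone("America/Chicago"))
doday(ts, realtime=True)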
fhr = int(sys.argv[2])
fhour = str(fhr).zfill(2)
print('fhour ' + fhour)
itime = ymdh
vtime = ndate(itime, int(fhr))

GRIB_FILE = str(sys.argv[3])
CARTOPY_DIR = str(sys.argv[4])
domain = str(sys.argv[5])

# Specify plotting domains
domains = [domain]

# Open the input file, if it exists
if os.path.exists(GRIB_FILE):
    data1 = pygrib.open(GRIB_FILE)
else:
    sys.exit('Error - input file does not exist (' + GRIB_FILE + '). Exit!')

# Get the lats and lons
grids = [data1]
lats = []
lons = []
lats_shift = []
lons_shift = []

for data in grids:
    # Unshifted grid for contours and wind barbs
    lat, lon = data[1].latlons()
    lats.append(lat)
    lons.append(lon)
# Imports assumed by this function (not shown in the original fragment);
# `mrms` and `make_colorramp` come from elsewhere in the original script.
import gzip
import json
import os
import subprocess
import tempfile

import numpy as np
import pygrib
from PIL import Image


def do(now, realtime=False):
    """ Generate for this timestep! """
    szx = 7000
    szy = 3500
    # Create the image data
    imgdata = np.zeros((szy, szx), 'u1')
    metadata = {
        'start_valid': now.strftime("%Y-%m-%dT%H:%M:%SZ"),
        'end_valid': now.strftime("%Y-%m-%dT%H:%M:%SZ"),
        'product': 'lcref',
        'units': '0.5 dBZ',
    }

    gribfn = mrms.fetch('SeamlessHSR', now)
    if gribfn is None:
        print(("mrms_lcref_comp.py NODATA for SeamlessHSR: %s"
               ) % (now.strftime("%Y-%m-%dT%H:%MZ"), ))
        return

    fp = gzip.GzipFile(gribfn, 'rb')
    (_, tmpfn) = tempfile.mkstemp()
    tmpfp = open(tmpfn, 'wb')
    tmpfp.write(fp.read())
    tmpfp.close()
    grbs = pygrib.open(tmpfn)
    grb = grbs[1]
    os.unlink(tmpfn)
    os.unlink(gribfn)

    val = grb['values']

    # -999 is no coverage, go to 0
    # -99 is missing, go to 255
    val = np.where(val >= -32, (val + 32) * 2.0, val)
    # val = np.where(val < -990., 0., val)
    # val = np.where(val < -90., 255., val)
    # This is an upstream BUG
    val = np.where(val < 0., 0., val)

    imgdata[:, :] = np.flipud(val.astype('int'))

    (tmpfp, tmpfn) = tempfile.mkstemp()

    # Create Image
    png = Image.fromarray(np.flipud(imgdata))
    png.putpalette(make_colorramp())
    png.save('%s.png' % (tmpfn, ))

    mrms.write_worldfile('%s.wld' % (tmpfn, ))
    # Inject WLD file
    prefix = 'lcref'
    pqstr = ("/home/ldm/bin/pqinsert -i -p 'plot ac %s "
             "gis/images/4326/mrms/%s.wld GIS/mrms/%s_%s.wld wld' %s.wld"
             ) % (now.strftime("%Y%m%d%H%M"), prefix, prefix,
                  now.strftime("%Y%m%d%H%M"), tmpfn)
    subprocess.call(pqstr, shell=True)

    # Now we inject into LDM
    pqstr = ("/home/ldm/bin/pqinsert -i -p 'plot ac %s "
             "gis/images/4326/mrms/%s.png GIS/mrms/%s_%s.png png' %s.png"
             ) % (now.strftime("%Y%m%d%H%M"), prefix, prefix,
                  now.strftime("%Y%m%d%H%M"), tmpfn)
    subprocess.call(pqstr, shell=True)

    # Create 900913 image
    cmd = ("gdalwarp -s_srs EPSG:4326 -t_srs EPSG:3857 -q -of GTiff "
           "-tr 1000.0 1000.0 %s.png %s.tif") % (tmpfn, tmpfn)
    subprocess.call(cmd, shell=True)
    # Insert into LDM
    pqstr = ("/home/ldm/bin/pqinsert -i -p 'plot c %s "
             "gis/images/900913/mrms/%s.tif GIS/mrms/%s_%s.tif tif' %s.tif"
             ) % (now.strftime("%Y%m%d%H%M"), prefix, prefix,
                  now.strftime("%Y%m%d%H%M"), tmpfn)
    subprocess.call(pqstr, shell=True)

    j = open("%s.json" % (tmpfn, ), 'w')
    j.write(json.dumps(dict(meta=metadata)))
    j.close()
    # Insert into LDM
    pqstr = ("/home/ldm/bin/pqinsert -p 'plot c %s "
             "gis/images/4326/mrms/%s.json GIS/mrms/%s_%s.json json' %s.json"
             ) % (now.strftime("%Y%m%d%H%M"), prefix, prefix,
                  now.strftime("%Y%m%d%H%M"), tmpfn)
    subprocess.call(pqstr, shell=True)

    for suffix in ['tif', 'json', 'png', 'wld']:
        os.unlink('%s.%s' % (tmpfn, suffix))

    os.close(tmpfp)
    os.unlink(tmpfn)
fhr = int(sys.argv[2])
fhour = str(fhr).zfill(2)
print('fhour ' + fhour)
itime = ymdh
vtime = ndate(itime, int(fhr))

EXPT_BASEDIR = str(sys.argv[3])
CARTOPY_DIR = str(sys.argv[4])
domain = str(sys.argv[5])

# Specify plotting domains
domains = [domain]

for dom in domains:
    # Define the location of the input file
    data1 = pygrib.open(EXPT_BASEDIR + '/wrfprs_' + dom + '.' + fhour)

    # Get the lats and lons
    grids = [data1]
    lats = []
    lons = []
    lats_shift = []
    lons_shift = []

    for data in grids:
        # Unshifted grid for contours and wind barbs
        lat, lon = data[1].latlons()
        lats.append(lat)
        lons.append(lon)

    # Shift grid for pcolormesh
TABLA_VARIABLES = EXTRACT_VARIABLES_FROM_GRIB(NAM12_FILE_NAME, CUSTOM_WORD)

# The way the variables are filtered still needs fine-tuning
TABLA_WIND = TABLA_VARIABLES[TABLA_VARIABLES.NAME.isin(
    set([item for item in list(TABLA_VARIABLES['NAME'])
         if 'wind' in item.lower()]))]
TABLA_WIND = TABLA_WIND[TABLA_WIND['LEVEL_Pa'].isin(
    ['level 80 m', 'level 10 m'])]

'''
We select one variable -- the first, but it could be any -- to get the
longitude and latitude, so we can build the tables of the 50 nearest
locations.
'''
NAM12_GRIB = pygrib.open(NAM12_FILE_NAME)

'''
We select any variable. This way we first filter by name and type and then
by level; extracting directly with the level included fails with
"ValueError: no matches found".
'''
VARIABLE = NAM12_GRIB.select(name=TABLA_WIND['NAME'].iloc[1],
                             typeOfLevel=TABLA_WIND['TYPE_VAR'].iloc[1])
VARIABLE = [item for item in VARIABLE
            if str(item).split(':')[-3] == TABLA_WIND['LEVEL_Pa'].iloc[1]][0]
num = int(((end_fhr - start_fhr) / increment_fhr) + 1)
fhours = np.linspace(start_fhr, end_fhr, num, dtype='int')
print(fhours)

EXPT_DIR = str(sys.argv[5])
CARTOPY_DIR = str(sys.argv[6])

# Loop over forecast hours
for fhr in fhours:
    fhour = str(fhr).zfill(3)
    print('Working on forecast hour ' + fhour)
    itime = ymdh
    vtime = ndate(itime, int(fhr))

    # Define the location of the input file
    data1 = pygrib.open(EXPT_DIR + '/' + ymdh + '/postprd/rrfs.t' + cyc
                        + 'z.bgdawpf' + fhour + '.tm00.grib2')

    # Get the lats and lons
    grids = [data1]
    lats = []
    lons = []
    lats_shift = []
    lons_shift = []

    for data in grids:
        # Unshifted grid for contours and wind barbs
        lat, lon = data[1].latlons()
        lats.append(lat)
        lons.append(lon)

    # Shift grid for pcolormesh
import pygrib
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap

grbs = pygrib.open('../sampledata/flux.grb')
grb = grbs.message(2)
lats, lons = grb.latlons()
data = grb['values']

m = Basemap(lon_0=180)
# m.scatter(lons.flat, lats.flat, 1, marker='o', color='k', zorder=10)
x, y = m(lons, lats)
m.drawcoastlines()
m.contourf(x, y, data, 15)
# m.fillcontinents()
plt.title('Global Gaussian Grid')
plt.show()
    ndfd_bbox = []
    ndfd_bbox.append(region_bbox[0] - lon_offset)
    ndfd_bbox.append(region_bbox[1] - lat_offset)
    ndfd_bbox.append(region_bbox[2] + lon_offset)
    ndfd_bbox.append(region_bbox[3] + lat_offset)
    print 'NDFD bounding box', ndfd_bbox
# conus region needs the entire forecast area
else:
    ndfd_bbox = None

grib_filepath = factory.forecastGribFilepath(ndfd_config, fcast_date,
                                             options.timespan, grib_variable)
print '\nreading gribs from', grib_filepath
gribs = pygrib.open(grib_filepath)
grib = gribs.select(name=VARNAME_MAP[grib_variable])
print '\n', grib
print '\n grib %d :' % message_num, grib_variable
message = grib[message_num]
print '    "dataDate"', message.dataDate

fcast_hour = message['hour']
print '\n    "forecast hour"', fcast_hour
print '    "forecastTime"', message['forecastTime']
fcast_time = asDatetime(message.dataDate) + relativedelta(hours=fcast_hour)
print '    forecast datetime', fcast_time

ndfd_lats, ndfd_lons = message.latlons()
print '\nfrom grib file :'
print '    grib lat', ndfd_lats.shape, ndfd_lats.min(), ndfd_lats.max()
lat_diff = ndfd_lats[1:, :] - ndfd_lats[:-1, :]
print '    min lat diff', lat_diff.min()
def fetch_wwiii(var, kwargs):
    """ download wwiii data and return associated filepaths

        args:
            var: string
                the variable name of the desired parameter according to the
                WWIII docs; the complete list of variables can be found at
                the following URL under 'model output'
                https://polar.ncep.noaa.gov/waves/implementations.php
            south, north: float
                ymin, ymax coordinate boundaries (latitude). range: -90, 90
            west, east: float
                xmin, xmax coordinate boundaries (longitude). range: -180, 180
            start: datetime
                the start of the desired time range
            end: datetime
                the end of the desired time range

        return:
            True if new data was fetched, else False
    """
    assert 6 == sum(map(lambda kw: kw in kwargs.keys(),
                        ['south', 'north', 'west', 'east', 'start', 'end'
                         ])), 'malformed query'

    t = datetime(kwargs['start'].year, kwargs['start'].month, 1)
    assert kwargs['end'] - kwargs['start'] <= timedelta(days=1), \
        'use fetch_handler for this'

    if serialized(kwargs, f'fetch_wwiii_{wwiii_varmap[var]}'):
        return False

    # print("WWIII NOTICE: resolution selection not implemented yet. "
    #       "defaulting to 0.5°")
    regions = ['glo_30m']
    assert regions == ['glo_30m'], 'invalid region string'
    reg = regions[0]
    fname = f"multi_1.{reg}.{var}.{t.strftime('%Y%m')}.grb2"
    fetchfile = f"{storage_cfg()}{fname}"

    # if the file hasn't been downloaded yet, fetch it
    if not isfile(fetchfile):  # and kwargs['start'].day == 1:
        if 'lock' in kwargs.keys():
            kwargs['lock'].acquire()
        logging.info(f'WWIII {kwargs["start"].date().isoformat()} {var}: '
                     f'downloading {fname} from NOAA WaveWatch III...')
        if reg == 'glo_30m' and not 'wind' in var and t.year >= 2018:
            fetchurl = f"{wwiii_src}{t.strftime('%Y/%m')}/gribs/{fname}"
        else:
            fetchurl = f"{wwiii_src}{t.strftime('%Y/%m')}/{reg}/{fname}"
        with requests.get(fetchurl, stream=True) as payload:
            assert payload.status_code == 200, 'couldn\'t retrieve file'
            with open(fetchfile, 'wb') as f:
                shutil.copyfileobj(payload.raw, f)
        if 'lock' in kwargs.keys():
            kwargs['lock'].release()

    # function to insert the parsed data into the local database
    def insert(table, agg, null, kwargs):
        if 'lock' in kwargs.keys():
            kwargs['lock'].acquire()
        n1 = db.execute(f"SELECT COUNT(*) FROM {table}").fetchall()[0][0]
        db.executemany(
            f"INSERT OR IGNORE INTO {table} VALUES (?,?,?,CAST(? AS INT),?)",
            agg.T)
        n2 = db.execute(f"SELECT COUNT(*) FROM {table}").fetchall()[0][0]
        db.execute("COMMIT")
        conn.commit()
        insert_hash(kwargs, f'fetch_wwiii_{wwiii_varmap[var]}')
        if 'lock' in kwargs.keys():
            kwargs['lock'].release()
        logging.info(f"WWIII {kwargs['start'].date().isoformat()} {table}: "
                     f"processed and inserted {n2-n1} rows for region "
                     f"{fmt_coords(kwargs)}. {null} null values removed, "
                     f"{len(agg[0]) - (n2-n1)} duplicates ignored")

    # open the file, parse the data, insert values
    grib = pygrib.open(fetchfile)
    assert grib.messages > 0, f'problem opening {fetchfile}'
    null = 0
    agg = np.array([[], [], [], [], []])
    grbvar = grib[1]['name']
    table = f'{var}{grbvar[0]}' if var == 'wind' else var
    # note: the original paired each message with zip(grib, range(1,
    # grib.messages)), which silently skipped the final message; plain
    # iteration visits every message (the counter was never used)
    for msg in grib:
        if msg['name'] != grbvar:
            insert(table, agg, null, kwargs)
            table = f'{var}{msg["name"][0]}' if var == 'wind' else var
            agg = np.array([[], [], [], [], []])
            grbvar = msg['name']
            null = 0
        if msg.validDate < kwargs['start']:
            continue
        if msg.validDate > kwargs['end']:
            continue
        z, y, x = msg.data()
        src = np.array(['wwiii' for each in z[~z.mask].data])
        grid = np.vstack(
            (z[~z.mask].data, y[~z.mask], ((x[~z.mask] + 180) % 360) - 180,
             dt_2_epoch([msg.validDate for each in z[~z.mask].data]),
             src)).astype(object)
        agg = np.hstack((agg, grid))
        null += sum(sum(z.mask))

    insert(table, agg, null, kwargs)

    return True
def EXTRACT_INFO_NAM12_TO_CSV(NAM12_FILE_NAME, SELECTED_VARIABLES,
                              NEAREST_POINTS, NAME, SEGUNDA_LOCALIZACION,
                              NEAREST_POINTS2, NAME2):
    '''
    Function to extract the information for the requested variables. These
    variables can be obtained with the EXTRACT_VARIABLES_FROM_GRIB function.
    CUT DATA is a boolean: if True, the data is cropped to the
    NEAREST_POINTS locations.
    '''
    NAM12_GRIB = pygrib.open(NAM12_FILE_NAME)
    NOMBRE_ARCHIVO = (NAM12_FILE_NAME.replace(NAM12_FILE_NAME.split('/')[-1], '')
                      + NAM12_FILE_NAME.split('/')[-1] + '_' + NAME
                      + '_CONVERTED.csv')
    NOMBRE_ARCHIVO2 = (NAM12_FILE_NAME.replace(NAM12_FILE_NAME.split('/')[-1], '')
                       + NAM12_FILE_NAME.split('/')[-1] + '_' + NAME2
                       + '_CONVERTED.csv')
    TABLA_TOTAL = pd.DataFrame()
    TABLA_TOTAL2 = pd.DataFrame()
    for i in range(SELECTED_VARIABLES.shape[0]):
        VAR_LEVEL = SELECTED_VARIABLES['LEVEL_Pa'].iloc[i]
        VAR_NAME = SELECTED_VARIABLES['NAME'].iloc[i]
        EXE = 1
        if os.path.isfile(NOMBRE_ARCHIVO):
            print(NOMBRE_ARCHIVO + ' ALREADY EXISTS')
            EXE = 0
            if os.stat(NOMBRE_ARCHIVO).st_size == 0:
                print(NOMBRE_ARCHIVO + ' BUT IT IS EMPTY')
                EXE = 1
        if EXE == 1:
            try:
                VARIABLE = NAM12_GRIB.select(name=VAR_NAME)
                VARIABLE_ITEM = [item for item in VARIABLE
                                 if str(item).split(':')[-3] == VAR_LEVEL]
                '''
                We pull out arrays of values, longitude, latitude and date,
                although the file holds many more variables; all of them can
                be listed by running X.keys().
                '''
                VARIABLE_VALUES = VARIABLE_ITEM[0].values
                LATS, LONS = VARIABLE_ITEM[0].latlons()
                DATE = VARIABLE_ITEM[0].validDate
                # print('CREATING TABLE')
                dfObj = pd.DataFrame()
                dfObj['VALUES'] = VARIABLE_VALUES.ravel()
                dfObj['LON'] = LONS.ravel()
                dfObj['LAT'] = LATS.ravel()
                dfObj['DATE'] = DATE
                TABLA_CUT = dfObj[(dfObj['LON'].isin(NEAREST_POINTS['LON']))
                                  & (dfObj['LAT'].isin(NEAREST_POINTS['LAT']))]
                TABLA_CUT['VAR_NAME'] = str(VARIABLE_ITEM[0]).split(':')[1]
                TABLA_CUT['LEVEL'] = str(VARIABLE_ITEM[0]).split(':')[-3]
                TABLA_CUT['FCST_TIME'] = str(VARIABLE_ITEM[0]).split(':')[-2]
                TABLA_TOTAL = TABLA_TOTAL.append(TABLA_CUT)
                if SEGUNDA_LOCALIZACION:
                    TABLA_CUT2 = dfObj[(dfObj['LON'].isin(NEAREST_POINTS2['LON']))
                                       & (dfObj['LAT'].isin(NEAREST_POINTS2['LAT']))]
                    TABLA_CUT2['VAR_NAME'] = str(VARIABLE_ITEM[0]).split(':')[1]
                    TABLA_CUT2['LEVEL'] = str(VARIABLE_ITEM[0]).split(':')[-3]
                    TABLA_CUT2['FCST_TIME'] = str(VARIABLE_ITEM[0]).split(':')[-2]
                    # bug fix: the original appended to TABLA_TOTAL here
                    TABLA_TOTAL2 = TABLA_TOTAL2.append(TABLA_CUT2)
            except:
                print(NAM12_FILE_NAME + ' NOT FOUND ' + VAR_NAME)
    if not TABLA_TOTAL.empty:
        print('SAVING ' + NOMBRE_ARCHIVO)
        TABLA_TOTAL.to_csv(NOMBRE_ARCHIVO)
    if not TABLA_TOTAL2.empty:
        print('SAVING ' + NOMBRE_ARCHIVO2)
        TABLA_TOTAL2.to_csv(NOMBRE_ARCHIVO2)
    return NAM12_FILE_NAME
        return istat, grb
    except (IOError, ValueError, RuntimeError):
        print 'Error reading ', gribfilename
        istat = -1
        return istat, grb

# =====================================================================================

center = sys.argv[1]  # ECMWF, NCEP, or CMC currently (all ensembles)
cleade = sys.argv[2]  # use 24 for 24 h end time, not 024

# ---- read in sample forecast in order to define the forecast lat/lon array

infilename = '/Users/thamill/precip/ecmwf_data/ECMWF/ECMWF_2015120100_fhour12_pertno1.grb'
grib = pygrib.open(infilename)
grb2 = grib.select()[0]
latsf, lonsf = grb2.latlons()
lonsf = lonsf - 360.
lonsf_1d = lonsf[0, :]
latsf_1d = latsf[:, 0]
latsf_1d = np.flip(latsf_1d, 0)
nyf, nxf = np.shape(latsf)
grib.close()

# ---- define a list of dates to read in, set number of members to read in

date_begin = '2015120100'
date_end = '2016010100'
date_list = daterange(date_begin, date_end, 24)
ndates = len(date_list)
#!/usr/bin/env python3
import copy
import os

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pvlib
import pygrib

# %%
pv = np.array([*range(0, 136)])
pv_in = np.array([0, 3, 7, 14, 15, 20, 25, 26, 28, 111, 112, 113, 114])
# pv_use = pv in pv_in

dirname = "/Volumes/Samsung_T5/ldaps/201901"


def image_save(grbs, filename):
    image_dir = os.path.join(
        "/Users/ebrain/Desktop/grib_api-1.28.0-Source/image", filename)
    if not os.path.isdir(image_dir):
        os.makedirs(image_dir)
    cnt = 1
    for grb in grbs:
        val = grb.data()[0]
outfile = open('entries.csv', 'w')  # This is what the recorder script will read
csvw = csv.writer(outfile, delimiter=',', quotechar='|',
                  quoting=csv.QUOTE_NONE)

d1 = dt.date(2007, 4, 13)  # start date of 13km RUC
d2 = dt.date(2012, 4, 30)  # end date of 13km RUC

events = np.zeros(3)  # create a numpy array with a list of events
for row in csvr:
    vec = np.array([float(x) for x in row])
    events = np.vstack((events, vec))
events = events[1:]  # chop off that row of zeros

grbs = pg.open('../ruc2.t00z.pgrb13anl.grib2')  # get a reference for coordinates
lats, lons = grbs[1].latlons()
grbs.close()

x = d1
while x <= d2:
    xstr = str(x).replace('-', '')
    date = float(xstr)
    if date in events:
        dateInd = np.where(events == date)
        for k in range(len(dateInd[0])):
            lat = events[dateInd[0][k]][0]
            lon = events[dateInd[0][k]][1]
            X = np.where(abs(lats - lat) <=
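# The nearest-gridpoint search above is cut off; a common way to finish it
# (a hedged sketch, not necessarily the original author's approach) is a
# squared-distance argmin over the 2-D lat/lon arrays:
import numpy as np

def nearest_index(lats, lons, lat, lon):
    """Return the (row, col) of the grid cell nearest to (lat, lon)."""
    dist2 = (lats - lat) ** 2 + (lons - lon) ** 2
    return np.unravel_index(np.argmin(dist2), lats.shape)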
clOBS = sys.argv[7].split(",")
clevsOBS = [float(i) for i in clOBS]

OB_lines = sys.argv[8]
OB_lines = (True if OB_lines == '.true.' else False)

num = int(maxhr) // 3  # integer division so range() gets an int
gribfile = ["" for x in range(num)]
grbs = ["" for x in range(num)]
obsfile = ["" for x in range(num)]
obs = ["" for x in range(num)]
cyctime = ["" for x in range(num)]
grbtime = ["" for x in range(num)]
date = ["" for x in range(num)]
fhr = ["" for x in range(num)]

for i in range(num):
    gribfile[i] = sys.argv[i + 9]
    grbs[i] = pygrib.open(gribfile[i])
    obsfile[i] = sys.argv[i + num + 9]
    obs[i] = pygrib.open(obsfile[i])
    bucket_length = 3

    # Get the lats and lons
    lats, lons = grbs[i][1].latlons()
    latsOBS, lonsOBS = obs[i][1].latlons()

    # Get the date/time and forecast hour
    fhr[i] = grbs[i][1]['stepRange']  # Forecast hour (a string)
    # Pad fhr with a leading 0; the original compared the string against an
    # int ("if fhr[i] < 10"), which never pads -- zfill does what was meant
    fhr[i] = fhr[i].zfill(2)
    cyctime[i] = grbs[i][1].dataTime  # Cycle (e.g. 1200 UTC)
def weatherfeatureFromEC(starttime, lon, lat, powerstationname, txtpath):
    ecfilelist = getECfilelistFromsystemtime2(starttime, ecrootpath)
    # check that every input file exists
    flag = True
    for i in range(len(ecfilelist)):
        if not os.path.exists(ecfilelist[i]):
            print ecfilelist[i] + ' does not exist'
            return -1
    yearstr = starttime.year
    monthstr = starttime.month
    daystr = starttime.day
    hourstr = starttime.hour
    initialtimestring = ''
    if hourstr < 17:
        pdatetime = datetime.datetime(yearstr, monthstr, daystr, 12, 0, 0) \
            + datetime.timedelta(days=-1)
        year_t = pdatetime.year
        pdate_t = datetime.datetime.strftime(pdatetime, '%Y-%m-%d')
        initial_txt = datetime.datetime.strftime(pdatetime,
                                                 '%Y-%m-%d %H:%M:%S')
        odatetime = pdatetime + datetime.timedelta(hours=8)
        initialtimestring = datetime.datetime.strftime(odatetime, '%Y%m%d%H')
    else:
        pdatetime = datetime.datetime(yearstr, monthstr, daystr)
        year_t = pdatetime.year
        pdate_t = datetime.datetime.strftime(pdatetime, '%Y-%m-%d')
        initial_txt = datetime.datetime.strftime(pdatetime,
                                                 '%Y-%m-%d %H:%M:%S')
        odatetime = pdatetime + datetime.timedelta(hours=8)
        initialtimestring = datetime.datetime.strftime(odatetime, '%Y%m%d%H')

    # lists holding the 72-hour forecast at 3-hour steps, 25 times in total
    plist_totalradiation = []
    plist_straightradiation = []
    plist_surfacedradiation = []
    # test_radiation = []
    plist_ws = []
    plist_wd = []
    plist_t = []
    plist_rh = []
    plist_p = []
    for i in range(len(ecfilelist)):
        ecfile = ecfilelist[i]
        grbs = pygrib.open(ecfile)
        # total radiation
        grb001 = grbs.select(name='Surface solar radiation downwards')
        totalradiationarray = grb001[0].values
        # diffuse radiation is derived: total minus horizontal direct;
        # direct normal radiation is not directly related to the total
        grb003 = grbs.select(name='Direct solar radiation')
        # for now, treat this variable as horizontal direct radiation
        grb002 = grbs.select(
            name='Total sky direct solar radiation at surface')
        # mean wind components
        grb004 = grbs.select(name='10 metre U wind component')
        grb005 = grbs.select(name='10 metre V wind component')
        # air temperature
        grb006 = grbs.select(name='2 metre temperature')
        # dew point temperature
        grb007 = grbs.select(name='2 metre dewpoint temperature')
        # surface pressure
        grb008 = grbs.select(name='Surface pressure')

        # for each of the 25 forecast times, take the data matrix and then
        # the value at the grid point nearest to the given lon/lat
        # total radiation
        perarray001 = numpy.array(grb001[0].values)
        pvalue001 = linearForECvalue(perarray001, lon, lat)
        plist_totalradiation.append(pvalue001)
        # direct normal radiation
        perarray003 = numpy.array(grb003[0].values)
        pvalue003 = linearForECvalue(perarray003, lon, lat)
        plist_straightradiation.append(pvalue003)
        # diffuse = total minus horizontal direct;
        # this is the horizontal direct radiation
        perarray002 = grb002[0].values
        pvalue002 = linearForECvalue(perarray002, lon, lat)
        plist_surfacedradiation.append(pvalue002)
        # mean wind speed from the u and v components
        perarray004 = numpy.array(grb004[0].values)
        perarray005 = numpy.array(grb005[0].values)
        pvalue004 = linearForECvalue(perarray004, lon, lat)
        pvalue005 = linearForECvalue(perarray005, lon, lat)
        ws = math.sqrt(pvalue004 * pvalue004 + pvalue005 * pvalue005)
        plist_ws.append(ws)
        wd = calculatwinddirect(pvalue004, pvalue005)
        plist_wd.append(wd)
        # air temperature (K -> degC)
        perarray006 = numpy.array(grb006[0].values)
        t = linearForECvalue(perarray006, lon, lat) - 273.15
        plist_t.append(t)
        # relative humidity, computed from air temperature and dew point
        perarray007 = numpy.array(grb007[0].values)
        dt = linearForECvalue(perarray007, lon, lat) - 273.15
        rh = 100 * math.exp((17.625 * dt) / (243.04 + dt)) / math.exp(
            (17.625 * t) / (243.04 + t))
        plist_rh.append(rh)
        # pressure (Pa -> hPa)
        perarray008 = numpy.array(grb008[0].values)
        p = linearForECvalue(perarray008, lon, lat) / 100
        plist_p.append(p)

    # total radiation: accumulated -> instantaneous, then interpolate
    pplist_total = totaltosiple_3h(plist_totalradiation)
    print pplist_total
    perlist_total = calculateRadiationbyInterplote(pplist_total)
    print perlist_total
    # direct normal radiation
    pplist_straight = totaltosiple_3h(plist_straightradiation)
    perlist_straight = calculateRadiationbyInterplote(pplist_straight)
    # diffuse = total minus horizontal direct; interpolate the latter here
    pplist_surface = totaltosiple_3h(plist_surfacedradiation)
    perlist_fdir = calculateRadiationbyInterplote(pplist_surface)
    # wind speed, wind direction, temperature, humidity, pressure
    perlist_ws = calculateRadiationbyInterplote(plist_ws)
    perlist_wd = calculateRadiationbyInterplote(plist_wd)
    perlist_t = calculateRadiationbyInterplote(plist_t)
    perlist_rh = calculateRadiationbyInterplote(plist_rh)
    perlist_p = calculateRadiationbyInterplote(plist_p)

    # smoothing filter, applied to every field
    perlist_total = scipy.signal.savgol_filter(perlist_total, 5, 2)
    perlist_straight = scipy.signal.savgol_filter(perlist_straight, 5, 2)
    perlist_fdir = scipy.signal.savgol_filter(perlist_fdir, 5, 2)
    perlist_ws = scipy.signal.savgol_filter(perlist_ws, 5, 2)
    # bug fix: the original filtered perlist_ws again on this line
    perlist_wd = scipy.signal.savgol_filter(perlist_wd, 5, 2)
    perlist_t = scipy.signal.savgol_filter(perlist_t, 5, 2)
    perlist_rh = scipy.signal.savgol_filter(perlist_rh, 5, 2)
    perlist_p = scipy.signal.savgol_filter(perlist_p, 5, 2)

    # write the output file; first build the file name
    txtpath001 = txtpath + '/' + str(year_t) + '/' + pdate_t
    txtfile = os.path.join(txtpath001,
                           powerstationname + initialtimestring + '.txt')
    wfile = open(txtfile, 'w')
    L = []
    linelist = []
    # list for the diffuse radiation values
    perlist_scattered = []
    for i in range(len(perlist_total)):
        endtime = odatetime + datetime.timedelta(minutes=i * 15)
        endtimestring = datetime.datetime.strftime(endtime, '%Y%m%d%H%M')
        endtimestring001 = datetime.datetime.strftime(endtime,
                                                      '%Y-%m-%d %H:%M:%S')
        scatteredvalue = perlist_total[i] - perlist_fdir[i]
        if perlist_total[i] < 0:
            perlist_total[i] = 0
        if perlist_straight[i] < 0:
            perlist_straight[i] = 0
        if scatteredvalue < 0:
            scatteredvalue = 0
        perlist_scattered.append(scatteredvalue)
        print len(perlist_total), len(perlist_straight), len(perlist_ws), \
            len(perlist_wd), len(perlist_t), len(perlist_p), len(perlist_rh)
        wfile.write(endtimestring + ' '
                    + str("%.2f" % perlist_total[i]) + ' '
                    + str("%.2f" % perlist_straight[i]) + ' '
                    + str("%.2f" % scatteredvalue) + ' '
                    + str("%.2f" % perlist_ws[i]) + ' '
                    + str("%.2f" % perlist_wd[i]) + ' '
                    + str("%.2f" % perlist_t[i]) + ' '
                    + str("%.2f" % perlist_rh[i]) + ' '
                    + str("%.2f" % perlist_p[i]))
        wfile.write('\n')
        L.append((powerstationname, initial_txt, endtimestring001,
                  str(perlist_total[i]), str(scatteredvalue),
                  str(perlist_straight[i]), str(perlist_ws[i]),
                  str(perlist_wd[i]), str(perlist_t[i]),
                  str(perlist_rh[i]), str(perlist_p[i])))
    wfile.close()

    # insert the data into the database
    # db = MySQLdb.connect('192.168.1.20', 'meteou1', '1iyHUuq3', 'moge', 3345)
    db = MySQLdb.connect('172.16.8.28', 'admin', 'moji_China_123', 'moge',
                         3307)
    cursor = db.cursor()
    sql = ('insert ignore into t_r_powerplant_radiation(city_id,'
           'initial_time,forecast_time,total_radiation,straight_radiation,'
           'scattered_radiation,wind_speed,wind_direction,temperature,'
           'humidity,air_pressure) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)')
    cursor.executemany(sql, L)
    db.commit()
    db.close()

    # check the output file was actually written
    if not os.path.exists(txtfile):
        return -1
    # check the database insert completed
    db = MySQLdb.connect('172.16.8.28', 'admin', 'moji_China_123', 'moge',
                         3307)
    cursor = db.cursor()
    sql = ('select count(*) from t_r_powerplant_radiation where '
           'initial_time="' + initial_txt + '"')
    cursor.execute(sql)
    data = cursor.fetchall()
    dataint = int(data[0][0])  # fetchall() returns tuples; take the count
    db.close()
    if dataint != 289:
        return -1
    return 0
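# The relative-humidity step above uses the Magnus approximation with the
# Alduchov-Eskridge coefficients (17.625 and 243.04 degC). A minimal
# standalone sketch of that calculation (the function name is illustrative):
import math

def relative_humidity(t_c, dewpoint_c):
    """RH (%) from temperature and dew point in degC:
    RH = 100 * e(Td) / e(T), where e(x) = exp(17.625 * x / (243.04 + x))."""
    e = lambda x: math.exp(17.625 * x / (243.04 + x))
    return 100.0 * e(dewpoint_c) / e(t_c)

print relative_humidity(25.0, 15.0)  # roughly 54 %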
dir_inp_root = '/media/Elements/data_gdas'  # NCEP re-analysis data
dir_out = '.'                               # output directory

h, t = {}, {}
for year in range(year_ini, year_end + 1):
    dir_inp = '%s/%04d' % (dir_inp_root, year)
    for level in LEVELS:
        name = 'level_%04d' % level
        t[name], h[name] = {}, {}

    for month in range(1, 12 + 1):
        fname_inp = '%s/A*-%04d%02d.pgb.f00' % (dir_inp, year, month)
        fname_inp = glob(fname_inp)[0]  # there's only one, take the first
        print "\n ---> NEXT: " + fname_inp
        g = pygrib.open(fname_inp)

        dname = '%04d-%02d' % (year, month)
        for level in LEVELS:
            name = 'level_%04d' % level
            t[name][dname], h[name][dname] = [], []

        # print " --> date: ", year, month
        gg = g.readline()   # read 1st message
        while gg != None:   # iterate over levels and days
            pname = gg['parameterName']
            sname = gg['shortName']
            yyyy, mm, dd = gg['year'], gg['month'], gg['day']
            HH, MM = gg['hour'], gg['minute']
            level = gg['level']
            time = datetime(yyyy, mm, dd, HH, MM)
            rtime = (time - time_ini
def __init__(self, filename, filename_info, filetype_info):
    super(GRIBFileHandler, self).__init__(filename, filename_info,
                                          filetype_info)
    self._msg_datasets = {}
    self._start_time = None
    self._end_time = None
    try:
        with pygrib.open(filename) as grib_file:
            first_msg = grib_file.message(1)
            last_msg = grib_file.message(grib_file.messages)
            start_time = self._convert_datetime(
                first_msg, 'validityDate', 'validityTime')
            end_time = self._convert_datetime(
                last_msg, 'validityDate', 'validityTime')
            self._start_time = start_time
            self._end_time = end_time
            if 'keys' not in filetype_info:
                self._analyze_messages(grib_file)
                self._idx = None
            else:
                self._create_dataset_ids(filetype_info['keys'])
                self._idx = pygrib.index(filename,
                                         *filetype_info['keys'].keys())
    except (RuntimeError, KeyError):
        raise IOError("Unknown GRIB file format: {}".format(filename))
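# Hedged usage sketch: this looks like Satpy's generic GRIB file handler,
# which is normally exercised through a Scene rather than instantiated
# directly. The reader name and the file name below are assumptions.
from satpy import Scene

scn = Scene(filenames=['gfs.t00z.pgrb2.0p25.f000'], reader='grib')
print(scn.available_dataset_names())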