Example #1
File: wind_fields.py Project: zzwei1/PyDDA
import gc

import numpy as np
from scipy.interpolate import NearestNDInterpolator

try:
    import cfgrib
    CFGRIB_AVAILABLE = True
except ImportError:
    CFGRIB_AVAILABLE = False


def make_initialization_from_hrrr(Grid, file_path):
    """
    This function will read an HRRR GRIB2 file and return initial guess
    u, v, and w fields from the model

    Parameters
    ----------
    Grid: Py-ART Grid
        The Py-ART Grid to use as the grid specification. The HRRR values
        will be interpolated to the Grid's specification.
    file_path: string
        The path to the GRIB2 file to load.

    Returns
    -------
    u, v, w: ndarray
        The u, v, and w initial-guess fields from the HRRR model. Each
        array has the same shape as the fields in Grid and corresponds
        to the same x, y, and z locations as in Grid.
    """

    if not CFGRIB_AVAILABLE:
        raise RuntimeError("The cfgrib optional dependency needs to be "
                           "installed for the HRRR integration feature.")

    the_grib = cfgrib.open_file(
        file_path, filter_by_keys={'typeOfLevel': 'isobaricInhPa'})

    # Load the HRRR data and transform longitude coordinates
    grb_u = the_grib.variables['u']
    grb_v = the_grib.variables['v']
    grb_w = the_grib.variables['w']
    gh = the_grib.variables['gh']

    lat = the_grib.variables['latitude'].data[:, :]
    lon = the_grib.variables['longitude'].data[:, :]
    lon[lon > 180] = lon[lon > 180] - 360

    # Convert geopotential height to geometric height
    EARTH_MEAN_RADIUS = 6.3781e6
    gh = gh.data[:, :, :]
    height = (EARTH_MEAN_RADIUS*gh)/(EARTH_MEAN_RADIUS-gh)
    height = height - Grid.radar_altitude['data']

    radar_grid_lat = Grid.point_latitude['data']
    radar_grid_lon = Grid.point_longitude['data']
    radar_grid_alt = Grid.point_z['data']
    lat_min = radar_grid_lat.min()
    lat_max = radar_grid_lat.max()
    lon_min = radar_grid_lon.min()
    lon_max = radar_grid_lon.max()
    lon_r = np.tile(lon, (height.shape[0], 1, 1))
    lat_r = np.tile(lat, (height.shape[0], 1, 1))
    lon_flattened = lon_r.flatten()
    lat_flattened = lat_r.flatten()
    height_flattened = height.flatten()  # height above the radar, matching radar_grid_alt
    the_box = np.where(np.logical_and.reduce(
                       (lon_flattened >= lon_min,
                        lat_flattened >= lat_min,
                        lon_flattened <= lon_max,
                        lat_flattened <= lat_max)))[0]

    lon_flattened = lon_flattened[the_box]
    lat_flattened = lat_flattened[the_box]
    height_flattened = height_flattened[the_box]

    u_flattened = grb_u.data[:, :, :].flatten()
    u_flattened = u_flattened[the_box]
    u_interp = NearestNDInterpolator(
        (height_flattened, lat_flattened, lon_flattened),
        u_flattened, rescale=True)
    u_new = u_interp(radar_grid_alt, radar_grid_lat, radar_grid_lon)

    v_flattened = grb_v.data[:, :, :].flatten()
    v_flattened = v_flattened[the_box]
    v_interp = NearestNDInterpolator(
        (height_flattened, lat_flattened, lon_flattened),
        v_flattened, rescale=True)
    v_new = v_interp(radar_grid_alt, radar_grid_lat, radar_grid_lon)

    w_flattened = grb_w.data[:, :, :].flatten()
    w_flattened = w_flattened[the_box]
    w_interp = NearestNDInterpolator(
        (height_flattened, lat_flattened, lon_flattened),
        w_flattened, rescale=True)
    w_new = w_interp(radar_grid_alt, radar_grid_lat, radar_grid_lon)

    del grb_u, grb_v, grb_w, lat, lon
    del the_grib
    gc.collect()

    return u_new, v_new, w_new
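
A minimal usage sketch for the function above; the radar file, grid dimensions, and GRIB2 path are hypothetical placeholders, while grid_from_radars is Py-ART's documented gridding entry point:

import pyart

radar = pyart.io.read("radar_volume.nc")  # hypothetical radar file
grid = pyart.map.grid_from_radars(
    (radar,), grid_shape=(31, 301, 301),  # (z, y, x) point counts, assumed
    grid_limits=((0., 15000.), (-150000., 150000.), (-150000., 150000.)))

# Interpolate HRRR u, v, w onto the radar grid as initial guesses.
u_init, v_init, w_init = make_initialization_from_hrrr(
    grid, "hrrr.t00z.wrfprsf00.grib2")  # hypothetical HRRR GRIB2 file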
Example #2
File: cfgrib_.py Project: benbovy/xarray
    def __init__(self, filename, lock=None, **backend_kwargs):
        import cfgrib
        if lock is None:
            lock = ECCODES_LOCK
        self.lock = ensure_lock(lock)
        self.ds = cfgrib.open_file(filename, **backend_kwargs)
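
For reference, the user-facing path into this data store in xarray is open_dataset with the cfgrib engine; the file name below is a placeholder, and backend_kwargs are forwarded to cfgrib.open_file just as in the snippet above:

import xarray as xr

ds = xr.open_dataset(
    "era5_levels.grib",  # hypothetical GRIB file
    engine="cfgrib",
    backend_kwargs={"filter_by_keys": {"typeOfLevel": "isobaricInhPa"}})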
Example #3
import logging

import cfgrib
import fsspec
import numpy as np
import zarr

logger = logging.getLogger(__name__)  # module logger; exact name assumed

# _split_file and _store_array are private helpers from the same module
# (not shown in this listing).


def scan_grib(url, common_vars, storage_options, inline_threshold=100, skip=0, filter={}):
    """
    Generate references for a GRIB2 file

    Parameters
    ----------

    url: str
        File location
    common_vars: list[str]
        Names of variables that are common to multiple measurables (i.e., coordinates)
    storage_options: dict
        For accessing the data, passed to filesystem
    inline_threshold: int
        If given, store array data smaller than this value directly in the output
    skip: int
        If non-zero, stop processing the file after this many messages
    filter: dict
        cfgrib-style filter dictionary

    Returns
    -------

    dict: references dict in Version 1 format.
    """
    if filter:
        assert "typeOfLevel" in filter
    logger.debug(f"Open {url}")

    store = {}
    z = zarr.open_group(store, mode='w')
    common = False
    with fsspec.open(url, "rb", **storage_options) as f:
        for fn, offset, size in _split_file(f, skip=skip):
            logger.debug(f"File {fn}")
            ds = cfgrib.open_file(fn)
            if filter:
                var = filter["typeOfLevel"]
                if var not in ds.variables:
                    continue
                if "level" in filter and ds.variables[var].data not in np.array(filter["level"]):
                    continue
                attr = ds.variables[var].attributes or {}
                attr['_ARRAY_DIMENSIONS'] = []
                if var not in z:
                    _store_array(store, z, np.array(ds.variables[var].data), var, 100000, 0, 0,
                                 attr)
            if common is False:
                # done for first valid message
                logger.debug("Common variables")
                z.attrs.update(ds.attributes)
                for var in common_vars:
                    # assume grid, etc is the same across all messages
                    attr = ds.variables[var].attributes or {}
                    attr['_ARRAY_DIMENSIONS'] = ds.variables[var].dimensions
                    _store_array(store, z, ds.variables[var].data, var,
                                 inline_threshold, offset, size, attr)
                common = True

            for var in ds.variables:
                if (
                    var not in common_vars
                    and getattr(ds.variables[var].data, "shape", None)
                    and var != filter.get("typeOfLevel", "")
                ):
                    attr = ds.variables[var].attributes or {}
                    if "(deprecated)" in attr.get("GRIB_name", ""):
                        continue
                    attr['_ARRAY_DIMENSIONS'] = ds.variables[var].dimensions
                    _store_array(store, z, ds.variables[var].data, var,
                                 inline_threshold, offset, size, attr)
    logger.debug("Done")
    return {"version": 1,
            "refs": {k: v.decode() if isinstance(v, bytes) else v for k, v in store.items()},
            "templates": {"u": url}}
Example #4
File: cfgrib_.py Project: mathause/xarray
    def __init__(self, filename, lock=None, **backend_kwargs):
        # cfgrib is presumably imported at module level in this variant
        # (compare Example #2, which imports it inside __init__).
        if lock is None:
            lock = ECCODES_LOCK
        self.lock = ensure_lock(lock)
        self.ds = cfgrib.open_file(filename, **backend_kwargs)
Example #5
import time
from collections import defaultdict

import cfgrib
import numpy as np
import pandas as pd

# find_nearest(value, sequence) is a user-defined helper (not shown) that
# returns the index of the nearest element and its distance from `value`.


def QueryGrib(gribFile, LatLonTimeFile,
              filterKeys={'dataType': 'an', 'numberOfPoints': 65160},
              ignoreList=['number', 'time', 'step', 'valid_time', 'longitude',
                          'latitude', 'heightAboveGround'],
              DistanceLim=1, TimeDelayLim=3, QueryColumns=['time', 'lat', 'lon'],
              forecastInd=0):
    try:
        # Load GRIB:
        #ds = xarray.open_dataset(gribFile, engine='cfgrib')
        ds = cfgrib.open_file(gribFile, filter_by_keys=filterKeys)
        # Load query file (comma separated: time, lat, lon):
        Queries = pd.read_csv(LatLonTimeFile, names=QueryColumns)
    except Exception:
        print('Error loading GRIB or query files.')
        return -1
    else:
        tic = time.time()
        # Extract frame data:
        Glat = list(ds.variables['latitude'].data)  # list(ds.latitude.data)
        Glon = list(ds.variables['longitude'].data - 180)  # list(ds.longitude.data)
        # list((ds.time.data).astype(float))/1e9  # converts implicitly from np.datetime64 to timestamp!
        Gtime = list(ds.variables['time'].data)
        #Extract list of variables:
        gribVariables = ds.variables.keys()
        numActualVars = len(
            [var for var in gribVariables if var not in ignoreList])
        iVars = -1
        FRM = defaultdict(list)
        FRM['Time'] = Queries.time
        FRM['Latitude'] = Queries.lat
        FRM['Longitude'] = Queries.lon
        #
        dimsLogged = False
        for key in gribVariables:  # load the GRIB variables into memory one by one
            if key not in ignoreList:  # leave open the possibility for an ignoreList to skip specific keys
                iVars += 1
                try:
                    keyData = ds.variables[key].data  # try loading the data:
                except Exception:
                    print('failed to load variable {}'.format(key))
                else:
                    for i in range(len(Queries)):  # cycle through the queries
                        # Find the closest neighbor in each of the three
                        # dimensions (latitude, longitude, and time).
                        print('{:02.1f}% complete, {:.1f} seconds elapsed'.format(
                            (iVars/numActualVars + i/len(Queries)/numActualVars)*100,
                            time.time() - tic), end="\r", flush=True)
                        iLat, dLat = find_nearest(Queries.lat[i], Glat)
                        iLon, dLon = find_nearest(Queries.lon[i], Glon)
                        iTime, dTime = find_nearest(float(Queries.time[i]), Gtime)
                        #
                        NearingDist = np.sqrt(dLat * dLat + dLon * dLon)
                        #
                        if not dimsLogged:
                            FRM['DistFrmGrdPnt'].append(NearingDist)
                            FRM['TimeOffset'].append(dTime)
                        if NearingDist <= DistanceLim and dTime <= TimeDelayLim * 3600:
                            try:
                                if len(ds.dimensions.keys()) == 4:  # a forecast array
                                    bufr = keyData[forecastInd, iTime, iLat, iLon]
                                    if isinstance(bufr, np.ndarray):
                                        # remove any missing values
                                        bufr[bufr == keyData.missing_value] = np.nan
                                    elif isinstance(bufr, np.float32):
                                        if bufr == keyData.missing_value:
                                            bufr = np.nan
                                    val = bufr.mean()
                                else:
                                    val = keyData[iTime, iLat, iLon]
                                if val == 9999:
                                    val = np.nan
                            except Exception:
                                val = np.nan
                        else:
                            val = np.nan
                        #Assign value
                        FRM[key].append(val)
                    dimsLogged = True
        return FRM
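
A minimal usage sketch for this example; the GRIB and CSV file names are hypothetical, and queries.csv is assumed to hold comma-separated time, lat, lon rows matching the default QueryColumns:

import pandas as pd

frame = QueryGrib('era5_analysis.grib', 'queries.csv',  # hypothetical files
                  DistanceLim=0.5, TimeDelayLim=1)
if frame != -1:
    # FRM is a defaultdict of equal-length columns, so it converts
    # directly to a DataFrame.
    df = pd.DataFrame(frame)
    print(df.head())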