def _load(self, domain="NW", variable="rainfall", year=2016, month=8, part=3):
    """Fetch radar grid coordinates and one month of data, building an xarray Dataset.

    Sets ``self.variable``, the bounding box (``north``/``south``/``east``/``west``)
    and ``self._xarray``.
    """
    # Grid coordinates for the requested domain.
    url = "{url}/radar/radar_coords_{domain}.npz".format(url=self.URL, domain=domain)
    coords = np.load(download_and_cache(url), allow_pickle=True)

    # Shift by half the 0.01-degree resolution (lat down, lon up).
    half_cell = 0.01 / 2
    lats = coords["lats"] - half_cell
    lons = coords["lons"] + half_cell

    # Monthly data file for the requested variable/part.
    url = "{url}/radar/{variable}_{domain}_{year}_{month:02d}.{part}.npz".format(
        url=self.URL,
        domain=domain,
        variable=variable,
        year=year,
        month=month,
        part=part,
    )
    content = np.load(download_and_cache(url), allow_pickle=True)
    data = content["data"]
    times = content["dates"]
    # missing = content['miss_dates']

    self.variable = variable

    n_y, n_x = data.shape[1], data.shape[2]
    ds = xr.Dataset(
        {
            variable: (["time", "y", "x"], data),
            "x": (["x"], range(n_x)),
            "y": (["y"], range(n_y)),
        },
        coords={
            "lon": (["y", "x"], lons),
            "lat": (["y", "x"], lats),
            "time": times,
        },
    )

    # Geographic bounding box of the grid.
    self.north, self.south = np.amax(lats), np.amin(lats)
    self.east, self.west = np.amax(lons), np.amin(lons)

    # CF-style metadata.
    for coord, standard_name in (("lon", "longitude"), ("lat", "latitude"), ("time", "time")):
        ds[coord].attrs["standard_name"] = standard_name
    ds["x"].attrs["axis"] = "X"
    ds["y"].attrs["axis"] = "Y"

    self._xarray = ds
def _param_id_dict():
    """Return the PARAMS mapping of int param_id -> parameter entry, filling it lazily.

    The JSON catalogue at URL is downloaded (and cached) only on the first call.
    """
    if PARAMS:
        return PARAMS
    with open(download_and_cache(URL)) as f:
        parameters = json.load(f)["parameters"]
    PARAMS.update({int(p["param_id"]): p for p in parameters})
    return PARAMS
def __init__(self, domain="NW", date="20160101"):
    """Download the ground-station CSV for *domain*/*date* and load it into pandas.

    Sets ``self.path`` (local cache path) and ``self._pandas`` (DataFrame);
    column index 4 is parsed as datetimes.
    """
    url = "{url}/ground_stations/{domain}_{date}.csv".format(url=self.URL, domain=domain, date=date)
    self.path = download_and_cache(url)
    # infer_datetime_format was deprecated in pandas 2.0 (and later removed):
    # consistent-format parsing is now the default, so parse_dates alone suffices.
    self._pandas = pd.read_csv(self.path, parse_dates=[4])
def _load(self, bassin="atlantic", url=None):
    """Parse a HURDAT2 cyclone-track file into ``self.cyclones``.

    Format reference:
    http://www.aoml.noaa.gov/hrd/hurdat/hurdat2-format-may2015.pdf

    Parameters
    ----------
    bassin : str
        Key into ``URLS`` (lower-cased) used when *url* is not given.
    url : str, optional
        Explicit URL of the HURDAT2 file, overriding *bassin*.
    """
    if url is None:
        url = URLS[bassin.lower()]
    path = download_and_cache(url)

    records = []
    with open(path) as f:
        for line in f:
            # Skip indented continuation lines, HTML noise and blank lines.
            if line[0] in (" ", "<", "\n"):
                continue

            # Header line: storm identity, then the count of data lines.
            # (Renamed from `bassin` to avoid shadowing the parameter.)
            storm_bassin = line[0:2]
            number = int(line[2:4])
            year = int(line[4:8])
            name = line[18:28].strip().lower()
            # id = line[0:8]

            for _ in range(int(line[33:36])):
                line = next(f)
                knots = float(line[38:41])
                # A '-' in the pressure field means "not available".
                # np.nan (not np.NaN, which was removed in NumPy 2.0).
                pressure = np.nan if line[43] == "-" else float(line[43:47])
                time = "%s-%s-%sZ%s:%s" % (
                    line[0:4],
                    line[4:6],
                    line[6:8],
                    line[10:12],
                    line[12:14],
                )
                records.append(
                    dict(
                        # id=id,
                        bassin=storm_bassin,
                        number=number,
                        year=year,
                        name=name,
                        time=parse_date(time),
                        type=line[16],
                        status=line[19:21],
                        lat=float(line[23:27]) * SIGN[line[27]],
                        lon=float(line[30:35]) * SIGN[line[35]],
                        knots=knots,
                        category=category(knots),
                        pressure=pressure,
                    )
                )

    self.cyclones = self.annotate(pd.DataFrame(records), style="cyclone-track")
def load_remote(self, name):
    """Search each configured remote catalog for *name* and return the first match.

    Returns the parsed YAML description, or None when no catalog provides it.
    """
    for catalog in self.settings("datasets-catalogs-urls"):
        url = f"{catalog}/{name}.yaml"
        path = download_and_cache(
            url,
            update_if_out_of_date=True,
            return_none_on_404=True,
        )
        if not path:
            continue
        LOG.debug("Found dataset at %s", url)
        return self.load_yaml(path)
    return None
def test_download_4():
    # 404 handling: with return_none_on_404 the helper must return None, not raise.
    result = download_and_cache(
        "https://get.ecmwf.int/test-data/climetlab/input/missing.txt",
        return_none_on_404=True,
    )
    assert result is None, result
def test_download_3():
    # Exercise the out-of-date-URL refresh path while downloading.
    with settings.temporary("download-out-of-date-urls", True):
        download_and_cache("https://get.ecmwf.int/test-data/climetlab/input/test.txt")
def test_download_2():
    # Plain download-and-cache of a small GRIB file.
    download_and_cache("https://github.com/ecmwf/climetlab/raw/main/docs/examples/test.grib")
def test_download_1():
    # A unique query string defeats any cached copy of the file.
    cache_buster = time.time()
    url = "https://github.com/ecmwf/climetlab/raw/main/docs/examples/test.grib?_=%s" % (
        cache_buster,
    )
    download_and_cache(url)
def cf_standard_names(version=78):
    """Yield each standard-name id from version *version* of the CF standard-name table."""
    document = minidom.parse(download_and_cache(URL.format(version=version)))
    for entry in document.getElementsByTagName("entry"):
        yield entry.attributes["id"].value