def __init__(self, direction, n, start, end, dtype):
    dtype = DummyDtype.validator(dtype)
    delta = (end - start) / n
    vert = nplike.linspace(start, end, n + 1, dtype=dtype)
    cntr = nplike.linspace(start + delta / 2., end - delta / 2., n, dtype=dtype)

    if direction == "x":
        xface = copy.deepcopy(vert)
        yface = copy.deepcopy(cntr)
    else:  # if this is not "y", pydantic will let me know
        xface = copy.deepcopy(cntr)
        yface = copy.deepcopy(vert)

    # pydantic will validate the data here
    super().__init__(
        direction=direction, n=n, start=start, end=end, delta=delta, dtype=dtype,
        vert=vert, cntr=cntr, xface=xface, yface=yface)
def get_snapshot_times(output_type: str, params: List[Union[int, float]], dt: float):
    """Generate a list of times at which the solver should output solution snapshots.

    Arguments
    ---------
    output_type : str
    params : a list/tuple
    dt : float

    Returns
    -------
    t : a list/tuple of snapshot times.

    Notes
    -----
    See the data model TemporalConfig for the allowed output_type and params. The
    `output_type` is the first element in TemporalConfig.output, and `params` are the
    remaining elements in that list. dt is only used when output_type is
    "t_start every_steps multiple" or "t_start n_steps no save".
    """

    # write solutions to a file at given times
    if output_type == "at":
        t = list(params)

    # output every `every_seconds` seconds `multiple` times from `t_start`
    elif output_type == "t_start every_seconds multiple":
        bg, every_seconds, n = params  # renamed to avoid shadowing the `dt` argument
        t = (nplike.arange(0, n + 1) * every_seconds + bg).tolist()  # including saving t_start

    # output every `every_steps` constant-size steps for `multiple` times from t=`t_start`
    elif output_type == "t_start every_steps multiple":
        bg, steps, n = params
        t = (nplike.arange(0, n + 1) * dt * steps + bg).tolist()  # including saving t_start

    # from `t_start` to `t_end` evenly outputs `n_saves` times (including both ends)
    elif output_type == "t_start t_end n_saves":
        bg, ed, n = params
        t = nplike.linspace(bg, ed, n + 1).tolist()  # including saving t_start

    # run simulation from `t_start` to `t_end` but do not save solutions at all
    elif output_type == "t_start t_end no save":
        t = params

    # run simulation from `t_start` for `n_steps` iterations but do not save solutions at all
    elif output_type == "t_start n_steps no save":
        t = [params[0], params[0] + params[1] * dt]  # end time measured from t_start

    # should never reach this branch because pydantic should have caught invalid arguments
    else:
        raise ValueError("{} is not an allowed output method.".format(output_type))

    return t
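# Illustration only (not part of the original module): a doctest-style sketch of
# get_snapshot_times, assuming `nplike` behaves like numpy. The numeric values are
# hypothetical.
#
#   >>> get_snapshot_times("t_start t_end n_saves", [0.0, 10.0, 5], dt=0.1)
#   [0.0, 2.0, 4.0, 6.0, 8.0, 10.0]
#   >>> get_snapshot_times("t_start every_steps multiple", [0.0, 10, 3], dt=0.5)
#   [0.0, 5.0, 10.0, 15.0]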
def get_gridline(direction: str, n: int, start: float, end: float, dtype: str):
    """Get a Gridline object.

    Arguments
    ---------
    direction : str
        Either "x" or "y".
    n : int
        Number of cells.
    start, end : float
        Lower and upper bound of this axis.
    dtype : str, nplike.float32, or nplike.float64

    Returns
    -------
    gridline : Gridline
    """
    dtype = DummyDtype.validator(dtype)
    delta = (end - start) / n
    vert = nplike.linspace(start, end, n + 1, dtype=dtype)
    cntr = nplike.linspace(start + delta / 2., end - delta / 2., n, dtype=dtype)

    if direction == "x":
        xface = copy.deepcopy(vert)
        yface = copy.deepcopy(cntr)
    else:  # if this is not "y", pydantic will let me know
        xface = copy.deepcopy(cntr)
        yface = copy.deepcopy(vert)

    # pydantic will validate the data here
    return Gridline(
        direction=direction, n=n, start=start, end=end, delta=delta, dtype=dtype,
        vert=vert, cntr=cntr, xface=xface, yface=yface)
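# Illustration only (not part of the original module): building a 10-cell gridline
# along x on [0, 1], assuming `nplike` behaves like numpy and that the Gridline model
# stores the fields as passed.
#
#   >>> gl = get_gridline("x", 10, 0.0, 1.0, "float64")
#   >>> gl.delta
#   0.1
#   >>> len(gl.vert), len(gl.cntr)
#   (11, 10)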
def __init__(self, spatial: SpatialConfig, temporal: TemporalConfig, dtype: str):
    # manually launch validation
    spatial.check()
    temporal.check()

    # write solutions to a file at given times
    if temporal.output[0] == "at":
        t = list(temporal.output[1])

    # output every `every_seconds` seconds `multiple` times from `t_start`
    elif temporal.output[0] == "t_start every_seconds multiple":
        bg, dt, n = temporal.output[1:]
        t = (nplike.arange(0, n + 1) * dt + bg).tolist()  # including saving t_start

    # output every `every_steps` constant-size steps for `multiple` times from t=`t_start`
    elif temporal.output[0] == "t_start every_steps multiple":
        bg, steps, n = temporal.output[1:]
        dt = temporal.dt
        t = (nplike.arange(0, n + 1) * dt * steps + bg).tolist()  # including saving t_start

    # from `t_start` to `t_end` evenly outputs `n_saves` times (including both ends)
    elif temporal.output[0] == "t_start t_end n_saves":
        bg, ed, n = temporal.output[1:]
        t = nplike.linspace(bg, ed, n + 1).tolist()  # including saving t_start

    # run simulation from `t_start` to `t_end` but do not save solutions at all
    elif temporal.output[0] == "t_start t_end no save":
        t = temporal.output[1:]

    # should never reach this branch because pydantic should have caught invalid arguments
    else:
        raise ValueError("{} is not an allowed output method.".format(temporal.output[0]))

    super().__init__(
        x=Gridline("x", spatial.discretization[0], spatial.domain[0], spatial.domain[1], dtype),
        y=Gridline("y", spatial.discretization[1], spatial.domain[2], spatial.domain[3], dtype),
        t=t)
def read_esri_ascii(filepath):
    """Read an Esri ASCII raster file.

    Note that the output array, data, follows the traditional numerical-simulation
    convention: data[0, 0] is the bottom-left data point of the structured grid, and
    data[-1, -1] is the data point at the upper-right corner.

    Args:
    -----
        filepath: path to the input file.

    Returns:
    --------
        data: a dictionary with the key-value pairs
            x: a 1D nplike.ndarray; gridline in x direction.
            y: a 1D nplike.ndarray; gridline in y direction.
            data: a 2D nplike.ndarray; the data
        attrs: mimics the output of read_cf. The only content is a dictionary:
            {"data": {"_fill_value": nodata_value}}.
    """
    filepath = os.path.abspath(filepath)

    with open(filepath, "r") as fobj:
        raw = fobj.read()

    raw = raw.splitlines()

    header = {
        "ncols": None, "nrows": None, "xllcenter": None, "xllcorner": None,
        "yllcenter": None, "yllcorner": None, "cellsize": None, "nodata_value": None,
    }

    # header information
    for line in raw[:6]:
        line = line.split()
        assert len(line) == 2
        if line[0].lower() not in header.keys():
            raise KeyError("{} is an illegal header key.".format(line[0]))
        header[line[0].lower()] = line[1]

    assert header["ncols"] is not None, "NCOLS or ncols does not exist in the header"
    assert header["nrows"] is not None, "NROWS or nrows does not exist in the header"
    assert header["cellsize"] is not None, "CELLSIZE or cellsize does not exist in the header"

    header["ncols"] = int(header["ncols"])
    header["nrows"] = int(header["nrows"])
    header["cellsize"] = float(header["cellsize"])

    # nodata_value is optional; fall back to the Esri default when it is absent
    try:
        header["nodata_value"] = float(header["nodata_value"])
    except TypeError:
        header["nodata_value"] = -9999.

    if (header["xllcenter"] is not None) and (header["yllcenter"] is not None):
        header["xll"] = float(header["xllcenter"])
        header["yll"] = float(header["yllcenter"])
    elif (header["xllcorner"] is not None) and (header["yllcorner"] is not None):
        header["xll"] = float(header["xllcorner"])
        header["yll"] = float(header["yllcorner"])
    else:
        raise KeyError("Missing xllcenter/xllcorner/yllcenter/yllcorner.")

    del header["xllcenter"], header["yllcenter"], header["xllcorner"], header["yllcorner"]

    x = nplike.linspace(
        header["xll"], header["xll"] + header["cellsize"] * (header["ncols"] - 1),
        header["ncols"], dtype=nplike.float64)
    y = nplike.linspace(
        header["yll"], header["yll"] + header["cellsize"] * (header["nrows"] - 1),
        header["nrows"], dtype=nplike.float64)

    assert nplike.all((x[1:] - x[:-1]) > 0.)
    assert nplike.all((y[1:] - y[:-1]) > 0.)

    data = nplike.zeros((header["nrows"], header["ncols"]), dtype=nplike.float64)

    # the file stores rows from north to south, so fill the array from the last row up
    for i, line in zip(range(header["nrows"] - 1, -1, -1), raw[6:]):
        data[i, :] = nplike.fromstring(line, nplike.float64, -1, " ")

    return {"x": x, "y": y, "data": data}, {"data": {"_fill_value": header["nodata_value"]}}
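# Illustration only (not part of the original module): a sketch of reading a raster,
# where "topo.asc" is a hypothetical file path and `nplike` is assumed to behave like
# numpy.
#
#   >>> grid, attrs = read_esri_ascii("topo.asc")
#   >>> sorted(grid.keys())
#   ['data', 'x', 'y']
#   >>> grid["data"].shape == (grid["y"].size, grid["x"].size)
#   True
#   >>> attrs["data"]["_fill_value"]  # the NODATA_VALUE from the header, or -9999. by default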