import json
import os

import numpy as np
from netCDF4 import Dataset

# `ds` (the dataset-dictionary helpers), `misc` and the module-level names
# read_attrs, read_var, process_datetime_var, VARS, VARS_AUX, TRANS and META
# used below are provided by other modules of the package.


def write(filename, d):
    d2 = ds.copy(d)
    for var in ds.get_vars(d2):
        if isinstance(d2[var], np.ndarray):
            d2[var] = d2[var].tolist()
    with open(filename, 'w') as f:
        json.dump(d2, f)

def read(filename, variables=None, sel=None, full=False, jd=False):
    with open(filename) as f:
        d = json.load(f)
    for var in ds.get_vars(d):
        if type(d[var]) is list:
            d[var] = np.array(d[var])
    return d

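# Example usage of the JSON read/write pair (illustrative sketch; the dataset
# dictionary follows the convention used throughout, with variable data under
# plain keys and metadata under '.'):
#
#     d = {
#         'time': np.array([0.5, 1.5]),
#         '.': {'time': {'.dims': ['time']}},
#     }
#     write('dataset.json', d)   # ndarrays are converted to lists for JSON
#     d2 = read('dataset.json')  # lists are converted back to ndarrays
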
def read(filename, variables=None, sel=None, full=False, jd=False):
    if type(filename) is bytes and str != bytes:
        filename = os.fsdecode(filename)
    with Dataset(filename, 'r') as f:
        d = {}
        d['.'] = {}
        d['.']['.'] = read_attrs(f)
        for name in f.variables.keys():
            if variables is not None and name not in variables:
                if full:
                    _, d['.'][name] = read_var(f, name, sel, False)
            else:
                d[name], d['.'][name] = read_var(f, name, sel)
        if jd:
            for name in ds.get_vars(d):
                process_datetime_var(d, name)
    return d

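# Example usage of the NetCDF read (illustrative sketch; the file name and the
# variable 'ta' are hypothetical):
#
#     d = read('input.nc', variables=['time', 'ta'], jd=True)
#     d['ta']                # variable data as a numpy array
#     d['.']['ta']['.dims']  # dimension names of the variable
#     d['.']['.']            # global (dataset) attributes
#
# `variables` restricts which variables have their data read (with full=True
# the remaining variables keep metadata only), and jd=True runs
# process_datetime_var on the variables after reading.
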
def output_sample(d, tres, output_sampling):
    t = d['time_bnds'][0, 0]
    r = (t + 0.5) % output_sampling
    t1 = t - r
    t2 = t1 + output_sampling
    dims = ds.get_dims(d)
    n = dims['time']
    n2 = int(output_sampling / tres)
    time = d['time']
    time_bnds = d['time_bnds']
    time_half2 = np.linspace(t1, t2, n2 + 1)
    time2 = 0.5 * (time_half2[1:] + time_half2[:-1])
    for var in ds.get_vars(d):
        if 'time' not in d['.'][var]['.dims']:
            continue
        i = d['.'][var]['.dims'].index('time')
        x = d[var]
        size = x.shape
        size2 = list(size)
        size2[i] = n2
        x2 = np.full(size2, np.nan, dtype=x.dtype)
        for j in range(n2):
            mask = np.maximum(0,
                np.minimum(time_bnds[:, 1], time_half2[j + 1]) -
                np.maximum(time_bnds[:, 0], time_half2[j]))
            s = np.sum(mask)
            if s > 0.:
                mask /= np.sum(mask)
            sel2 = tuple([slice(None) if l != i else j
                for l in range(len(size))])
            for k in np.argwhere(mask > 0):
                sel = tuple([slice(None) if l != i else k
                    for l in range(len(size))])
                x2[sel2] = np.where(np.isnan(x2[sel2])*mask[k], 0, x2[sel2]) + \
                    x[sel]*mask[k]
        d[var] = x2
    d['time'] = time2
    d['time_bnds'] = np.full((n2, 2), np.nan, np.float64)
    d['time_bnds'][:, 0] = time_half2[:-1]
    d['time_bnds'][:, 1] = time_half2[1:]

def output_sample(d, tres, output_sampling):
    t1 = d['time'][0] - ((d['time'][0] + 0.5) % 1.0)
    t2 = t1 + output_sampling
    dims = ds.get_dims(d)
    n = dims['time']
    n2 = int(output_sampling/tres)
    time = d['time']
    time_half = np.zeros(n + 1, dtype=np.float64)
    time_half[1:-1] = 0.5*(time[1:] + time[:-1])
    time_half[0] = time[0] - 0.5*(time[1] - time[0])
    time_half[-1] = time[-1] + 0.5*(time[-1] - time[-2])
    time_half2 = np.linspace(t1, t2, n2 + 1)
    time2 = 0.5*(time_half2[1:] + time_half2[:-1])
    for var in ds.get_vars(d):
        if 'time' not in d['.'][var]['.dims']:
            continue
        i = d['.'][var]['.dims'].index('time')
        x = d[var]
        size = x.shape
        size2 = list(size)
        size2[i] = n2
        x2 = np.full(size2, np.nan, dtype=x.dtype)
        for j in range(n2):
            mask = np.maximum(0,
                np.minimum(time_half[1:], time_half2[j + 1]) -
                np.maximum(time_half[:-1], time_half2[j])
            )
            s = np.sum(mask)
            if s > 0.:
                mask /= np.sum(mask)
            sel2 = tuple([slice(None) if l != i else j
                for l in range(len(size))])
            for k in range(len(mask)):
                sel = tuple([slice(None) if l != i else k
                    for l in range(len(size))])
                x2[sel2] = np.where(np.isnan(x2[sel2])*mask[k], 0, x2[sel2]) + \
                    x[sel]*mask[k]
        d[var] = x2
    d['time'] = time2

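# Both output_sample variants above resample every time-dependent variable onto
# a regular grid of n2 = output_sampling/tres output bins by overlap weighting:
# the first takes the source intervals from time_bnds, the second reconstructs
# half-levels from the midpoints in time. Worked example of the weights
# (illustrative numbers only):
#
#     time_bnds = np.array([[0., 1.], [1., 2.], [2., 3.]])  # source intervals
#     lo, hi = 0.5, 2.5                                      # one output bin
#     mask = np.maximum(0,
#         np.minimum(time_bnds[:, 1], hi) - np.maximum(time_bnds[:, 0], lo))
#     # mask == [0.5, 1.0, 0.5]; after normalisation by its sum the weights are
#     # [0.25, 0.5, 0.25], i.e. each source sample contributes in proportion to
#     # how much of it overlaps the output bin.
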
def write(filename, d):
    ds.validate(d)
    if type(filename) is bytes and hasattr(os, 'fsdecode'):
        filename = os.fsdecode(filename)
    with Dataset(filename, 'w') as f:
        dims = ds.get_dims(d)
        for k, v in dims.items():
            f.createDimension(k, v)
        for name in ds.get_vars(d):
            data = ds.get_var(d, name)
            if data.dtype == 'O' and \
                    len(data.flatten()) > 0 and \
                    type(data.flatten()[0]) is str:
                dtype = str
            else:
                dtype = data.dtype
            v = f.createVariable(name, dtype, ds.get_dims(d, name))
            v.setncatts(ds.get_attrs(d, name))
            v[::] = data
        f.setncatts(ds.get_attrs(d))

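# Example usage of the NetCDF write (illustrative sketch; variable names,
# attributes and the output file name are hypothetical):
#
#     d = {
#         'time': np.array([0.5, 1.5]),
#         'ta': np.array([250., 260.]),
#         '.': {
#             '.': {'title': 'example dataset'},       # global attributes
#             'time': {'.dims': ['time']},
#             'ta': {'.dims': ['time'], 'units': 'K'},
#         },
#     }
#     write('output.nc', d)  # creates the dimensions, variables and attributes
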
def tsample(d, tres):
    # Collapse the time dimension to a single sample, weighting each input
    # time step by the width of its time_bnds interval.
    w = d['time_bnds'][:, 1] - d['time_bnds'][:, 0]
    d['time_bnds'] = np.array([[
        np.amin(d['time_bnds'][:, 0]),
        np.amax(d['time_bnds'][:, 1]),
    ]])
    # Midpoint of the aggregated bounds (shape (1,)).
    d['time'] = np.mean(d['time_bnds'], axis=1)
    if 'backscatter_sd' in d:
        # Propagate the standard deviation as a standard error of the
        # weighted mean.
        n, m = d['backscatter_sd'].shape
        d['backscatter_sd'] = np.sqrt(1./n*np.average(
            d['backscatter_sd']**2,
            axis=0,
            weights=w,
        ))
        d['backscatter_sd'] = d['backscatter_sd'].reshape([1, m])
    for var in ds.get_vars(d):
        if var in ('time', 'time_bnds', 'backscatter_sd'):
            continue
        if 'time' not in d['.'][var]['.dims']:
            continue
        i = d['.'][var]['.dims'].index('time')
        shape = list(d[var].shape)
        d[var] = np.average(d[var], axis=i, weights=w)
        shape[i] = 1
        d[var] = d[var].reshape(shape)

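# Illustrative check of the backscatter_sd reduction (hypothetical numbers):
# four samples with equal time_bnds widths and a standard deviation of 2 each
# combine to
#
#     sd = np.full((4, 1), 2.)
#     w = np.ones(4)
#     np.sqrt(1./4*np.average(sd**2, axis=0, weights=w))  # -> array([1.])
#
# i.e. the standard deviation of the weighted mean, assuming independent
# samples.
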
def read(dirname, track, warnings=[], step=6./24.):
    d_ll = ds.read(os.path.join(dirname, 'LL125.nc'),
        ['latitude', 'longitude', 'z'])
    lat_ll = d_ll['latitude']
    lon_ll = d_ll['longitude']
    orog_ll = d_ll['z'][0, :, :]/9.80665
    dd_idx = ds.readdir(dirname,
        variables=['time', 'latitude', 'longitude'],
        jd=True,
        full=True,
        warnings=warnings,
    )
    start_time = track['time'][0]
    end_time = track['time'][-1]
    d_out = {}
    for var in VARS:
        dd = []
        var2 = TRANS[var]
        for d_idx in dd_idx:
            if var not in d_idx['.']:
                continue
            time = d_idx['time']
            lat = d_idx['latitude']
            lon = d_idx['longitude']
            filename = d_idx['filename']
            ii = np.nonzero(
                (time >= start_time - step*0.5) &
                (time < end_time + step*0.5)
            )[0]
            for i in ii:
                t = time[i]
                i2 = np.argmin(np.abs(track['time'] - time[i]))
                lat0 = track['lat'][i2]
                lon0 = track['lon'][i2]
                j = np.argmin(np.abs(lat - lat0))
                k = np.argmin(np.abs(lon - lon0))
                j_ll = np.argmin(np.abs(lat_ll - lat0))
                k_ll = np.argmin(np.abs(lon_ll - lon0))
                d = ds.read(filename, VARS_AUX + [var], sel={
                    'time': [i],
                    'latitude': j,
                    'longitude': k,
                }, jd=True)
                for a, b in TRANS.items():
                    if a in d.keys():
                        ds.rename(d, a, b)
                d['lat'] = np.array([d['lat']])
                d['lon'] = np.array([d['lon']])
                d['orog'] = np.array([orog_ll[j_ll, k_ll]])
                d['.']['lat']['.dims'] = ['time']
                d['.']['lon']['.dims'] = ['time']
                d['.']['orog'] = {'.dims': ['time']}
                if 'pfull' in ds.get_vars(d):
                    d['pfull'] = d['pfull'].reshape([1, len(d['pfull'])])
                    d['.']['pfull']['.dims'] = ['time', 'pfull']
                    d['pfull'] = d['pfull'][:, ::-1]
                    d[var2] = d[var2][:, ::-1]
                    ds.select(d, {'pfull': np.arange(27)})
                dd.append(d)
        d = ds.op.merge(dd, 'time')
        for var_aux in VARS_AUX:
            if TRANS[var_aux] in ds.get_vars(d_out) \
                    and TRANS[var_aux] in ds.get_vars(d) \
                    and not np.all(d_out[TRANS[var_aux]] == d[TRANS[var_aux]]):
                raise ValueError('%s: Field differs between input files' %
                    TRANS[var_aux])
        d_out.update(d)
    d_out['pfull'] = d_out['pfull']*1e2
    if 'time' in d_out:
        d_out['time_bnds'] = misc.time_bnds(d_out['time'], step,
            start_time, end_time)
        d_out['time'] = np.mean(d_out['time_bnds'], axis=1)
    d_out['.'] = META
    return d_out

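# Example usage of the model driver read (illustrative sketch; the directory
# name and track values are hypothetical, and the track times are assumed to
# be on the same Julian-date axis as the model files read with jd=True):
#
#     track = {
#         'time': np.array([2459000.25, 2459000.50]),
#         'lat': np.array([-45.0, -45.5]),
#         'lon': np.array([170.0, 170.5]),
#     }
#     d = read('model_dir', track)
#     # d contains the variables listed in VARS (renamed via TRANS), extracted
#     # at the grid cell nearest to the track position at each time step,
#     # together with time, time_bnds, lat, lon, orog and pfull.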