# Assumed imports for the functions below. These excerpts come from several
# related projects (ds, rstool, alcf); ds_format and aquarius_time are the
# packages they build on. Package-local names such as stats, model, prof,
# get_driver, postprocess, parse_time, VARIABLES and VERSION come from the
# surrounding code and are not redefined here.
import os
import datetime as dt
import numpy as np
import ds_format as ds
import aquarius_time as aq

def merge(dim, *args, **opts):
    # All positional arguments except the last are input files; the last one
    # is the output file.
    input_ = args[:-1]
    output = args[-1]
    dd = []
    for filename in input_:
        d = ds.read(filename)
        dd.append(d)
    d = ds.op.merge(dd, dim, variables=opts.get('variables'))
    ds.write(output, d)
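# A minimal usage sketch for merge() above, assuming this module's imports.
# The file names are hypothetical and not part of the original code.
def _example_merge():
    # Concatenate a.nc and b.nc along the time dimension, keeping only the
    # variable temperature, and write the result to merged.nc.
    merge('time', 'a.nc', 'b.nc', 'merged.nc', variables=['temperature'])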
def write(d, output):
    # Skip datasets with no time steps. Return an empty list here as well,
    # for consistency with the successful branch below.
    if len(d['time']) == 0:
        return []
    # Name the output file after the start of the first time interval,
    # rounded to the nearest second (time is in days).
    t1 = d['time_bnds'][0, 0]
    t1 = np.round(t1*86400.)/86400.
    filename = os.path.join(output, '%s.nc' % aq.to_iso(t1).replace(':', ''))
    ds.write(filename, d)
    print('-> %s' % filename)
    return []
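# A hedged sketch of calling the per-day write() above. It builds a tiny
# dataset with time and time_bnds in Julian days (the units aquarius_time
# works with); the output directory out/ is hypothetical and must exist.
def _example_write_day():
    t = aq.from_iso('2020-01-01T00:00:00')
    d = {
        'time': np.array([t]),
        'time_bnds': np.array([[t, t + 1./24.]]),
        '.': {
            'time': {'.dims': ['time']},
            'time_bnds': {'.dims': ['time', 'bnds']},
        },
    }
    write(d, 'out')  # writes out/2020-01-01T000000.nc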
def write(*args, **opts):
    d = {'.': {}}
    output = args[0]
    variables = args[1:]
    for var in variables:
        attrs = None
        if len(var) == 3:
            name, dims, values = var
        elif len(var) == 4:
            name, dims, values, attrs = var
        else:
            raise ValueError('Invalid variable: %s' % var)
        d[name] = np.array(values)
        d['.'][name] = attrs if attrs is not None else {}
        d['.'][name]['.dims'] = dims if isinstance(dims, list) else [dims]
    if len(opts) > 0:
        d['.']['.'] = opts
    ds.write(output, d)
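# A minimal sketch of the variadic write() above: each variable is a
# (name, dims, values[, attrs]) tuple, and keyword options become global
# attributes. The file name and values are hypothetical.
def _example_write_vars():
    write('out.nc',
        ('time', 'time', [1., 2., 3.]),
        ('temperature', 'time', [250., 251., 252.], {'units': 'K'}),
        title='Example dataset')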
def main_(input_type, output_type, input_, output, surf=None):
    d_raw = None
    d_pts = None
    d_prof = None
    d_prof_desc = None
    d_surf = None
    desc = False
    not_supported_msg = 'input or output type not supported'

    # Read the input: either a raw instrument file through a named driver
    # ("raw:<name>"), an intermediate pts or prof product, or a
    # driver-specific native format.
    if input_type.startswith('raw:'):
        name = input_type[(input_type.index(':') + 1):]
        drv = get_driver(name)
        d_raw = ds.read(input_)
    elif input_type == 'pts':
        d_pts = ds.read(input_)
    elif input_type == 'prof':
        d_prof = ds.read(input_)
    else:
        drv = get_driver(input_type)
        #if output_type == 'prof' and hasattr(drv, 'read_prof'):
        #    d_prof = drv.read_prof(input_)
        if hasattr(drv, 'read'):
            d_raw = drv.read(input_)

    # Derive higher-level products as needed: raw -> pts -> prof.
    if d_pts is None and d_raw is not None and hasattr(drv, 'pts'):
        d_pts = drv.pts(d_raw)
    if d_prof is None and d_pts is not None:
        d_prof = prof(d_pts)
        d_prof_desc = prof(d_pts, desc=True)

    # Optionally merge surface data into the profile.
    if d_prof is not None and surf is not None:
        drv = rstoollib.drivers.surf
        d_surf = drv.read(surf, d_prof['time'][0])
    if d_surf is not None:
        for k, v in d_surf.items():
            if k != '.':
                d_prof[k] = v

    if d_prof is not None:
        postprocess(d_prof)
    if d_prof_desc is not None:
        postprocess(d_prof_desc)

    # Select the requested output product.
    if output_type == 'prof':
        if d_prof is None:
            raise ValueError(not_supported_msg)
        d = d_prof
    elif output_type == 'prof:desc':
        if d_prof_desc is None:
            raise ValueError(not_supported_msg)
        d = d_prof_desc
    elif output_type == 'pts':
        if d_pts is None:
            raise ValueError(not_supported_msg)
        d = d_pts
    elif output_type == 'raw':
        if d_raw is None:
            raise ValueError(not_supported_msg)
        d = d_raw
    else:
        raise ValueError(not_supported_msg)

    # Record provenance metadata in the global attributes.
    d['.'] = d.get('.', {})
    d['.']['.'] = d['.'].get('.', {})
    d['.']['.'].update({
        'software': 'rstool ' + VERSION +
            ' (https://github.com/peterkuma/rstool)',
        'created': aq.to_iso(aq.from_datetime(dt.datetime.utcnow())),
    })
    ds.write(output, d)
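# A hedged usage sketch for main_() above. The file names are hypothetical;
# converting an intermediate pts file to a prof (profile) product exercises
# the pts -> prof branch, while raw inputs would go through a named driver.
def _example_main():
    main_('pts', 'prof', 'sounding_pts.nc', 'sounding_prof.nc')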
def run(input_, output,
    tlim=None,
    blim=[5., 200.],
    bres=5.,
    bsd_lim=[0.001, 10.],
    bsd_log=True,
    bsd_res=0.001,
    bsd_z=8000.,
    filter=None,
    zlim=[0., 15000.],
    zres=100.,
    **kwargs
):
    '''
alcf-stats -- Calculate cloud occurrence statistics.
==========

Synopsis
--------

    alcf stats <input> <output> [<options>]

Arguments
---------

- `input`: Input filename or directory.
- `output`: Output filename or directory.

Options
-------

- `blim: <value>`: Backscatter histogram limits (1e-6 m-1.sr-1). Default: `{ 5 200 }`.
- `bres: <value>`: Backscatter histogram resolution (1e-6 m-1.sr-1). Default: `5`.
- `bsd_lim: { <low> <high> }`: Backscatter standard deviation histogram limits (1e-6 m-1.sr-1). Default: `{ 0.001 10 }`.
- `bsd_log: <value>`: Enable/disable logarithmic scale of the backscatter standard deviation histogram (`true` or `false`). Default: `true`.
- `bsd_res: <value>`: Backscatter standard deviation histogram resolution (1e-6 m-1.sr-1). Default: `0.001`.
- `bsd_z: <value>`: Backscatter standard deviation histogram height (m). Default: `8000`.
- `filter: <value> | { <value> ... }`: Filter profiles by condition: `cloudy` for cloudy profiles only, `clear` for clear sky profiles only, `night` for nighttime profiles, `day` for daytime profiles, `none` for all profiles. If an array of values is supplied, all conditions must be true. For `night` and `day`, lidar profiles must contain valid longitude and latitude fields set via the `lon` and `lat` arguments of `alcf lidar` or read implicitly from raw lidar data files if available (mpl, mpl2nc). Default: `none`.
- `tlim: { <start> <end> }`: Time limits (see Time format below). Default: `none`.
- `zlim: { <low> <high> }`: Height limits (m). Default: `{ 0 15000 }`.
- `zres: <value>`: Height resolution (m). Default: `100`.

Time format
-----------

`YYYY-MM-DD[THH:MM[:SS]]`, where `YYYY` is year, `MM` is month, `DD` is day, `HH` is hour, `MM` is minute, `SS` is second. Example: `2000-01-01T00:00:00`.

Examples
--------

Calculate statistics from processed lidar data in `alcf_cl51_lidar` and store the output in `alcf_cl51_stats.nc`.

    alcf stats alcf_cl51_lidar alcf_cl51_stats.nc
    '''
    tlim_jd = parse_time(tlim) if tlim is not None else None
    state = {}
    options = {
        'tlim': tlim_jd,
        'blim': np.array(blim, dtype=np.float64)*1e-6,
        'bres': bres*1e-6,
        'bsd_lim': np.array(bsd_lim, dtype=np.float64)*1e-6,
        'bsd_log': bsd_log,
        'bsd_res': bsd_res*1e-6,
        'bsd_z': bsd_z,
        'filter': filter if type(filter) is list else [filter],
        'zlim': zlim,
        'zres': zres,
    }
    # Stream the input files through the statistics calculation one at a
    # time, then flush the accumulated state by passing None.
    if os.path.isdir(input_):
        files = sorted(os.listdir(input_))
        for file_ in files:
            filename = os.path.join(input_, file_)
            if not os.path.isfile(filename):
                continue
            d = ds.read(filename, VARIABLES)
            print('<- %s' % filename)
            dd = stats.stream([d], state, **options)
    else:
        d = ds.read(input_, VARIABLES)
        print('<- %s' % input_)
        dd = stats.stream([d], state, **options)
    dd = stats.stream([None], state, **options)
    print('-> %s' % output)
    ds.write(output, dd[0])
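# A minimal sketch of calling the stats run() from Python rather than the
# alcf command line, mirroring the docstring example. The input directory and
# output file are hypothetical, and tlim is assumed to take ISO 8601 strings,
# as described in the docstring's Time format section.
def _example_stats():
    run('alcf_cl51_lidar', 'alcf_cl51_stats.nc',
        tlim=['2020-01-01T00:00:00', '2020-01-02T00:00:00'],
        filter='cloudy')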
def select(input_, output, variables=None, sel=None):
    sel = sel[0] if sel is not None and len(sel) > 0 else None
    d = ds.read(input_, variables, sel)
    ds.write(output, d)
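# A short sketch of select() above. sel is passed through to ds.read as an
# index selector (in ds-format, a dict mapping a dimension name to an index);
# the file names are hypothetical.
def _example_select():
    select('in.nc', 'out.nc', variables=['time', 'temperature'],
        sel=[{'time': 0}])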
def run(type_, input_, output,
    point=None,
    time=None,
    track=None,
    track_override_year=None,
    track_lon_180=False,
    **kwargs
):
    """
alcf model - extract model data at a point or along a track

Usage:

    alcf model <type> point: { <lon> <lat> } time: { <start> <end> } <input> <output> [options]
    alcf model <type> track: <track> <input> <output>

Arguments:

- `type`: input data type (see Types below)
- `input`: input directory
- `output`: output directory
- `lon`: point longitude
- `lat`: point latitude
- `start`: start time (see Time format below)
- `end`: end time (see Time format below)
- `track`: track NetCDF file (see Track below)
- `options`: see Options below

Options:

- `track_override_year: <year>`: Override year in track. Use if comparing observations with a model statistically. Default: `none`.
- `--track_lon_180`: expect track longitude between -180 and 180 degrees

Types:

- `amps`: Antarctic Mesoscale Prediction System (AMPS)
- `era5`: ERA5
- `jra55`: JRA-55
- `merra2`: Modern-Era Retrospective Analysis for Research and Applications, Version 2 (MERRA-2)
- `nzcsm`: New Zealand Convection Scale Model (NZCSM)
- `nzesm`: New Zealand Earth System Model (NZESM) (experimental)
- `um`: UK Met Office Unified Model (UM)

Time format:

    "YYYY-MM-DD[THH:MM[:SS]]", where YYYY is year, MM is month, DD is day, HH is hour, MM is minute, SS is second. Example: 2000-01-01T00:00:00.

Track:

    Track file is a NetCDF file containing 1D variables `lon`, `lat`, and `time`. `time` is time in format conforming with the NetCDF standard, `lon` is longitude between 0 and 360 degrees and `lat` is latitude between -90 and 90 degrees.
    """
    time1 = None
    track1 = None
    if track is not None:
        track1 = ds.read(track)
        if track_override_year is not None:
            date = aq.to_date(track1['time'])
            date[1][:] = track_override_year
            track1['time'] = aq.from_date(date)
        if track_lon_180:
            track1['lon'] = np.where(
                track1['lon'] > 0,
                track1['lon'],
                360. + track1['lon']
            )
        time1 = track1['time'][0], track1['time'][-1]
    elif point is not None and time is not None:
        pass
    else:
        raise ValueError('Point and time or track is required')
    if time is not None:
        time1 = [None, None]
        for i in 0, 1:
            time1[i] = aq.from_iso(time[i])
            if time1[i] is None:
                raise ValueError('Invalid time format: %s' % time[i])
    # Process one day at a time (days start at midnight UTC, i.e. at .5 in
    # Julian date), writing one output file per day.
    #if os.path.isdir(output):
    t1, t2 = time1[0], time1[1]
    for t in np.arange(np.floor(t1 - 0.5), np.ceil(t2 - 0.5)) + 0.5:
        output_filename = os.path.join(output,
            '%s.nc' % aq.to_iso(t).replace(':', ''))
        d = model(type_, input_, point, time=[t, t + 1.], track=track1)
        if d is not None:
            ds.write(output_filename, d)
            print('-> %s' % output_filename)
def run(type_, input_, output,
    point=None,
    time=None,
    track=None,
    track_override_year=None,
    track_lon_180=False,
    debug=False,
    **kwargs
):
    '''
alcf-model -- Extract model data at a point or along a track.
==========

Synopsis
--------

    alcf model <type> point: { <lon> <lat> } time: { <start> <end> } <input> <output> [options]
    alcf model <type> track: <track> <input> <output>

Arguments
---------

- `type`: Input data type (see Types below).
- `input`: Input directory.
- `output`: Output directory.
- `lon`: Point longitude.
- `lat`: Point latitude.
- `start`: Start time (see Time format below).
- `end`: End time (see Time format below).
- `track`: Track NetCDF file (see Files below).
- `options`: See Options below.

Options
-------

- `--track_lon_180`: Expect track longitude between -180 and 180 degrees.
- `track_override_year: <year>`: Override year in track. Use if comparing observations with a model statistically. Default: `none`.

Types
-----

- `amps`: Antarctic Mesoscale Prediction System (AMPS).
- `era5`: ERA5.
- `jra55`: JRA-55.
- `merra2`: Modern-Era Retrospective Analysis for Research and Applications, Version 2 (MERRA-2).
- `nzcsm`: New Zealand Convection Scale Model (NZCSM).
- `nzesm`: New Zealand Earth System Model (NZESM). [Experimental]
- `um`: UK Met Office Unified Model (UM).

Time format
-----------

`YYYY-MM-DD[THH:MM[:SS]]`, where `YYYY` is year, `MM` is month, `DD` is day, `HH` is hour, `MM` is minute, `SS` is second. Example: `2000-01-01T00:00:00`.

Files
-----

The track file is a NetCDF file containing 1D variables `lon`, `lat`, and `time`. `time` is time in format conforming with the NetCDF standard, `lon` is longitude between 0 and 360 degrees and `lat` is latitude between -90 and 90 degrees.

Examples
--------

Extract MERRA-2 model data in `M2I3NVASM.5.12.4` at 45 S, 170 E between 1 and 2 January 2020 and store the output in the directory `alcf_merra2_model`.

    alcf model merra2 point: { -45.0 170.0 } time: { 2020-01-01 2020-01-02 } M2I3NVASM.5.12.4 alcf_merra2_model
    '''
    time1 = None
    track1 = None
    if track is not None:
        track1 = ds.read(track)
        if track_override_year is not None:
            date = aq.to_date(track1['time'])
            date[1][:] = track_override_year
            track1['time'] = aq.from_date(date)
        if track_lon_180:
            track1['lon'] = np.where(
                track1['lon'] > 0,
                track1['lon'],
                360. + track1['lon']
            )
        time1 = track1['time'][0], track1['time'][-1]
    elif point is not None and time is not None:
        pass
    else:
        raise ValueError('Point and time or track is required')
    if time is not None:
        time1 = [None, None]
        for i in 0, 1:
            time1[i] = aq.from_iso(time[i])
            if time1[i] is None:
                raise ValueError('Invalid time format: %s' % time[i])
    # Process one day at a time (days start at midnight UTC, i.e. at .5 in
    # Julian date), writing one output file per day.
    #if os.path.isdir(output):
    t1, t2 = time1[0], time1[1]
    for t in np.arange(np.floor(t1 - 0.5), np.ceil(t2 - 0.5)) + 0.5:
        output_filename = os.path.join(output,
            '%s.nc' % aq.to_iso(t).replace(':', ''))
        d = model(type_, input_, point, time=[t, t + 1.], track=track1,
            debug=debug)
        if d is not None:
            ds.write(output_filename, d)
            print('-> %s' % output_filename)
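# A minimal sketch of calling the model run() from Python, mirroring the
# MERRA-2 example in the docstring. The input and output directories are
# hypothetical; the output directory must exist, since one file per day is
# written into it.
def _example_model():
    run('merra2', 'M2I3NVASM.5.12.4', 'alcf_merra2_model',
        point=[-45.0, 170.0],
        time=['2020-01-01T00:00:00', '2020-01-02T00:00:00'])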