def main(argv=None):
    # setup the input arguments
    args = inputs(argv)
    site = args.site
    node = args.node
    sensor = args.sensor
    method = args.method
    stream = args.stream
    deploy = args.deploy
    start = args.start
    stop = args.stop

    # determine the start and stop times for the data request based on either the deployment number or user
    # entered beginning and ending dates.
    if not deploy and not (start and stop):
        raise SyntaxError('You must specify either a deployment number or beginning and end dates of interest.')

    if deploy:
        # determine start and end dates based on the deployment number
        start, stop = get_deployment_dates(site, node, sensor, deploy)
        if not start or not stop:
            exit_text = ('Deployment dates are unavailable for %s-%s-%s, deployment %02d.'
                         % (site, node, sensor, deploy))
            raise SystemExit(exit_text)

    # request the data for download
    r = m2m_request(site, node, sensor, method, stream, start, stop)
    if not r:
        exit_text = ('Request failed for %s-%s-%s. Check request.' % (site, node, sensor))
        raise SystemExit(exit_text)

    # valid request, start downloading the data
    if deploy:
        phsen = m2m_collect(r, ('.*deployment%04d.*PHSEN.*\\.nc$' % deploy))
    else:
        phsen = m2m_collect(r, '.*PHSEN.*\\.nc$')

    if not phsen:
        exit_text = ('Data unavailable for %s-%s-%s. Check request.' % (site, node, sensor))
        raise SystemExit(exit_text)

    # clean-up and reorganize
    if method in ['telemetered', 'recovered_host']:
        if re.match('.*imodem.*', stream):
            phsen = phsen_imodem(phsen)
        else:
            phsen = phsen_datalogger(phsen)
    else:
        phsen = phsen_instrument(phsen)

    vocab = get_vocabulary(site, node, sensor)[0]
    phsen = update_dataset(phsen, vocab['maxdepth'])

    # save the data to disk
    out_file = os.path.abspath(args.outfile)
    if not os.path.exists(os.path.dirname(out_file)):
        os.makedirs(os.path.dirname(out_file))

    phsen.to_netcdf(out_file, mode='w', format='NETCDF4', engine='h5netcdf', encoding=ENCODINGS)
def main():
    # Setup the needed parameters for the request. The user would need to vary these to suit their own needs and
    # sites/instruments of interest. Site, node, sensor, stream and delivery method names can be obtained from the
    # Ocean Observatories Initiative web site. The last two parameters (level and instrmt) set the path and naming
    # conventions used to save the data to the local disk.
    site = 'CE02SHSM'                          # OOI Net site designator
    node = 'SBD12'                             # OOI Net node designator
    sensor = '04-PCO2AA000'                    # OOI Net sensor designator
    stream = 'pco2a_a_dcl_instrument_air'      # OOI Net stream name
    method = 'telemetered'                     # OOI Net data delivery method
    level = 'buoy'                             # local directory name, level below site
    instrmt = 'pco2a'                          # local directory name, instrument below level

    # We are after telemetered data. Determine the list of deployments and use the last, presumably currently
    # active, deployment to set the start and end dates for our request.
    vocab = get_vocabulary(site, node, sensor)[0]
    deployments = list_deployments(site, node, sensor)
    deploy = deployments[-1]
    start, stop = get_deployment_dates(site, node, sensor, deploy)

    # request and download the data -- air measurements
    r = m2m_request(site, node, sensor, method, stream, start, stop)
    air = m2m_collect(r, ('.*deployment%04d.*PCO2A.*air.*\\.nc$' % deploy))

    # request and download the data -- water measurements
    r = m2m_request(site, node, sensor, method, 'pco2a_a_dcl_instrument_water', start, stop)
    water = m2m_collect(r, ('.*deployment%04d.*PCO2A.*water.*\\.nc$' % deploy))

    # clean-up and reorganize the air and water datasets
    air = pco2a_datalogger(air, True)
    air = update_dataset(air, vocab['maxdepth'])
    water = pco2a_datalogger(water, True)
    water = update_dataset(water, vocab['maxdepth'])

    # save the data -- utilize groups for the air and water datasets
    out_path = os.path.join(CONFIG['base_dir']['m2m_base'], site.lower(), level, instrmt)
    out_path = os.path.abspath(out_path)
    if not os.path.exists(out_path):
        os.makedirs(out_path)

    out_file = ('%s.%s.%s.deploy%02d.%s.%s.nc' % (site.lower(), level, instrmt, deploy, method, stream))
    nc_out = os.path.join(out_path, out_file)

    air.to_netcdf(nc_out, mode='w', format='NETCDF4', engine='h5netcdf', encoding=ENCODINGS, group='air')
    water.to_netcdf(nc_out, mode='a', format='NETCDF4', engine='h5netcdf', encoding=ENCODINGS, group='water')
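# Because the air and water measurements are written to separate netCDF4 groups in the same file, they have to be
# opened by group when read back in. A minimal sketch using xarray (the file name below is illustrative):
import xarray as xr

air = xr.open_dataset('ce02shsm.buoy.pco2a.deploy17.telemetered.pco2a_a_dcl_instrument_air.nc', group='air')
water = xr.open_dataset('ce02shsm.buoy.pco2a.deploy17.telemetered.pco2a_a_dcl_instrument_air.nc', group='water')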
def main():
    # Setup the needed parameters for the request. The user would need to vary these to suit their own needs and
    # sites/instruments of interest. Site, node, sensor, stream and delivery method names can be obtained from the
    # Ocean Observatories Initiative web site. The last two (level and instrmt) set the path and naming
    # conventions used to save the data to the local disk.
    site = 'CE02SHSM'                          # OOI Net site designator
    node = 'RID26'                             # OOI Net node designator
    sensor = '06-PHSEND000'                    # OOI Net sensor designator
    stream = 'phsen_abcdef_dcl_instrument'     # OOI Net stream name
    method = 'telemetered'                     # OOI Net data delivery method
    level = 'nsif'                             # local directory name, level below site
    instrmt = 'phsen'                          # local directory name, instrument below level

    # We are after telemetered data. Determine the list of deployments and use the last, presumably currently
    # active, deployment to set the start and end dates for our request.
    vocab = get_vocabulary(site, node, sensor)[0]
    deployments = list_deployments(site, node, sensor)
    deploy = deployments[-1]
    start, stop = get_deployment_dates(site, node, sensor, deploy)

    # request and download the data
    r = m2m_request(site, node, sensor, method, stream, start, stop)
    phsen = m2m_collect(r, '.*PHSEN.*\\.nc$')
    phsen = phsen.where(phsen.deployment == deploy, drop=True)  # limit to the deployment of interest

    # clean-up and reorganize
    phsen = phsen_datalogger(phsen)
    phsen = update_dataset(phsen, vocab['maxdepth'])

    # save the data
    out_path = os.path.join(CONFIG['base_dir']['m2m_base'], site.lower(), level, instrmt)
    out_path = os.path.abspath(out_path)
    if not os.path.exists(out_path):
        os.makedirs(out_path)

    out_file = ('%s.%s.%s.deploy%02d.%s.%s.nc' % (site.lower(), level, instrmt, deploy, method, stream))
    nc_out = os.path.join(out_path, out_file)

    phsen.to_netcdf(nc_out, mode='w', format='NETCDF4', engine='h5netcdf', encoding=ENCODINGS)
def main():
    # Setup the needed parameters for the request. The user would need to vary these to suit their own needs and
    # sites/instruments of interest. Site, node, sensor, stream and delivery method names can be obtained from the
    # Ocean Observatories Initiative web site. The last two (level and instrmt) set the path and naming
    # conventions used to save the data to the local disk.
    site = 'CE01ISSM'                          # OOI Net site designator
    node = 'SBD17'                             # OOI Net node designator
    sensor = '06-FLORTD000'                    # OOI Net sensor designator
    stream = 'flort_sample'                    # OOI Net stream name
    method = 'recovered_inst'                  # OOI Net data delivery method
    level = 'buoy'                             # local directory name, level below site
    instrmt = 'flort'                          # local directory name, instrument below level

    # We are after recovered instrument data. Determine the list of deployments and use a more recent deployment
    # to set the start and end dates for our request.
    vocab = get_vocabulary(site, node, sensor)[0]
    deployments = list_deployments(site, node, sensor)
    deploy = deployments[5]
    start, stop = get_deployment_dates(site, node, sensor, deploy)

    # request and download the data
    r = m2m_request(site, node, sensor, method, stream, start, stop)
    flort = m2m_collect(r, '.*FLORT.*\\.nc$')
    flort = flort.where(flort.deployment == deploy, drop=True)  # limit to the deployment of interest

    # clean-up and reorganize
    flort = flort_instrument(flort)
    flort = update_dataset(flort, vocab['maxdepth'])

    # save the data
    out_path = os.path.join(CONFIG['base_dir']['m2m_base'], site.lower(), level, instrmt)
    out_path = os.path.abspath(out_path)
    if not os.path.exists(out_path):
        os.makedirs(out_path)

    out_file = ('%s.%s.%s.deploy%02d.%s.%s.nc' % (site.lower(), level, instrmt, deploy, method, stream))
    nc_out = os.path.join(out_path, out_file)

    flort.to_netcdf(nc_out, mode='w', format='NETCDF4', engine='h5netcdf', encoding=ENCODINGS)
def main():
    # Setup the needed parameters for the request. The user would need to vary these to suit their own needs and
    # sites/instruments of interest. Site, node, sensor and stream names can be obtained from the Ocean
    # Observatories Initiative web site.
    site = 'CE01ISSM'                          # OOI Net site designator
    node = 'RID16'                             # OOI Net node designator
    sensor = '05-PCO2WB000'                    # OOI Net sensor designator
    stream = 'pco2w_abc_instrument'            # OOI Net stream name
    method = 'recovered_inst'                  # OOI Net data delivery method
    level = 'nsif'                             # local directory name, level below site
    instrmt = 'pco2w'                          # local directory name, instrument below level

    # We are after recovered instrument data. Determine the list of deployments and use a previous one to set the
    # start and end dates for our request.
    vocab = get_vocabulary(site, node, sensor)[0]
    deployments = list_deployments(site, node, sensor)
    deploy = deployments[-3]
    start, stop = get_deployment_dates(site, node, sensor, deploy)

    # request and download the data, excluding the blank measurements
    r = m2m_request(site, node, sensor, method, stream, start, stop)
    pco2w = m2m_collect(r, '^(?!.*blank).*PCO2W.*\\.nc$')
    pco2w = pco2w.where(pco2w.deployment == deploy, drop=True)  # limit to the deployment of interest

    # clean-up and reorganize
    pco2w = pco2w_instrument(pco2w)
    pco2w = update_dataset(pco2w, vocab['maxdepth'])

    # save the data
    out_path = os.path.join(CONFIG['base_dir']['m2m_base'], site.lower(), level, instrmt)
    out_path = os.path.abspath(out_path)
    if not os.path.exists(out_path):
        os.makedirs(out_path)

    out_file = ('%s.%s.%s.deploy%02d.%s.%s.nc' % (site.lower(), level, instrmt, deploy, method, stream))
    nc_out = os.path.join(out_path, out_file)

    pco2w.to_netcdf(nc_out, mode='w', format='NETCDF4', engine='h5netcdf', encoding=ENCODINGS)
def request_data(site, node, sensor, method, stream, start, stop):
    """
    Request and download data from the OOI M2M system.

    :param site: OOI Net site designator
    :param node: OOI Net node designator
    :param sensor: OOI Net sensor designator
    :param method: OOI Net data delivery method
    :param stream: OOI Net stream name
    :param start: starting date/time for the data request
    :param stop: ending date/time for the data request
    :return data: the downloaded data
    """
    # Request the data (this may take some time).
    r = m2m_request(site, node, sensor, method, stream, start, stop)

    # Use a regex tag to download the sensor data from the THREDDS catalog created by our request. The tag is
    # built from the instrument class portion of the sensor designator (e.g. '06-PHSEND000' yields 'PHSEN').
    tag = ('.*{}.*\\.nc$'.format(sensor[3:8]))
    data = m2m_collect(r, tag)

    return data
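# A minimal usage sketch for request_data(), reusing the PHSEN designators from the example above; the date
# strings are illustrative and simply need to be in the ISO format used by the M2M API:
data = request_data('CE02SHSM', 'RID26', '06-PHSEND000', 'telemetered', 'phsen_abcdef_dcl_instrument',
                    '2021-01-01T00:00:00.000Z', '2021-02-01T00:00:00.000Z')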
def main(argv=None):
    args = inputs(argv)
    site = args.site
    node = args.node
    sensor = args.sensor
    method = args.method
    stream = args.stream
    deploy = args.deploy
    start = args.start
    stop = args.stop
    burst = args.burst
    sensor_type = args.sensor_type

    # determine the start and stop times for the data request based on either the deployment number or user
    # entered beginning and ending dates.
    if not deploy and not (start and stop):
        raise SyntaxError('You must specify either a deployment number or beginning and end dates of interest.')

    if deploy:
        # determine start and end dates based on the deployment number
        start, stop = get_deployment_dates(site, node, sensor, deploy)
        if not start or not stop:
            exit_text = ('Deployment dates are unavailable for %s-%s-%s, deployment %02d.'
                         % (site, node, sensor, deploy))
            raise SystemExit(exit_text)

    # request the data for download
    r = m2m_request(site, node, sensor, method, stream, start, stop)
    if not r:
        exit_text = ('Request failed for %s-%s-%s. Check request.' % (site, node, sensor))
        raise SystemExit(exit_text)

    # valid request, start downloading the data
    if deploy:
        dosta = m2m_collect(r, ('.*deployment%04d.*DOSTA.*\\.nc$' % deploy))
    else:
        dosta = m2m_collect(r, '.*DOSTA.*\\.nc$')

    if not dosta:
        exit_text = ('Data unavailable for %s-%s-%s. Check request.' % (site, node, sensor))
        raise SystemExit(exit_text)

    if sensor_type not in ['solo', 'ctdbp']:
        exit_text = 'You need to specify the type of DOSTA in order to process: solo or ctdbp'
        raise SystemExit(exit_text)

    # clean-up and reorganize based on the type and data delivery method
    if sensor_type == 'solo':
        dosta = dosta_datalogger(dosta, burst)
    elif sensor_type == 'ctdbp':
        if method in ['telemetered', 'recovered_host']:
            dosta = dosta_ctdbp_datalogger(dosta)
        else:
            dosta = dosta_ctdbp_instrument(dosta)

    vocab = get_vocabulary(site, node, sensor)[0]
    dosta = update_dataset(dosta, vocab['maxdepth'])

    # save the data to disk
    out_file = os.path.abspath(args.outfile)
    if not os.path.exists(os.path.dirname(out_file)):
        os.makedirs(os.path.dirname(out_file))

    dosta.to_netcdf(out_file, mode='w', format='NETCDF4', engine='h5netcdf', encoding=ENCODINGS)
def main(argv=None):
    args = inputs(argv)
    site = args.site
    node = args.node
    sensor = args.sensor
    method = args.method
    stream = args.stream
    deploy = args.deploy
    start = args.start
    stop = args.stop
    burst = args.burst

    # check if we are specifying a deployment or a specific date and time range
    if not deploy and not (start and stop):
        raise SyntaxError('You must specify either a deployment number or beginning and end dates of interest.')

    # if we are specifying a deployment number, then get the data from the Gold Copy THREDDS server
    if deploy:
        # download the data for the deployment
        flort = load_gc_thredds(site, node, sensor, method, stream, ('.*deployment%04d.*FLORT.*\\.nc$' % deploy))

        # check to see if we downloaded any data
        if not flort:
            exit_text = ('Data unavailable for %s-%s-%s, %s, %s, deployment %d.'
                         % (site, node, sensor, method, stream, deploy))
            raise SystemExit(exit_text)
    else:
        # otherwise, request the data for download from OOINet via the M2M API using the specified dates
        r = m2m_request(site, node, sensor, method, stream, start, stop)
        if not r:
            exit_text = ('Request failed for %s-%s-%s, %s, %s, from %s to %s.'
                         % (site, node, sensor, method, stream, start, stop))
            raise SystemExit(exit_text)

        # valid M2M request, start downloading the data
        flort = m2m_collect(r, '.*FLORT.*\\.nc$')

        # check to see if we downloaded any data
        if not flort:
            exit_text = ('Data unavailable for %s-%s-%s, %s, %s, from %s to %s.'
                         % (site, node, sensor, method, stream, start, stop))
            raise SystemExit(exit_text)

    # clean-up and reorganize the data
    if node == 'SP001':
        # this FLORT is part of a CSPP
        flort = flort_cspp(flort)
    elif node == 'WFP01':
        # this FLORT is part of a Wire-Following Profiler
        flort = flort_wfp(flort)
    elif node == 'SBD17':
        # this FLORT is connected to the CTDBP on an EA Inshore Surface Mooring
        flort = flort_instrument(flort)
        if not flort:
            # there was no data left after removing all the 0's
            sys.exit()
    else:
        # this FLORT is stand-alone on one of the moorings
        flort = flort_datalogger(flort, burst)

    vocab = get_vocabulary(site, node, sensor)[0]
    flort = update_dataset(flort, vocab['maxdepth'])

    # save the data to disk
    out_file = os.path.abspath(args.outfile)
    if not os.path.exists(os.path.dirname(out_file)):
        os.makedirs(os.path.dirname(out_file))

    flort.to_netcdf(out_file, mode='w', format='NETCDF4', engine='h5netcdf', encoding=ENCODINGS)
def main(argv=None):
    # setup the input arguments
    args = inputs(argv)
    site = args.site
    node = args.node
    sensor = args.sensor
    method = args.method
    stream = args.stream
    deploy = args.deploy
    start = args.start
    stop = args.stop
    burst = args.burst

    # determine the start and stop times for the data request based on either the deployment number or user
    # entered beginning and ending dates.
    if not deploy and not (start and stop):
        raise SyntaxError('You must specify either a deployment number or beginning and end dates of interest.')

    if deploy:
        # determine start and end dates based on the deployment number
        start, stop = get_deployment_dates(site, node, sensor, deploy)
        if not start or not stop:
            exit_text = ('Deployment dates are unavailable for %s-%s-%s, deployment %02d.'
                         % (site, node, sensor, deploy))
            raise SystemExit(exit_text)

    # request the data for download
    r = m2m_request(site, node, sensor, method, stream, start, stop)
    if not r:
        exit_text = ('Request failed for %s-%s-%s. Check request.' % (site, node, sensor))
        raise SystemExit(exit_text)

    # valid request, start downloading the data, selecting the air or water stream as appropriate
    if re.match(r'.*_air.*', stream):
        if deploy:
            pco2a = m2m_collect(r, ('.*deployment%04d.*PCO2A.*air.*\\.nc$' % deploy))
        else:
            pco2a = m2m_collect(r, '.*PCO2A.*air.*\\.nc$')
        nc_group = 'air'
    else:
        if deploy:
            pco2a = m2m_collect(r, ('.*deployment%04d.*PCO2A.*water.*\\.nc$' % deploy))
        else:
            pco2a = m2m_collect(r, '.*PCO2A.*water.*\\.nc$')
        nc_group = 'water'

    if not pco2a:
        exit_text = ('Data unavailable for %s-%s-%s. Check request.' % (site, node, sensor))
        raise SystemExit(exit_text)

    # clean-up and reorganize
    pco2a = pco2a_datalogger(pco2a, burst)
    vocab = get_vocabulary(site, node, sensor)[0]
    pco2a = update_dataset(pco2a, vocab['maxdepth'])

    # save the data to disk, appending to the air or water group if the file already exists
    out_file = os.path.abspath(args.outfile)
    if not os.path.exists(os.path.dirname(out_file)):
        os.makedirs(os.path.dirname(out_file))

    if os.path.isfile(out_file):
        pco2a.to_netcdf(out_file, mode='a', format='NETCDF4', engine='h5netcdf', encoding=ENCODINGS,
                        group=nc_group)
    else:
        pco2a.to_netcdf(out_file, mode='w', format='NETCDF4', engine='h5netcdf', encoding=ENCODINGS,
                        group=nc_group)
def main(argv=None):
    # setup the input arguments
    args = inputs(argv)
    site = args.site
    node = args.node
    sensor = args.sensor
    method = args.method
    stream = args.stream
    deploy = args.deploy
    start = args.start
    stop = args.stop

    # check if we are specifying a deployment or a specific date and time range
    if not deploy and not (start and stop):
        raise SyntaxError('You must specify either a deployment number or beginning and end dates of interest.')

    # if we are specifying a deployment number, then get the data from the Gold Copy THREDDS server
    if deploy:
        # download the data for the deployment, excluding the blank measurements
        pco2w = load_gc_thredds(site, node, sensor, method, stream,
                                ('^(?!.*blank).*deployment%04d.*PCO2W.*\\.nc$' % deploy))

        # check to see if we downloaded any data
        if not pco2w:
            exit_text = ('Data unavailable for %s-%s-%s, %s, %s, deployment %d.'
                         % (site, node, sensor, method, stream, deploy))
            raise SystemExit(exit_text)
    else:
        # otherwise, request the data for download from OOINet via the M2M API using the specified dates
        r = m2m_request(site, node, sensor, method, stream, start, stop)
        if not r:
            exit_text = ('Request failed for %s-%s-%s, %s, %s, from %s to %s.'
                         % (site, node, sensor, method, stream, start, stop))
            raise SystemExit(exit_text)

        # valid M2M request, start downloading the data, excluding the blank measurements
        pco2w = m2m_collect(r, '^(?!.*blank).*PCO2W.*\\.nc$')

        # check to see if we downloaded any data
        if not pco2w:
            exit_text = ('Data unavailable for %s-%s-%s, %s, %s, from %s to %s.'
                         % (site, node, sensor, method, stream, start, stop))
            raise SystemExit(exit_text)

    # clean-up and reorganize
    if method in ['telemetered', 'recovered_host']:
        pco2w = pco2w_datalogger(pco2w)
    else:
        pco2w = pco2w_instrument(pco2w)

    vocab = get_vocabulary(site, node, sensor)[0]
    pco2w = update_dataset(pco2w, vocab['maxdepth'])

    # save the data to disk
    out_file = os.path.abspath(args.outfile)
    if not os.path.exists(os.path.dirname(out_file)):
        os.makedirs(os.path.dirname(out_file))

    pco2w.to_netcdf(out_file, mode='w', format='NETCDF4', engine='h5netcdf', encoding=ENCODINGS)
def data_request(site, assembly, instrument, method, **kwargs):
    """
    Requests data via the OOI M2M API using the site code, assembly type, instrument class and data delivery
    method, as defined in the m2m_urls.yml, to construct the OOI specific data request.

    :param site: OOI site code as an 8 character string
    :param assembly: the assembly type where the instrument is located
    :param instrument: the OOI instrument class name for the instrument of interest
    :param method: the data delivery method for the system of interest
    :param kwargs: takes the following optional keyword arguments:

        start: starting date/time for the data request in a dateutil.parser recognizable form. If None, the
            default, the beginning of the data record will be used.
        stop: ending date/time for the data request in a dateutil.parser recognizable form. If None, the
            default, the end of the data record will be used.
        deploy: use the deployment number, entered as an integer, to set the starting and ending dates. If None,
            the default, the starting and ending dates are used. If you enter both, the deployment number takes
            priority in setting the start and end dates.
        aggregate: in cases where more than one instance of an instrument class is part of an assembly, collects
            all of the data if the integer value entered is 0, or the specific instance of the instrument if any
            value greater than 0 is used. If None, the default, the first instance of the instrument will be
            used.

    :return data: the requested data as an xarray dataset for further analysis
    """
    # setup inputs to the function, making sure the case is correct
    site = site.upper()
    assembly = assembly.lower()
    instrument = instrument.lower()
    method = method.lower()

    # parse the keyword arguments
    start = None
    stop = None
    deploy = None
    aggregate = None
    for key, value in kwargs.items():
        if key not in ['start', 'stop', 'deploy', 'aggregate']:
            raise KeyError('Unknown keyword (%s) argument.' % key)
        if key == 'start':
            start = value
        if key == 'stop':
            stop = value
        if key == 'deploy':
            deploy = value
        if key == 'aggregate':
            aggregate = value

    # use the assembly, instrument and data delivery methods to find the system of interest
    node, sensor, stream = filter_urls(site, assembly, instrument, method)

    # check the formatting of the start and end dates. we need to be able to parse and convert to an ISO format.
    if start:
        # check the formatting of the start date string and convert to the ISO format used by the M2M API
        try:
            start = parser.parse(start)
            start = start.astimezone(pytz.utc)
            start = start.strftime('%Y-%m-%dT%H:%M:%S.000Z')
        except parser.ParserError:
            raise SyntaxError('Formatting of the starting date string needs to be in a recognizable format')

    if stop:
        # check the formatting of the stop date string and convert to the ISO format used by the M2M API
        try:
            stop = parser.parse(stop)
            stop = stop.astimezone(pytz.utc)
            stop = stop.strftime('%Y-%m-%dT%H:%M:%S.000Z')
        except parser.ParserError:
            raise SyntaxError('Formatting of the ending date string needs to be in a recognizable format')

    if deploy:
        # determine start and end dates based on the deployment number
        start, stop = get_deployment_dates(site, node[0], sensor[0], deploy)
        if not start or not stop:
            exit_text = ('Deployment dates are unavailable for %s-%s-%s-%s, deployment %02d.'
                         % (site.lower(), assembly, instrument, method, deploy))
            raise RuntimeWarning(exit_text)

    # for some cases, there may be more than one stream, but in general we only want the first one
    if isinstance(stream[0], list):
        stream = stream[0][0]
    else:
        stream = stream[0]

    tag = ('.*{instrument}.*\\.nc$'.format(instrument=instrument.upper()))  # set regex tag to use when downloading
    data = None  # setup the default data set

    # check if there are multiple instances of this instrument class on the assembly
    if len(node) > 1:
        print('There are multiple instances of the instrument %s under %s-%s.' % (instrument, site.lower(),
                                                                                  assembly))

    # check if we are aggregating the multiple instruments into a single data set
    if isinstance(aggregate, int):
        if aggregate == 0:
            # request all of the instruments associated with this site, assembly, instrument and method
            print(('Requesting all %d instances of this instrument. Data sets will be concatenated\n'
                   'and a new variable called `sensor_count` will be added to help distinguish the\n'
                   'instruments for later processing.') % len(node))
            for i in range(len(node)):
                r = m2m_request(site, node[i], sensor[i], method, stream, start, stop)
                if r:
                    temp = m2m_collect(r, tag)
                    temp['sensor_count'] = temp['deployment'] * 0 + i + 1
                    if data is None:
                        data = temp
                    else:
                        data = xr.concat([data, temp], dim='time')
        else:
            # request a specific instrument out of the multiple instruments associated with this site, assembly,
            # instrument and method.
            if aggregate > len(node):
                raise SyntaxError('Only %d instruments available, you selected %d' % (len(node), aggregate))

            print('Requesting instrument %d out of %d.' % (aggregate, len(node)))
            i = aggregate - 1
            r = m2m_request(site, node[i], sensor[i], method, stream, start, stop)
            if r:
                data = m2m_collect(r, tag)
    else:
        r = m2m_request(site, node[0], sensor[0], method, stream, start, stop)
        if r:
            data = m2m_collect(r, tag)

    if data is None:
        raise RuntimeWarning('Data unavailable for %s-%s-%s-%s.' % (site.lower(), assembly, instrument, method))

    # convert strings with data types set as objects or S64 with binary encoding to unicode strings
    for v in data.variables:
        if data[v].dtype == np.dtype('O') or data[v].dtype == np.dtype('S64'):
            data[v] = data[v].astype(str)

    # return the resulting data, an xarray.Dataset object
    return data
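# A minimal usage sketch for data_request(). The assembly and instrument class names ('nsif' and 'phsen') follow
# the naming used in the examples above and are assumed to be defined in m2m_urls.yml; the deployment number is
# illustrative:
phsen = data_request('CE02SHSM', 'nsif', 'phsen', 'telemetered', deploy=15)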
def main(argv=None):
    args = inputs(argv)
    site = args.site
    node = args.node
    sensor = args.sensor
    method = args.method
    stream = args.stream
    deploy = args.deploy
    start = args.start
    stop = args.stop
    burst = args.burst

    # determine the start and stop times for the data request based on either the deployment number or user
    # entered beginning and ending dates.
    if not deploy and not (start and stop):
        raise SyntaxError('You must specify either a deployment number or beginning and end dates of interest.')

    if deploy:
        # determine start and end dates based on the deployment number
        start, stop = get_deployment_dates(site, node, sensor, deploy)
        if not start or not stop:
            exit_text = ('Deployment dates are unavailable for %s-%s-%s, deployment %02d.'
                         % (site, node, sensor, deploy))
            raise SystemExit(exit_text)

    if stream not in ['suna_dcl_recovered']:
        exit_text = ('Currently the only stream supported is suna_dcl_recovered, you requested %s.' % stream)
        raise SystemExit(exit_text)

    # request the data for download
    r = m2m_request(site, node, sensor, method, stream, start, stop)
    if not r:
        exit_text = ('Request failed for %s-%s-%s. Check request.' % (site, node, sensor))
        raise SystemExit(exit_text)

    # valid request, start downloading the data
    if deploy:
        nutnr = m2m_collect(r, ('.*deployment%04d.*NUTNR.*\\.nc$' % deploy))
    else:
        nutnr = m2m_collect(r, '.*NUTNR.*\\.nc$')

    if not nutnr:
        exit_text = ('Data unavailable for %s-%s-%s. Check request.' % (site, node, sensor))
        raise SystemExit(exit_text)

    # clean-up and reorganize
    nutnr = nutnr_datalogger(nutnr, burst)
    vocab = get_vocabulary(site, node, sensor)[0]
    nutnr = update_dataset(nutnr, vocab['maxdepth'])

    # save the data to disk
    out_file = os.path.abspath(args.outfile)
    if not os.path.exists(os.path.dirname(out_file)):
        os.makedirs(os.path.dirname(out_file))

    nutnr.to_netcdf(out_file, mode='w', format='NETCDF4', engine='h5netcdf', encoding=ENCODINGS)
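# Each of these scripts is intended to run as a stand-alone module, so a file would typically close with the
# standard entry-point guard:
if __name__ == '__main__':
    main()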