Code example #1
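These examples build on a shared set of OOI helper functions. A plausible common import block, assuming the helpers come from the ooi_data_explorations package (the module paths are an assumption based on the function names; the instrument-specific routines such as phsen_datalogger, pco2a_datalogger, flort_cspp, dosta_datalogger, pco2w_instrument and nutnr_datalogger are assumed to live in matching instrument modules), would look like:

import os
import re

import numpy as np
import pandas as pd
import pytz
from dateutil import parser

# assumed module path for the shared helper routines used throughout these examples
from ooi_data_explorations.common import (CONFIG, ENCODINGS, add_annotation_qc_flags,
                                          get_annotations, get_deployment_dates,
                                          get_vocabulary, inputs, list_deployments,
                                          load_gc_thredds, m2m_collect,
                                          m2m_request, update_dataset)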
def main(argv=None):
    # setup the input arguments
    args = inputs(argv)
    site = args.site
    node = args.node
    sensor = args.sensor
    method = args.method
    stream = args.stream
    deploy = args.deploy
    start = args.start
    stop = args.stop

    # determine the start and stop times for the data request based on either the deployment number or user entered
    # beginning and ending dates.
    if not deploy and not (start and stop):
        raise SyntaxError('You must specify either a deployment number or beginning and end dates of interest.')
    else:
        if deploy:
            # Determine start and end dates based on the deployment number
            start, stop = get_deployment_dates(site, node, sensor, deploy)
            if not start or not stop:
                exit_text = ('Deployment dates are unavailable for %s-%s-%s, deployment %02d.' % (site, node, sensor,
                                                                                                  deploy))
                raise SystemExit(exit_text)

    # Request the data for download
    r = m2m_request(site, node, sensor, method, stream, start, stop)
    if not r:
        exit_text = ('Request failed for %s-%s-%s. Check request.' % (site, node, sensor))
        raise SystemExit(exit_text)

    # Valid request, start downloading the data
    if deploy:
        phsen = m2m_collect(r, ('.*deployment%04d.*PHSEN.*\\.nc$' % deploy))
    else:
        phsen = m2m_collect(r, '.*PHSEN.*\\.nc$')

    if not phsen:
        exit_text = ('Data unavailable for %s-%s-%s. Check request.' % (site, node, sensor))
        raise SystemExit(exit_text)

    # clean-up and reorganize
    if method in ['telemetered', 'recovered_host']:
        if re.match('.*imodem.*', stream):
            phsen = phsen_imodem(phsen)
        else:
            phsen = phsen_datalogger(phsen)
    else:
        phsen = phsen_instrument(phsen)

    vocab = get_vocabulary(site, node, sensor)[0]
    phsen = update_dataset(phsen, vocab['maxdepth'])

    # save the data to disk
    out_file = os.path.abspath(args.outfile)
    if not os.path.exists(os.path.dirname(out_file)):
        os.makedirs(os.path.dirname(out_file))

    phsen.to_netcdf(out_file, mode='w', format='NETCDF4', engine='h5netcdf', encoding=ENCODINGS)
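A hypothetical command-line invocation of the script above, assuming inputs() is an argparse wrapper whose long option names match the args attributes read at the top (the flag names and the script name are assumptions, not confirmed by the listing):

python request_phsen.py --site CE02SHSM --node RID26 --sensor 06-PHSEND000 \
    --method telemetered --stream phsen_abcdef_dcl_instrument --deploy 15 \
    --outfile ce02shsm/nsif/phsen/ce02shsm.nsif.phsen.deploy15.nc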
Code example #2
def main():
    # Setup needed parameters for the request, the user would need to vary these to suit their own needs and
    # sites/instruments of interest. Site, node, sensor, stream and delivery method names can be obtained from the
    # Ocean Observatories Initiative web site. The last two parameters (level and instrmt) will set path and naming
    # conventions to save the data to the local disk.
    site = 'CE02SHSM'  # OOI Net site designator
    node = 'SBD12'  # OOI Net node designator
    sensor = '04-PCO2AA000'  # OOI Net sensor designator
    stream = 'pco2a_a_dcl_instrument_air'  # OOI Net stream name
    method = 'telemetered'  # OOI Net data delivery method
    level = 'buoy'  # local directory name, level below site
    instrmt = 'pco2a'  # local directory name, instrument below level

    # We are after telemetered data. Determine list of deployments and use the last, presumably currently active,
    # deployment to determine the start and end dates for our request.
    vocab = get_vocabulary(site, node, sensor)[0]
    deployments = list_deployments(site, node, sensor)
    deploy = deployments[-1]
    start, stop = get_deployment_dates(site, node, sensor, deploy)

    # request and download the data -- air measurements
    r = m2m_request(site, node, sensor, method, stream, start, stop)
    air = m2m_collect(r, ('.*deployment%04d.*PCO2A.*air.*\\.nc$' % deploy))

    # request and download the data -- water measurements
    r = m2m_request(site, node, sensor, method, 'pco2a_a_dcl_instrument_water',
                    start, stop)
    water = m2m_collect(r, ('.*deployment%04d.*PCO2A.*water.*\\.nc$' % deploy))

    # clean-up and reorganize the air and water datasets
    air = pco2a_datalogger(air, True)
    air = update_dataset(air, vocab['maxdepth'])
    water = pco2a_datalogger(water, True)
    water = update_dataset(water, vocab['maxdepth'])

    # save the data -- utilize groups for the air and water datasets
    out_path = os.path.join(CONFIG['base_dir']['m2m_base'], site.lower(),
                            level, instrmt)
    out_path = os.path.abspath(out_path)
    if not os.path.exists(out_path):
        os.makedirs(out_path)

    out_file = ('%s.%s.%s.deploy%02d.%s.%s.nc' %
                (site.lower(), level, instrmt, deploy, method, stream))
    nc_out = os.path.join(out_path, out_file)
    air.to_netcdf(nc_out,
                  mode='w',
                  format='NETCDF4',
                  engine='h5netcdf',
                  encoding=ENCODINGS,
                  group='air')
    water.to_netcdf(nc_out,
                    mode='a',
                    format='NETCDF4',
                    engine='h5netcdf',
                    encoding=ENCODINGS,
                    group='water')
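Because the air and water datasets are written to separate netCDF4 groups in the same file, each group has to be opened on its own when reading the data back. A short sketch using xarray:

import xarray as xr

# reopen the grouped file written above, one group at a time
air = xr.open_dataset(nc_out, group='air', engine='h5netcdf')
water = xr.open_dataset(nc_out, group='water', engine='h5netcdf')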
Code example #3
def main():
    # Set up the parameters needed for the request; the user will need to vary these to suit their own needs and
    # sites/instruments of interest. Site, node, sensor, stream and delivery method names can be obtained from the
    # Ocean Observatories Initiative web site. The last two will set path and naming conventions to save the data
    # to the local disk
    site = 'CE02SHSM'  # OOI Net site designator
    node = 'RID26'  # OOI Net node designator
    sensor = '06-PHSEND000'  # OOI Net sensor designator
    stream = 'phsen_abcdef_dcl_instrument'  # OOI Net stream name
    method = 'telemetered'  # OOI Net data delivery method
    level = 'nsif'  # local directory name, level below site
    instrmt = 'phsen'  # local directory name, instrument below level

    # We are after telemetered data. Determine list of deployments and use the last, presumably currently active,
    # deployment to determine the start and end dates for our request.
    vocab = get_vocabulary(site, node, sensor)[0]
    deployments = list_deployments(site, node, sensor)
    deploy = deployments[-1]
    start, stop = get_deployment_dates(site, node, sensor, deploy)

    # request and download the data
    r = m2m_request(site, node, sensor, method, stream, start, stop)
    phsen = m2m_collect(r, '.*PHSEN.*\\.nc$')
    phsen = phsen.where(phsen.deployment == deploy,
                        drop=True)  # limit to the deployment of interest

    # clean-up and reorganize
    phsen = phsen_datalogger(phsen)
    phsen = update_dataset(phsen, vocab['maxdepth'])

    # save the data
    out_path = os.path.join(CONFIG['base_dir']['m2m_base'], site.lower(),
                            level, instrmt)
    out_path = os.path.abspath(out_path)
    if not os.path.exists(out_path):
        os.makedirs(out_path)

    out_file = ('%s.%s.%s.deploy%02d.%s.%s.nc' %
                (site.lower(), level, instrmt, deploy, method, stream))
    nc_out = os.path.join(out_path, out_file)

    phsen.to_netcdf(nc_out,
                    mode='w',
                    format='NETCDF4',
                    engine='h5netcdf',
                    encoding=ENCODINGS)
Code example #4
def main():
    # Set up the parameters needed for the request; the user will need to vary these to suit their own needs and
    # sites/instruments of interest. Site, node, sensor, stream and delivery method names can be obtained from the
    # Ocean Observatories Initiative web site. The last two will set path and naming conventions to save the data
    # to the local disk
    site = 'CE01ISSM'  # OOI Net site designator
    node = 'SBD17'  # OOI Net node designator
    sensor = '06-FLORTD000'  # OOI Net sensor designator
    stream = 'flort_sample'  # OOI Net stream name
    method = 'recovered_inst'  # OOI Net data delivery method
    level = 'buoy'  # local directory name, level below site
    instrmt = 'flort'  # local directory name, instrument below level

    # We are after recovered instrument data. Determine list of deployments and use a more recent deployment to
    # determine the start and end dates for our request.
    vocab = get_vocabulary(site, node, sensor)[0]
    deployments = list_deployments(site, node, sensor)
    deploy = deployments[5]
    start, stop = get_deployment_dates(site, node, sensor, deploy)

    # request and download the data
    r = m2m_request(site, node, sensor, method, stream, start, stop)
    flort = m2m_collect(r, '.*FLORT.*\\.nc$')
    flort = flort.where(flort.deployment == deploy,
                        drop=True)  # limit to the deployment of interest

    # clean-up and reorganize
    flort = flort_instrument(flort)
    flort = update_dataset(flort, vocab['maxdepth'])

    # save the data
    out_path = os.path.join(CONFIG['base_dir']['m2m_base'], site.lower(),
                            level, instrmt)
    out_path = os.path.abspath(out_path)
    if not os.path.exists(out_path):
        os.makedirs(out_path)

    out_file = ('%s.%s.%s.deploy%02d.%s.%s.nc' %
                (site.lower(), level, instrmt, deploy, method, stream))
    nc_out = os.path.join(out_path, out_file)

    flort.to_netcdf(nc_out,
                    mode='w',
                    format='NETCDF4',
                    engine='h5netcdf',
                    encoding=ENCODINGS)
Code example #5
def main():
    # Set up the parameters needed for the request; the user will need to vary these to suit their own needs and
    # sites/instruments of interest. Site, node, sensor and stream names can be obtained from the Ocean Observatories
    # Initiative web site
    site = 'CE01ISSM'  # OOI Net site designator
    node = 'RID16'  # OOI Net node designator
    sensor = '05-PCO2WB000'  # OOI Net sensor designator
    stream = 'pco2w_abc_instrument'  # OOI Net stream name
    method = 'recovered_inst'  # OOI Net data delivery method
    level = 'nsif'  # local directory name, level below site
    instrmt = 'pco2w'  # local directory name, instrument below level

    # We are after recovered instrument data. Determine list of deployments and use a previous one to determine the
    # start and end dates for our request.
    vocab = get_vocabulary(site, node, sensor)[0]
    deployments = list_deployments(site, node, sensor)
    deploy = deployments[-3]
    start, stop = get_deployment_dates(site, node, sensor, deploy)

    # request and download the data
    r = m2m_request(site, node, sensor, method, stream, start, stop)
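    # the negative lookahead, ^(?!.*blank), skips the PCO2W's separate blank-measurement files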
    pco2w = m2m_collect(r, '^(?!.*blank).*PCO2W.*\\.nc$')
    pco2w = pco2w.where(pco2w.deployment == deploy,
                        drop=True)  # limit to the deployment of interest

    # clean-up and reorganize
    pco2w = pco2w_instrument(pco2w)
    pco2w = update_dataset(pco2w, vocab['maxdepth'])

    # save the data
    out_path = os.path.join(CONFIG['base_dir']['m2m_base'], site.lower(),
                            level, instrmt)
    out_path = os.path.abspath(out_path)
    if not os.path.exists(out_path):
        os.makedirs(out_path)

    out_file = ('%s.%s.%s.deploy%02d.%s.%s.nc' %
                (site.lower(), level, instrmt, deploy, method, stream))
    nc_out = os.path.join(out_path, out_file)

    pco2w.to_netcdf(nc_out,
                    mode='w',
                    format='NETCDF4',
                    engine='h5netcdf',
                    encoding=ENCODINGS)
Code example #6
def main():
    # Set up the parameters needed for the request; the user will need to vary these to suit their own needs and
    # sites/instruments of interest. Site, node, sensor, stream and delivery method names can be obtained from the
    # Ocean Observatories Initiative web site. The last parameter (instrmt) sets the path and naming conventions
    # used to save the data to the local disk.
    site = 'CE02SHSP'  # OOI Net site designator
    node = 'SP001'  # OOI Net node designator
    sensor = '07-FLORTJ000'  # OOI Net sensor designator
    stream = 'flort_sample'  # OOI Net stream name
    method = 'recovered_cspp'  # OOI Net data delivery method
    instrmt = 'flort'  # local directory name, instrument below site

    # We are after the recovered data. Determine list of deployments and use data from one of the earlier deployments
    vocab = get_vocabulary(site, node, sensor)[0]
    deployments = list_deployments(site, node, sensor)
    deploy = deployments[-4]

    # download the data from the Gold Copy THREDDS server
    flort = load_gc_thredds(site, node, sensor, method, stream,
                            ('.*deployment%04d.*FLORT.*\\.nc$' % deploy))

    # clean-up and reorganize
    flort = flort_cspp(flort)
    flort = update_dataset(flort, vocab['maxdepth'])

    # save the data
    out_path = os.path.join(CONFIG['base_dir']['m2m_base'], site.lower(),
                            instrmt)
    out_path = os.path.abspath(out_path)
    if not os.path.exists(out_path):
        os.makedirs(out_path)

    out_file = ('%s.%s.deploy%02d.%s.%s.nc' %
                (site.lower(), instrmt, deploy, method, stream))
    nc_out = os.path.join(out_path, out_file)

    flort.to_netcdf(nc_out,
                    mode='w',
                    format='NETCDF4',
                    engine='h5netcdf',
                    encoding=ENCODINGS)
Code example #7
def generate_qartod(site, node, sensor, cut_off):
    """
    Load all FLORT data for a defined reference designator (using the site,
    node and sensor names to construct the reference designator) and
    collected via the different data delivery methods and combine them into a
    single data set from which QARTOD test limits for the gross range and
    climatology tests can be calculated.

    :param site: Site designator, extracted from the first part of the
        reference designator
    :param node: Node designator, extracted from the second part of the
        reference designator
    :param sensor: Sensor designator, extracted from the third and fourth part
        of the reference designator
    :param cut_off: string formatted date to use as cut-off for data to add
        to QARTOD test sets
    :return annotations: pandas DataFrame of the combined system and initial
        HITL annotations for the sensor.
    :return gr_lookup: CSV formatted strings to save to a csv file for the
        QARTOD gross range lookup tables.
    :return clm_lookup: CSV formatted strings to save to a csv file for the
        QARTOD climatology lookup tables.
    :return clm_table: CSV formatted strings to save to a csv file for the
        QARTOD climatology range tables.
    """
    # load the combined data for the different sources of FLORT data
    data = combine_delivery_methods(site, node, sensor)

    # create boolean arrays of the data marked as "fail" by the quality checks and generate initial
    # HITL annotations that can be combined with system annotations to create a cleaned up data set
    # prior to calculating the QARTOD test values
    if node == 'WFP01':
        index = 10  # decimate the WFP data so we can process it
    else:
        index = 1
    chl_fail = data.estimated_chlorophyll_qc_summary_flag.where(
        data.estimated_chlorophyll_qc_summary_flag > 3).notnull()
    blocks = identify_blocks(chl_fail[::index], [18, 72])
    chl_hitl = create_annotations(site, node, sensor, blocks)
    chl_hitl['parameters'] = [[22, 1141] for i in chl_hitl['parameters']]

    cdom_fail = data.fluorometric_cdom_qc_summary_flag.where(
        data.fluorometric_cdom_qc_summary_flag > 3).notnull()
    blocks = identify_blocks(cdom_fail[::index], [18, 72])
    cdom_hitl = create_annotations(site, node, sensor, blocks)
    cdom_hitl['parameters'] = [[23, 1143] for i in cdom_hitl['parameters']]

    beta_fail = data.beta_700_qc_summary_flag.where(
        data.beta_700_qc_summary_flag > 3).notnull()
    blocks = identify_blocks(beta_fail[::index], [18, 72], 24)
    beta_hitl = create_annotations(site, node, sensor, blocks)
    beta_hitl['parameters'] = [[24, 25, 1139] for i in beta_hitl['parameters']]

    # combine the different dictionaries into a single HITL annotation dictionary for later use
    hitl = chl_hitl.copy()
    for d in (cdom_hitl, beta_hitl):
        for key, value in d.items():
            hitl[key] = hitl[key] + d[key]

    # get the current system annotations for the sensor
    annotations = get_annotations(site, node, sensor)
    annotations = pd.DataFrame(annotations)
    if not annotations.empty:
        annotations = annotations.drop(columns=['@class'])
        annotations['beginDate'] = pd.to_datetime(
            annotations.beginDT, unit='ms').dt.strftime('%Y-%m-%dT%H:%M:%S')
        annotations['endDate'] = pd.to_datetime(
            annotations.endDT, unit='ms').dt.strftime('%Y-%m-%dT%H:%M:%S')

    # append the fail annotations to the existing annotations (pandas removed
    # DataFrame.append in 2.0, so use pd.concat instead)
    annotations = pd.concat([annotations, pd.DataFrame(hitl)],
                            ignore_index=True, sort=False)

    # create an annotation-based quality flag
    data = add_annotation_qc_flags(data, annotations)

    # clean-up the data, NaN-ing values that were marked as fail in the QC checks and/or identified as a block
    # of failed data, and then removing all records where the rollup annotation (every parameter fails) was
    # set to fail.
    data['estimated_chlorophyll'][chl_fail] = np.nan
    if 'fluorometric_chl_a_annotations_qc_results' in data.variables:
        m = data.fluorometric_chl_a_annotations_qc_results == 4
        data['estimated_chlorophyll'][m] = np.nan

    data['fluorometric_cdom'][cdom_fail] = np.nan
    if 'fluorometric_cdom_annotations_qc_results' in data.variables:
        m = data.fluorometric_cdom_annotations_qc_results == 4
        data['fluorometric_cdom'][m] = np.nan

    data['beta_700'][beta_fail] = np.nan
    if 'total_volume_scattering_coefficient_annotations_qc_results' in data.variables:
        m = data.total_volume_scattering_coefficient_annotations_qc_results == 4
        data['beta_700'][m] = np.nan
        data['bback'][m] = np.nan

    if 'rollup_annotations_qc_results' in data.variables:
        data = data.where(data.rollup_annotations_qc_results < 4)

    # if a cut_off date was used, limit data to all data collected up to the cut_off date.
    # otherwise, set the limit to the range of the downloaded data.
    if cut_off:
        cut = parser.parse(cut_off)
    else:
        cut = parser.parse(data.time_coverage_end)
    cut = cut.astimezone(pytz.utc)
    end_date = cut.strftime('%Y-%m-%dT%H:%M:%S')
    src_date = cut.strftime('%Y-%m-%d')

    data = data.sel(time=slice('2014-01-01T00:00:00', end_date))

    # set the parameters and the gross range limits
    parameters = ['bback', 'estimated_chlorophyll', 'fluorometric_cdom']
    limits = [[0, 3], [0, 30], [0, 375]]

    # create the initial gross range entry
    gr_lookup = process_gross_range(data,
                                    parameters,
                                    limits,
                                    site=site,
                                    node=node,
                                    sensor=sensor,
                                    stream='flort_sample')

    # add a note documenting the time range of the source data
    gr_lookup['notes'] = (
        'User range based on data collected through {}.'.format(src_date))

    # based on the site and node, determine if we need a depth based climatology
    depth_bins = np.array([])
    if node in ['SP001', 'WFP01']:
        if site in [
                'CE01ISSP', 'CE02SHSP', 'CE06ISSP', 'CE07SHSP', 'CE09OSPM'
        ]:
            vocab = get_vocabulary(site, node, sensor)[0]
            max_depth = vocab['maxdepth']
            depth_bins = woa_standard_bins()
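            # trim the standard WOA depth bins to the site's maximum depth
            # (column 1 is assumed to be the lower edge of each bin)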
            m = depth_bins[:, 1] <= max_depth
            depth_bins = depth_bins[m, :]

    # create and format the climatology lookups and tables for the data
    clm_lookup, clm_table = process_climatology(data,
                                                parameters,
                                                limits,
                                                depth_bins=depth_bins,
                                                site=site,
                                                node=node,
                                                sensor=sensor,
                                                stream='flort_sample')

    # add the stream name
    clm_lookup['stream'] = 'flort_sample'

    return annotations, gr_lookup, clm_lookup, clm_table
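A minimal driver sketch for generate_qartod. The reference designator, cut-off date and output file names are illustrative assumptions, and the to_csv calls assume the lookups come back as pandas DataFrames; the docstring describes CSV-formatted content, so adapt the write step if they are plain strings:

if __name__ == '__main__':
    # illustrative reference designator for a surface mooring FLORT
    site, node, sensor = 'CE02SHSM', 'RID27', '02-FLORTD000'
    annotations, gr_lookup, clm_lookup, clm_table = generate_qartod(site, node, sensor, '2021-01-01')

    # save the lookups for review (file names are illustrative)
    gr_lookup.to_csv('flort_qartod_gross_range_values.csv', index=False)
    clm_lookup.to_csv('flort_qartod_climatology_values.csv', index=False)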
Code example #8
def main(argv=None):
    args = inputs(argv)
    site = args.site
    node = args.node
    sensor = args.sensor
    method = args.method
    stream = args.stream
    deploy = args.deploy
    start = args.start
    stop = args.stop
    burst = args.burst
    sensor_type = args.sensor_type

    # determine the start and stop times for the data request based on either the deployment number or user entered
    # beginning and ending dates.
    if not deploy and not (start and stop):
        raise SyntaxError(
            'You must specify either a deployment number or beginning and end dates of interest.'
        )
    else:
        if deploy:
            # Determine start and end dates based on the deployment number
            start, stop = get_deployment_dates(site, node, sensor, deploy)
            if not start or not stop:
                exit_text = (
                    'Deployment dates are unavailable for %s-%s-%s, deployment %02d.'
                    % (site, node, sensor, deploy))
                raise SystemExit(exit_text)

    # Request the data for download
    r = m2m_request(site, node, sensor, method, stream, start, stop)
    if not r:
        exit_text = ('Request failed for %s-%s-%s. Check request.' %
                     (site, node, sensor))
        raise SystemExit(exit_text)

    # Valid request, start downloading the data
    if deploy:
        dosta = m2m_collect(r, ('.*deployment%04d.*DOSTA.*\\.nc$' % deploy))
    else:
        dosta = m2m_collect(r, '.*DOSTA.*\\.nc$')

    if not dosta:
        exit_text = ('Data unavailable for %s-%s-%s. Check request.' %
                     (site, node, sensor))
        raise SystemExit(exit_text)

    if sensor_type not in ['solo', 'ctdbp']:
        exit_text = 'You need to specify the type of DOSTA in order to process: solo or ctdbp'
        raise SystemExit(exit_text)

    # clean-up and reorganize based on the type and data delivery method
    if sensor_type == 'solo':
        dosta = dosta_datalogger(dosta, burst)

    if sensor_type == 'ctdbp':
        if method in ['telemetered', 'recovered_host']:
            dosta = dosta_ctdbp_datalogger(dosta)
        else:
            dosta = dosta_ctdbp_instrument(dosta)

    vocab = get_vocabulary(site, node, sensor)[0]
    dosta = update_dataset(dosta, vocab['maxdepth'])

    # save the data to disk
    out_file = os.path.abspath(args.outfile)
    if not os.path.exists(os.path.dirname(out_file)):
        os.makedirs(os.path.dirname(out_file))

    dosta.to_netcdf(out_file,
                    mode='w',
                    format='NETCDF4',
                    engine='h5netcdf',
                    encoding=ENCODINGS)
Code example #9
def main(argv=None):
    args = inputs(argv)
    site = args.site
    node = args.node
    sensor = args.sensor
    method = args.method
    stream = args.stream
    deploy = args.deploy
    start = args.start
    stop = args.stop
    burst = args.burst

    # check if we are specifying a deployment or a specific date and time range
    if not deploy and not (start and stop):
        raise SyntaxError(
            'You must specify either a deployment number or beginning and end dates of interest.'
        )

    # if we are specifying a deployment number, then get the data from the Gold Copy THREDDS server
    if deploy:
        # download the data for the deployment
        flort = load_gc_thredds(site, node, sensor, method, stream,
                                ('.*deployment%04d.*FLORT.*\\.nc$' % deploy))

        # check to see if we downloaded any data
        if not flort:
            exit_text = (
                'Data unavailable for %s-%s-%s, %s, %s, deployment %d.' %
                (site, node, sensor, method, stream, deploy))
            raise SystemExit(exit_text)
    else:
        # otherwise, request the data for download from OOINet via the M2M API using the specified dates
        r = m2m_request(site, node, sensor, method, stream, start, stop)
        if not r:
            exit_text = (
                'Request failed for %s-%s-%s, %s, %s, from %s to %s.' %
                (site, node, sensor, method, stream, start, stop))
            raise SystemExit(exit_text)

        # Valid M2M request, start downloading the data
        flort = m2m_collect(r, '.*FLORT.*\\.nc$')

        # check to see if we downloaded any data
        if not flort:
            exit_text = (
                'Data unavailable for %s-%s-%s, %s, %s, from %s to %s.' %
                (site, node, sensor, method, stream, start, stop))
            raise SystemExit(exit_text)

    # clean-up and reorganize the data
    if node == 'SP001':
        # this FLORT is part of a CSPP
        flort = flort_cspp(flort)
    elif node == 'WFP01':
        # this FLORT is part of a Wire-Following Profiler
        flort = flort_wfp(flort)
    elif node == 'SBD17':
        # this FLORT is connected to the CTDBP on an EA Inshore Surface Mooring
        flort = flort_instrument(flort)
        if not flort:
            # there was no data after removing all the 0's
            sys.exit()
    else:
        # this FLORT is stand-alone on one of the moorings
        flort = flort_datalogger(flort, burst)

    vocab = get_vocabulary(site, node, sensor)[0]
    flort = update_dataset(flort, vocab['maxdepth'])

    # save the data to disk
    out_file = os.path.abspath(args.outfile)
    if not os.path.exists(os.path.dirname(out_file)):
        os.makedirs(os.path.dirname(out_file))

    flort.to_netcdf(out_file,
                    mode='w',
                    format='NETCDF4',
                    engine='h5netcdf',
                    encoding=ENCODINGS)
Code example #10
def main(argv=None):
    # setup the input arguments
    args = inputs(argv)
    site = args.site
    node = args.node
    sensor = args.sensor
    method = args.method
    stream = args.stream
    deploy = args.deploy
    start = args.start
    stop = args.stop
    burst = args.burst

    # determine the start and stop times for the data request based on either the deployment number or user entered
    # beginning and ending dates.
    if not deploy and not (start and stop):
        raise SyntaxError(
            'You must specify either a deployment number or beginning and end dates of interest.'
        )
    else:
        if deploy:
            # Determine start and end dates based on the deployment number
            start, stop = get_deployment_dates(site, node, sensor, deploy)
            if not start or not stop:
                exit_text = (
                    'Deployment dates are unavailable for %s-%s-%s, deployment %02d.'
                    % (site, node, sensor, deploy))
                raise SystemExit(exit_text)

    # Request the data for download
    r = m2m_request(site, node, sensor, method, stream, start, stop)
    if not r:
        exit_text = ('Request failed for %s-%s-%s. Check request.' %
                     (site, node, sensor))
        raise SystemExit(exit_text)

    # Valid request, start downloading the data
    if re.match(r'.*_air.*', stream):
        if deploy:
            pco2a = m2m_collect(
                r, ('.*deployment%04d.*PCO2A.*air.*\\.nc$' % deploy))
        else:
            pco2a = m2m_collect(r, '.*PCO2A.*air.*\\.nc$')
        nc_group = 'air'
    else:
        if deploy:
            pco2a = m2m_collect(
                r, ('.*deployment%04d.*PCO2A.*water.*\\.nc$' % deploy))
        else:
            pco2a = m2m_collect(r, '.*PCO2A.*water.*\\.nc$')
        nc_group = 'water'

    if not pco2a:
        exit_text = ('Data unavailable for %s-%s-%s. Check request.' %
                     (site, node, sensor))
        raise SystemExit(exit_text)

    # clean-up and reorganize
    pco2a = pco2a_datalogger(pco2a, burst)
    vocab = get_vocabulary(site, node, sensor)[0]
    pco2a = update_dataset(pco2a, vocab['maxdepth'])

    # save the data to disk
    out_file = os.path.abspath(args.outfile)
    if not os.path.exists(os.path.dirname(out_file)):
        os.makedirs(os.path.dirname(out_file))

    # append to an existing file (e.g. adding the water group after the air
    # group), otherwise create a new one
    write_mode = 'a' if os.path.isfile(out_file) else 'w'
    pco2a.to_netcdf(out_file,
                    mode=write_mode,
                    format='NETCDF4',
                    engine='h5netcdf',
                    encoding=ENCODINGS,
                    group=nc_group)
Code example #11
def main(argv=None):
    # setup the input arguments
    args = inputs(argv)
    site = args.site
    node = args.node
    sensor = args.sensor
    method = args.method
    stream = args.stream
    deploy = args.deploy
    start = args.start
    stop = args.stop

    # check if we are specifying a deployment or a specific date and time range
    if not deploy and not (start and stop):
        raise SyntaxError(
            'You must specify either a deployment number or beginning and end dates of interest.'
        )

    # if we are specifying a deployment number, then get the data from the Gold Copy THREDDS server
    if deploy:
        # download the data for the deployment
        pco2w = load_gc_thredds(
            site, node, sensor, method, stream,
            ('^(?!.*blank).*deployment%04d.*PCO2W.*\\.nc$' % deploy))

        # check to see if we downloaded any data
        if not pco2w:
            exit_text = (
                'Data unavailable for %s-%s-%s, %s, %s, deployment %d.' %
                (site, node, sensor, method, stream, deploy))
            raise SystemExit(exit_text)
    else:
        # otherwise, request the data for download from OOINet via the M2M API using the specified dates
        r = m2m_request(site, node, sensor, method, stream, start, stop)
        if not r:
            exit_text = (
                'Request failed for %s-%s-%s, %s, %s, from %s to %s.' %
                (site, node, sensor, method, stream, start, stop))
            raise SystemExit(exit_text)

        # Valid M2M request, start downloading the data
        pco2w = m2m_collect(r, '^(?!.*blank).*PCO2W.*\\.nc$')

        # check to see if we downloaded any data
        if not pco2w:
            exit_text = (
                'Data unavailable for %s-%s-%s, %s, %s, from %s to %s.' %
                (site, node, sensor, method, stream, start, stop))
            raise SystemExit(exit_text)

    # clean-up and reorganize
    if method in ['telemetered', 'recovered_host']:
        pco2w = pco2w_datalogger(pco2w)
    else:
        pco2w = pco2w_instrument(pco2w)

    vocab = get_vocabulary(site, node, sensor)[0]
    pco2w = update_dataset(pco2w, vocab['maxdepth'])

    # save the data to disk
    out_file = os.path.abspath(args.outfile)
    if not os.path.exists(os.path.dirname(out_file)):
        os.makedirs(os.path.dirname(out_file))

    pco2w.to_netcdf(out_file,
                    mode='w',
                    format='NETCDF4',
                    engine='h5netcdf',
                    encoding=ENCODINGS)
Code example #12
def main(argv=None):
    args = inputs(argv)
    site = args.site
    node = args.node
    sensor = args.sensor
    method = args.method
    stream = args.stream
    deploy = args.deploy
    start = args.start
    stop = args.stop
    burst = args.burst

    # determine the start and stop times for the data request based on either the deployment number or user entered
    # beginning and ending dates.
    if not deploy and not (start and stop):
        raise SyntaxError(
            'You must specify either a deployment number or beginning and end dates of interest.'
        )
    else:
        if deploy:
            # Determine start and end dates based on the deployment number
            start, stop = get_deployment_dates(site, node, sensor, deploy)
            if not start or not stop:
                exit_text = (
                    'Deployment dates are unavailable for %s-%s-%s, deployment %02d.'
                    % (site, node, sensor, deploy))
                raise SystemExit(exit_text)

    if stream not in ['suna_dcl_recovered']:
        exit_text = (
            'Currently the only stream supported is suna_dcl_recovered, you requested %s.'
            % stream)
        raise SystemExit(exit_text)

    # Request the data for download
    r = m2m_request(site, node, sensor, method, stream, start, stop)
    if not r:
        exit_text = ('Request failed for %s-%s-%s. Check request.' %
                     (site, node, sensor))
        raise SystemExit(exit_text)

    # Valid request, start downloading the data
    if deploy:
        nutnr = m2m_collect(r, ('.*deployment%04d.*NUTNR.*\\.nc$' % deploy))
    else:
        nutnr = m2m_collect(r, '.*NUTNR.*\\.nc$')

    if not nutnr:
        exit_text = ('Data unavailable for %s-%s-%s. Check request.' %
                     (site, node, sensor))
        raise SystemExit(exit_text)

    # clean-up and reorganize
    nutnr = nutnr_datalogger(nutnr, burst)
    vocab = get_vocabulary(site, node, sensor)[0]
    nutnr = update_dataset(nutnr, vocab['maxdepth'])

    # save the data to disk
    out_file = os.path.abspath(args.outfile)
    if not os.path.exists(os.path.dirname(out_file)):
        os.makedirs(os.path.dirname(out_file))

    nutnr.to_netcdf(out_file,
                    mode='w',
                    format='NETCDF4',
                    engine='h5netcdf',
                    encoding=ENCODINGS)