Пример #1
0
def open_file(dods, das=None):
    """Open a file downloaded from a `.dods` response, returning a dataset.

    Optionally, read also the `.das` response to assign attributes to the
    dataset.

    """
    # A `.dods` file is an ASCII DDS header, a line containing only
    # "Data:", then raw binary data.  The original implementation scanned
    # the header in text mode with errors='ignore' and then computed the
    # binary offset as len(dds) + len('Data:\n'); because errors='ignore'
    # silently drops undecodable bytes (and the marker line may end in
    # CRLF), that character count can drift from the true byte offset.
    # Scanning in binary mode keeps the file position exact and lets the
    # same handle be reused for unpacking instead of reopening + seeking.
    with open(dods, 'rb') as f:
        dds_lines = []
        for line in iter(f.readline, b''):
            if line.strip() == b'Data:':
                break
            dds_lines.append(line)
        # The DDS grammar itself is plain ASCII; decode leniently so a
        # stray byte in the header does not abort parsing.
        dds = b''.join(dds_lines).decode('ascii', errors='ignore')
        dataset = build_dataset(dds)
        # f now sits exactly at the first byte of the binary section.
        dataset.data = unpack_data(f, dataset)

    if das is not None:
        with open(das) as f:
            add_attributes(dataset, parse_das(f.read()))

    return dataset
Пример #2
0
def open_file(dods, das=None):
    """Open a file downloaded from a `.dods` response, returning a dataset.

    Optionally, read also the `.das` response to assign attributes to the
    dataset.

    """
    # The `.dods` file mixes an ASCII DDS header with raw binary data,
    # so it must be opened in binary mode: text mode would attempt to
    # decode the binary section with the platform default encoding and
    # raise UnicodeDecodeError (or corrupt it via newline translation).
    with open(dods, 'rb') as f:
        dds, data = f.read().split(b'\nData:\n', 1)
    dataset = build_dataset(dds.decode('ascii'))
    dataset.data = unpack_data(data, dataset)

    if das is not None:
        with open(das) as f:
            add_attributes(dataset, parse_das(f.read()))

    return dataset
Пример #3
0
def open_dods(url, metadata=False):
    """Open a `.dods` response directly from *url*, returning a dataset.

    When *metadata* is true, also fetch the matching `.das` response and
    attach its attributes to the dataset.
    """
    r = requests.get(url)
    # ``r.content`` is bytes, so the separator must be bytes as well:
    # splitting bytes with a str separator raises TypeError on Python 3.
    dds, data = r.content.split(b'\nData:\n', 1)
    dataset = build_dataset(dds.decode('ascii'))
    dataset.data = unpack_data(data, dataset)

    if metadata:
        # Derive the `.das` URL by swapping the trailing "dods" for "das".
        scheme, netloc, path, query, fragment = urlsplit(url)
        dasurl = urlunsplit(
            (scheme, netloc, path[:-4] + 'das', query, fragment))
        # parse_das expects decoded text, not utf-8-encoded bytes.
        das = requests.get(dasurl).text
        add_attributes(dataset, parse_das(das))

    return dataset
Пример #4
0
def open_dods(url, metadata=False, application=None, session=None):
    """Open a `.dods` response directly, returning a dataset."""
    response = GET(url, application, session)
    # Split the response into the textual DDS header and the binary body.
    dds, data = response.body.split(b'\nData:\n', 1)
    encoding = response.content_encoding or 'ascii'
    dataset = build_dataset(dds.decode(encoding))
    dataset.data = unpack_data(StreamReader(BytesIO(data)), dataset)

    if not metadata:
        return dataset

    # Fetch the matching `.das` response and merge its attributes in.
    parts = urlsplit(url)
    daspath = parts.path[:-4] + 'das'
    dasurl = urlunsplit(
        (parts.scheme, parts.netloc, daspath, parts.query, parts.fragment))
    add_attributes(dataset, parse_das(GET(dasurl, application, session).text))
    return dataset
Пример #5
0
def open_dods(url, metadata=False, application=None, session=None):
    """Open a `.dods` response directly, returning a dataset."""
    resp = GET(url, application, session)
    raw_dds, payload = resp.body.split(b'\nData:\n', 1)
    # Decode the DDS with the declared encoding, falling back to ASCII.
    dataset = build_dataset(
        raw_dds.decode(resp.content_encoding or 'ascii'))
    dataset.data = unpack_data(StreamReader(BytesIO(payload)), dataset)

    if metadata:
        scheme, netloc, path, query, fragment = urlsplit(url)
        # Replace the trailing "dods" with "das" to locate the attributes.
        das_url = urlunsplit(
            (scheme, netloc, path[:-4] + 'das', query, fragment))
        das_text = GET(das_url, application, session).text
        add_attributes(dataset, parse_das(das_text))

    return dataset
Пример #6
0
def open_file(dods, das=None):
    """Open a file downloaded from a `.dods` response, returning a dataset.

    Optionally, read also the `.das` response to assign attributes to the
    dataset.

    """
    # Layout of a `.dods` file: ASCII DDS header, a "Data:" marker line,
    # then raw binary data.  Computing the binary offset as
    # len(dds) + len('Data:\n') from a text-mode scan is fragile: with
    # errors='ignore' dropping undecodable bytes, or a CRLF-terminated
    # marker line, the character count no longer equals the byte offset.
    # Reading the header in binary mode keeps the position exact and
    # removes the need to reopen and seek.
    with open(dods, 'rb') as f:
        header = bytearray()
        while True:
            line = f.readline()
            if not line or line.strip() == b'Data:':
                break
            header += line
        # Header is ASCII by specification; ignore stray bytes.
        dataset = build_dataset(header.decode('ascii', errors='ignore'))
        # f is positioned at the first byte after the marker line.
        dataset.data = unpack_data(f, dataset)

    if das is not None:
        with open(das) as f:
            add_attributes(dataset, parse_das(f.read()))

    return dataset