def __init__(self, url):
    """Open a remote dataset: fetch the DDS/DAS, build the dataset,
    attach lazy data proxies, and apply any projection from the URL.

    :param url: OPeNDAP dataset URL, optionally carrying a constraint
        expression (projection/selection) in its query string.
    """
    # download DDS/DAS
    scheme, netloc, path, query, fragment = urlsplit(url)
    ddsurl = urlunsplit((scheme, netloc, path + '.dds', query, fragment))
    # Fail fast on HTTP errors instead of parsing an error page as a DDS.
    r = requests.get(ddsurl)
    r.raise_for_status()
    dds = r.text.encode('utf-8')
    dasurl = urlunsplit((scheme, netloc, path + '.das', query, fragment))
    r = requests.get(dasurl)
    r.raise_for_status()
    das = r.text.encode('utf-8')

    # build the dataset from the DDS and add attributes from the DAS
    self.dataset = build_dataset(dds)
    add_attributes(self.dataset, parse_das(das))

    # remove any projection from the url, leaving selections
    projection, selection = parse_ce(query)
    url = urlunsplit((scheme, netloc, path, '&'.join(selection), fragment))

    # now add data proxies so values are only fetched on access
    for var in walk(self.dataset, BaseType):
        var.data = BaseProxy(url, var.id, var.descr)
    for var in walk(self.dataset, SequenceType):
        var.data = SequenceProxy(url, var.id, var.descr)

    # apply projections: walk each token path down to its variable and
    # store the (normalized) index on the proxy's slice
    for var in projection:
        target = self.dataset
        while var:
            token, index = var.pop(0)
            target = target[token]
            if index and isinstance(target.data, BaseProxy):
                target.data.slice = fix_slice(index, target.shape)
def __init__(self, url):
    """Create a client dataset from a remote OPeNDAP URL.

    Fetches the ``.dds`` and ``.das`` responses, builds the dataset,
    wires up lazy data proxies, and applies any projection found in the
    URL's constraint expression.
    """
    def fetch(document_url):
        # GET a document, raising on HTTP-level errors.
        response = requests.get(document_url)
        response.raise_for_status()
        return response.text.encode('utf-8')

    scheme, netloc, path, query, fragment = urlsplit(url)
    dds = fetch(urlunsplit((scheme, netloc, path + '.dds', query, fragment)))
    das = fetch(urlunsplit((scheme, netloc, path + '.das', query, fragment)))

    # Structure comes from the DDS; attributes come from the DAS.
    self.dataset = build_dataset(dds)
    add_attributes(self.dataset, parse_das(das))

    # Strip the projection from the request, keeping only selections.
    projection, selection = parse_ce(query)
    base_url = urlunsplit(
        (scheme, netloc, path, '&'.join(selection), fragment))

    # Attach lazy proxies so data is only fetched on access.
    for child in walk(self.dataset, BaseType):
        child.data = BaseProxy(base_url, child.id, child.descr)
    for child in walk(self.dataset, SequenceType):
        child.data = SequenceProxy(base_url, child.id, child.descr)

    # Propagate projection indices onto the matching proxy slices.
    for tokens in projection:
        target = self.dataset
        while tokens:
            name, index = tokens.pop(0)
            target = target[name]
            if index and isinstance(target.data, BaseProxy):
                target.data.slice = fix_slice(index, target.shape)
def open_file(dods, das=None):
    """Open a file downloaded from a `.dods` response, returning a dataset.

    Optionally, read also the `.das` response to assign attributes to the
    dataset.
    """
    dds = ''
    # This file contains both ascii _and_ binary data
    # Let's handle them separately in sequence
    # Without ignoring errors, the IO library will
    # actually read past the ascii part of the
    # file (despite our break from iteration) and
    # will error out on the binary data
    with open(dods, "rt", buffering=1, encoding='ascii', newline='\n',
              errors='ignore') as f:
        # Accumulate the textual DDS header, stopping at the 'Data:' marker
        # that separates it from the binary payload.
        for line in f:
            if line.strip() == 'Data:':
                break
            dds += line
    dataset = build_dataset(dds)
    # NOTE(review): this byte offset assumes the header is pure 7-bit ASCII
    # (so errors='ignore' dropped nothing and char count == byte count) and
    # that the marker line is exactly 'Data:\n' -- confirm for responses
    # with unusual whitespace or encodings.
    pos = len(dds) + len('Data:\n')
    with open(dods, "rb") as f:
        # Reopen in binary mode, skip the header, and unpack the XDR data.
        f.seek(pos)
        dataset.data = unpack_data(f, dataset)
    if das is not None:
        with open(das) as f:
            add_attributes(dataset, parse_das(f.read()))
    return dataset
def __init__(self, url, application=None, session=None, output_grid=True):
    """Build a client dataset from an OPeNDAP URL.

    :param url: dataset URL; may carry a constraint expression
        (projection and selection) in its query string.
    :param application: optional WSGI application passed through to ``GET``.
    :param session: optional HTTP session passed through to ``GET``.
    :param output_grid: forwarded to ``GridType.set_output_grid`` for
        every grid in the dataset.
    """
    # download DDS/DAS
    scheme, netloc, path, query, fragment = urlsplit(url)
    ddsurl = urlunsplit((scheme, netloc, path + '.dds', query, fragment))
    r = GET(ddsurl, application, session)
    raise_for_status(r)
    dds = r.text
    dasurl = urlunsplit((scheme, netloc, path + '.das', query, fragment))
    r = GET(dasurl, application, session)
    raise_for_status(r)
    das = r.text

    # build the dataset from the DDS and add attributes from the DAS
    self.dataset = build_dataset(dds)
    add_attributes(self.dataset, parse_das(das))

    # remove any projection from the url, leaving selections
    projection, selection = parse_ce(query)
    url = urlunsplit((scheme, netloc, path, '&'.join(selection), fragment))

    # now add data proxies, so values are only fetched when accessed
    for var in walk(self.dataset, BaseType):
        var.data = BaseProxy(url, var.id, var.dtype, var.shape,
                             application=application, session=session)
    for var in walk(self.dataset, SequenceType):
        # the proxy needs a copy of the variable as a template for rows
        template = copy.copy(var)
        var.data = SequenceProxy(url, template, application=application,
                                 session=session)

    # apply projections: walk each token path to its variable and store
    # the normalized index on the corresponding proxy slice(s)
    for var in projection:
        target = self.dataset
        while var:
            token, index = var.pop(0)
            target = target[token]
            if isinstance(target, BaseType):
                target.data.slice = fix_slice(index, target.shape)
            elif isinstance(target, GridType):
                # slice the array and each map along its matching dimension
                index = fix_slice(index, target.array.shape)
                target.array.data.slice = index
                for s, child in zip(index, target.maps):
                    target[child].data.slice = (s, )
            elif isinstance(target, SequenceType):
                target.data.slice = index

    # retrieve only main variable for grid types:
    for var in walk(self.dataset, GridType):
        var.set_output_grid(output_grid)
def dump():
    """Unpack a dods response read from stdin and pretty-print its data."""
    import sys
    import pprint
    # Read raw bytes: the response mixes an ASCII DDS header with binary
    # XDR data, and the text layer of sys.stdin would corrupt (or fail to
    # decode) the binary payload.
    dods = sys.stdin.buffer.read()
    dds, xdrdata = dods.split(b'\nData:\n', 1)
    # Only the DDS header is text; decode just that part.
    dataset = build_dataset(dds.decode('ascii'))
    data = unpack_data(xdrdata, dataset)
    pprint.pprint(data)
def open_file(dods, das=None):
    """Open a `.dods` file, returning a dataset.

    Optionally read a `.das` file as well and apply its attributes to the
    dataset.
    """
    # The file mixes an ASCII DDS header with binary XDR data; read it as
    # bytes so the binary section is not mangled by text decoding.
    with open(dods, 'rb') as f:
        dds, data = f.read().split(b'\nData:\n', 1)
    # Only the DDS header is text; decode just that part.
    dataset = build_dataset(dds.decode('ascii'))
    dataset.data = unpack_data(data, dataset)
    if das is not None:
        with open(das) as f:
            add_attributes(dataset, parse_das(f.read()))
    return dataset
def open_dods(url, metadata=False):
    """Open a `.dods` URL directly, returning a dataset.

    When *metadata* is True the matching `.das` response is fetched as
    well and its attributes are applied to the dataset.
    """
    r = requests.get(url)
    # Surface HTTP errors instead of trying to parse an error page.
    r.raise_for_status()
    # r.content is bytes, so the separator must be bytes too.
    dds, data = r.content.split(b'\nData:\n', 1)
    # Only the DDS header is text; decode just that part.
    dataset = build_dataset(dds.decode('ascii'))
    dataset.data = unpack_data(data, dataset)
    if metadata:
        # Derive the matching .das URL by swapping the extension.
        scheme, netloc, path, query, fragment = urlsplit(url)
        dasurl = urlunsplit(
            (scheme, netloc, path[:-4] + 'das', query, fragment))
        r = requests.get(dasurl)
        r.raise_for_status()
        das = r.text.encode('utf-8')
        add_attributes(dataset, parse_das(das))
    return dataset
def dump():  # pragma: no cover
    """Unpack dods response into lists.

    Return pretty-printed data.
    """
    # Read from the binary buffer: the response contains binary XDR data
    # after the ASCII DDS, and splitting a str with a bytes separator
    # would raise TypeError.
    dods = sys.stdin.buffer.read()
    dds, xdrdata = dods.split(b'\nData:\n', 1)
    xdr_stream = io.BytesIO(xdrdata)
    dds = dds.decode('ascii')
    dataset = build_dataset(dds)
    data = unpack_data(xdr_stream, dataset)
    pprint.pprint(data)
def dump():  # pragma: no cover
    """Unpack dods response into lists.

    Return pretty-printed data.
    """
    # Read raw bytes from stdin's buffer: the payload after the DDS is
    # binary XDR, and str.split(b"...") would raise TypeError anyway.
    dods = sys.stdin.buffer.read()
    dds, xdrdata = dods.split(b"\nData:\n", 1)
    xdr_stream = io.BytesIO(xdrdata)
    dds = dds.decode("ascii")
    dataset = build_dataset(dds)
    data = unpack_data(xdr_stream, dataset)
    pprint.pprint(data)
def __init__(self, url, application=None, session=None, output_grid=True):
    """Construct the client dataset for *url*.

    Downloads the DDS/DAS responses, builds the dataset, attaches lazy
    data proxies, and applies any projection found in the URL's query
    string.  *application* and *session* are forwarded to ``GET``;
    *output_grid* is forwarded to every grid's ``set_output_grid``.
    """
    def fetch(document_url):
        # GET a text document, raising on HTTP-level errors.
        response = GET(document_url, application, session)
        raise_for_status(response)
        return response.text

    scheme, netloc, path, query, fragment = urlsplit(url)
    dds = fetch(urlunsplit((scheme, netloc, path + ".dds", query, fragment)))
    das = fetch(urlunsplit((scheme, netloc, path + ".das", query, fragment)))

    # Structure from the DDS, attributes from the DAS.
    self.dataset = build_dataset(dds)
    add_attributes(self.dataset, parse_das(das))

    # Drop the projection from the query, keeping only the selections.
    projection, selection = parse_ce(query)
    data_url = urlunsplit(
        (scheme, netloc, path, "&".join(selection), fragment))

    # Attach lazy proxies so values are only fetched on access.
    for node in walk(self.dataset, BaseType):
        node.data = BaseProxy(data_url, node.id, node.dtype, node.shape,
                              application=application, session=session)
    for node in walk(self.dataset, SequenceType):
        node.data = SequenceProxy(data_url, copy.copy(node),
                                  application=application, session=session)

    # Push each projection's indices down onto the matching proxy slices.
    for tokens in projection:
        target = self.dataset
        for name, index in tokens:
            target = target[name]
            if isinstance(target, BaseType):
                target.data.slice = fix_slice(index, target.shape)
            elif isinstance(target, GridType):
                # Slice the array, and each map along its own dimension.
                index = fix_slice(index, target.array.shape)
                target.array.data.slice = index
                for dim_slice, child in zip(index, target.maps):
                    target[child].data.slice = (dim_slice,)
            elif isinstance(target, SequenceType):
                target.data.slice = index

    # retrieve only main variable for grid types:
    for node in walk(self.dataset, GridType):
        node.set_output_grid(output_grid)
def open_dods(url, metadata=False, application=None, session=None):
    """Open a `.dods` response directly, returning a dataset."""
    response = GET(url, application, session)
    raw_dds, raw_data = response.body.split(b'\nData:\n', 1)
    encoding = response.content_encoding or 'ascii'
    dataset = build_dataset(raw_dds.decode(encoding))
    dataset.data = unpack_data(StreamReader(BytesIO(raw_data)), dataset)
    if metadata:
        # Derive the matching .das URL by swapping the extension.
        scheme, netloc, path, query, fragment = urlsplit(url)
        dasurl = urlunsplit(
            (scheme, netloc, path[:-4] + 'das', query, fragment))
        das_text = GET(dasurl, application, session).text
        add_attributes(dataset, parse_das(das_text))
    return dataset
def open_file(dods, das=None):
    """Open a file downloaded from a `.dods` response, returning a dataset.

    Optionally, read also the `.das` response to assign attributes to the
    dataset.
    """
    # The file holds an ASCII DDS followed by binary XDR data.  Walk the
    # text part line by line first, collecting everything before the
    # 'Data:' marker.  errors='ignore' keeps the buffered reader from
    # choking when it reads ahead into the binary section.
    header_lines = []
    with open(dods, "rt", buffering=1, encoding='ascii', newline='\n',
              errors='ignore') as text_file:
        for row in text_file:
            if row.strip() == 'Data:':
                break
            header_lines.append(row)
    dds = ''.join(header_lines)
    dataset = build_dataset(dds)

    # Reopen in binary mode, seek past the header plus the marker line,
    # and unpack the XDR payload.
    offset = len(dds) + len('Data:\n')
    with open(dods, "rb") as binary_file:
        binary_file.seek(offset)
        dataset.data = unpack_data(binary_file, dataset)

    if das is not None:
        with open(das) as das_file:
            add_attributes(dataset, parse_das(das_file.read()))
    return dataset
def setUp(self):
    """Build the test dataset from the DDS fixture."""
    self.dataset = build_dataset(DDS)
def setUp(self):
    """Load a dataset and apply DAS to it."""
    self.dataset = build_dataset(DDS)
    # Parse the DAS fixture and attach its attributes in one step.
    add_attributes(self.dataset, parse_das(DAS))
def setUp(self):
    """Build the dataset and decorate it with the parsed DAS attributes."""
    # add_attributes returns the dataset it decorates, so the whole
    # fixture can be composed as a single expression.
    self.dataset = add_attributes(build_dataset(DDS), parse_das(DAS))