Example 1
    def __init__(self, url):
        # download DDS/DAS
        scheme, netloc, path, query, fragment = urlsplit(url)
        ddsurl = urlunsplit((scheme, netloc, path + '.dds', query, fragment))
        r = requests.get(ddsurl)
        r.raise_for_status()
        dds = r.text.encode('utf-8')
        dasurl = urlunsplit((scheme, netloc, path + '.das', query, fragment))
        r = requests.get(dasurl)
        r.raise_for_status()
        das = r.text.encode('utf-8')

        # build the dataset from the DDS and add attributes from the DAS
        self.dataset = build_dataset(dds)
        add_attributes(self.dataset, parse_das(das))

        # remove any projection from the url, leaving selections
        projection, selection = parse_ce(query)
        url = urlunsplit((scheme, netloc, path, '&'.join(selection), fragment))

        # now add data proxies
        for var in walk(self.dataset, BaseType):
            var.data = BaseProxy(url, var.id, var.descr)
        for var in walk(self.dataset, SequenceType):
            var.data = SequenceProxy(url, var.id, var.descr)

        # apply projections
        for var in projection:
            target = self.dataset
            while var:
                token, index = var.pop(0)
                target = target[token]
                if index and isinstance(target.data, BaseProxy):
                    target.data.slice = fix_slice(index, target.shape)
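This constructor looks like an earlier version of pydap's client-side DAPHandler: typical use just instantiates it with a server URL and works with the resulting dataset. A minimal usage sketch, assuming the class name DAPHandler and a placeholder URL:

# Hypothetical usage; the class name and URL are assumptions, not part of the snippet above.
handler = DAPHandler('http://example.com/path/to/dataset')
ds = handler.dataset              # DatasetType built from the DDS
print(ds.attributes)              # attributes merged in from the DAS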
Example 2
def open_file(dods, das=None):
    """Open a file downloaded from a `.dods` response, returning a dataset.

    Optionally, read also the `.das` response to assign attributes to the
    dataset.

    """
    dds = ''
    # This file contains both ascii _and_ binary data, so handle them
    # separately, in sequence. Without ignoring errors, the IO library
    # will read past the ascii part of the file (despite our break from
    # iteration) and will error out on the binary data.
    with open(dods, "rt", buffering=1, encoding='ascii',
              newline='\n', errors='ignore') as f:
        for line in f:
            if line.strip() == 'Data:':
                break
            dds += line
    dataset = build_dataset(dds)
    pos = len(dds) + len('Data:\n')

    with open(dods, "rb") as f:
        f.seek(pos)
        dataset.data = unpack_data(f, dataset)

    if das is not None:
        with open(das) as f:
            add_attributes(dataset, parse_das(f.read()))

    return dataset
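A short usage sketch for open_file, assuming a .dods response (and optionally its .das) has already been saved to disk; the file names are placeholders:

# Hypothetical file names; any saved .dods/.das pair would do.
dataset = open_file('response.dods', das='response.das')
print(dataset.attributes)         # populated from the .das file, if given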
Example 3
    def __init__(self, url):
        # download DDS/DAS
        scheme, netloc, path, query, fragment = urlsplit(url)
        ddsurl = urlunsplit((scheme, netloc, path + '.dds', query, fragment))
        dds = requests.get(ddsurl).text.encode('utf-8')
        dasurl = urlunsplit((scheme, netloc, path + '.das', query, fragment))
        das = requests.get(dasurl).text.encode('utf-8')

        # build the dataset from the DDS and add attributes from the DAS
        self.dataset = build_dataset(dds)
        add_attributes(self.dataset, parse_das(das))

        # remove any projection from the url, leaving selections
        projection, selection = parse_ce(query)
        url = urlunsplit((scheme, netloc, path, '&'.join(selection), fragment))

        # now add data proxies
        for var in walk(self.dataset, BaseType):
            var.data = BaseProxy(url, var.id, var.descr)
        for var in walk(self.dataset, SequenceType):
            var.data = SequenceProxy(url, var.id, var.descr)

        # apply projections
        for var in projection:
            target = self.dataset
            while var:
                token, index = var.pop(0)
                target = target[token]
                if index and isinstance(target.data, BaseProxy):
                    target.data.slice = fix_slice(index, target.shape)
Example 4
    def __init__(self, url, application=None, session=None, output_grid=True):
        # download DDS/DAS
        scheme, netloc, path, query, fragment = urlsplit(url)

        ddsurl = urlunsplit((scheme, netloc, path + '.dds', query, fragment))
        r = GET(ddsurl, application, session)
        raise_for_status(r)
        dds = r.text

        dasurl = urlunsplit((scheme, netloc, path + '.das', query, fragment))
        r = GET(dasurl, application, session)
        raise_for_status(r)
        das = r.text

        # build the dataset from the DDS and add attributes from the DAS
        self.dataset = build_dataset(dds)
        add_attributes(self.dataset, parse_das(das))

        # remove any projection from the url, leaving selections
        projection, selection = parse_ce(query)
        url = urlunsplit((scheme, netloc, path, '&'.join(selection), fragment))

        # now add data proxies
        for var in walk(self.dataset, BaseType):
            var.data = BaseProxy(url,
                                 var.id,
                                 var.dtype,
                                 var.shape,
                                 application=application,
                                 session=session)
        for var in walk(self.dataset, SequenceType):
            template = copy.copy(var)
            var.data = SequenceProxy(url,
                                     template,
                                     application=application,
                                     session=session)

        # apply projections
        for var in projection:
            target = self.dataset
            while var:
                token, index = var.pop(0)
                target = target[token]
                if isinstance(target, BaseType):
                    target.data.slice = fix_slice(index, target.shape)
                elif isinstance(target, GridType):
                    index = fix_slice(index, target.array.shape)
                    target.array.data.slice = index
                    for s, child in zip(index, target.maps):
                        target[child].data.slice = (s, )
                elif isinstance(target, SequenceType):
                    target.data.slice = index

        # retrieve only main variable for grid types:
        for var in walk(self.dataset, GridType):
            var.set_output_grid(output_grid)
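The projection loop above is what lets a constrained URL download only a subset of each variable. A hedged sketch of such a call, with placeholder server, file and variable names; as the last loop suggests, output_grid=False makes grid slicing return only the main array rather than the full grid with its coordinate maps:

# Hypothetical URL and variable; the constraint expression selects a slice.
handler = DAPHandler('http://example.com/data.nc?temperature[0:10]',
                     output_grid=False)
temperature = handler.dataset['temperature']   # proxy already restricted to the slice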
Example 5
def open_file(dods, das=None):
    with open(dods) as f:
        dds, data = f.read().split('\nData:\n', 1)
        dataset = build_dataset(dds)
        dataset.data = unpack_data(data, dataset)

    if das is not None:
        with open(das) as f:
            add_attributes(dataset, parse_das(f.read()))

    return dataset
Example 6
def open_dods(url, metadata=False):
    r = requests.get(url)
    dds, data = r.content.split('\nData:\n', 1)
    dataset = build_dataset(dds)
    dataset.data = unpack_data(data, dataset)

    if metadata:
        scheme, netloc, path, query, fragment = urlsplit(url)
        dasurl = urlunsplit((scheme, netloc, path[:-4] + 'das', query, fragment))
        das = requests.get(dasurl).text.encode('utf-8')
        add_attributes(dataset, parse_das(das))

    return dataset
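A usage sketch for this open_dods, with a placeholder URL; metadata=True triggers the extra .das request shown above:

# Hypothetical URL; '.dods' selects the binary DAP response and the
# constraint expression after '?' limits which variables are returned.
dataset = open_dods('http://example.com/data.nc.dods?temperature', metadata=True)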
Example 7
File: dap.py Project: pydap/pydap
    def __init__(self, url, application=None, session=None, output_grid=True):
        # download DDS/DAS
        scheme, netloc, path, query, fragment = urlsplit(url)

        ddsurl = urlunsplit((scheme, netloc, path + ".dds", query, fragment))
        r = GET(ddsurl, application, session)
        raise_for_status(r)
        dds = r.text

        dasurl = urlunsplit((scheme, netloc, path + ".das", query, fragment))
        r = GET(dasurl, application, session)
        raise_for_status(r)
        das = r.text

        # build the dataset from the DDS and add attributes from the DAS
        self.dataset = build_dataset(dds)
        add_attributes(self.dataset, parse_das(das))

        # remove any projection from the url, leaving selections
        projection, selection = parse_ce(query)
        url = urlunsplit((scheme, netloc, path, "&".join(selection), fragment))

        # now add data proxies
        for var in walk(self.dataset, BaseType):
            var.data = BaseProxy(url, var.id, var.dtype, var.shape, application=application, session=session)
        for var in walk(self.dataset, SequenceType):
            template = copy.copy(var)
            var.data = SequenceProxy(url, template, application=application, session=session)

        # apply projections
        for var in projection:
            target = self.dataset
            while var:
                token, index = var.pop(0)
                target = target[token]
                if isinstance(target, BaseType):
                    target.data.slice = fix_slice(index, target.shape)
                elif isinstance(target, GridType):
                    index = fix_slice(index, target.array.shape)
                    target.array.data.slice = index
                    for s, child in zip(index, target.maps):
                        target[child].data.slice = (s,)
                elif isinstance(target, SequenceType):
                    target.data.slice = index

        # retrieve only main variable for grid types:
        for var in walk(self.dataset, GridType):
            var.set_output_grid(output_grid)
Example 8
def open_dods(url, metadata=False, application=None, session=None):
    """Open a `.dods` response directly, returning a dataset."""
    r = GET(url, application, session)
    dds, data = r.body.split(b'\nData:\n', 1)
    dds = dds.decode(r.content_encoding or 'ascii')
    dataset = build_dataset(dds)
    stream = StreamReader(BytesIO(data))
    dataset.data = unpack_data(stream, dataset)

    if metadata:
        scheme, netloc, path, query, fragment = urlsplit(url)
        dasurl = urlunsplit(
            (scheme, netloc, path[:-4] + 'das', query, fragment))
        das = GET(dasurl, application, session).text
        add_attributes(dataset, parse_das(das))

    return dataset
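Compared with Example 6, this version routes requests through GET, so application can be a WSGI app (useful in tests, since no network is involved) and session can carry authentication. A hedged sketch with placeholder names:

# Hypothetical setup: make_app() stands for any WSGI application serving DAP.
app = make_app()
dataset = open_dods('http://localhost:8001/data.nc.dods', metadata=True,
                    application=app)   # the request is dispatched to the app in-process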
Example 9
def open_file(dods, das=None):
    """Open a file downloaded from a `.dods` response, returning a dataset.

    Optionally, read also the `.das` response to assign attributes to the
    dataset.

    """
    dds = ''
    # This file contains both ascii _and_ binary data, so handle them
    # separately, in sequence. Without ignoring errors, the IO library
    # will read past the ascii part of the file (despite our break from
    # iteration) and will error out on the binary data.
    with open(dods,
              "rt",
              buffering=1,
              encoding='ascii',
              newline='\n',
              errors='ignore') as f:
        for line in f:
            if line.strip() == 'Data:':
                break
            dds += line
    dataset = build_dataset(dds)
    pos = len(dds) + len('Data:\n')

    with open(dods, "rb") as f:
        f.seek(pos)
        dataset.data = unpack_data(f, dataset)

    if das is not None:
        with open(das) as f:
            add_attributes(dataset, parse_das(f.read()))

    return dataset
Example 10
class CSVHandler(BaseHandler):

    """This is a simple handler for CSV files."""

    __version__ = get_distribution("pydap.handlers.csv").version
    extensions = re.compile(r"^.*\.csv$", re.IGNORECASE)

    def __init__(self, filepath):
        BaseHandler.__init__(self)

        try:
            # read the header row to learn the column names
            with open(filepath, newline='') as fp:
                reader = csv.reader(fp, quoting=csv.QUOTE_NONNUMERIC)
                vars = next(reader)
        except Exception as exc:
            message = 'Unable to open file {filepath}: {exc}'.format(
                filepath=filepath, exc=exc)
            raise OpenFileError(message)

        self.additional_headers.append(
            ('Last-modified',
                (formatdate(
                    time.mktime(
                        time.localtime(os.stat(filepath)[ST_MTIME]))))))

        # build dataset
        name = os.path.split(filepath)[1]
        self.dataset = DatasetType(name)

        # add sequence and children for each column
        seq = self.dataset['sequence'] = SequenceType('sequence')
        for var in vars:
            seq[var] = BaseType(var)

        # set the data
        seq.data = CSVData(filepath, copy.copy(seq))

        # add extra attributes
        metadata = "{0}.json".format(filepath)
        if os.path.exists(metadata):
            with open(metadata) as fp:
                attributes = json.load(fp)
            add_attributes(self.dataset, attributes)
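A usage sketch for the handler, assuming a small CSV file whose first row holds the column names (the file name is a placeholder):

# Hypothetical file; each column becomes a child of the 'sequence' variable.
handler = CSVHandler('measurements.csv')
seq = handler.dataset['sequence']
print(list(seq.keys()))           # column names read from the header row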
Example 11
 def setUp(self):
     """Load a dataset and apply DAS to it."""
     self.dataset = build_dataset(DDS)
     attributes = parse_das(DAS)
     add_attributes(self.dataset, attributes)
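The DDS and DAS constants used by these tests are plain strings in the DAP 2.0 text formats. A minimal sketch of plausible fixtures (the variable and attribute names are made up):

# Hypothetical fixtures for the setUp above.
DDS = """Dataset {
    Float32 temperature[time = 3];
} example;"""

DAS = """Attributes {
    temperature {
        String units "K";
    }
}"""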
Example 12
 def setUp(self):
     dataset = build_dataset(DDS)
     attributes = parse_das(DAS)
     self.dataset = add_attributes(dataset, attributes)