Example #1
    def __getitem__(self, index):
        slice_ = combine_slices(self._slice, fix_slice(index, self.shape))
        scheme, netloc, path, query, fragment = urlsplit(self.url)
        url = urlunsplit((scheme, netloc, path + '.dods',
                          self.id + hyperslab(slice_) + '&' + query, fragment))

        resp, data = request(url)
        dds, xdrdata = data.split('\nData:\n', 1)
        dataset = DDSParser(dds).parse()
        data = data2 = DapUnpacker(xdrdata, dataset).getvalue()

        # Retrieve the data from any parent structure(s).
        for var in walk(dataset):
            if type(var) in (StructureType, DatasetType):
                data = data[0]
            elif var.id == self.id:
                return data

        # Some old servers return the wrong response. :-/
        # I found a server that would return an array to a request
        # for an array inside a grid (instead of a structure with
        # the array); this will take care of it.
        for var in walk(dataset):
            if type(var) in (StructureType, DatasetType):
                data2 = data2[0]
            elif self.id.endswith(var.id):
                return data2
Example #2
    def __iter__(self):
        scheme, netloc, path, query, fragment = urlsplit(self.baseurl)
        if isinstance(self.descr[1], list):
            id = ','.join('%s.%s' % (self.id, d[0]) for d in self.descr[1])
        else:
            id = self.id
        url = urlunsplit(
            (scheme, netloc, path + '.dods',
             id + hyperslab(self.slice) + '&' + '&'.join(self.selection),
             fragment)).rstrip('&')

        # download and unpack data
        r = requests.get(url, prefetch=False)

        # strip dds response
        marker = '\nData:\n'
        buf = []
        while ''.join(buf) != marker:
            chunk = r.raw.read(1)
            if not chunk:
                break
            buf.append(chunk)
            buf = buf[-len(marker):]

        return unpack_sequence(BufferedReader(r.raw), self.descr)
Example #3
    def __getitem__(self, index):
        # build download url
        index = combine_slices(self.slice, fix_slice(index, self.shape))
        scheme, netloc, path, query, fragment = urlsplit(self.baseurl)
        url = urlunsplit(
            (scheme, netloc, path + '.dods',
             self.id + hyperslab(index) + '&' + query, fragment)).rstrip('&')

        # download and unpack data
        r = requests.get(url)
        dds, data = r.content.split('\nData:\n', 1)

        if self.shape:
            # skip size packing
            if self.dtype.char == 'S':
                data = data[4:]
            else:
                data = data[8:]

        # calculate array size
        shape = tuple((s.stop - s.start) / s.step for s in index)
        size = np.prod(shape)

        if self.dtype == np.byte:
            return np.fromstring(data[:size], 'B')
        elif self.dtype.char == 'S':
            out = []
            for word in range(size):
                n = np.fromstring(data[:4], '>I')  # read length
                data = data[4:]
                out.append(data[:n])
                data = data[n + (-n % 4):]
            return np.array(out, 'S')
        else:
            return np.fromstring(data, self.dtype).reshape(shape)
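
The shape calculation in this example divides (stop - start) by step directly, which is only exact for unit steps and yields floats under Python 3; the later examples round up instead. A small worked check of the rounded-up form, using a hypothetical slice:

    import numpy as np

    # Points selected along one axis of a hyperslab: the plain
    # (stop - start) / step used above only works for step == 1,
    # while the ceil form matches range() for any step.
    s = slice(2, 10, 3)  # hypothetical axis slice
    n = int(np.ceil((s.stop - s.start) / float(s.step)))
    assert n == len(range(2, 10, 3)) == 3
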
Example #4
    def __getitem__(self, index):
        slice_ = combine_slices(self._slice, fix_slice(index, self.shape))
        scheme, netloc, path, query, fragment = urlsplit(self.url)
        url = urlunsplit((
                scheme, netloc, path + '.dods',
                quote(self.id) + hyperslab(slice_) + '&' + query,
                fragment))

        resp, data = request(url)
        dds, xdrdata = data.split('\nData:\n', 1)
        dataset = DDSParser(dds).parse()
        data = data2 = DapUnpacker(xdrdata, dataset).getvalue()

        # Retrieve the data from any parent structure(s).
        for var in walk(dataset):
            if type(var) in (StructureType, DatasetType):
                data = data[0]
            elif var.id == self.id: 
                return data

        # Some old servers return the wrong response. :-/
        # I found a server that would return an array to a request
        # for an array inside a grid (instead of a structure with
        # the array); this will take care of it.
        for var in walk(dataset):
            if type(var) in (StructureType, DatasetType):
                data2 = data2[0]
            elif self.id.endswith(var.id):
                return data2
Example #5
    def __iter__(self):
        scheme, netloc, path, query, fragment = urlsplit(self.baseurl)
        if isinstance(self.descr[1], list):
            id = ','.join('%s.%s' % (self.id, d[0]) for d in self.descr[1])
        else:
            id = self.id
        url = urlunsplit((
                scheme, netloc, path + '.dods',
                id + hyperslab(self.slice) + '&' + '&'.join(self.selection),
                fragment)).rstrip('&')

        # download and unpack data
        r = requests.get(url, stream=True)
        r.raise_for_status()
        stream = StreamReader(r.iter_content(BLOCKSIZE))

        # strip dds response
        marker = '\nData:\n'
        buf = []
        while ''.join(buf) != marker:
            chunk = stream.read(1)
            if not chunk:
                break
            buf.append(chunk)
            buf = buf[-len(marker):]

        return unpack_sequence(stream, self.descr)
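
Examples #2 and #5 locate the start of the binary payload by scanning the response for the '\nData:\n' marker one byte at a time. A minimal sketch of the same scan against an in-memory stream (the response body here is made up for illustration):

    import io

    def skip_past_marker(stream, marker=b'\nData:\n'):
        # Read one byte at a time, keeping only the last len(marker) bytes,
        # until they equal the marker or the stream runs out.
        buf = []
        while b''.join(buf) != marker:
            chunk = stream.read(1)
            if not chunk:
                break
            buf.append(chunk)
            buf = buf[-len(marker):]

    # Hypothetical DODS response: a short DDS header, the marker, then data.
    stream = io.BytesIO(b'Dataset { Int32 x; } d;\nData:\npayload')
    skip_past_marker(stream)
    assert stream.read() == b'payload'
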
Example #6
File: dap.py Project: pydap/pydap
    def url(self):
        """Return url from where data is fetched."""
        scheme, netloc, path, query, fragment = urlsplit(self.baseurl)
        url = urlunsplit(
            (scheme, netloc, path + ".dods", self.id + hyperslab(self.slice) + "&" + "&".join(self.selection), fragment)
        ).rstrip("&")

        return url
Example #7
    def url(self):
        """Return url from where data is fetched."""
        scheme, netloc, path, query, fragment = urlsplit(self.baseurl)
        url = urlunsplit(
            (scheme, netloc, path + '.dods',
             self.id + hyperslab(self.slice) + '&' + '&'.join(self.selection),
             fragment)).rstrip('&')

        return url
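
The url() property assembles a DAP data request: the dataset path gets a '.dods' suffix, and the query string carries the projection (variable id plus hyperslab) followed by any selection clauses. A rough illustration of the same composition with made-up values, using Python 3's urllib.parse:

    from urllib.parse import urlsplit, urlunsplit

    # Hypothetical inputs; only the composition mirrors url() above.
    baseurl = 'http://example.com/dap/dataset.nc'
    projection = 'sst' + '[0:1:9][0:1:179]'   # id + hyperslab string
    selection = ['time>10']

    scheme, netloc, path, query, fragment = urlsplit(baseurl)
    url = urlunsplit(
        (scheme, netloc, path + '.dods',
         projection + '&' + '&'.join(selection),
         fragment)).rstrip('&')

    print(url)
    # http://example.com/dap/dataset.nc.dods?sst[0:1:9][0:1:179]&time>10
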
Example #8
File: dap.py Project: pydap/pydap
    def __getitem__(self, index):
        # build download url
        index = combine_slices(self.slice, fix_slice(index, self.shape))
        scheme, netloc, path, query, fragment = urlsplit(self.baseurl)
        url = urlunsplit(
            (scheme, netloc, path + ".dods", quote(self.id) + hyperslab(index) + "&" + query, fragment)
        ).rstrip("&")

        # download and unpack data
        logger.info("Fetching URL: %s" % url)
        r = GET(url, self.application, self.session)
        raise_for_status(r)
        dds, data = r.body.split(b"\nData:\n", 1)
        dds = dds.decode(r.content_encoding or "ascii")

        if self.shape:
            # skip size packing
            if self.dtype.char in "SU":
                data = data[4:]
            else:
                data = data[8:]

        # calculate array size
        shape = tuple(int(np.ceil((s.stop - s.start) / float(s.step))) for s in index)
        size = int(np.prod(shape))

        if self.dtype == np.byte:
            return np.fromstring(data[:size], "B").reshape(shape)
        elif self.dtype.char in "SU":
            out = []
            for word in range(size):
                n = np.asscalar(np.fromstring(data[:4], ">I"))  # read length
                data = data[4:]
                out.append(data[:n])
                data = data[n + (-n % 4) :]
            return np.array([text_type(x.decode("ascii")) for x in out], "S").reshape(shape)
        else:
            try:
                return np.fromstring(data, self.dtype).reshape(shape)
            except ValueError as e:
                if str(e) == "total size of new array must be unchanged":
                    # server-side failure.
                    # it is expected that the user should be mindful of this:
                    raise RuntimeError(
                        (
                            "variable {0} could not be properly "
                            "retrieved. To avoid this "
                            "error consider using open_url(..., "
                            "output_grid=False)."
                        ).format(quote(self.id))
                    )
                else:
                    raise
Example #9
    def __getitem__(self, index):
        # build download url
        index = combine_slices(self.slice, fix_slice(index, self.shape))
        scheme, netloc, path, query, fragment = urlsplit(self.baseurl)
        url = urlunsplit((
            scheme, netloc, path + '.dods',
            quote(self.id) + hyperslab(index) + '&' + query,
            fragment)).rstrip('&')

        # download and unpack data
        logger.info("Fetching URL: %s" % url)
        r = GET(url, self.application, self.session)
        raise_for_status(r)
        dds, data = r.body.split(b'\nData:\n', 1)
        dds = dds.decode(r.content_encoding or 'ascii')

        if self.shape:
            # skip size packing
            if self.dtype.char in 'SU':
                data = data[4:]
            else:
                data = data[8:]

        # calculate array size
        shape = tuple(
            int(np.ceil((s.stop-s.start)/float(s.step))) for s in index)
        size = int(np.prod(shape))

        if self.dtype == np.byte:
            return np.fromstring(data[:size], 'B').reshape(shape)
        elif self.dtype.char in 'SU':
            out = []
            for word in range(size):
                n = np.asscalar(np.fromstring(data[:4], '>I'))  # read length
                data = data[4:]
                out.append(data[:n])
                data = data[n + (-n % 4):]
            return np.array([text_type(x.decode('ascii'))
                             for x in out], 'S').reshape(shape)
        else:
            try:
                return np.fromstring(data, self.dtype).reshape(shape)
            except ValueError as e:
                if str(e) == 'total size of new array must be unchanged':
                    # server-side failure.
                    # it is expected that the user should be mindful of this:
                    raise RuntimeError(
                                ('variable {0} could not be properly '
                                 'retrieved. To avoid this '
                                 'error consider using open_url(..., '
                                 'output_grid=False).').format(quote(self.id)))
                else:
                    raise
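
The string branch in Examples #8 and #9 follows the XDR layout: each value is a 4-byte big-endian length, the bytes themselves, then zero padding up to a 4-byte boundary, which is what the (-n % 4) skip accounts for. A minimal sketch of decoding one such value from a hand-built buffer (function name and buffer are illustrative only):

    import numpy as np

    def read_xdr_string(data):
        # 4-byte big-endian length prefix, then the value, then padding.
        n = int(np.frombuffer(data[:4], '>I')[0])
        data = data[4:]
        value, rest = data[:n], data[n + (-n % 4):]
        return value, rest

    # Hypothetical buffer: length 3, b'abc', one padding byte.
    value, rest = read_xdr_string(b'\x00\x00\x00\x03abc\x00')
    assert value == b'abc' and rest == b''
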
Example #10
    def __getitem__(self, key):
        out = copy.deepcopy(self)
        if isinstance(key, ConstraintExpression):
            scheme, netloc, path, query, fragment = urlsplit(self.url)
            out.url = urlunsplit((
                    scheme, netloc, path, str(key & query), fragment))

            if out._slice != (slice(None),):
                warnings.warn('Selection %s will be applied before projection "%s".' % (
                        key, hyperslab(out._slice)))
        elif isinstance(key, basestring):
            out._slice = (slice(None),)
            out.children = ()
            parent = self.id
            if ',' in parent:
                parent = parent.split(',', 1)[0].rsplit('.', 1)[0]
            out.id = '%s%s.%s' % (parent, hyperslab(self._slice), key)
        elif isinstance(key, tuple):
            out.children = key[:]
        else:
            out._slice = combine_slices(self._slice, fix_slice(key, (sys.maxint,)))
        return out
Example #11
    def __getitem__(self, key):
        out = copy.deepcopy(self)
        if isinstance(key, ConstraintExpression):
            scheme, netloc, path, query, fragment = urlsplit(self.url)
            out.url = urlunsplit(
                (scheme, netloc, path, str(key & query), fragment))

            if out._slice != (slice(None), ):
                warnings.warn(
                    'Selection %s will be applied before projection "%s".' %
                    (key, hyperslab(out._slice)))
        elif isinstance(key, basestring):
            out._slice = (slice(None), )
            out.children = ()
            parent = self.id
            if ',' in parent:
                parent = parent.split(',', 1)[0].rsplit('.', 1)[0]
            out.id = '%s%s.%s' % (parent, hyperslab(self._slice), key)
        elif isinstance(key, tuple):
            out.children = key[:]
        else:
            out._slice = combine_slices(self._slice,
                                        fix_slice(key, (sys.maxint, )))
        return out
Example #12
    def __getitem__(self, index):
        # build download url
        index = combine_slices(self.slice, fix_slice(index, self.shape))
        scheme, netloc, path, query, fragment = urlsplit(self.baseurl)
        url = urlunsplit((
            scheme, netloc, path + '.dods',
            quote(self.id) + hyperslab(index) + '&' + query,
            fragment)).rstrip('&')

        # download and unpack data
        logger.info("Fetching URL: %s" % url)
        r = GET(url, self.application, self.session)
        raise_for_status(r)
        dds, data = r.body.split(b'\nData:\n', 1)
        dds = dds.decode(r.content_encoding or 'ascii')

        if self.shape:
            # skip size packing
            if self.dtype.char in 'SU':
                data = data[4:]
            else:
                data = data[8:]

        # calculate array size
        shape = tuple(
            int(np.ceil((s.stop-s.start)/float(s.step))) for s in index)
        size = int(np.prod(shape))

        if self.dtype == np.byte:
            return np.fromstring(data[:size], 'B')
        elif self.dtype.char in 'SU':
            out = []
            for word in range(size):
                n = np.fromstring(data[:4], '>I')  # read length
                data = data[4:]
                out.append(data[:n])
                data = data[n + (-n % 4):]
            return np.array([ text_type(x.decode('ascii')) for x in out ], 'S')
        else:
            return np.fromstring(data, self.dtype).reshape(shape)
Example #13
    def __getitem__(self, index):
        # build download url
        index = combine_slices(self.slice, fix_slice(index, self.shape))
        scheme, netloc, path, query, fragment = urlsplit(self.baseurl)
        url = urlunsplit((
                scheme, netloc, path + '.dods',
                self.id + hyperslab(index) + '&' + query,
                fragment)).rstrip('&')

        # download and unpack data
        r = requests.get(url)
        r.raise_for_status()
        dds, data = r.content.split('\nData:\n', 1)
        
        if self.shape:
            # skip size packing
            if self.dtype.char == 'S':
                data = data[4:]
            else:
                data = data[8:]

        # calculate array size
        shape = tuple((s.stop-s.start)/s.step for s in index)
        size = np.prod(shape)

        if self.dtype == np.byte:
            return np.fromstring(data[:size], 'B')
        elif self.dtype.char == 'S':
            out = []
            for word in range(size):
                n = np.fromstring(data[:4], '>I')  # read length
                data = data[4:]
                out.append(data[:n])
                data = data[n + (-n % 4):]
            return np.array(out, 'S')
        else:
            return np.fromstring(data, self.dtype).reshape(shape)
Example #14
    def __iter__(self):
        scheme, netloc, path, query, fragment = urlsplit(self.url)
        id_ = ','.join('%s.%s' % (self.id, child) for child in self.children) or self.id
        url = urlunsplit((
                scheme, netloc, path + '.dods',
                quote(id_) + hyperslab(self._slice) + '&' + query,
                fragment))

        resp, data = request(url)
        dds, xdrdata = data.split('\nData:\n', 1)
        dataset = DDSParser(dds).parse()
        dataset.data = DapUnpacker(xdrdata, dataset).getvalue()
        dataset._set_id()

        # Strip any projections from the request id.
        id_ = re.sub(r'\[.*?\]', '', self.id)
        # And return the proper data.
        for var in walk(dataset):
            if var.id == id_:
                data = var.data
                if isinstance(var, SequenceType):
                    order = [var.keys().index(k) for k in self.children]
                    data = reorder(order, data, var._nesting_level)
                return iter(data)
Example #15
    def __iter__(self):
        scheme, netloc, path, query, fragment = urlsplit(self.url)
        id_ = ','.join('%s.%s' % (self.id, child)
                       for child in self.children) or self.id
        url = urlunsplit(
            (scheme, netloc, path + '.dods',
             id_ + hyperslab(self._slice) + '&' + query, fragment))

        resp, data = request(url)
        dds, xdrdata = data.split('\nData:\n', 1)
        dataset = DDSParser(dds).parse()
        dataset.data = DapUnpacker(xdrdata, dataset).getvalue()
        dataset._set_id()

        # Strip any projections from the request id.
        id_ = re.sub(r'\[.*?\]', '', self.id)
        # And return the proper data.
        for var in walk(dataset):
            if var.id == id_:
                data = var.data
                if isinstance(var, SequenceType):
                    order = [var.keys().index(k) for k in self.children]
                    data = reorder(order, data, var._nesting_level)
                return iter(data)
Example #16
 def __repr__(self):
     return '<%s pointing to variable "%s%s" at "%s">' % (
             self.__class__.__name__, self.id, hyperslab(self._slice), self.url)
Example #17
 def test_no_tuple(self):
     """Test that slices that are not tuples work."""
     slice_ = slice(0)
     self.assertEqual(hyperslab(slice_), "[0:1:%d]" % (MAXSIZE-1))
Example #18
 def __repr__(self):
     id_ = ','.join('%s.%s' % (self.id, child) for child in self.children) or self.id
     return '<%s pointing to variable "%s%s" at "%s">' % (
             self.__class__.__name__, id_, hyperslab(self._slice), self.url)
Example #19
 def test_remove(self):
     """Test that excess slices are removed."""
     slice_ = (slice(0), slice(None))
     self.assertEqual(hyperslab(slice_), "[0:1:%d]" % (MAXSIZE-1))
Example #20
 def test_ndimensional(self):
     """Test n-dimensions slices."""
     slice_ = (slice(1, 10, 1), slice(2, 10, 2))
     self.assertEqual(hyperslab(slice_), "[1:1:9][2:2:9]")
Example #21
 def test_no_tuple(self):
     """Test that slices that are not tuples work."""
     slice_ = slice(0)
     self.assertEqual(hyperslab(slice_), "[0:1:%d]" % (MAXSIZE - 1))
Example #22
 def test_remove(self):
     """Test that excess slices are removed."""
     slice_ = (slice(0), slice(None))
     self.assertEqual(hyperslab(slice_), "[0:1:%d]" % (MAXSIZE - 1))
Example #23
 def test_ndimensional(self):
     """Test n-dimensions slices."""
     slice_ = (slice(1, 10, 1), slice(2, 10, 2))
     self.assertEqual(hyperslab(slice_), "[1:1:9][2:2:9]")
Example #24
 def __repr__(self):
     id_ = ','.join('%s.%s' % (self.id, child)
                    for child in self.children) or self.id
     return '<%s pointing to variable "%s%s" at "%s">' % (
         self.__class__.__name__, id_, hyperslab(self._slice), self.url)
Example #25
 def __repr__(self):
     return '<%s pointing to variable "%s%s" at "%s">' % (
         self.__class__.__name__, self.id, hyperslab(self._slice), self.url)