def calculate_size(dataset):
    """Calculate the size in bytes of the DAP response for *dataset*.

    Returns the length as a string (suitable for a ``Content-Length``
    header), or ``None`` when the size cannot be determined up front
    (sequences are streamed and strings are encoded individually).
    """
    length = 0
    for var in walk(dataset):
        # Pydap can't calculate the size of sequences since the data is
        # streamed directly from the source. Also, strings are encoded
        # individually, so it's not possible to get their size unless we
        # read everything.
        if (isinstance(var, SequenceType) or
                (isinstance(var, BaseType) and var.data.dtype.char == 'S')):
            return None
        elif isinstance(var, BaseType):
            if var.shape:
                length += 8  # account for array size marker

            # BUGFIX: np.prod(()) returns a *float* (1.0), so for scalar
            # variables the running total would become a float and
            # str(length) would end in ".0". Cast to int.
            size = int(np.prod(var.shape))
            if var.data.dtype == np.byte:
                # XDR packs the bytes and then pads to a 4-byte boundary.
                length += size + (-size % 4)
            elif var.data.dtype == np.short:
                # Shorts go on the wire as 4-byte integers in XDR.
                length += size * 4
            else:
                length += size * var.data.dtype.itemsize

    # account for DDS
    length += len(''.join(dds_dispatch(dataset))) + len('Data:\n')
    return str(length)
def calculate_size(dataset):
    """Calculate the size in bytes of the DAP response for *dataset*.

    Returns the length as a string, or ``None`` when it cannot be
    determined ahead of time (sequences are streamed directly, and
    string/URL values are encoded individually).
    """
    size = 0
    for var in walk(dataset):
        # Pydap can't calculate the size of a dataset with a
        # Sequence since the data is streamed directly. The same
        # applies to strings and URLs, which are encoded one by one.
        if (isinstance(var, SequenceType) or
                (isinstance(var, BaseType) and var.type in [Url, String])):
            return None
        elif isinstance(var, BaseType):
            # account for array size marker
            if var.shape:
                size += 8

            # number of elements in the variable
            if var.shape == ():
                vsize = 1
            else:
                vsize = numpy.prod(var.shape)

            if var.type == Byte:
                # BUGFIX: XDR packs the bytes themselves and then pads
                # to a 4-byte boundary; the original counted only the
                # padding (-vsize % 4) and dropped the data bytes.
                size += vsize + (-vsize % 4)
            else:
                size += vsize * var.type.size

    # account for DDS
    size += len(''.join(dds_dispatch(dataset))) + len('Data:\n')
    return str(size)
def calculate_size(dataset):
    """Calculate the size of the response.

    Returns the byte length as a string, or ``None`` when it cannot be
    computed ahead of time: sequences are streamed from the source, and
    strings are encoded one at a time.
    """
    total = 0
    for var in walk(dataset):
        is_base = isinstance(var, BaseType)

        # Sizes of sequences (streamed) and of string arrays (encoded
        # individually) can't be known without reading all the data.
        if isinstance(var, SequenceType) or (is_base and var.data.dtype.char == 'S'):
            return None
        if not is_base:
            continue

        if var.shape:
            total += 8  # array size marker

        count = int(np.prod(var.shape))
        dtype = var.data.dtype
        if dtype == np.byte:
            # Data bytes plus padding to a 4-byte boundary.
            total += count + (-count % 4)
        elif dtype == np.short:
            # Shorts are transmitted as 4-byte integers.
            total += count * 4
        else:
            # Use the on-the-wire OpenDAP size for this dtype.
            total += count * np.dtype(typemap[dtype.char]).itemsize

    # Add the DDS text and the "Data:\n" separator.
    total += len(''.join(dds_dispatch(dataset))) + len('Data:\n')
    return str(total)
def serialize(dataset):
    """Yield the DAP response for *dataset* as bytes: DDS, separator, data."""
    # DDS section, encoded to bytes.
    for chunk in dds_dispatch(dataset):
        yield chunk.encode('utf-8')
    yield b'Data:\n'

    # XDR-packed data section.
    for chunk in DapPacker(dataset):
        yield chunk

    # Give the dataset a chance to release its resources.
    if hasattr(dataset, 'close'):
        dataset.close()
def serialize(dataset):
    """Yield the DAP response for *dataset*: DDS text, separator, packed data."""
    # DDS section.
    for chunk in dds_dispatch(dataset):
        yield chunk
    yield 'Data:\n'

    # Data section.
    for chunk in DapPacker(dataset):
        yield chunk

    # Let the dataset clean up if it supports it.
    if hasattr(dataset, 'close'):
        dataset.close()
def serialize(dataset):
    """Yield the response as bytes: DDS, a dashed divider, then the data."""
    # DDS section, encoded to bytes.
    for chunk in dds_dispatch(dataset):
        yield chunk.encode("utf-8")

    # Divider between header and data: 45 dashes and a newline.
    yield b'-' * 45
    yield b'\n'

    # Data section.
    for chunk in dispatch(dataset):
        yield chunk.encode("utf-8")

    # Release any resources held by the dataset.
    if hasattr(dataset, 'close'):
        dataset.close()
def serialize(dataset):
    """Yield the response as text: DDS, a dashed divider, then the data."""
    # DDS section.
    for chunk in dds_dispatch(dataset):
        yield chunk

    # Divider between header and data: 45 dashes and a newline.
    yield '-' * 45
    yield '\n'

    # Data section.
    for chunk in dispatch(dataset):
        yield chunk

    # Allow the dataset to free its resources.
    if hasattr(dataset, 'close'):
        dataset.close()
def __iter__(self):
    """Yield the DDS, the ``Data:`` separator, then the data blocks."""
    # DDS section.
    for line in dds_dispatch(self.dataset):
        yield line
    yield 'Data:\n'

    # Data section.
    for block in dispatch(self.dataset):
        yield block

    # Close the dataset if it supports closing.
    if hasattr(self.dataset, 'close'):
        self.dataset.close()