def __getitem__(self, key):
    """Returns the specified piece of this IOTensor.

    Args:
      key: An `int` index or a `slice` selecting rows along the first axis.

    Returns:
      A `Tensor` with the selected rows; an `int` key returns the single
      row itself (first axis collapsed).

    Raises:
      IndexError: If an integer `key` is out of range.
    """
    if isinstance(key, slice):
        # Open-ended slices carry None for start/stop; map them to the
        # kernel's conventions (0 = from the beginning, -1 = to the end),
        # matching to_tensor's full read of (0, -1).
        start = 0 if key.start is None else key.start
        stop = -1 if key.stop is None else key.stop
        return core_ops.io_hdf5_readable_read(
            self._resource, self._component, self._shape,
            start, stop, dtype=self._dtype)
    # Integer index: read the single-row range [key, key + 1).
    # NOTE(review): the original passed only (resource, key, key + 1) here,
    # binding key/key+1 into the component/shape positions used by the
    # slice branch above; pass component/shape explicitly so both branches
    # agree with the op signature.
    item = core_ops.io_hdf5_readable_read(
        self._resource, self._component, self._shape,
        key, key + 1, dtype=self._dtype)
    if tf.shape(item)[0] == 0:
        raise IndexError("index %s is out of range" % key)
    return item[0]
def __getitem__(self, key):
    """Returns the specified piece of this IOTensor.

    Args:
      key: An `int`, a `slice`, or a tuple of those — one entry per
        dimension to index.

    Returns:
      A `Tensor` with the selected slice; dimensions indexed by an `int`
      are collapsed, dimensions indexed by a `slice` are kept.
    """
    # Always process a tuple of per-dimension keys.
    if not isinstance(key, tuple):
        key = (key,)
    # Per-dimension (start, stop); an int k selects the range [k, k + 1).
    bounds = [
        (k.start, k.stop) if isinstance(k, slice) else (k, k + 1)
        for k in key
    ]
    starts, stops = zip(*bounds)
    # The kernel uses 0 for "from the beginning" and -1 for "to the end".
    start = [0 if e is None else e for e in starts]
    stop = [-1 if e is None else e for e in stops]
    item = core_ops.io_hdf5_readable_read(
        input=self._filename,
        shared=self._filename,
        component=self._component,
        shape=self._shape,
        start=start,
        stop=stop,
        dtype=self._dtype,
        container="HDF5IOTensor",
    )
    # Collapse every dimension that was indexed by an int: int dims take
    # index 0 of the single-row read, slice dims keep `:`. Use a tuple —
    # the canonical multi-dimensional index form (a list risks being
    # interpreted as advanced/gather indexing).
    collapse = tuple(
        slice(None) if isinstance(k, slice) else 0 for k in key)
    return item.__getitem__(collapse)
def f(start, stop):
    # Number of rows requested along the leading axis.
    count = tf.convert_to_tensor([stop - start], tf.int64)
    # Full shape of the read: `count` rows, trailing dimensions unchanged.
    read_shape = tf.concat([count, self._shape[1:]], axis=0)
    return core_ops.io_hdf5_readable_read(
        self._resource,
        start=start,
        shape=read_shape,
        component=self._component,
        dtype=self._dtype)
def __init__(self, filename, internal=False):
    """Creates an HDF5IOTensor from `filename`, one element per column."""
    with tf.name_scope("HDF5IOTensor") as scope:
        # TODO: unique shared_name might be removed if HDF5 is thread-safe?
        resource, columns = core_ops.io_hdf5_readable_init(
            filename,
            container=scope,
            shared_name="%s/%s" % (filename, uuid.uuid4().hex))
        column_names = [c.decode() for c in columns.numpy().tolist()]
        elements = []
        for name in column_names:
            # Query per-column shape/dtype from the opened resource.
            column_shape, column_dtype = core_ops.io_hdf5_readable_spec(
                resource, name)
            column_shape = tf.TensorShape(column_shape.numpy())
            column_dtype = tf.as_dtype(column_dtype.numpy())
            column_spec = tf.TensorSpec(column_shape, column_dtype, name)
            if column_shape.rank == 0:
                # Scalar datasets are read eagerly and wrapped as-is.
                value = core_ops.io_hdf5_readable_read(
                    resource, 0, column_shape, name, column_dtype)
                element = io_tensor_ops.ScalarIOTensor(
                    column_spec, value, internal=internal)
            else:
                # Non-scalar datasets are read lazily through a bound
                # read function.
                function = _HDF5IOTensorFunction(
                    core_ops.io_hdf5_readable_read,
                    resource, name, column_shape, column_dtype)
                element = io_tensor_ops.BaseIOTensor(
                    column_spec, function, internal=internal)
            elements.append(element)
        spec = tuple(e.spec for e in elements)
        super(HDF5IOTensor, self).__init__(
            spec, column_names, elements, internal=internal)
def f(start, stop):
    # Read rows [start, stop) of the component via the bound resource.
    read_args = dict(
        component=self._component,
        shape=self._shape,
        start=start,
        stop=stop,
        dtype=self._dtype,
    )
    return core_ops.io_hdf5_readable_read(self._resource, **read_args)
def f(start, stop):
    """Read rows [start, stop) of the HDF5 component for this dataset."""
    filename = self._filename
    return core_ops.io_hdf5_readable_read(
        input=filename,
        # The filename doubles as the shared-resource name.
        shared=filename,
        component=self._component,
        shape=self._shape,
        start=start,
        stop=stop,
        dtype=self._dtype,
        container="HDF5IODataset",
    )
def to_tensor(self):
    """Converts this `IOTensor` into a `tf.Tensor`.

    Returns:
      A `Tensor` holding the entire contents of this `IOTensor`.
    """
    # start=0 with stop=-1 asks the kernel for every row.
    start, stop = 0, -1
    return core_ops.io_hdf5_readable_read(
        self._resource, self._component, self._shape,
        start, stop, dtype=self._dtype)
def to_tensor(self):
    """Converts this `IOTensor` into a `tf.Tensor`.

    Returns:
      A `Tensor` holding the entire contents of this `IOTensor`.
    """
    # start=0 / stop=-1 ("to the end") reads the whole component; the
    # filename doubles as the shared-resource name.
    read_args = dict(
        input=self._filename,
        shared=self._filename,
        component=self._component,
        shape=self._shape,
        start=0,
        stop=-1,
        dtype=self._dtype,
        container="HDF5IOTensor",
    )
    return core_ops.io_hdf5_readable_read(**read_args)