def __setitem__(self, name, obj):
    """ Add an object to the group.  The name must not already be in use.

    The action taken depends on the type of object assigned:

    Named HDF5 object (Dataset, Group, Datatype)
        A hard link is created at "name" which points to the given object.

    SoftLink or ExternalLink
        Create the corresponding link.

    Numpy ndarray
        The array is converted to a dataset object, with default settings
        (contiguous storage, etc.).

    Numpy dtype
        Commit a copy of the datatype as a named datatype in the file.

    Anything else
        Attempt to convert it to an ndarray and store it.  Scalar values
        are stored as scalar datasets.  Raise ValueError if we can't
        understand the resulting array dtype.
    """
    #name, lcpl = self._e(name, lcpl=True)

    if isinstance(obj, HLObject):
        body = {'id': obj.id.uuid}
        req = "/groups/" + self.id.uuid + "/links/" + name
        self.PUT(req, body=body)

    elif isinstance(obj, SoftLink):
        body = {'h5path': obj.path}
        req = "/groups/" + self.id.uuid + "/links/" + name
        self.PUT(req, body=body)
        #self.id.links.create_soft(name, self._e(obj.path),
        #              lcpl=lcpl, lapl=self._lapl)

    elif isinstance(obj, ExternalLink):
        body = {'h5path': obj.path, 'h5domain': obj.filename}
        req = "/groups/" + self.id.uuid + "/links/" + name
        self.PUT(req, body=body)
        #self.id.links.create_external(name, self._e(obj.filename),
        #              self._e(obj.path), lcpl=lcpl, lapl=self._lapl)

    elif isinstance(obj, numpy.dtype):
        # Commit the dtype as a named datatype, then link it under "name".
        type_json = h5json.getTypeItem(obj)
        req = "/datatypes"
        body = {'type': type_json}
        rsp = self.POST(req, body=body)
        body['id'] = rsp['id']
        type_id = TypeID(self, body)
        req = "/groups/" + self.id.uuid + "/links/" + name
        body = {'id': type_id.uuid}
        self.PUT(req, body=body)
        #htype = h5t.py_create(obj)
        #htype.commit(self.id, name, lcpl=lcpl)

    else:
        pass #todo
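# Illustrative usage sketch for __setitem__, kept as comments so the module is
# unaffected.  Assumes `grp` is an h5pyd Group opened read-write and that
# SoftLink/ExternalLink are the link classes handled above; the names and
# paths are hypothetical.
#
#     grp["hard"] = grp["dset1"]                      # hard link to an existing object
#     grp["soft"] = SoftLink("/g1/dset1")             # soft link by HDF5 path
#     grp["ext"] = ExternalLink("other_file", "/g2")  # external link (domain + path)
#     grp["mytype"] = numpy.dtype("float32")          # commit a named datatype
#
# Assigning a plain ndarray (the "anything else" case in the docstring) is
# still unimplemented above; it falls through to the trailing `pass #todo`.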
def create(self, name, data, shape=None, dtype=None):
    """ Create a new attribute, overwriting any existing attribute.

    name
        Name of the new attribute (required)
    data
        An array to initialize the attribute (required)
    shape
        Shape of the attribute.  Overrides data.shape if both are given,
        in which case the total number of points must be unchanged.
    dtype
        Data type of the attribute.  Overrides data.dtype if both are given.
    """
    with phil:
        # First, make sure we have a NumPy array.  We leave the data type
        # conversion for HDF5 to perform.
        data = numpy.asarray(data, order='C')
        if shape is None:
            shape = data.shape

        use_htype = None    # If a committed type is given, we must use it
                            # in the call to h5a.create.

        if isinstance(dtype, Datatype):
            use_htype = dtype.id
            dtype = dtype.dtype
        elif dtype is None:
            dtype = data.dtype
        else:
            dtype = numpy.dtype(dtype)  # In case a string, e.g. 'i8' is passed

        original_dtype = dtype  # We'll need this for top-level array types

        # Where a top-level array type is requested, we have to do some
        # fiddling around to present the data as a smaller array of
        # subarrays.
        if dtype.subdtype is not None:
            subdtype, subshape = dtype.subdtype

            # Make sure the subshape matches the last N axes' sizes.
            if shape[-len(subshape):] != subshape:
                raise ValueError("Array dtype shape %s is incompatible with data shape %s" % (subshape, shape))

            # New "advertised" shape and dtype
            shape = shape[0:len(shape)-len(subshape)]
            dtype = subdtype

        # Not an array type; make sure the number of elements is compatible,
        # and reshape if needed.
        else:
            if numpy.prod(shape) != numpy.prod(data.shape):
                raise ValueError("Shape of new attribute conflicts with shape of data")

            if shape != data.shape:
                data = data.reshape(shape)

        # We need this to handle special string types.
        data = numpy.asarray(data, dtype=dtype)

        # Build the JSON type description for the request body.
        if use_htype is None:
            type_json = h5json.getTypeItem(dtype)
            #htype = h5t.py_create(original_dtype, logical=True)
            #htype2 = h5t.py_create(original_dtype)  # Must be bit-for-bit representation rather than logical
        else:
            # A committed (named) datatype was supplied; reuse its stored JSON
            # description.  Assumes TypeID exposes it as .type_json, as
            # make_new_dset does.
            type_json = use_htype.type_json
        #space = h5s.create_simple(shape)

        # In native HDF5 an attribute can't be overwritten in place (h5py
        # writes to a temporary attribute and then renames); here we simply
        # PUT the attribute under its final name.
        #tempname = uuid.uuid4().hex

        req = self._req_prefix + name
        body = {}
        body['type'] = type_json
        body['shape'] = shape
        body['value'] = data.tolist()
        self._parent.PUT(req, body=body)
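# Illustrative usage sketch for AttributeManager.create, kept as comments.
# Assumes `obj` is an open h5pyd Group or Dataset; attribute names and
# values are hypothetical.
#
#     obj.attrs.create("counts", [1, 2, 3, 4], dtype="int32")
#     obj.attrs.create("grid", numpy.arange(6), shape=(2, 3))  # reshaped before writing
#
# Each call serializes the value with .tolist() and PUTs a JSON body of the
# form {"type": <type_json>, "shape": <shape>, "value": <nested lists>} to
# self._req_prefix + name, replacing any existing attribute of that name.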
def make_new_dset(parent, shape=None, dtype=None, data=None,
                  chunks=None, compression=None, shuffle=None,
                  fletcher32=None, maxshape=None, compression_opts=None,
                  fillvalue=None, scaleoffset=None, track_times=None):
    """ Return a new low-level dataset identifier

    Only creates anonymous datasets.
    """

    # Convert data to a C-contiguous ndarray
    if data is not None:
        from . import base
        data = numpy.asarray(data, order="C", dtype=base.guess_dtype(data))

    # Validate shape
    if shape is None:
        if data is None:
            raise TypeError("Either data or shape must be specified")
        shape = data.shape
    else:
        shape = tuple(shape)
        if data is not None and (numpy.prod(shape) != numpy.prod(data.shape)):
            raise ValueError("Shape tuple is incompatible with data")

    tmp_shape = maxshape if maxshape is not None else shape
    # Validate chunk shape: no chunk dimension may exceed the corresponding
    # (max)shape dimension.
    if isinstance(chunks, tuple) and any(
            not i >= j for i, j in zip(tmp_shape, chunks) if i is not None):
        errmsg = "Chunk shape must not be greater than data shape in any dimension. "\
                 "{} is not compatible with {}".format(chunks, shape)
        raise ValueError(errmsg)

    if isinstance(dtype, Datatype):
        # Named types are used as-is
        type_json = dtype.id.type_json
    else:
        # Validate dtype
        if dtype is None and data is None:
            dtype = numpy.dtype("=f4")
        elif dtype is None and data is not None:
            dtype = data.dtype
        else:
            dtype = numpy.dtype(dtype)

        if dtype.kind == 'S' and dtype.metadata and dtype.metadata.get('ref'):
            # Object/region references are represented as string dtypes
            # carrying 'ref' metadata.
            type_json = {}
            type_json["class"] = "H5T_REFERENCE"
            meta_type = dtype.metadata['ref']
            if meta_type is Reference:
                type_json["base"] = "H5T_STD_REF_OBJ"
            elif meta_type is RegionReference:
                type_json["base"] = "H5T_STD_REF_DSETREG"
            else:
                errmsg = "Unexpected metadata type"
                raise ValueError(errmsg)
        else:
            type_json = h5json.getTypeItem(dtype)
            #tid = h5t.py_create(dtype, logical=1)

    # Legacy
    if any((compression, shuffle, fletcher32, maxshape, scaleoffset)) and chunks is False:
        raise ValueError("Chunked format required for given storage options")

    # Legacy
    if compression is True:
        if compression_opts is None:
            compression_opts = 4
        compression = 'gzip'

    # Legacy
    if compression in _LEGACY_GZIP_COMPRESSION_VALS:
        if compression_opts is not None:
            raise TypeError("Conflict in compression options")
        compression_opts = compression
        compression = 'gzip'

    # todo
    #dcpl = filters.generate_dcpl(shape, dtype, chunks, compression, compression_opts,
    #         shuffle, fletcher32, maxshape, scaleoffset)
    dcpl = None

    if fillvalue is not None:
        fillvalue = numpy.array(fillvalue)
        #todo
        #dcpl.set_fill_value(fillvalue)

    """
    if track_times in (True, False):
        dcpl.set_obj_track_times(track_times)
    elif track_times is not None:
        raise TypeError("track_times must be either True or False")
    """

    if maxshape is not None:
        maxshape = tuple(m if m is not None else 0 for m in maxshape)
    #sid = h5s.create_simple(shape, maxshape)
    #dset_id = h5d.create(parent.id, None, tid, sid, dcpl=dcpl)

    # Create the (anonymous) dataset on the server.
    req = "/datasets"
    body = {'type': type_json}
    body['shape'] = shape
    if maxshape is not None:
        body['maxdims'] = maxshape
    rsp = parent.POST(req, body=body)

    # Fetch the server's view of the new dataset to populate the DatasetID.
    json_rep = {}
    json_rep['id'] = rsp['id']

    req = '/datasets/' + rsp['id']
    rsp = parent.GET(req)

    json_rep['shape'] = rsp['shape']
    json_rep['type'] = rsp['type']
    if 'creationProperties' in rsp:
        json_rep['creationProperties'] = rsp['creationProperties']
    else:
        json_rep['creationProperties'] = {}

    dset_id = DatasetID(parent, json_rep)

    # Write the initial data, if any.
    if data is not None:
        req = "/datasets/" + dset_id.uuid + "/value"
        body = {}
        body['value'] = data.tolist()
        parent.PUT(req, body=body)

    return dset_id
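# Illustrative usage sketch for make_new_dset, kept as comments.  `grp` is
# assumed to be an open h5pyd Group; this helper only creates an anonymous
# dataset, so callers are expected to link the returned object themselves.
#
#     dset_id = make_new_dset(grp, shape=(10, 10), dtype="float32",
#                             maxshape=(None, 10))
#
# This POSTs {"type": ..., "shape": (10, 10), "maxdims": (0, 10)} to
# /datasets (None in maxshape is sent as 0), GETs the new object's metadata
# back, and wraps it in a DatasetID.  Passing data= additionally PUTs the
# values to /datasets/<uuid>/value.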