def testWriteUlongArray(self):
    a = _pyhl.nodelist()
    try:
        self.addArrayValueNode(a, _pyhl.ATTRIBUTE_ID, "/ulongarray", -1, [4], [1, 2, 3, 4], "ulong", -1)
        self.fail("Expected TypeError")
    except TypeError:
        pass
def testReadWriteVariousTypes(self):
    a = _pyhl.nodelist()
    b = _pyhl.node(_pyhl.GROUP_ID, "/info")
    a.addNode(b)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/info/xscale")
    b.setScalarValue(-1, 0.85, "double", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/info/yscale")
    b.setScalarValue(-1, 1.0, "float", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/info/xsize")
    b.setScalarValue(-1, 240, "int", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/info/ysize")
    b.setScalarValue(-1, 480, "long", -1)
    a.addNode(b)
    a.write(self.TESTFILE)

    # verify
    a = _pyhl.read_nodelist(self.TESTFILE)
    self.assertEqual("double", a.fetchNode("/info/xscale").format())
    self.assertEqual("float", a.fetchNode("/info/yscale").format())
    self.assertEqual("int", a.fetchNode("/info/xsize").format())
    self.assertEqual("long", a.fetchNode("/info/ysize").format())
def writeHac(self, fstr, compression=0):
    nodelist = _pyhl.nodelist()

    node = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/accumulation_count")
    node.setScalarValue(-1, self.hac.getAttribute("how/count"), "long", -1)
    nodelist.addNode(node)

    node = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/validity_time_of_last_update")
    node.setScalarValue(-1, int(time.time()), "long", -1)
    nodelist.addNode(node)

    node = _pyhl.node(_pyhl.DATASET_ID, "/hit_accum")
    node.setArrayValue(-1, [self.hac.ysize, self.hac.xsize], self.hac.getData(), "uint", -1)
    nodelist.addNode(node)

    fcp = _pyhl.filecreationproperty()
    fcp.userblock = 0
    fcp.sizes = (4, 4)
    fcp.sym_k = (1, 1)
    fcp.istore_k = 1
    fcp.meta_block_size = 0

    path = os.path.split(fstr)[0]
    if not os.path.isdir(path):
        os.makedirs(path)

    nodelist.write(fstr, compression, fcp)
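# Hedged companion sketch (not part of the original module): reading back a
# file produced by writeHac above, using the read_nodelist/fetchNode pattern
# seen throughout this suite. The node names come from writeHac; the readHac
# name and its return tuple are hypothetical.
def readHac(fstr):
    nodelist = _pyhl.read_nodelist(fstr)
    count = nodelist.fetchNode("/accumulation_count").data()
    updated = nodelist.fetchNode("/validity_time_of_last_update").data()
    accum = nodelist.fetchNode("/hit_accum").data()
    return count, updated, accum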
def testWriteNamedCompoundAttribute(self):
    a = _pyhl.nodelist()
    rinfo_obj = _rave_info_type.object()
    rinfo_type = _rave_info_type.type()
    rinfo_obj.xsize = 98
    rinfo_obj.ysize = 97
    rinfo_obj.xscale = 120.0
    rinfo_obj.yscale = 130.0
    rinfo_obj.area_extent = (33.0, 32.0, 31.0, 30.0)
    self.addTypeNode(a, "/RaveType", rinfo_type.hid())
    self.addScalarValueNode(a, _pyhl.ATTRIBUTE_ID, "/attribute", rinfo_type.size(), rinfo_obj.tostring(), "compound", rinfo_type.hid())
    a.write(self.TESTFILE)

    # verify
    a = _pyhl.read_nodelist(self.TESTFILE)
    b = a.fetchNode("/RaveType")
    self.assertEqual("UNDEFINED", b.format())
    self.assertEqual(_pyhl.TYPE_ID, b.type())
    b = a.fetchNode("/attribute")
    self.assertEqual("compound", b.format())
    self.assertEqual(_pyhl.ATTRIBUTE_ID, b.type())
    result = b.compound_data()
    self.assertEqual(98, result['xsize'])
    self.assertEqual(97, result['ysize'])
    self.assertEqual(120, result['xscale'])
    self.assertEqual(130, result['yscale'])
    self.assertTrue(numpy.all([33.0, 32.0, 31.0, 30.0] == result['area_extent']))
def testReadWriteSameFile(self):
    a = _pyhl.nodelist()
    b = _pyhl.node(_pyhl.GROUP_ID, "/slask")
    a.addNode(b)
    a.write(self.TESTFILE)
    a = _pyhl.read_nodelist(self.TESTFILE)
    a.write(self.TESTFILE2)
def testWriteOnlyRootGroup(self):
    a = _pyhl.nodelist()
    b = _pyhl.node(_pyhl.GROUP_ID, "/")
    a.addNode(b)
    try:
        a.write(self.TESTFILE)
        self.fail("Expected IOError")
    except IOError:
        pass
def _convertVP(self, quantities, filename):
    nodelist = _pyhl.nodelist()
    quantities = [str(x) for x in quantities.split(",")]
    # Populate the nodelist with attributes and datasets needed for DIANA
    self._addVPInformation(nodelist, quantities)
    # Create the filename and send the modified file back to b2d
    self._converted_files.append((nodelist, filename))
def testAddDuplicate_GroupNode(self):
    nodelist = _pyhl.nodelist()
    nodelist.addNode(_pyhl.node(_pyhl.GROUP_ID, "/root"))
    nodelist.addNode(_pyhl.node(_pyhl.GROUP_ID, "/root/group1"))
    nodelist.addNode(_pyhl.node(_pyhl.GROUP_ID, "/root/group2"))
    try:
        nodelist.addNode(_pyhl.node(_pyhl.GROUP_ID, "/root/group1"))
        self.fail("Expected IOError")
    except IOError:
        pass
def testWriteDoubleArray(self):
    a = _pyhl.nodelist()
    self.addArrayValueNode(a, _pyhl.ATTRIBUTE_ID, "/doublearray", -1, [4], [1.1, 2.2, 3.3, 4.4], "double", -1)
    a.write(self.TESTFILE)

    # verify
    a = _pyhl.read_nodelist(self.TESTFILE)
    b = a.fetchNode("/doublearray")
    self.assertEqual("double", b.format())
    self.assertEqual(_pyhl.ATTRIBUTE_ID, b.type())
    self.assertTrue(numpy.all([1.1, 2.2, 3.3, 4.4] == b.data()))
def testWriteDouble(self):
    a = _pyhl.nodelist()
    self.addScalarValueNode(a, _pyhl.ATTRIBUTE_ID, "/doublevalue", -1, 1.123, "double", -1)
    a.write(self.TESTFILE)

    # verify
    a = _pyhl.read_nodelist(self.TESTFILE)
    b = a.fetchNode("/doublevalue")
    self.assertEqual("double", b.format())
    self.assertEqual(_pyhl.ATTRIBUTE_ID, b.type())
    self.assertAlmostEqual(1.123, b.data(), 4)
def testWriteStringArray(self):
    a = _pyhl.nodelist()
    self.addArrayValueNode(a, _pyhl.ATTRIBUTE_ID, "/stringarray", -1, [4], ["abc", "def", "ghi", "jkl"], "string", -1)
    a.write(self.TESTFILE)

    # verify
    a = _pyhl.read_nodelist(self.TESTFILE)
    b = a.fetchNode("/stringarray")
    self.assertEqual("string", b.format())
    self.assertEqual(_pyhl.ATTRIBUTE_ID, b.type())
    self.assertTrue(numpy.all(["abc", "def", "ghi", "jkl"] == b.data()))
def testWriteLongArray(self):
    a = _pyhl.nodelist()
    self.addArrayValueNode(a, _pyhl.ATTRIBUTE_ID, "/longarray", -1, [4], [1, 2, 3, 4], "long", -1)
    a.write(self.TESTFILE)

    # verify
    a = _pyhl.read_nodelist(self.TESTFILE)
    b = a.fetchNode("/longarray")
    self.assertEqual("long", b.format())
    self.assertEqual(_pyhl.ATTRIBUTE_ID, b.type())
    self.assertTrue(numpy.all([1, 2, 3, 4] == b.data()))
def testWriteLong(self):
    a = _pyhl.nodelist()
    self.addScalarValueNode(a, _pyhl.ATTRIBUTE_ID, "/longvalue", -1, 987654, "long", -1)
    a.write(self.TESTFILE)

    # verify
    a = _pyhl.read_nodelist(self.TESTFILE)
    b = a.fetchNode("/longvalue")
    self.assertEqual("long", b.format())
    self.assertEqual(_pyhl.ATTRIBUTE_ID, b.type())
    self.assertEqual(987654, b.data())
def testWriteInt(self):
    a = _pyhl.nodelist()
    self.addScalarValueNode(a, _pyhl.ATTRIBUTE_ID, "/intvalue", -1, -123, "int", -1)
    a.write(self.TESTFILE)

    # verify
    a = _pyhl.read_nodelist(self.TESTFILE)
    b = a.fetchNode("/intvalue")
    self.assertEqual("int", b.format())
    self.assertEqual(_pyhl.ATTRIBUTE_ID, b.type())
    self.assertEqual(-123, b.data())
def testWriteReference(self):
    a = _pyhl.nodelist()
    self.addArrayValueNode(a, _pyhl.DATASET_ID, "/doublearray", -1, [4], [1.1, 2.2, 3.3, 4.4], "double", -1)
    self.addReference(a, "/reference", "/doublearray")
    a.write(self.TESTFILE)

    # verify
    a = _pyhl.read_nodelist(self.TESTFILE)
    b = a.fetchNode("/reference")
    self.assertEqual("string", b.format())
    self.assertEqual(_pyhl.REFERENCE_ID, b.type())
    self.assertEqual("/doublearray", b.data())
def open(self, filename, mode="w"):
    "open (filename, 'w')"
    self.close()
    self.__dict__['_filename'] = filename
    if mode == "w":
        self.__dict__['_nodelist'] = _pyhl.nodelist()
        self.set_directory("/data")
        self.make_directory(self._pwd)
    #elif mode == "a":
    #    self.__dict__['_nodelist'] = pypdb.append(filename)
    else:
        raise ValueError("Improper mode: " + mode)
def testWriteString(self):
    # execute
    a = _pyhl.nodelist()
    self.addScalarValueNode(a, _pyhl.ATTRIBUTE_ID, "/stringvalue", -1, "My String", "string", -1)
    a.write(self.TESTFILE)

    # verify
    a = _pyhl.read_nodelist(self.TESTFILE)
    b = a.fetchNode("/stringvalue")
    self.assertEqual("string", b.format())
    self.assertEqual(_pyhl.ATTRIBUTE_ID, b.type())
    self.assertEqual("My String", b.data())
def Array2Tempfile(value):
    import rave_tempfile
    _, fstr = rave_tempfile.mktemp()
    a = _pyhl.nodelist()
    b = _pyhl.node(_pyhl.GROUP_ID, "/dataset1")
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/dataset1/data")
    h5typ = ARRAYTYPES[value.dtype.char]
    b.setArrayValue(-1, list(value.shape), value, h5typ, -1)
    a.addNode(b)
    a.write(fstr)
    return fstr
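# Hedged counterpart sketch (not in the original module): read the array
# written by Array2Tempfile back from the temporary file. The node path
# matches Array2Tempfile; the Tempfile2Array name is hypothetical.
def Tempfile2Array(fstr):
    a = _pyhl.read_nodelist(fstr)
    return a.fetchNode("/dataset1/data").data()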
def testWriteUcharDataset(self):
    a = _pyhl.nodelist()
    c = numpy.arange(100)
    c = numpy.array(c.astype(numpy.uint8), numpy.uint8)
    c = numpy.reshape(c, (10, 10)).astype(numpy.uint8)
    self.addArrayValueNode(a, _pyhl.ATTRIBUTE_ID, "/uchardataset", -1, numpy.shape(c), c, "uchar", -1)
    a.write(self.TESTFILE)

    # verify
    a = _pyhl.read_nodelist(self.TESTFILE)
    b = a.fetchNode("/uchardataset")
    self.assertEqual("uchar", b.format())
    self.assertEqual(_pyhl.ATTRIBUTE_ID, b.type())
    self.assertTrue(numpy.all(c == b.data()))
def save(self, filename):
    """Writes the RAVE object to the given HDF5 file.

    Arguments:
      filename: the name of the file to write

    Returns: nothing
    """
    ID = ""
    a = _pyhl.nodelist()  # top level
    rave_IO.traverse_save(self.info, a, ID, self.data)
    if self._fcp is None:
        self.set_fcp()
    a.write(filename, self._fcp)
def testWriteDoubleDataset(self):
    a = _pyhl.nodelist()
    c = numpy.arange(100)
    c = numpy.array(c.astype(numpy.float64), numpy.float64)
    c = numpy.reshape(c, (10, 10)).astype(numpy.float64)
    self.addArrayValueNode(a, _pyhl.DATASET_ID, "/doubledataset", -1, numpy.shape(c), c, "double", -1)
    a.write(self.TESTFILE)

    # verify
    a = _pyhl.read_nodelist(self.TESTFILE)
    b = a.fetchNode("/doubledataset")
    self.assertEqual("double", b.format())
    self.assertEqual(_pyhl.DATASET_ID, b.type())
    self.assertTrue(numpy.all(c == b.data()))
def testWriteGroup(self):
    a = _pyhl.nodelist()
    self.addGroupNode(a, "/group1")
    self.addGroupNode(a, "/group1/group11")
    a.write(self.TESTFILE)

    # verify
    a = _pyhl.read_nodelist(self.TESTFILE)
    b = a.fetchNode("/group1")
    self.assertEqual("UNDEFINED", b.format())
    self.assertEqual(_pyhl.GROUP_ID, b.type())
    b = a.fetchNode("/group1/group11")
    self.assertEqual("UNDEFINED", b.format())
    self.assertEqual(_pyhl.GROUP_ID, b.type())
def writeFile():
    # Create an empty nodelist instance
    aList = _pyhl.nodelist()

    # Create a group called "/info"
    aNode = _pyhl.node(_pyhl.GROUP_ID, "/info")
    # Add the node to the nodelist.
    # Remember that the nodelist takes responsibility for it.
    aList.addNode(aNode)

    # Insert the attribute xscale in the group "/info"
    aNode = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/info/xscale")
    # Set the value to a double with value 10.0.
    # Note the -1's: they are used since the data is not compound.
    aNode.setScalarValue(-1, 10.0, "double", -1)
    aList.addNode(aNode)

    # Similar for yscale, xsize and ysize
    aNode = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/info/yscale")
    aNode.setScalarValue(-1, 20.0, "double", -1)
    aList.addNode(aNode)
    aNode = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/info/xsize")
    aNode.setScalarValue(-1, 10, "int", -1)
    aList.addNode(aNode)
    aNode = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/info/ysize")
    aNode.setScalarValue(-1, 10, "int", -1)
    aList.addNode(aNode)

    # Add a description
    aNode = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/info/description")
    aNode.setScalarValue(-1, "This is a simple example", "string", -1)
    aList.addNode(aNode)

    # Add an array of data
    myArray = arange(100)
    myArray = array(myArray.astype('i'), 'i')
    myArray = reshape(myArray, (10, 10))
    aNode = _pyhl.node(_pyhl.DATASET_ID, "/data")
    # Set the data as an array; the list [10, 10] indicates
    # that it is an array of 10x10 items.
    aNode.setArrayValue(-1, [10, 10], myArray, "int", -1)
    aList.addNode(aNode)

    # And now just write the file as "simple_test.hdf" with
    # compression level 9 (highest compression)
    aList.write("simple_test.hdf", 9)
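# Hedged companion sketch (not part of the original example): read back the
# file produced by writeFile above. The fetchNode/data pattern follows the
# tests in this module; the readFile name itself is hypothetical.
def readFile():
    aList = _pyhl.read_nodelist("simple_test.hdf")
    print(aList.fetchNode("/info/description").data())
    print(aList.fetchNode("/info/xscale").data(), aList.fetchNode("/info/yscale").data())
    return aList.fetchNode("/data").data()  # the 10x10 integer array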
def testWriteFloatArray(self):
    a = _pyhl.nodelist()
    self.addArrayValueNode(a, _pyhl.ATTRIBUTE_ID, "/floatarray", -1, [4], [1.1, 2.2, 3.3, 4.4], "float", -1)
    a.write(self.TESTFILE)

    # verify
    a = _pyhl.read_nodelist(self.TESTFILE)
    b = a.fetchNode("/floatarray")
    self.assertEqual("float", b.format())
    self.assertEqual(_pyhl.ATTRIBUTE_ID, b.type())
    c = b.data()
    self.assertEqual(4, len(c))
    self.assertAlmostEqual(1.1, c[0], 4)
    self.assertAlmostEqual(2.2, c[1], 4)
    self.assertAlmostEqual(3.3, c[2], 4)
    self.assertAlmostEqual(4.4, c[3], 4)
def testWriteLongLongArray(self):
    a = _pyhl.nodelist()
    try:
        self.addArrayValueNode(a, _pyhl.ATTRIBUTE_ID, "/llongarray", -1, [4], [1, 2, 3, 4], "llong", -1)
        self.fail("Expected TypeError")
    except TypeError:
        pass
    a.write(self.TESTFILE)

    # verify
    a = _pyhl.read_nodelist(self.TESTFILE)
    try:
        a.fetchNode("/llongarray")
        self.fail("Expected IOError")
    except IOError:
        pass
def testWriteLongLongDataset(self):
    a = _pyhl.nodelist()
    c = numpy.arange(100)
    c = numpy.array(c.astype(numpy.int64), numpy.int64)
    c = numpy.reshape(c, (10, 10)).astype(numpy.int64)
    self.addArrayValueNode(a, _pyhl.DATASET_ID, "/llongdataset", -1, numpy.shape(c), c, "llong", -1)
    a.write(self.TESTFILE)

    # verify
    a = _pyhl.read_nodelist(self.TESTFILE)
    b = a.fetchNode("/llongdataset")
    if _varioustests.sizeoflong() < _varioustests.sizeoflonglong():
        self.assertEqual("llong", b.format())
    else:
        self.assertEqual("long", b.format())
    self.assertEqual(_pyhl.DATASET_ID, b.type())
    self.assertTrue(numpy.all(c == b.data()))
def writeFile():
    # Create the rave info HDF5 type
    typedef = _rave_info_type.type()
    # Create the rave info HDF5 object
    obj = _rave_info_type.object()
    # Set the values
    obj.xsize = 10
    obj.ysize = 10
    obj.xscale = 150.0
    obj.yscale = 150.0

    aList = _pyhl.nodelist()

    # Create a datatype node and make the datatype named
    aNode = _pyhl.node(_pyhl.TYPE_ID, "/MyDatatype")
    aNode.commit(typedef.hid())
    aList.addNode(aNode)

    # Create an attribute containing the compound type.
    # Note that both itemSize and lhid are used, and that the
    # compound object is translated to a string.
    aNode = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/myCompoundAttribute")
    aNode.setScalarValue(typedef.size(), obj.tostring(), "compound", typedef.hid())
    aList.addNode(aNode)

    # Also create a dataset with the compound type,
    # this time using setArrayValue instead.
    obj.xsize = 1
    obj.ysize = 1
    aNode = _pyhl.node(_pyhl.DATASET_ID, "/myCompoundDataset")
    aNode.setArrayValue(typedef.size(), [1], obj.tostring(), "compound", typedef.hid())
    aList.addNode(aNode)

    # And finally write the HDF5 file
    aList.write("compound_test.hdf")
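# Hedged read-back sketch (not part of the original example): fetch the
# compound attribute written by writeFile above and unpack it via
# compound_data(), following the pattern used by
# testWriteNamedCompoundAttribute earlier in this section. The readFile
# name is hypothetical.
def readFile():
    aList = _pyhl.read_nodelist("compound_test.hdf")
    result = aList.fetchNode("/myCompoundAttribute").compound_data()
    return result['xsize'], result['ysize'], result['xscale'], result['yscale']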
def create_test_image():
    a = _pyhl.nodelist()

    # First create the palette
    b = _pyhl.node(_pyhl.DATASET_ID, "/PALETTE")
    c = createPalette()
    b.setArrayValue(-1, [256, 3], c, "uchar", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/PALETTE/CLASS")
    b.setScalarValue(-1, "PALETTE", "string", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/PALETTE/PAL_VERSION")
    b.setScalarValue(-1, "1.2", "string", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/PALETTE/PAL_COLORMODEL")
    b.setScalarValue(-1, "RGB", "string", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/PALETTE/PAL_TYPE")
    b.setScalarValue(-1, "STANDARD8", "string", -1)
    a.addNode(b)

    # Now create the image to display
    b = _pyhl.node(_pyhl.DATASET_ID, "/IMAGE1")
    c = createImage()
    b.setArrayValue(1, [256, 256], c, "uchar", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/IMAGE1/CLASS")
    b.setScalarValue(-1, "IMAGE", "string", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/IMAGE1/IMAGE_VERSION")
    b.setScalarValue(-1, "1.2", "string", -1)
    a.addNode(b)

    # Finally insert the reference to the palette
    b = _pyhl.node(_pyhl.REFERENCE_ID, "/IMAGE1/PALETTE")
    b.setScalarValue(-1, "/PALETTE", "string", -1)
    a.addNode(b)

    a.write("test_image.hdf")
def save(self, filename): """Save the current instance to nordrad hdf format. """ import _pyhl status = 1 msgctype = self.ctype node_list = _pyhl.nodelist() # What node = _pyhl.node(_pyhl.GROUP_ID, "/what") node_list.addNode(node) node = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/what/object") node.setScalarValue(-1, "IMAGE", "string", -1) node_list.addNode(node) node = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/what/sets") node.setScalarValue(-1, 1, "int", -1) node_list.addNode(node) node = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/what/version") node.setScalarValue(-1, "H5rad 1.2", "string", -1) node_list.addNode(node) node = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/what/date") yyyymmdd = self.datestr[0:8] hourminsec = self.datestr[8:12]+'00' node.setScalarValue(-1, yyyymmdd, "string", -1) node_list.addNode(node) node = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/what/time") node.setScalarValue(-1, hourminsec, "string", -1) node_list.addNode(node) # Where node = _pyhl.node(_pyhl.GROUP_ID, "/where") node_list.addNode(node) node = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/where/projdef") node.setScalarValue(-1, msgctype.area.proj4_string, "string", -1) node_list.addNode(node) node = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/where/xsize") node.setScalarValue(-1, msgctype.num_of_columns, "int", -1) node_list.addNode(node) node = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/where/ysize") node.setScalarValue(-1, msgctype.num_of_lines, "int", -1) node_list.addNode(node) node = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/where/xscale") node.setScalarValue(-1, msgctype.xscale, "float", -1) node_list.addNode(node) node = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/where/yscale") node.setScalarValue(-1, msgctype.yscale, "float", -1) node_list.addNode(node) node = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/where/LL_lon") node.setScalarValue(-1, msgctype.ll_lon, "float", -1) node_list.addNode(node) node = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/where/LL_lat") node.setScalarValue(-1, msgctype.ll_lat, "float", -1) node_list.addNode(node) node = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/where/UR_lon") node.setScalarValue(-1, msgctype.ur_lon, "float", -1) node_list.addNode(node) node = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/where/UR_lat") node.setScalarValue(-1, msgctype.ur_lat, "float", -1) node_list.addNode(node) # How node = _pyhl.node(_pyhl.GROUP_ID, "/how") node_list.addNode(node) node = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/how/area") node.setScalarValue(-1, msgctype.region_name, "string", -1) node_list.addNode(node) # image1 node = _pyhl.node(_pyhl.GROUP_ID, "/image1") node_list.addNode(node) node = _pyhl.node(_pyhl.DATASET_ID, "/image1/data") node.setArrayValue(1, [msgctype.num_of_columns, msgctype.num_of_lines], msgctype.cloudtype.astype('B'), "uchar", -1) node_list.addNode(node) node = _pyhl.node(_pyhl.GROUP_ID, "/image1/what") node_list.addNode(node) node = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/image1/what/product") #We should eventually try to use the msg-parameters "package", #"product_algorithm_version", and "product_name": node.setScalarValue(1, 'MSGCT', "string", -1) node_list.addNode(node) node = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/image1/what/prodpar") node.setScalarValue(1, 0.0, "float", -1) node_list.addNode(node) node = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/image1/what/quantity") node.setScalarValue(1, "ct", "string", -1) node_list.addNode(node) node = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/image1/what/startdate") node.setScalarValue(-1, yyyymmdd, "string", -1) node_list.addNode(node) node = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/image1/what/starttime") node.setScalarValue(-1, hourminsec, "string", -1) node_list.addNode(node) node = 
_pyhl.node(_pyhl.ATTRIBUTE_ID, "/image1/what/enddate") node.setScalarValue(-1, yyyymmdd, "string", -1) node_list.addNode(node) node = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/image1/what/endtime") node.setScalarValue(-1, hourminsec, "string", -1) node_list.addNode(node) node = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/image1/what/gain") node.setScalarValue(-1, 1.0, "float", -1) node_list.addNode(node) node = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/image1/what/offset") node.setScalarValue(-1, 0.0, "float", -1) node_list.addNode(node) node = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/image1/what/nodata") node.setScalarValue(-1, 0.0, "float", -1) node_list.addNode(node) # What we call missingdata in PPS: node = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/image1/what/undetect") node.setScalarValue(-1, 20.0, "float", -1) node_list.addNode(node) node_list.write(filename, COMPRESS_LVL) return status
def createVHLHDF_READ_DATAFILE(self):
    a = _pyhl.nodelist()
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/stringvalue")
    b.setScalarValue(-1, "My String", "string", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/charvalue")
    b.setScalarValue(-1, 123, "char", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/scharvalue")
    b.setScalarValue(-1, 45, "schar", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/ucharvalue")
    b.setScalarValue(-1, 99, "uchar", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/shortvalue")
    b.setScalarValue(-1, 4321, "short", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/ushortvalue")
    b.setScalarValue(-1, 9999, "ushort", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/intvalue")
    b.setScalarValue(-1, 989898, "int", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/uintvalue")
    b.setScalarValue(-1, 987654, "uint", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/longvalue")
    b.setScalarValue(-1, -123456789, "long", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/ulongvalue")
    b.setScalarValue(-1, 123456789, "ulong", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/llongvalue")
    b.setScalarValue(-1, -123456789012, "llong", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/ullongvalue")
    b.setScalarValue(-1, 123456789012, "ullong", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/floatvalue")
    b.setScalarValue(-1, 12.65, "float", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/doublevalue")
    b.setScalarValue(-1, 12999.8989, "double", -1)
    a.addNode(b)
    # Not supported yet
    #b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/ldoublevalue")
    #b.setScalarValue(-1, 65765.762525, "ldouble", -1)
    #a.addNode(b)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/hsizevalue")
    b.setScalarValue(-1, 65765, "hsize", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/herrvalue")
    b.setScalarValue(-1, 12, "herr", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/hboolvalue")
    b.setScalarValue(-1, 0, "herr", -1)
    a.addNode(b)

    b = _pyhl.node(_pyhl.GROUP_ID, "/group1")
    a.addNode(b)
    b = _pyhl.node(_pyhl.GROUP_ID, "/group1/group11")
    a.addNode(b)

    #
    # DATASETS FOR char, schar, uchar, short, ushort, int, uint, long, float, double
    #
    b = _pyhl.node(_pyhl.DATASET_ID, "/group1/chardset")
    c = self.createDataset([5, 5], numpy.character)
    b.setArrayValue(1, [5, 5], c, "char", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/group1/schardset")
    c = self.createDataset([5, 5], numpy.int8)
    b.setArrayValue(1, [5, 5], c, "schar", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/group1/uchardset")
    c = self.createDataset([5, 5], numpy.uint8)
    b.setArrayValue(1, [5, 5], c, "uchar", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/group1/shortdset")
    c = self.createDataset([5, 5], numpy.int16)
    b.setArrayValue(1, [5, 5], c, "short", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/group1/ushortdset")
    c = self.createDataset([5, 5], numpy.uint16)
    b.setArrayValue(1, [5, 5], c, "ushort", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/group1/intdset")
    c = self.createDataset([5, 5], numpy.int32)
    b.setArrayValue(1, [5, 5], c, "int", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/group1/uintdset")
    c = self.createDataset([5, 5], numpy.uint32)
    b.setArrayValue(1, [5, 5], c, "uint", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/group1/longdset")
    c = self.createDataset([5, 5], numpy.int64)
    b.setArrayValue(1, [5, 5], c, "long", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/group1/floatdset")
    c = self.createDataset([5, 5], numpy.float32)
    b.setArrayValue(1, [5, 5], c, "float", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/group1/doubledset")
    c = self.createDataset([5, 5], numpy.float64)
    b.setArrayValue(1, [5, 5], c, "double", -1)
    a.addNode(b)

    # ARRAYS OF TYPE string, double, float, int and long
    b = _pyhl.node(_pyhl.DATASET_ID, "/stringarray")
    c = ["ABC", "def", "EFG"]
    b.setArrayValue(1, [len(c)], c, "string", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/doublearray")
    c = [1.0, 2.1, 3.2]
    b.setArrayValue(1, [len(c)], c, "double", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/floatarray")
    c = [1.1, 2.2, 3.3]
    b.setArrayValue(1, [len(c)], c, "float", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/intarray")
    c = [1, 2, 3, 4]
    b.setArrayValue(1, [len(c)], c, "int", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/longarray")
    c = [2, 3, 4, 5, 6]
    b.setArrayValue(1, [len(c)], c, "long", -1)
    a.addNode(b)

    # Create a dataset with an attribute and a reference in it
    b = _pyhl.node(_pyhl.DATASET_ID, "/dataset1")
    c = [1, 2, 3, 4]
    b.setArrayValue(1, [len(c)], c, "int", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/dataset1/attribute1")
    b.setScalarValue(-1, 989898, "int", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.REFERENCE_ID, "/dataset1/doublearray")
    b.setScalarValue(-1, "/doublearray", "string", -1)
    a.addNode(b)

    # REFERENCES POINTING AT DIFFERENT PLACES
    b = _pyhl.node(_pyhl.GROUP_ID, "/references")
    a.addNode(b)
    b = _pyhl.node(_pyhl.REFERENCE_ID, "/references/doublearray")
    b.setScalarValue(-1, "/doublearray", "string", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.REFERENCE_ID, "/references/floatdset")
    b.setScalarValue(-1, "/group1/floatdset", "string", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.REFERENCE_ID, "/references/group1")
    b.setScalarValue(-1, "/group1", "string", -1)
    a.addNode(b)
    #b = _pyhl.node(_pyhl.REFERENCE_ID, "/references/tosomethingdefinedafterthis")
    #b.setScalarValue(-1, "/groupdefinedafterreferences", "string", -1)
    #a.addNode(b)
    #b = _pyhl.node(_pyhl.GROUP_ID, "/groupdefinedafterreferences")
    #a.addNode(b)
    b = _pyhl.node(_pyhl.REFERENCE_ID, "/rootreferencetolongarray")
    b.setScalarValue(-1, "/longarray", "string", -1)
    a.addNode(b)

    # Add a compound type as well
    rinfo_type = _rave_info_type.type()
    rinfo_obj = _rave_info_type.object()
    rinfo_obj.xsize = 10
    rinfo_obj.ysize = 10
    rinfo_obj.xscale = 150.0
    rinfo_obj.yscale = 150.0
    b = _pyhl.node(_pyhl.TYPE_ID, "/RaveDatatype")
    b.commit(rinfo_type.hid())
    a.addNode(b)
    b = _pyhl.node(_pyhl.GROUP_ID, "/compoundgroup")
    a.addNode(b)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/compoundgroup/attribute")
    b.setScalarValue(rinfo_type.size(), rinfo_obj.tostring(), "compound", rinfo_type.hid())
    a.addNode(b)
    rinfo_obj.xsize = 99
    rinfo_obj.ysize = 109
    rinfo_obj.area_extent = (10.0, 20.0, 30.0, 40.0)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/compoundgroup/attribute2")
    b.setScalarValue(rinfo_type.size(), rinfo_obj.tostring(), "compound", rinfo_type.hid())
    a.addNode(b)
    rinfo_obj.xscale = 170.0
    rinfo_obj.area_extent = (10.0, 20.0, 35.0, 40.0)
    b = _pyhl.node(_pyhl.DATASET_ID, "/compoundgroup/dataset")
    b.setArrayValue(rinfo_type.size(), [1], rinfo_obj.tostring(), "compound", rinfo_type.hid())
    a.addNode(b)

    # Create a compound array containing 2x2 items
    rinfo_obj2 = _rave_info_type.object()
    rinfo_obj2.xsize = 98
    rinfo_obj2.ysize = 97
    rinfo_obj2.xscale = 120.0
    rinfo_obj2.yscale = 130.0
    rinfo_obj2.area_extent = (33.0, 32.0, 31.0, 30.0)
    rinfo_obj3 = _rave_info_type.object()
    rinfo_obj3.xsize = 88
    rinfo_obj3.ysize = 87
    rinfo_obj3.xscale = 100.0
    rinfo_obj3.yscale = 110.0
    rinfo_obj3.area_extent = (43.0, 42.0, 41.0, 40.0)
    rinfo_obj4 = _rave_info_type.object()
    rinfo_obj4.xsize = 78
    rinfo_obj4.ysize = 77
    rinfo_obj4.xscale = 90.0
    rinfo_obj4.yscale = 91.0
    rinfo_obj4.area_extent = (53.0, 52.0, 51.0, 50.0)
    b = _pyhl.node(_pyhl.DATASET_ID, "/compoundgroup/dataset2")
    # Renamed from 'str' to avoid shadowing the builtin
    objstr = rinfo_obj.tostring() + rinfo_obj2.tostring() + rinfo_obj3.tostring() + rinfo_obj4.tostring()
    b.setArrayValue(rinfo_type.size(), [2, 2], objstr, "compound", rinfo_type.hid())
    a.addNode(b)

    # Create an unnamed compound value
    rinfo_unnamed_type = _rave_info_type.type()
    rinfo_unnamed_obj = _rave_info_type.object()
    rinfo_unnamed_obj.xsize = 1
    rinfo_unnamed_obj.ysize = 2
    rinfo_unnamed_obj.xscale = 10.0
    rinfo_unnamed_obj.yscale = 20.0
    rinfo_unnamed_obj.area_extent = (1.0, 2.0, 3.0, 4.0)
    b = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/compoundgroup/unnamed_type_attribute")
    b.setScalarValue(rinfo_unnamed_type.size(), rinfo_unnamed_obj.tostring(), "compound", rinfo_unnamed_type.hid())
    a.addNode(b)

    a.write(self.VHLHDF_READ_DATAFILE)
def writeCloudsatCwcAvhrrMatchObj(filename, ca_obj, compress_lvl):
    import _pyhl
    status = -1
    a = _pyhl.nodelist()
    shape = [ca_obj.cloudsatcwc.longitude.shape[0]]

    # Match-up - time difference:
    # ====
    b = _pyhl.node(_pyhl.DATASET_ID, "/diff_sec_1970")
    b.setArrayValue(1, shape, ca_obj.diff_sec_1970, "double", -1)
    a.addNode(b)

    # AVHRR
    # ====
    b = _pyhl.node(_pyhl.GROUP_ID, "/avhrr")
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/avhrr/longitude")
    shape = ca_obj.avhrr.longitude.shape
    b.setArrayValue(1, shape, ca_obj.avhrr.longitude, "float", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/avhrr/latitude")
    b.setArrayValue(1, shape, ca_obj.avhrr.latitude, "float", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/avhrr/sec_1970")
    b.setArrayValue(1, shape, ca_obj.avhrr.sec_1970, "double", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/avhrr/ctth_pressure")
    b.setArrayValue(1, shape, ca_obj.avhrr.ctth_pressure, "double", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/avhrr/ctth_temperature")
    b.setArrayValue(1, shape, ca_obj.avhrr.ctth_temperature, "double", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/avhrr/ctth_height")
    b.setArrayValue(1, shape, ca_obj.avhrr.ctth_height, "double", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/avhrr/cloudtype")
    b.setArrayValue(1, shape, ca_obj.avhrr.cloudtype, "uchar", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/avhrr/bt11micron")
    b.setArrayValue(1, shape, ca_obj.avhrr.bt11micron, "double", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/avhrr/bt12micron")
    b.setArrayValue(1, shape, ca_obj.avhrr.bt12micron, "double", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/avhrr/surftemp")
    b.setArrayValue(1, shape, ca_obj.avhrr.surftemp, "double", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/avhrr/satz")
    b.setArrayValue(1, shape, ca_obj.avhrr.satz, "double", -1)
    a.addNode(b)

    # CWC-RVOD
    # ====
    shapecwc = [ca_obj.cloudsatcwc.longitude.shape[0]]
    shape2dcwc = ca_obj.cloudsatcwc.Height.shape
    #shapeTAIcwc = [ca_obj.cloudsatcwc.TAI_start.shape[0]]
    shapeTAIcwc = [ca_obj.cloudsatcwc.Temp_min_mixph_K.shape[0]]
    shapecwclong = [ca_obj.cloudsatcwc.Profile_time.shape[0]]

    # Geolocation
    b = _pyhl.node(_pyhl.GROUP_ID, "/cloudsatcwc")
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/cloudsatcwc/longitude")
    b.setArrayValue(1, shapecwc, ca_obj.cloudsatcwc.longitude, "double", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/cloudsatcwc/latitude")
    b.setArrayValue(1, shapecwc, ca_obj.cloudsatcwc.latitude, "double", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/cloudsatcwc/elevation")
    b.setArrayValue(1, shapecwc, ca_obj.cloudsatcwc.elevation, "short", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/cloudsatcwc/Height")
    b.setArrayValue(1, shape2dcwc, ca_obj.cloudsatcwc.Height, "short", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/cloudsatcwc/avhrr_linnum")
    b.setArrayValue(1, shape, ca_obj.cloudsatcwc.avhrr_linnum, "int", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/cloudsatcwc/avhrr_pixnum")
    b.setArrayValue(1, shape, ca_obj.cloudsatcwc.avhrr_pixnum, "int", -1)
    a.addNode(b)

    # International Atomic Time (TAI) seconds from Jan 1, 1993:
    b = _pyhl.node(_pyhl.DATASET_ID, "/cloudsatcwc/Profile_time")
    b.setArrayValue(1, shapecwclong, ca_obj.cloudsatcwc.Profile_time, "double", -1)
    a.addNode(b)
    #b = _pyhl.node(_pyhl.DATASET_ID, "/cloudsatcwc/TAI_start")
    #b.setArrayValue(1, shapeTAIcwc, ca_obj.cloudsatcwc.TAI_start, "double", -1)
    #a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/cloudsatcwc/sec_1970")
    b.setArrayValue(1, shapecwc, ca_obj.cloudsatcwc.sec_1970, "double", -1)
    a.addNode(b)

    # The data
    b = _pyhl.node(_pyhl.DATASET_ID, "/cloudsatcwc/Data_quality")
    b.setArrayValue(1, shapecwclong, ca_obj.cloudsatcwc.Data_quality, "uchar", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/cloudsatcwc/Data_targetID")
    b.setArrayValue(1, shapecwclong, ca_obj.cloudsatcwc.Data_targetID, "uchar", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/cloudsatcwc/RVOD_liq_water_path")
    b.setArrayValue(1, shapecwc, ca_obj.cloudsatcwc.RVOD_liq_water_path, "short", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/cloudsatcwc/RVOD_liq_water_path_uncertainty")
    b.setArrayValue(1, shapecwc, ca_obj.cloudsatcwc.RVOD_liq_water_path_uncertainty, "uchar", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/cloudsatcwc/RVOD_ice_water_path")
    b.setArrayValue(1, shapecwc, ca_obj.cloudsatcwc.RVOD_ice_water_path, "short", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/cloudsatcwc/RVOD_ice_water_path_uncertainty")
    b.setArrayValue(1, shapecwc, ca_obj.cloudsatcwc.RVOD_ice_water_path_uncertainty, "uchar", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/cloudsatcwc/LO_RVOD_liquid_water_path")
    b.setArrayValue(1, shapecwc, ca_obj.cloudsatcwc.LO_RVOD_liquid_water_path, "short", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/cloudsatcwc/LO_RVOD_liquid_water_path_uncertainty")
    b.setArrayValue(1, shapecwc, ca_obj.cloudsatcwc.LO_RVOD_liquid_water_path_uncertainty, "uchar", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/cloudsatcwc/IO_RVOD_ice_water_path")
    b.setArrayValue(1, shapecwc, ca_obj.cloudsatcwc.IO_RVOD_ice_water_path, "short", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/cloudsatcwc/IO_RVOD_ice_water_path_uncertainty")
    b.setArrayValue(1, shapecwc, ca_obj.cloudsatcwc.IO_RVOD_ice_water_path_uncertainty, "uchar", -1)
    a.addNode(b)

    b = _pyhl.node(_pyhl.DATASET_ID, "/cloudsatcwc/RVOD_liq_water_content")
    b.setArrayValue(1, shape2dcwc, ca_obj.cloudsatcwc.RVOD_liq_water_content, "short", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/cloudsatcwc/RVOD_liq_water_content_uncertainty")
    b.setArrayValue(1, shape2dcwc, ca_obj.cloudsatcwc.RVOD_liq_water_content_uncertainty, "uchar", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/cloudsatcwc/RVOD_ice_water_content")
    b.setArrayValue(1, shape2dcwc, ca_obj.cloudsatcwc.RVOD_ice_water_content, "short", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/cloudsatcwc/RVOD_ice_water_content_uncertainty")
    b.setArrayValue(1, shape2dcwc, ca_obj.cloudsatcwc.RVOD_ice_water_content_uncertainty, "uchar", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/cloudsatcwc/LO_RVOD_liquid_water_content")
    b.setArrayValue(1, shape2dcwc, ca_obj.cloudsatcwc.LO_RVOD_liquid_water_content, "short", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/cloudsatcwc/LO_RVOD_liquid_water_content_uncertainty")
    b.setArrayValue(1, shape2dcwc, ca_obj.cloudsatcwc.LO_RVOD_liquid_water_content_uncertainty, "uchar", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/cloudsatcwc/IO_RVOD_ice_water_content")
    b.setArrayValue(1, shape2dcwc, ca_obj.cloudsatcwc.IO_RVOD_ice_water_content, "short", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/cloudsatcwc/IO_RVOD_ice_water_content_uncertainty")
    b.setArrayValue(1, shape2dcwc, ca_obj.cloudsatcwc.IO_RVOD_ice_water_content_uncertainty, "uchar", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/cloudsatcwc/Temp_min_mixph_K")
    b.setArrayValue(1, shapeTAIcwc, ca_obj.cloudsatcwc.Temp_min_mixph_K, "double", -1)
    a.addNode(b)
    b = _pyhl.node(_pyhl.DATASET_ID, "/cloudsatcwc/Temp_max_mixph_K")
    b.setArrayValue(1, shapeTAIcwc, ca_obj.cloudsatcwc.Temp_max_mixph_K, "double", -1)
    a.addNode(b)

    status = a.write(filename, compress_lvl)
    return status