Example no. 1
    def __init__(self,schema):
        '''
        Constructor

        @param schema: XML schema for the map file
        '''
        self.schema=schema #Etree uses the full schema namespace
        #self.linear_buffer=buffer
        self.tools=utils()
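The comment above refers to ElementTree's fully qualified (Clark notation) tag names: the namespace URI, wrapped in braces, is prepended to every local tag name. A minimal sketch of the schema value these constructors receive, taken from Example no. 3 (the variable name SCHEMA is illustrative):

    # Clark notation: "{namespace-URI}localName". The URI below appears in
    # Example no. 3; node.find() receives the concatenated string.
    SCHEMA = "{http://www.hdfgroup.org/HDF4/XML/schema/HDF4map/1.0.1}"
    qualified_tag = SCHEMA + "dataDimensionSizes"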
Example no. 2
    def __init__(self,schema,node):
        '''
        Constructor: this class holds the information to be used on the SDS extraction.
        
        @param node: lxml node element containing the SDS metadata
        @param schema: QName for the current map schema
        '''
        self.dimension_info=[]
        self.schema=schema        
        self.dimension_sizes = node.find(self.schema + "dataDimensionSizes").text.split(" ")
        try:
            # allocatedDimensionSizes overrides dataDimensionSizes when present
            self.dimension_sizes=node.find(self.schema + "allocatedDimensionSizes").text.split(" ")
        except AttributeError:
            # allocated dimension sizes not used; find() returned None
            pass

        self.data_type=node.find(self.schema + "datum")        
        self.mapped_type=self.data_type.attrib["dataType"]
        self.byte_order="bigEndian"
        try:
            self.byte_order=self.data_type.attrib["byteOrder"]
        except KeyError:
            # single-byte data or endianness attribute not present
            pass
        aux=utils()
        self.py_format,self.py_data_size,self.py_endianness=aux.getPythonFormat(self.mapped_type,self.byte_order)
        
        self.sds_data=node.find(self.schema + "arrayData")
        self.spatial_storage=self.sds_data.getchildren()[0].tag    
        self.byte_compression="None"
        self.dimension_order="1" 
        self.dimensions=[]   
        
        for items in node.getiterator(tag=self.schema+"dimensionRef"):
            self.dimension_info.append("Dimension index " + items.attrib["dimensionIndex"] + ": " + items.attrib["name"])
        
        self.dimension_info.insert(0, self.mapped_type)
                
        try:
            self.byte_compression=self.sds_data.attrib["compressionType"]
            self.dimension_order=self.sds_data.attrib["fastestVaryingDimensionIndex"]
        except KeyError:
            # optional compression / fastest-varying-dimension attributes not present
            pass
        self.chunk_dimension_sizes=None
        self.chunk_iter_node=None
        if self.spatial_storage==self.schema+"chunks":
            self.chunk_dimension_sizes=self.sds_data.getchildren()[0].find(self.schema + "chunkDimensionSizes").text.split(" ")
            self.chunk_iter_node=self.sds_data.getchildren()[0]
        
        if self.dimension_order=="0":
            for dim in self.dimension_sizes[::-1]:
                self.dimensions.append(int(dim))
        else:            
            for dim in self.dimension_sizes:
                self.dimensions.append(int(dim))
                
        self.grid_size=reduce(operator.mul, self.dimensions)  # number of items in the array
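Every lookup in the constructor above follows the same pattern: self.schema (the braced namespace URI) is prepended to the local tag name and passed to find(). A minimal, runnable sketch of that pattern using xml.etree.ElementTree; the SDS fragment below is invented for illustration and contains only the tags the constructor actually reads:

    import xml.etree.ElementTree as etree

    SCHEMA = "{http://www.hdfgroup.org/HDF4/XML/schema/HDF4map/1.0.1}"

    # Invented fragment: a 180 x 360 big-endian int16 SDS description.
    fragment = (
        '<Array xmlns="http://www.hdfgroup.org/HDF4/XML/schema/HDF4map/1.0.1">'
        '<dataDimensionSizes>180 360</dataDimensionSizes>'
        '<datum dataType="int16" byteOrder="bigEndian"/>'
        '</Array>'
    )
    node = etree.fromstring(fragment)

    # Same lookup pattern as the constructor: schema prefix + local tag name.
    sizes = node.find(SCHEMA + "dataDimensionSizes").text.split(" ")
    datum = node.find(SCHEMA + "datum")
    print(sizes)                       # ['180', '360']
    print(datum.attrib["dataType"])    # int16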
Example no. 3
    def __init__(self, map_file, operation, hdf_object, output_format, verbose):
        """
        Constructor:
        
        @param map_file: HDF4 XML map file generated by the HDF4 map writer
        @param operation: read / dump content of the HDF file
        @param hdf_object: target objects inside the HDF file (SDS, VData, RIS, ALL)
        @param output_format: binary / ASCII in CSV files / numpy table
        @param verbose: print progress and diagnostic messages
        """
        self.xml_file = map_file
        self.depth = 0

        # sds_dump_headers aggregates the dimension information and the data type, but it slows down the dumping
        # process; if the user only needs the data without headers, --no-headers skips them (applies to SDS arrays)
        self.dump_format = output_format
        self.tree = None
        self.verbose = verbose

        try:
            self.tree = etree.parse(self.xml_file).getroot()  # Parse the XML document and get the root tag
        except Exception:
            print "The map file could not be found or is not well-formed XML, please verify it:", self.xml_file
            return None

        self.schema = "{http://www.hdfgroup.org/HDF4/XML/schema/HDF4map/1.0.1}"

        try:
            file_node_info = self.tree.find(self.schema + "HDF4FileInformation").getchildren()
            hdf_file_name = file_node_info[0].text
            hdf_file_path = file_node_info[1].getchildren()[1].text
            self.hdf_file_name = hdf_file_path + "/" + hdf_file_name
        except Exception:
            print "The HDF file described in the map file was not found or has an incorrect path"
            return None

        self.hdf_object = hdf_object
        self.hdf_operation = operation
        self.map_path = os.path.relpath(self.xml_file).replace(self.xml_file, "")
        self.hdf_handler = HDFfile(self.schema, self.hdf_file_name)
        self.group_stack = []
        self.external_files = {}  # Will store the references to external files
        self.palletes = {}  # Will store the references to RIS palettes

        self.vdata = VData(self.schema)
        self.SDS = SDS()
        self.vdata_table = []  # This list will store the VData tables.
        self.dataValidator = dataValidator()

        self.utils = utils()
        self.return_code = 0
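A hedged usage sketch for this top-level reader: the class name HDFmapReader, the module path, and the file name are assumptions (only the __init__ is shown above), and the argument values mirror the options listed in the docstring:

    # Hypothetical import path and class name -- not confirmed by this snippet.
    from hdf4_map_reader import HDFmapReader

    reader = HDFmapReader(
        map_file="sample.hdf.xml",     # hypothetical HDF4 map file
        operation="dump",              # read / dump
        hdf_object="SDS",              # SDS, VData, RIS or ALL
        output_format="ascii",         # binary / ascii (CSV) / numpy table
        verbose=True)
    if reader.tree is not None and reader.return_code == 0:
        print("Target HDF file: " + reader.hdf_file_name)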
Example no. 4
    def __init__(self, schema, hdf_fileName):
        '''
        Constructor

        @param schema: XML schema for the map file
        @param hdf_fileName: location of the HDF file, as written in the map file.
        '''
        self.utils=utils()
        self.schema=schema #Etree uses the full schema namespace
        self.external_handler=[]
        self.file_handler=None
        self.ndimensional_SDS=None

        try: #open the HDF file, creating a file object
            file_path=path.normpath(hdf_fileName)
            self.file_handler=open(file_path,"rb")
        except IOError:
            print "HDF file not found: " + hdf_fileName, os.path.abspath(os.path.curdir)
            exit(1)
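Example no. 3 instantiates this class as HDFfile(self.schema, self.hdf_file_name), so the class name is known; the import path and the HDF file location below are hypothetical:

    from hdf4_map_reader import HDFfile   # hypothetical module path

    SCHEMA = "{http://www.hdfgroup.org/HDF4/XML/schema/HDF4map/1.0.1}"
    hdf_handler = HDFfile(SCHEMA, "sample.hdf")   # hypothetical HDF file name
    header = hdf_handler.file_handler.read(4)     # file_handler is opened in "rb" mode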
Example no. 5
    def __init__(self):
        '''
        Constructor
        '''
        self.utils=utils()