Example #1
def write_data_to_HDF(array,
                      attribute=None,
                      name="",
                      folder="ResAmp/",
                      bands='l3m_data'):
    """Create a new HDF file in the given folder (ResAmp by default), named either by
    the `name` parameter or, if `name` is empty, by a name generated from the
    attributes; write the given array as a dataset and attach the given attributes.
    (A usage sketch follows the function.)"""
    if attribute is None:
        attribute = {}
    if name == "":
        name = name_generator(attribute, name)
        attribute["Product Name"] = name
        file = SD(folder + name, SDC.WRITE | SDC.CREATE)
    else:
        if os.path.isfile(folder + name):
            file = SD(folder + name, SDC.WRITE)
        else:
            file = SD(folder + name, SDC.WRITE | SDC.CREATE)
    file.create(bands, SDC.FLOAT64, array.shape)
    data = file.select(bands)
    for x in range(len(array)):
        data[x] = array[x]
    for x in attribute:
        at = attribute[x]
        try:
            if (isinstance(at, int) or at.dtype in (np.dtype('int16'),
                                                    np.dtype('int32'),
                                                    np.dtype('uint8'),
                                                    np.dtype('uint16'))):
                new_attr = file.attr(x)
                new_attr.set(SDC.INT32, int(at))
            elif (isinstance(at, float) or at.dtype in (np.dtype('float32'),
                                                        np.dtype('float64'))):
                new_attr = file.attr(x)
                new_attr.set(SDC.FLOAT64, at)
            else:
                at = str(at)
                setattr(file, x, at)
        except AttributeError:
            setattr(file, x, at)
        except TypeError:
            print(type(at))
            print(x)
            print(at)
    print("Guardando: ", name)
Example #2
def mosaic(*arg,**args):
	# This function takes the files passed in *arg, mosaics them together and produces a new file
	# mosaic(file1, [file2, file3, ...], endfile)   (see the usage sketch after this function)
	# If only file1 and endfile are given, the function only produces a copy without any other modification
	try:
		log=args["log"]
	except KeyError:
		log=logMod.Log("",nolog=True)
	if len(arg)>2:
		lfile = arg[:-1]				# This is the list of the NAME of the files to merge
		newfilename = arg[-1]			# This is the final file NAME
		# Should eventually check if files exist and can be read ***IMPROVE***
		lfHDF = []						# This is the list of the FILES to merge
		latt = []							# This is the list of the ATTRIBUTE "StructMetadata.0" of the files
		for fil in lfile:
			try:
				a=SD(fil,SDC.READ)
			except TypeError:
				a=SD(fil.encode('ascii','ignore'),SDC.READ)
			lfHDF.append(a)
			#print("hoho")
			latt.append(atribute(lfHDF[-1].attributes()["StructMetadata.0"],fil,dsi=[0,]))
			
		
		
		## Listing all the GRIDS that the new file will have
		gridlist = []						# This is the list of GRIDS to include in the final file

		for attOfF in latt:
			# Should check if any grid ***IMPROVE***
			gridlist += attOfF.listgridname()[1]		# listgridname return a list of all the grids name

		# remove double entry
		gridlist = list(set(gridlist))


		## Listing all the DATASETS that the new file will have
		dslist = []						# This is the list of DATASETS to include in the final file
		for attOfF in latt:
			# Should check if any grid ***IMPROVE***
			dslist = attOfF.orderedDS()
			
		# remove double entry
		# dslist = list(set(dslist))


		
		## Validation of common information
###############################################################################
# Some information has to be the same for each file or else no mosaic can be  #
# made; for example, two files with different projection types can't be       #
# merged together. ***IMPROVE*** Maybe in the future we could transform the   #
# files so that they have the same projection or some other thing.            #
###############################################################################

		# List of parameters to check to ensure that they are the same
		paramMustSim = ["Projection","ProjParams","SphereCode"]
		# Dictionary that will keep all the information about every file
		paramMustSimDict = {}

		for grid in gridlist:
			# Verification of a grid

			first = True			# Variable that will enable the construction of the dict
			paramdict = {}		# Dictionary that keep the actual value that have to be the same
			
			for attOfF in latt:
				# Verification of a file
				bigG = attOfF.getgridbyname(grid)		# Getting all the attributes in the grid of a file
				if bigG is not None:
					# If the grid exists in that file
					if first :
						# If this is the first time that a file is checked for that grid
						first = False
						for p in paramMustSim:
							# Checking every parameter that must be the same
							paramdict[p] = bigG.variable[p]
							
						# Validation of same Dtype for each datafield
						go = bigG.GROUP["DataField"].OBJECT
						for r in go:
							paramdict[go[r].variable["DataFieldName"]]=go[r].variable["DataType"]
					else:
						# If it's not the first time that a file is checked for that grid
						for p in paramMustSim:
							# Checking every parameter that must be the same
							if not paramdict[p]==bigG.variable[p]:
								# Stop merging and return error ***IMPROVE*** 
								# Maybe do only the things that can be done ***IMPROVE***
								log.log('e',Nom,"Error dataset are not compatible")
								
						# Validation of same Dtype for each datafield
						go=bigG.GROUP["DataField"].OBJECT
						for r in go:
							if not paramdict[go[r].variable["DataFieldName"]]==go[r].variable["DataType"]:
								# Stop merging and return error ***IMPROVE*** 
								# Maybe do only the things that can be done ***IMPROVE***
								log.log('e',Nom,"Error dataset are not compatible")
								
			# Keep all this info for later; it's going to be useful
			paramMustSimDict[grid]=paramdict
				
				



		## Determination of new information
###############################################################################
# Some properties have to be calculated in order to merge. This section is    #
# doing just that                                                             #
###############################################################################

		gridResolX={}			# Getting the RESOLUTION in the X direction for each grid
		gridResolY={}			# Getting the RESOLUTION in the Y direction for each grid
		extremeup={}			# Getting the UPPER coordinates for each grid
		extremedown={}			# Getting the LOWEST coordinates for each grid
		extremeleft={}			# Getting the LEFTMOST coordinates for each grid
		extremeright={}			# Getting the RIGHTMOST coordinates for each grid
		gridDimX={}				# Getting the DIMENSIONS of X direction for each grid
		gridDimY={}				# Getting the DIMENSIONS of Y direction for each grid
		NoValueDS={}			# Getting the fill value of each dataset
		dtypeDS={}				# Getting the DTYPE for each dataset
		dstogrid={}				# Knowing wich is the GRID for each dataset
		filGridULC={}			# Getting the upper left corner of each file for each grid

		for grid in gridlist:
			# For each grid
			filGridULC[grid]={}			# Adding a dictionary for each grid that will contain information on every file
			for attOfF in latt:
				### Determination of resolution of each grid
				# ***IMPROVE*** Should check if bigG is None
				bigG=attOfF.getgridbyname(grid)				# Getting all the attributes in the grid of a file
				
				# Get extreme grid point
				ulp=eval(bigG.variable["UpperLeftPointMtrs"])
				lrp=eval(bigG.variable["LowerRightMtrs"])
				
				# Get grid dimension
				dimx=int(bigG.variable["XDim"])
				dimy=int(bigG.variable["YDim"])
				
				# Calculate grid resolution
				gridResolX[grid]=(lrp[0]-ulp[0])/dimx
				gridResolY[grid]=(ulp[1]-lrp[1])/dimy
				
				### Determination of new extreme coordinates for each grid
				# up
				try:
					if extremeup[grid]< ulp[1]:
						extremeup[grid]=ulp[1]
				except KeyError:
					extremeup[grid]=ulp[1]
				# down
				try:
					if extremedown[grid]> lrp[1]:
						extremedown[grid]=lrp[1]
				except KeyError:
					extremedown[grid]=lrp[1]
				# left
				try:
					if extremeleft[grid]> ulp[0]:
						extremeleft[grid]=ulp[0]
				except KeyError:
					extremeleft[grid]=ulp[0]
				# right
				try:
					if extremeright[grid]< lrp[0]:
						extremeright[grid]=lrp[0]
				except KeyError:
					extremeright[grid]=lrp[0]
				### Determination of dataset to grid name
				if bigG is not None:
					go=bigG.GROUP["DataField"].OBJECT
					for r in go:
						dstogrid[ go[r].variable["DataFieldName"] ] = grid
				## Determination of ULC for each grid in each file
				filGridULC[grid][attOfF.name] = ulp
			## determination of new dimension for each grid
			gridDimY[grid] = int((extremeup[grid]-extremedown[grid])/gridResolY[grid])
			gridDimX[grid] = int((extremeright[grid]-extremeleft[grid])/gridResolX[grid])
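			# Worked example (hypothetical numbers, not from the source): two tiles of
			# 1200x1200 pixels at a 926.625 m resolution placed side by side give
			# extremeright-extremeleft = 2*1200*926.625 m, so gridDimX becomes 2400
			# while gridDimY stays 1200.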

		for ds in dslist:
			# For each dataset
			for sd in lfHDF:
				# For each hdf file
				
				# Try opening dataset
				try:
					sds = sd.select(eval(ds))
					# Get fill value
					NoValueDS[ds] = sds.getfillvalue()
					# Get dtype
					dtypeDS[ds] = sds.info()[3]
				except:
					log.log('e',Nom,"no dataset")



		## Start creating new file
###############################################################################
# This is the actual part where stuff happens                                 #
###############################################################################

		# This part is the same for every file in any circumstances
		########## absolute ########################
		
		# Open new file
		try:
			hdf = HDF(newfilename, HC.WRITE  | HC.CREATE  |HC.TRUNC)
			sd  =  SD(newfilename, SDC.WRITE | SDC.CREATE )
		except TypeError:
			hdf = HDF(newfilename.encode('ascii','ignore'), HC.WRITE  | HC.CREATE  |HC.TRUNC)
			sd  =  SD(newfilename.encode('ascii','ignore'), SDC.WRITE | SDC.CREATE )
		
		v=hdf.vgstart()
		vg={}
		vg1={}
		vg2={}
		
		## rewrite the gridlist
		gridlist = []
		for ds in dslist:
			if dstogrid[ds] not in gridlist:
				gridlist.append(dstogrid[ds])
				
		for grid in gridlist:
			vg[grid]=v.attach(-1,write=1)
			vg[grid]._class="GRID"
			vg[grid]._name=eval(grid)
			vg1[grid]=v.attach(-1,write=1)
			vg2[grid]=v.attach(-1,write=1)
			vg1[grid]._class="GRID Vgroup"
			vg1[grid]._name="Data Fields"
			vg2[grid]._class="GRID Vgroup"
			vg2[grid]._name="Grid Attributes"
			vg[grid].insert(vg1[grid])
			vg[grid].insert(vg2[grid])
		########## absolute ########################


		# Create dataset with the right size
		for ds in dslist:
			theGrid=dstogrid[ds]
			# Get grid name of data set
			sds = sd.create(eval(ds),dtypeDS[ds],(gridDimY[theGrid],gridDimX[theGrid]))
			
			# Set fill value
			fv=NoValueDS[ds]
			try:
				sds.setfillvalue(NoValueDS[ds])
			except OverflowError:
				log.log('e',Nom,"setfillvalue")
				sds.setfillvalue(0)
			## write real data
			for fil in range(len(latt)):
				try:
					# Determine where the data will be written
					ulc = filGridULC[theGrid][latt[fil].name]
					# Determine the position on the grid
					y = (extremeup[theGrid]-ulc[1])/(extremeup[theGrid]-extremedown[theGrid])
					x = (ulc[0]-extremeleft[theGrid])/(extremeright[theGrid]-extremeleft[theGrid])
					y = int(y*gridDimY[theGrid])
					x = int(x*gridDimX[theGrid])
					# read data from files
					osds = lfHDF[fil].select(eval(ds))
					sh = osds[:].shape
					sds[y:y+sh[0],x:x+sh[1]] = osds[:]
					osds.endaccess()
				except:
					pass
			# Close sds
			vg1[dstogrid[ds]].add(HC.DFTAG_NDG,sds.ref())
			sds.endaccess()


    
		for g in vg1:
			vg1[g].detach()
			vg2[g].detach()
			vg[g].detach()

		# Create attribute table for the file
		attstr="GROUP=GridStructure\n"
		gridcount=1
		for gr in gridlist:
			# Start group grid
			attstr+="\tGROUP=GRID_%i\n"%gridcount
			# Add grid name
			attstr+="\t\tGridName=%s\n"%gr
			# Add dimension
			attstr+="\t\tXDim=%i\n"%gridDimX[gr]
			attstr+="\t\tYDim=%i\n"%gridDimY[gr]
			# Add UpperLeftPointMtrs
			attstr+="\t\tUpperLeftPointMtrs=(%f,%f)\n"%(extremeleft[gr],extremeup[gr])
			# Add lrp
			attstr+="\t\tLowerRightMtrs=(%f,%f)\n"%(extremeright[gr],extremedown[gr])
			# Add projection
			attstr+="\t\tProjection=%s\n"%paramMustSimDict[gr]["Projection"]
			# ProjParams
			attstr+="\t\tProjParams=%s\n"%paramMustSimDict[gr]["ProjParams"]
			# SphereCode
			attstr+="\t\tSphereCode=%s\n"%paramMustSimDict[gr]["SphereCode"]

			
			attstr+="""\t\tGROUP=Dimension
		\t\tEND_GROUP=Dimension
		\t\tGROUP=DataField\n"""

			## Add data sets
			# create list of ds for current grid
			lsdsgr=[]
			dsnum=1
			for ds in dslist:
				if dstogrid[ds] == gr:
					# Add object
					attstr+="\t\t\tOBJECT=DataField_%i\n"%dsnum
					# datafield name
					attstr+="\t\t\t\tDataFieldName=%s\n"%ds
					# datatype
					attstr+="\t\t\t\tDataType=%s\n"%paramMustSimDict[gr][ds]
					# dim
					attstr+='\t\t\t\tDimList=("YDim","XDim")\n'
					attstr+="\t\t\tEND_OBJECT=DataField_%i\n"%dsnum
					dsnum+=1
			attstr+="\t\tEND_GROUP=DataField\n"
			attstr+="""\t\tGROUP=MergedFields
		\t\tEND_GROUP=MergedFields\n"""
			attstr+="\tEND_GROUP=GRID_%i\n"%gridcount
			gridcount+=1
		attstr+="""END_GROUP=GridStructure
		GROUP=PointStructure
		END_GROUP=PointStructure
		END"""
		# adding attribute to new file
		att=sd.attr('StructMetadata.0')
		att.set(SDC.CHAR,attstr)
		sd.end()
		hdf.close()
		
		# This should return something somehow
	elif len(arg)>1:
		afile = arg[0]				# This is the list of the NAME of the files to merge
		newfilename = arg[1]			# This is the final file NAME
		# Create a copy
		from shutil import copyfile
		copyfile(afile,newfilename)
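
# A minimal usage sketch (hypothetical file names; assumes the pyhdf imports and the
# atribute/logMod helpers used by this module are available). Replace the paths with
# real HDF-EOS grid files that share projection, projection parameters and sphere code:
if __name__ == "__main__":
    mosaic("tile_h12v04.hdf",
           "tile_h13v04.hdf",
           "mosaic_h12-13v04.hdf",
           log=logMod.Log("", nolog=True))
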
Example #3
def compress(*arg, **karg):
    # This function compresses the file in place with a given compression parameter
    # compress(file, compression_parameter)   (see the usage sketch after this function)
    oldfile = arg[0]
    newfilename = arg[0] + "t"

    try:
        log = karg["log"]
    except KeyError:
        log = logMod.Log("", nolog=True)

    keepall = True

    # Should eventually check if the file exists and can be read ***IMPROVE***
    try:
        HDFF = SD(oldfile, SDC.READ)  # This is the FILE to compress
    except TypeError:
        HDFF = SD(oldfile.encode('ascii', 'ignore'),
                  SDC.READ)  # This is the FILE to compress
    # This is the ATTRIBUTE "StructMetadata.0" of the file
    attOfF = atribute(HDFF.attributes()["StructMetadata.0"], oldfile, dsi=[0, ])

    ## Listing all the GRIDS that the new file will have

    gridlist = attOfF.listgridname()[1]  # listgridname returns a list of all the grid names

    ## Listing all the DATASETS that the new file will have

    # Should check if any grid ***IMPROVE***
    dslist = attOfF.orderedDS()

    ## Validation of common information
    ###############################################################################
    # Some information has to be the same for each file or else no mosaic can be  #
    # made; for example, two files with different projection types can't be       #
    # merged together. ***IMPROVE*** Maybe in the future we could transform the   #
    # files so that they have the same projection or some other thing.            #
    ###############################################################################

    # List of parameters to check to ensure that they are the same
    paramMustSim = ["Projection", "ProjParams", "SphereCode"]
    # Dictionary that will keep all the information about every file
    paramMustSimDict = {}

    for grid in gridlist:
        # Verification of a grid

        paramdict = {}  # Dictionary that keeps the actual values that have to be the same

        bigG = attOfF.getgridbyname(
            grid)  # Getting all the attributes in the grid of a file
        if bigG is not None:
            # If the grid exists in that file

            for p in paramMustSim:
                # Checking every parameters that must be the same
                paramdict[p] = bigG.variable[p]

            # Validation of same Dtype for each datafield
            go = bigG.GROUP["DataField"].OBJECT
            for r in go:
                paramdict[go[r].variable["DataFieldName"]] = go[r].variable[
                    "DataType"]

        # Keep all this info for later it's going to be useful
        paramMustSimDict[grid] = paramdict


    ## Determination of new information
    ###############################################################################
    # Some properties have to be calculated in order to merge. This section is    #
    # doing just that                                                             #
    ###############################################################################

    gridResolX = {}  # Getting the RESOLUTION in the X direction for each grid
    gridResolY = {}  # Getting the RESOLUTION in the Y direction for each grid
    extremeup = {}  # Getting the UPPER coordinates for each grid
    extremedown = {}  # Getting the LOWEST coordinates for each grid
    extremeleft = {}  # Getting the LEFTMOST coordinates for each grid
    extremeright = {}  # Getting the RIGHTMOST coordinates for each grid
    gridDimX = {}  # Getting the DIMENSIONS of X direction for each grid
    gridDimY = {}  # Getting the DIMENSIONS of Y direction for each grid
    NoValueDS = {}  # Getting the fill value of each dataset
    dtypeDS = {}  # Getting the DTYPE for each dataset
    dstogrid = {}  # Knowing which is the GRID for each dataset
    filGridULC = {}  # Getting the upper left corner of each file for each grid

    for grid in gridlist:
        # For each grid
        filGridULC[grid] = {
        }  # Adding a dictionary for each grid that will contain information on every file

        ### Determination of resolution of each grid
        # ***IMPROVE*** Should check if bigG is None

        bigG = attOfF.getgridbyname(
            grid)  # Getting all the attributes in the grid of a file

        # Get extreme grid point
        ulp = eval(bigG.variable["UpperLeftPointMtrs"])
        lrp = eval(bigG.variable["LowerRightMtrs"])

        # Get grid dimension (PIXEL)
        dimx = int(bigG.variable["XDim"])
        dimy = int(bigG.variable["YDim"])

        # Calculate grid resolution
        gridResolX[grid] = (lrp[0] - ulp[0]) / dimx
        gridResolY[grid] = (ulp[1] - lrp[1]) / dimy

        ### Determination of new extreme coordinates for each grid
        # up
        extremeup[grid] = ulp[1]
        # down
        extremedown[grid] = lrp[1]
        # left
        extremeleft[grid] = ulp[0]
        # right
        extremeright[grid] = lrp[0]
        #print("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",int(LOC - UPC),int(RMC - LMC),"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n")

        ### Determination of dataset to grid name
        if bigG is not None:
            go = bigG.GROUP["DataField"].OBJECT
            for r in go:
                dstogrid[go[r].variable["DataFieldName"]] = grid


        ## Determination of ULC for each grid in each file
        filGridULC[grid][attOfF.name] = ulp
        ## determination of new dimension for each grid (pixel)
        gridDimY[grid] = int(dimy)
        gridDimX[grid] = int(dimx)

    for ds in dslist:
        # For each dataset

        # Try opening dataset
        try:
            sds = HDFF.select(eval(ds))
            # Get fill value
            NoValueDS[ds] = sds.getfillvalue()
            # Get dtype
            dtypeDS[ds] = sds.info()[3]
        except:
            log.log('e', Nom, "no dataset")

    ## Start creating new file
    ###############################################################################
    # This is the actual part where stuff happens                                 #
    ###############################################################################

    # This part is the same for every file in any circumstances
    ########## absolute ########################

    # Open new file
    try:
        hdf = HDF(newfilename, HC.WRITE | HC.CREATE | HC.TRUNC)
        sd = SD(newfilename, SDC.WRITE | SDC.CREATE)
    except TypeError:
        hdf = HDF(newfilename.encode('ascii', 'ignore'),
                  HC.WRITE | HC.CREATE | HC.TRUNC)
        sd = SD(newfilename.encode('ascii', 'ignore'), SDC.WRITE | SDC.CREATE)
    v = hdf.vgstart()
    vg = {}
    vg1 = {}
    vg2 = {}

    ## rewrite the gridlist
    gridlist = []
    for ds in dslist:
        if dstogrid[ds] not in gridlist:
            gridlist.append(dstogrid[ds])

    for grid in gridlist:
        vg[grid] = v.attach(-1, write=1)
        vg[grid]._class = "GRID"
        vg[grid]._name = eval(grid)
        vg1[grid] = v.attach(-1, write=1)
        vg2[grid] = v.attach(-1, write=1)
        vg1[grid]._class = "GRID Vgroup"
        vg1[grid]._name = "Data Fields"
        vg2[grid]._class = "GRID Vgroup"
        vg2[grid]._name = "Grid Attributes"
        vg[grid].insert(vg1[grid])
        vg[grid].insert(vg2[grid])
    ########## absolute ########################

    # Create dataset with the right size
    for ds in dslist:
        theGrid = dstogrid[ds]
        # Get grid name of data set
        # Make sure that the set is asked to be there
        sds = sd.create(eval(ds), dtypeDS[ds],
                        (gridDimY[theGrid], gridDimX[theGrid]))

        # Set fill value
        fv = NoValueDS[ds]

        try:
            sds.setfillvalue(NoValueDS[ds])
        except OverflowError:
            log.log('e', Nom, "dataset fillvaluet")
            sds.setfillvalue(0)
        # Set compression
        try:
            sds.setcompress(*arg[1])  # args depend on compression type
        except HDF4Error:
            log.log('e', Nom, "Error compressing the dataset")
            sds.endaccess()
            sd.end()
            return
        ## write real data

        try:
            # read data from files
            osds = HDFF.select(eval(ds))
            # And write it in the new sds
            sds[:, :] = osds[:, :]

            osds.endaccess()
        except:
            pass
        # Close sds
        vg1[dstogrid[ds]].add(HC.DFTAG_NDG, sds.ref())
        sds.endaccess()

    for g in vg1:
        vg1[g].detach()
        vg2[g].detach()
        vg[g].detach()

    # Create attribute table for the file
    attstr = "GROUP=GridStructure\n"
    gridcount = 1
    for gr in gridlist:
        # Start group grid
        attstr += "\tGROUP=GRID_%i\n" % gridcount
        # Add grid name
        attstr += "\t\tGridName=%s\n" % gr
        # Add dimension
        attstr += "\t\tXDim=%i\n" % gridDimX[gr]
        attstr += "\t\tYDim=%i\n" % gridDimY[gr]
        # Add UpperLeftPointMtrs
        attstr += "\t\tUpperLeftPointMtrs=(%f,%f)\n" % (extremeleft[gr],
                                                        extremeup[gr])
        # Add lrp
        attstr += "\t\tLowerRightMtrs=(%f,%f)\n" % (extremeright[gr],
                                                    extremedown[gr])
        # Add projection
        attstr += "\t\tProjection=%s\n" % paramMustSimDict[gr]["Projection"]
        # ProjParams
        attstr += "\t\tProjParams=%s\n" % paramMustSimDict[gr]["ProjParams"]
        # SphereCode
        attstr += "\t\tSphereCode=%s\n" % paramMustSimDict[gr]["SphereCode"]

        attstr += """\t\tGROUP=Dimension
	\t\tEND_GROUP=Dimension
	\t\tGROUP=DataField\n"""

        ## Add data sets
        # create list of ds for current grid
        lsdsgr = []
        dsnum = 1
        for ds in dslist:
            if dstogrid[ds] == gr:
                # Add object
                attstr += "\t\t\tOBJECT=DataField_%i\n" % dsnum
                # datafield name
                attstr += "\t\t\t\tDataFieldName=%s\n" % ds
                # datatype
                attstr += "\t\t\t\tDataType=%s\n" % paramMustSimDict[gr][ds]
                # dim
                attstr += '\t\t\t\tDimList=("YDim","XDim")\n'
                attstr += "\t\t\tEND_OBJECT=DataField_%i\n" % dsnum
                dsnum += 1
        attstr += "\t\tEND_GROUP=DataField\n"
        attstr += """\t\tGROUP=MergedFields
	\t\tEND_GROUP=MergedFields\n"""
        attstr += "\tEND_GROUP=GRID_%i\n" % gridcount
        gridcount += 1
    attstr += """END_GROUP=GridStructure
	GROUP=PointStructure
	END_GROUP=PointStructure
	END"""
    # adding attribute to new file
    att = sd.attr('StructMetadata.0')
    att.set(SDC.CHAR, attstr)
    sd.end()
    hdf.close()

    # delete old file
    os.remove(oldfile)
    # rename new file to old file name
    os.rename(newfilename, oldfile)
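
# A minimal usage sketch (hypothetical file name; assumes the pyhdf imports and the
# atribute/logMod helpers used by this module are available). The second positional
# argument is unpacked into SDS.setcompress(), e.g. (SDC.COMP_DEFLATE, 6) requests
# gzip compression at deflate level 6:
if __name__ == "__main__":
    compress("mosaic_h12-13v04.hdf", (SDC.COMP_DEFLATE, 6))
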
Example #4
def write_NDACC_HDF4_O3(metafile, mandatory_variables, optional_variables={}, pathout="./"):
    """Write an NDACC-style HDF4 ozone file: read the global and per-variable
    attributes from the meta-data file, build the output file name from those
    attributes, then write the mandatory (and any present optional) variables with
    their attributes. (A usage sketch follows the function.)"""

    # We read the meta-data file

    fi = open(metafile,'r')

    li = fi.readline()
    gattname = []
    gattval = []
    attributes = {}
    while not (re.match("^! Variable Attributes", li)):
        li = li.strip()
        l = li.split("=")
        if(2 == len(l)):
            gattname.append(l[0].strip())
            val = l[1].strip()
            if '' == val:
                val = "  " # To prevent any problem with null strings
            gattval.append(val)
            attributes[l[0].strip()] = val
        li = fi.readline()

    varlist = {}
    varname = ""
    while li.strip() != "!END":
        li = li.strip()
        if li[0] == "!":
             li = fi.readline()
             continue
        l = li.split("=")
        if l[0].strip() == "VAR_NAME":
            varname = l[1].strip()
            varlist[varname] = {}
        else:
            val = l[1].strip()
            if '' == val:
                val = "  " # To prevent any problem with null strings
            varlist[varname][l[0].strip()] = val
        li = fi.readline()
    fi.close()
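
    # A hypothetical sketch (not from the source) of the meta-data file layout this
    # parser expects: "key = value" global-attribute lines, a "! Variable Attributes"
    # marker, then per-variable blocks introduced by VAR_NAME, ending with "!END":
    #
    #   DATA_DISCIPLINE = ATMOSPHERIC.PHYSICS;REMOTE.SENSING;GROUNDBASED
    #   DATA_SOURCE = LIDAR.O3_SOMESITE
    #   ! Variable Attributes
    #   VAR_NAME = ALTITUDE
    #   VAR_DATA_TYPE = REAL
    #   VAR_VALID_MIN = 0
    #   VAR_VALID_MAX = 120
    #   VAR_FILL_VALUE = -9999
    #   !END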
    # We create the output file name
    output = pathout 
    fileout = ""
    attributes["DATA_START_DATE"] = MJD2KToDateTimeString(mandatory_variables["datetimestart"][0])
    attributes["DATA_STOP_DATE"] = MJD2KToDateTimeString(mandatory_variables["datetimestop"][-1])
    attributes["FILE_GENERATION_DATE"]= datetime.datetime.utcnow().strftime("%Y%m%dT%H%M%SZ")

    tmp = attributes["DATA_DISCIPLINE"].split(";")
    fileout += tmp[-1].lower() +"_"
    fileout += attributes["DATA_SOURCE"].lower() + "_hires_"
    fileout += attributes["DATA_LOCATION"].lower().split(";")[0] + "_"
    fileout += attributes["DATA_START_DATE"].lower() +"_"
    fileout += attributes["DATA_STOP_DATE"].lower() + "_"
    fileout += attributes["DATA_FILE_VERSION"].lower()
    fileout += ".hdf"
    output += fileout
    attributes["FILE_NAME"] = fileout
    print( "Output file: ", output)
    
    # We create the main output file
    d = SD(output, SDC.WRITE|SDC.CREATE)

    for varname in varlist.keys():
        value = []
        # We add the mandatory data into the varlist values  


        if "LATITUDE.INSTRUMENT" == varname:
            value =  mandatory_variables["lat"]
        if "LONGITUDE.INSTRUMENT" == varname:
            value =  mandatory_variables["lon"]
        if "ALTITUDE.INSTRUMENT" == varname:
            value =  mandatory_variables["elev"]
        if "DATETIME" == varname:
            value =  mandatory_variables["datetime"]
        if "DATETIME.START" == varname:
            value =  mandatory_variables["datetimestart"]
        if "DATETIME.STOP" == varname:
            value =  mandatory_variables["datetimestop"]
        if "INTEGRATION.TIME" == varname:
            value =  mandatory_variables["integhrs"]
        if "ALTITUDE" == varname:
            value =  mandatory_variables["z"]
        if "O3.NUMBER.DENSITY_ABSORPTION.DIFFERENTIAL" == varname:
            value =  mandatory_variables["o3nd"]
        if "O3.NUMBER.DENSITY_ABSORPTION.DIFFERENTIAL_UNCERTAINTY.COMBINED.STANDARD" == varname:
            value =  mandatory_variables["uo3nd"]
        if "O3.NUMBER.DENSITY_ABSORPTION.DIFFERENTIAL_UNCERTAINTY.RANDOM.STANDARD" == varname:
            value =  mandatory_variables["uo3ndrand"]
        if "O3.NUMBER.DENSITY_ABSORPTION.DIFFERENTIAL_UNCERTAINTY.SYSTEMATIC.STANDARD" == varname:
            value =  mandatory_variables["uo3ndsyst"]
        if "O3.NUMBER.DENSITY_ABSORPTION.DIFFERENTIAL_RESOLUTION.ALTITUDE.IMPULSE.RESPONSE.FWHM" == varname:
            value =  mandatory_variables["dz"]
        if "O3.MIXING.RATIO.VOLUME_DERIVED" == varname:
            value =  mandatory_variables["o3mr"]
        if "O3.MIXING.RATIO.VOLUME_DERIVED_UNCERTAINTY.COMBINED.STANDARD" == varname:
            value =  mandatory_variables["uo3mr"]
        if "O3.MIXING.RATIO.VOLUME_DERIVED_UNCERTAINTY.RANDOM.STANDARD" == varname:
            value =  mandatory_variables["uo3mrrand"]
        if "O3.MIXING.RATIO.VOLUME_DERIVED_UNCERTAINTY.SYSTEMATIC.STANDARD" == varname:
            value =  mandatory_variables["uo3mrsyst"]
        if "PRESSURE_INDEPENDENT" == varname:
            value =  mandatory_variables["xp"]
        if "TEMPERATURE_INDEPENDENT" == varname:
            value =  mandatory_variables["xt"]
        if "PRESSURE_INDEPENDENT_SOURCE" == varname:
            value =  mandatory_variables["xpsce"]
        if "TEMPERATURE_INDEPENDENT_SOURCE" == varname:
            value =  mandatory_variables["xtsce"]

        if "O3.NUMBER.DENSITY_ABSORPTION.DIFFERENTIAL_RESOLUTION.ALTITUDE.DF.CUTOFF" == varname:
            try:
                value =  optional_variables["dzdf"]
            except KeyError:  # Skip to the next variable if not present
                continue

        if "O3.NUMBER.DENSITY_ABSORPTION.DIFFERENTIAL_RESOLUTION.ALTITUDE.ORIGINATOR" == varname:
            try:
                value =  optional_variables["dzorig"]
            except KeyError:  # Skip to the next variable if not present
                continue

        if "O3.NUMBER.DENSITY_ABSORPTION.DIFFERENTIAL_UNCERTAINTY.ORIGINATOR" == varname:
            try:
                value =  optional_variables["uo3ndorig"]
            except KeyError:  # Skip to the next variable if not present
                continue

        if "O3.MIXING.RATIO.VOLUME_DERIVED_UNCERTAINTY.ORIGINATOR" == varname:
            try:
                value =  optional_variables["uo3mrorig"]
            except KeyError:  # Skip to the next variable if not present
                continue

        if "O3.NUMBER.DENSITY_ABSORPTION.DIFFERENTIAL_RESOLUTION.ALTITUDE.DISTANCE.FROM.IMPULSE" == varname:
            try:
                value =  optional_variables["irdist"]
            except KeyError:  # Skip to the next variable if not present
                continue

        if "O3.NUMBER.DENSITY_ABSORPTION.DIFFERENTIAL_RESOLUTION.ALTITUDE.IMPULSE.RESPONSE" == varname:
            try:
                value =  optional_variables["ir"]
            except KeyError:  # Skip to the next variable if not present
                continue

        if "O3.NUMBER.DENSITY_ABSORPTION.DIFFERENTIAL_RESOLUTION.ALTITUDE.DF.NORMALIZED.FREQUENCY" == varname:
            try:
                value =  optional_variables["tffreq"]
            except KeyError:  # Skip to the next variable if not present
                continue

        if "O3.NUMBER.DENSITY_ABSORPTION.DIFFERENTIAL_RESOLUTION.ALTITUDE.DF.TRANSFER.FUNCTION" == varname:
            try:
                value =  optional_variables["tf"]
            except KeyError:  # Skip to the next variable if not present
                continue

        if "SOURCE.PRODUCT" == varname:
            try:
                value =  optional_variables["sceprod"]
            except KeyError:  # Skip to the next variable if not present
                print( " We skip SOURCE PRODUCT")
                continue
        print( varname)


        # We add the optional data into the varlist values


        # We check the bounds for the values

    
        # We save the data
        typ = varlist[varname]["VAR_DATA_TYPE"]
        print( "We write the variable", varname)
        value = BoundarySave(varlist[varname], value)

        maxi = varlist[varname]["VAR_VALID_MAX"]
        mini = varlist[varname]["VAR_VALID_MIN"]
        fill = varlist[varname]["VAR_FILL_VALUE"]

        if "STRING" == typ:
            valsize = 1
            if (varname == "TEMPERATURE_INDEPENDENT_SOURCE") or (varname == "PRESSURE_INDEPENDENT_SOURCE"):
               valsize = "1" #len(mandatory_variables["z"])
            try:
               v = d.create(varname, SDC.CHAR, value.shape)
               setattr(v, "VAR_SIZE", str(valsize))
            except:
               v = d.create(varname, SDC.CHAR, (1))
               setattr(v, "VAR_SIZE",str(valsize))
            g1 = v.attr("VAR_VALID_MAX")

            maxi = maxi.strip()
            mini = mini.strip()
            fill = fill.strip()
            if maxi == "":
                maxi = " "
            if mini == "":
                mini = " "
            if fill == "":
                fill = " "

            g1.set(SDC.CHAR, maxi)
            g2 = v.attr("VAR_VALID_MIN")
            g2.set(SDC.CHAR, mini)
            g3 = v.attr("VAR_FILL_VALUE")
            g3.set(SDC.CHAR, fill)


        valsize = ""
        try:
           valsize = str(value.shape[0]) #";".join([str(i) for i in value.shape])
           if value.shape[1] >1:
              valsize += ";" + str(value.shape[1])

           print( valsize)
           print( value.shape)
        except:
           try:
              valsize = str(len(value))
           except:
              valsize = "1"




        if "REAL" == typ:
            try:
               v = d.create(varname, SDC.FLOAT32, value.shape)
            except:
               v = d.create(varname, SDC.FLOAT32, (1))
            try:
                value = value.astype(float32)
            except:
                value = float32(value)
            setattr(v, "VAR_SIZE", valsize)
            g1 = v.attr("VAR_VALID_MAX")
            g1.set(SDC.FLOAT32, float(maxi))
            g2 = v.attr("VAR_VALID_MIN")
            g2.set(SDC.FLOAT32, float(mini))
            g3 = v.attr("VAR_FILL_VALUE")
            g3.set(SDC.FLOAT32, float(fill))


        if "DOUBLE" == typ:
            try:
               v = d.create(varname, SDC.FLOAT64, value.shape)
            except:
               v = d.create(varname, SDC.FLOAT64, (1))
            setattr(v, "VAR_SIZE", valsize)
            g1 = v.attr("VAR_VALID_MAX")
            g1.set(SDC.FLOAT64, float(maxi))
            g2 = v.attr("VAR_VALID_MIN")
            g2.set(SDC.FLOAT64, float(mini))
            g3 = v.attr("VAR_FILL_VALUE")
            g3.set(SDC.FLOAT64, float(fill))
        if "SHORT" == typ:
            try:
               v = d.create(varname, SDC.INT16, value.shape)
            except:
               v = d.create(varname, SDC.INT16, (1))
            setattr(v, "VAR_SIZE", valsize)
            g1 = v.attr("VAR_VALID_MAX")
            g1.set(SDC.INT16, int(maxi))
            g2 = v.attr("VAR_VALID_MIN")
            g2.set(SDC.INT16, int(mini))
            g3 = v.attr("VAR_FILL_VALUE")
            g3.set(SDC.INT16, int(fill))
        if "INTEGER" == typ:
            try:
               v = d.create(varname, SDC.INT32, value.shape)
            except:
               v = d.create(varname, SDC.INT32, (1))
            setattr(v, "VAR_SIZE", valsize)

            g1 = v.attr("VAR_VALID_MAX")
            g1.set(SDC.INT32, int(maxi))
            g2 = v.attr("VAR_VALID_MIN")
            g2.set(SDC.INT32, int(mini))
            g3 = v.attr("VAR_FILL_VALUE")
            g3.set(SDC.INT32, int(fill))        
        print( "We set the attributes")
            
        setattr(v, "VAR_NAME", varname)


        for k in varlist[varname]:
            if not(k == "VAR_VALID_MAX" or k == "VAR_VALID_MIN" or  k == "VAR_FILL_VALUE" or k == "VAR_SIZE"):
                tmp =  varlist[varname][k]
                tmp = tmp.strip()
                if tmp == "":
                    tmp = " "

                setattr(v, k, tmp)
        v[:] = value
    # We populate the output file from the meta-data file
    
    for i in attributes.keys():
        print( "ATTRIBUTES", i)
        tmp = d.attr(i)
        tmp2 = attributes[i]
        tmp2 = tmp2.strip()
        if tmp2 == "":
            tmp2 = " "
        tmp.set(SDC.CHAR, tmp2)
    
    d.end()
    return
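
# A minimal usage sketch (hypothetical meta-data file and array names; the keys of
# mandatory_variables below are exactly the ones the function looks up above, and the
# values would be numpy arrays or lists of matching shapes; optional_variables may
# supply keys such as "dzdf" or "sceprod"):
#
#     mandatory = {
#         "lat": lat, "lon": lon, "elev": elev,
#         "datetime": dtime, "datetimestart": dtstart, "datetimestop": dtstop,
#         "integhrs": integ, "z": z, "dz": dz,
#         "o3nd": o3nd, "uo3nd": uo3nd, "uo3ndrand": uo3ndrand, "uo3ndsyst": uo3ndsyst,
#         "o3mr": o3mr, "uo3mr": uo3mr, "uo3mrrand": uo3mrrand, "uo3mrsyst": uo3mrsyst,
#         "xp": xp, "xt": xt, "xpsce": xpsce, "xtsce": xtsce,
#     }
#     write_NDACC_HDF4_O3("o3_lidar_meta.txt", mandatory, pathout="./")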