Example #1
def read_vdata(FILENAME):
    """Reads all vdata fields in an HDF-EOS file and places those fields 
    in a dictionary.
    
    PARAMETERS: 
    -----------
    FILENAME: Name of HDF-EOS file. 

    OUTPUTS: 
    --------
    vdata_dict: Dictionary of vdata fields.  
    """

    # Prepare to read the data.
    f = HDF(FILENAME, SDC.READ)  # Open the file
    vs = f.vstart()  # Start the vdata interface
    data_info_list = vs.vdatainfo()  # List the vdata fields
    vdata_fieldnames = [a[0] for a in data_info_list]  # Get the names

    # Load the data, place in dictionary
    vdata_dict = {}
    for field in vdata_fieldnames:
        vdata_dict[field] = np.squeeze(np.asarray(vs.attach(field)[:]))

    # terminate the vdata interface, close the file.
    vs.end()
    f.close()

    return vdata_dict
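
A minimal usage sketch for read_vdata (the imports and the file name 'sample.hdf' are assumptions, not part of the original snippet):

from pyhdf.HDF import HDF
from pyhdf.SD import SDC
import numpy as np

vdata_dict = read_vdata('sample.hdf')     # hypothetical HDF-EOS file
for name, values in vdata_dict.items():   # each value is a squeezed numpy array
    print(name, values.shape)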
Example #2
def get_solor_angle(myd1km_filename_str):
    try:  # open hdf
        myd021km_data = SD(myd1km_filename_str)
        # 1.SDS Reading
        zenith_sds = myd021km_data.select('SensorZenith')  # latitude/longitude
        zenith = zenith_sds.get()  # 2
        azimuth_sds = myd021km_data.select('SensorAzimuth')  # latitude/longitude
        azimuth = azimuth_sds.get()

        return zenith * 0.01, azimuth * 0.01

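        # NOTE: the code below is unreachable because of the early return above.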
        f = HDF(myd1km_filename_str, SDC.READ)
        vs = f.vstart()
        data_info_list = vs.vdatainfo()
        # Vset table
        # L1B swath metadata
        L1B_Swath_Matedata_VD = vs.attach('Level 1B Swath Metadata')
        # Read [Swath/scan type]
        sd_info = L1B_Swath_Matedata_VD.inquire()
        all_metadata = L1B_Swath_Matedata_VD.read(sd_info[0])
        L1B_Swath_Matedata_VD.detach()  # __del__ will handle this.
        vs.end()  # Use `with` so __del__ can do this.
        f.close()
        return all_metadata

    except HDF4Error:
        print("Unexpected error:(get_solor_angle)", sys.exc_info()[0])
        print('READ ERROR......:' + myd1km_filename_str)
        return None
def h4lookup(path, swath = "Earth UV-2 Swath"):
    '''
    Only look up datasets; vdata are ignored
    ("WavelengthReferenceColumn" is one such vdata).
    '''
    hdf = HDF(path)
    v = hdf.vgstart()
    s2_vg = v.attach(swath)
    geo_tag, geo_ref = s2_vg.tagrefs()[0]
    dat_tag, dat_ref = s2_vg.tagrefs()[1]
    s2_vg.detach()
    #--------------------------------------------
    # find geolocation & data fields
    #--------------------------------------------
    geo_vgs = v.attach(geo_ref); dat_vgs = v.attach(dat_ref)
    gvg_tagrefs = geo_vgs.tagrefs(); dvg_tagrefs = dat_vgs.tagrefs()
    geo_vgs.detach(); dat_vgs.detach()
    tagrefs_list = gvg_tagrefs + dvg_tagrefs
    refs_dict = {}
    #--------------------------------------------
    # create dict in which keys are names in hdf and values are refs
    #--------------------------------------------
    sd = SD(path)
    for tr in tagrefs_list:
        tag, ref = tr
        if tag == HC.DFTAG_NDG:
            sds = sd.select(sd.reftoindex(ref))
            refs_dict[sds.info()[0]] = ref
    sds.endaccess(); sd.end(); v.end(); hdf.close()
    return refs_dict
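
A hedged usage sketch for h4lookup (the file name and the dataset name 'Radiance' are assumptions):

from pyhdf.SD import SD

refs = h4lookup('omi_sample.he4')                   # hypothetical HDF4 file
sd = SD('omi_sample.he4')
sds = sd.select(sd.reftoindex(refs['Radiance']))    # pick a dataset by its ref
data = sds.get()
sds.endaccess(); sd.end()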
Example #4
def load_vd(fname, varnames):
    '''Return a list containing the vdata structures specified by the list
    varnames, contained in the HDF4 file with filename fname.'''
    f = HDF(fname)
    data_list = []
    vs = f.vstart()
    for name in varnames:
        vd = vs.attach(name)
        data_list.append(vd[:])
        vd.detach()
    vs.end()
    f.close()
    return data_list
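
For example (file and vdata names are hypothetical, and numpy is assumed to be imported as np):

lat, lon = load_vd('granule.hdf', ['Latitude', 'Longitude'])
lat = np.squeeze(np.asarray(lat))   # vd[:] yields a list of records; convert as needed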
def read_data_from_hdf(inputfile):
    wavelengths = []
    depth = []
    downwelling_downcast = []
    downwelling_upcast = []
    upwelling_downcast = []
    upwelling_upcast = []

    # open the hdf file read-only
    # Initialize the SD, V and VS interfaces on the file.
    hdf = HDF(inputfile)
    vs = hdf.vstart()
    v  = hdf.vgstart()

    # Attach and read the contents of the Profiler vgroup

    vg = v.attach(v.find('Profiler'))

    for tag, ref in vg.tagrefs():
        assert(tag == HC.DFTAG_VH)
        vd = vs.attach(ref)
        nrecs, intmode, fields, size, name = vd.inquire()

        if name == "ED_hyperspectral_downcast":
            x = vd.read(nrecs)
            downwelling_downcast = np.asarray([i[3:] for i in x])
            wavelengths = np.asarray([float(x) for x in fields[3:]])
            depth = np.asarray([i[2] for i in x])
        elif name == "ED_hyperspectral_upcast":
            downwelling_upcast = np.asarray([i[3:] for i in vd.read(nrecs)])
        elif name == "LU_hyperspectral_downcast":
            upwelling_downcast = np.asarray([i[3:] for i in vd.read(nrecs)])
        elif name == "LU_hyperspectral_upcast":
            upwelling_upcast = np.asarray([i[3:] for i in vd.read(nrecs)])

        vd.detach()

    # Close vgroup
    vg.detach()

    #clean up
    v.end()
    vs.end()
    hdf.close()

    return wavelengths, depth, downwelling_downcast, downwelling_upcast, \
            upwelling_downcast, upwelling_upcast
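
A usage sketch (the file name is hypothetical and the file is assumed to contain the Profiler vgroup expected above):

wavelengths, depth, ed_down, ed_up, lu_down, lu_up = read_data_from_hdf('profiler_cast.hdf')
print(wavelengths.shape, depth.shape, ed_down.shape)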
Example #6
def HDFread1D(filename, variable):
    """
    Read an HDF file through the VS interface in simple mode
    """
    # Read the file
    f = HDF(filename)
    # Initialize v mode
    vs = f.vstart()
    # extract data
    var = vs.attach(variable)
    Var = np.array(var[:]).ravel()
    # Close the file
    var.detach()
    vs.end()
    f.close()

    return Var
def HDFread(filename, variable, Class=None):
    """
    Extract non-scientific (vdata) data from an HDF file through the V interface
    """
    hdf = HDF(filename, HC.READ)

    # Initialize the SD, V and VS interfaces on the file.
    sd = SD(filename)
    vs = hdf.vstart()
    v = hdf.vgstart()

    # Find the reference id of the Geolocation Fields
    if Class is None:
        ref = v.findclass('SWATH Vgroup')
    else:
        ref = v.findclass(Class)

    # Open all data of the class
    vg = v.attach(ref)
    # All fields in the class
    members = vg.tagrefs()

    nrecs = []
    names = []
    for tag, ref in members:
        # Vdata tag
        vd = vs.attach(ref)
        # nrecs, intmode, fields, size, name = vd.inquire()
        nrecs.append(vd.inquire()[0])  # number of records of the Vdata
        names.append(vd.inquire()[-1])  # name of the Vdata
        vd.detach()

    idx = names.index(variable)
    var = vs.attach(members[idx][1])
    V = var.read(nrecs[idx])
    var.detach()
    # Terminate V, VS and SD interfaces.
    v.end()
    vs.end()
    sd.end()
    # Close HDF file.
    hdf.close()

    return np.array(V).ravel()
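
A usage sketch for HDFread (the file name is hypothetical; with Class omitted, the default 'SWATH Vgroup' class is searched):

lat = HDFread('granule.hdf', 'Latitude')
print(lat.shape)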
Example #8
 def get_train_data(self,
                    index_tuple,
                    band_select=[
                        0, 1, 2, 3, 4, 6, 7, 18, 19, 20, 21, 22, 23, 24, 25,
                        26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37
                    ]):
     '''
     [Verification & night elimination]
     :param index_tuple:
     :return:
     '''
     # [band_num, rowsize]
     raw_data = self.get_by_index(index_tuple, verify=False)
     # Verify
     raw_data = raw_data[band_select, ...]
     valid_flag = np.ones([raw_data.shape[1]], dtype=bool)
     for idx in range(raw_data.shape[0]):
         valid_flag = valid_flag & (raw_data[idx, ...] > 32767)
     # Daytime mode
     try:  # open hdf
         f = HDF(self.filename, SDC.READ)
         vs = f.vstart()
         data_info_list = vs.vdatainfo()
         # Vset table
         # L1B swath metadata
         L1B_Swath_Matedata_VD = vs.attach('Level 1B Swath Metadata')
         # Read [Swath/scan type]
         svath_matedata = L1B_Swath_Matedata_VD[:]
         for idx in range(valid_flag.shape[0]):
             if svath_matedata[int(
                 (index_tuple[0][idx]) / 10)][2] == 'D   ':
                 valid_flag[idx] = True
             else:
                 valid_flag[idx] = False
         L1B_Swath_Matedata_VD.detach()  # __del__ will handle this.
         vs.end()  # Use `with` so __del__ can do this.
         f.close()
     except ValueError:
         print("Unexpected error:", sys.exc_info()[0])
         print('READ ERROR......(%s):' % self.filename)
         return 0, 0, False
     raw_data = self.get_by_index(index_tuple, verify=False, scaled=True)
     raw_data = raw_data[band_select, ...]
     return raw_data, valid_flag
Example #9
def get_swath_metadata(myd1km_filename_str):
    try:  # open hdf
        f = HDF(myd1km_filename_str, SDC.READ)
        vs = f.vstart()
        data_info_list = vs.vdatainfo()
        # Vset table
        # L1B swath metadata
        L1B_Swath_Matedata_VD = vs.attach('Level 1B Swath Metadata')
        # Read [Swath/scan type]
        sd_info = L1B_Swath_Matedata_VD.inquire()
        all_metadata = L1B_Swath_Matedata_VD.read(sd_info[0])
        L1B_Swath_Matedata_VD.detach()  # __del__ will handle this.
        vs.end()  # Use `with` so __del__ can do this.
        f.close()
        return all_metadata

    except HDF4Error:
        print("Unexpected error:", sys.exc_info()[0])
        print('READ ERROR......:' + myd1km_filename_str)
        return None
Example #10
    def look(self, path, mem_list, lp_list):
        data = {}
        for name in mem_list:  # subdata sets type data
            tag = lp_list[name][0]
            ref = lp_list[name][1]
            if tag == HC.DFTAG_NDG:
                sd = SD(path)
                sds = sd.select(sd.reftoindex(ref))
                data[name] = np.float64(sds.get())
                sds.endaccess()
                sd.end()
            elif tag == HC.DFTAG_VH:  #vd type data
                hdf = HDF(path)
                vs = hdf.vstart()
                vd = vs.attach(ref)
                nrecs, intmode, fields, size, name = vd.inquire()
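                # NOTE: this rebinds the loop variable `name` to the vdata's own name.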
                data[name] = np.full(nrecs, np.float64(vd.read()[0]))
                vs.end()
                hdf.close()

        return data
def read_vd_hdf2(file_in,var_in):
    '''Read data stored as a VD variable in the input HDF-EOS file.
    Inputs are the file path and the required variable name.
    Outputs are the data (numpy array) and the number of records.'''

    #--information from input file--
    hdf  = HDF(file_in)      # the HDF file
    vs   = hdf.vstart()      # initialize VS interface on HDF file
    ### vdinfo = vs.vdatainfo() # return info about all vdatas
    vd   = vs.attach(var_in) # open a vdata given its name
    dimsz = vd.inquire()[0]  # return 5 elements: 
                             #   1. # of records
                             #   2. interlace mode
                             #   3. list of vdata field names
                             #   4. size in bytes of the vdata record
                             #   5. name of the vdata
    var_data = np.array(vd.read(dimsz)) #read a number of records
    vd.detach()              # close the vdata
    vs.end()                 # terminate the vdata interface
    hdf.close()              # close the HDF file
    return var_data,dimsz
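
For example (file and vdata names are hypothetical):

values, nrecs = read_vd_hdf2('granule.hdf', 'Profile_time')
print(nrecs, values.shape)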
Example #12
def get_myd1km_sci_time(myd1km_filename_str):
    try:  # open hdf
        f = HDF(myd1km_filename_str, SDC.READ)
        vs = f.vstart()
        data_info_list = vs.vdatainfo()
        # Vset table
        # L1B swath metadata
        L1B_Swath_Matedata_VD = vs.attach('Level 1B Swath Metadata')
        # Read [Swath/scan type]
        begin = L1B_Swath_Matedata_VD[0]
        begin = begin[4]
        end = L1B_Swath_Matedata_VD[-1]
        end = end[4]
        L1B_Swath_Matedata_VD.detach()  # __del__ will handle this.
        vs.end()  # Use `with` so __del__ can do this.
        f.close()
        return begin, end, True

    except HDF4Error:
        print("Unexpected error:", sys.exc_info()[0])
        print('READ ERROR......:' + myd1km_filename_str)
        return 0, 0, False
from pyhdf.HDF import *
from pyhdf.VS import *

f = HDF('inventory.hdf',         # Open file 'inventory.hdf' in write mode
            HC.WRITE|HC.CREATE)  # creating it if it does not exist
vs = f.vstart()                  # init vdata interface
vd = vs.attach('INVENTORY', 1)   # attach vdata 'INVENTORY' in write mode

# Update the `status' vdata attribute. The attribute length must not
# change. We call the attribute info() method, which returns a list where
# number of values (eg string length) is stored at index 2.
# We then assign a left justified string of exactly that length.
attr_len = vd.attr('status').info()[2]
vd.status = '%-*s' % (attr_len, 'phase 3 done')

# Update record at index 1 (second record)
vd[1]  = ('Z4367', 'surprise', 10, 3.1, 44.5)
# Update record at index 4, and those after
vd[4:] = (
          ('QR231', 'toy', 12, 2.5, 45),
          ('R3389', 'robot', 3, 45, 2000),
          ('R3390', 'robot2', 8, 55, 2050)
         )
vd.detach()               # "close" the vdata
vs.end()                  # terminate the vdata interface
f.close()                 # close the HDF file
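
A minimal read-back sketch for the vdata updated above (not part of the original example; it only assumes 'inventory.hdf' exists after the write):

f = HDF('inventory.hdf')            # open read-only (the default mode)
vs = f.vstart()
vd = vs.attach('INVENTORY')
print(vd.attr('status').get())      # the attribute updated above
for rec in vd[:]:                   # iterate over all records
    print(rec)
vd.detach()
vs.end()
f.close()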
Example #14
    def get_train_data_map(self,
                           index_tuple,
                           kernel_radius=5,
                           band_select=[
                               0, 1, 2, 3, 4, 6, 7, 18, 19, 20, 21, 22, 23, 24,
                               25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
                               37
                           ]):
        '''
        [Verification & night elimination]
        :param index_tuple:
        :return:
        '''
        # [band_num, rowsize]
        selected_band_num = len(band_select)
        kernel_size = 2 * kernel_radius + 1
        # Verify
        valid_flag = np.ones([index_tuple[0].shape[0]], dtype=bool)
        # Daytime mode
        try:  # open hdf
            f = HDF(self.filename, SDC.READ)
            vs = f.vstart()
            data_info_list = vs.vdatainfo()
            # Vset table
            # L1B swath metadata
            L1B_Swath_Matedata_VD = vs.attach('Level 1B Swath Metadata')
            # Read [Swath/scan type]
            svath_matedata = L1B_Swath_Matedata_VD[:]
            for idx in range(valid_flag.shape[0]):
                if svath_matedata[int(
                    (index_tuple[0][idx]) / 10)][2] == 'D   ':
                    valid_flag[idx] = True
                else:
                    valid_flag[idx] = False
            L1B_Swath_Matedata_VD.detach()  # __del__ will handle this.
            vs.end()  # Use `with` so __del__ can do this.
            f.close()
        except ValueError:
            print("Unexpected error:", sys.exc_info()[0])
            print('READ ERROR......(%s):' % self.filename)
            return 0, 0, False
        if np.sum(valid_flag) == 0:
            return None, valid_flag

        raw_data = np.empty([
            index_tuple[0].shape[0], selected_band_num, kernel_size,
            kernel_size
        ])
        # Create a Mat of kernel
        kernel_i = np.empty([kernel_size, kernel_size], dtype=np.int16)
        for i in range(kernel_size):
            for j in range(kernel_size):
                kernel_i[i, j] = i
        kernel_i -= kernel_radius
        # Transpose before flattening so kernel_j holds the column offsets;
        # transposing the already-flattened 1-D array would be a no-op.
        kernel_j = np.transpose(kernel_i)
        kernel_i = np.reshape(kernel_i, kernel_i.size)
        kernel_j = np.reshape(kernel_j, kernel_j.size)

        for idx in range(index_tuple[0].shape[0]):
            if not valid_flag[idx]:
                continue
            temp_kernel_i = kernel_i + index_tuple[0][idx]
            if np.sum((temp_kernel_i < 0) | (temp_kernel_i >= self.width)) > 0:
                valid_flag[idx] = False
                continue
            temp_kernel_j = kernel_j + index_tuple[1][idx]
            if np.sum((temp_kernel_j < 0)
                      | (temp_kernel_j >= self.height)) > 0:
                valid_flag[idx] = False
                continue
            temp = (self.all_band[..., temp_kernel_i,
                                  temp_kernel_j])[band_select, ...]
            if np.sum(temp[..., int(kernel_size * kernel_size * 0.5)] > 32767):
                valid_flag[idx] = False
            else:
                indeces = temp > 32767
                temp[indeces] = 0
                raw_data[idx, ...] = temp.reshape(selected_band_num,
                                                  kernel_size, kernel_size)
        for idx in range(selected_band_num):
            raw_data[:, idx, :, :] = (
                raw_data[:, idx, :, :] -
                self.offset[band_select[idx]]) * self.scale[band_select[idx]]
        return raw_data, valid_flag
Example #15
# Attach the vdata and the dataset.
vd = vs.attach('INVENTORY')
sds = sd.select('ARR_3x3')

# Create vgroup named 'TOTAL'.
vg = v.create('TOTAL')

# Add vdata to the vgroup
vg.insert(vd)
# We could also have written this:
# vgroup.add(vd._tag, vd._refnum)
# or this:
# vgroup.add(HC.DFTAG_VH, vd._refnum)

# Add dataset to the vgroup
vg.add(HC.DFTAG_NDG, sds.ref())

# Close vgroup, vdata and dataset.
vg.detach()  # vgroup
vd.detach()  # vdata
sds.endaccess()  # dataset

# Terminate V, VS and SD interfaces.
v.end()  # V interface
vs.end()  # VS interface
sd.end()  # SD interface

# Close HDF file.
hdf.close()
Example #16
          print("unhandled tag,ref", tag, ref)
    
    # Close vgroup
    vg.detach()

# Open HDF file in readonly mode.
filename = sys.argv[1]
hdf = HDF(filename)

# Initialize the SD, V and VS interfaces on the file.
sd = SD(filename)
vs = hdf.vstart()
v  = hdf.vgstart()

# Scan all vgroups in the file.
ref = -1
while 1:
    try:
        ref = v.getid(ref)
    except HDF4Error as msg:    # no more vgroup
        break
    describevg(ref)

# Terminate V, VS and SD interfaces.
v.end()
vs.end()
sd.end()

# Close HDF file.
hdf.close()
Example #17
def mosaic(*arg,**args):
	# This function will take the files transferred in *arg and will mosaic them together and produce a new file
	# mosaic(file1,[file2,file3,...],endfile)
	# If only file1 and endfile are given, the function will only produce a copy without any other modification
	try:
		log=args["log"]
	except KeyError:
		log=logMod.Log("",nolog=True)
	if len(arg)>2:
		lfile = arg[:-1]				# This is the list of the NAME of the files to merge
		newfilename = arg[-1]			# This is the final file NAME
		# Should eventually check if files exists and can be read ***IMPROVE***
		lfHDF = []						# This is the list of the FILES to merge
		latt = []							# This is the list of the ATTRIBUTE "StructMetadata.0" of the files
		for fil in lfile:
			try:
				a=SD(fil,SDC.READ)
			except TypeError:
				a=SD(fil.encode('ascii','ignore'),SDC.READ)
			lfHDF.append(a)
			#print("hoho")
			latt.append(atribute(lfHDF[-1].attributes()["StructMetadata.0"],fil,dsi=[0,]))
			
		
		
		## Listing all the GRIDS that the new file will have
		gridlist = []						# This is the list of GRIDS to include in the final file

		for attOfF in latt:
			# Should check if any grid ***IMPROVE***
			gridlist += attOfF.listgridname()[1]		# listgridname return a list of all the grids name

		# remove double entry
		gridlist = list(set(gridlist))


		## Listing all the DATASETS that the new file will have
		dslist = []						# This is the list of DATASETS to include in the final file
		for attOfF in latt:
			# Should check if any grid ***IMPROVE***
			dslist = attOfF.orderedDS()
			
		# remove double entry
		# dslist = list(set(dslist))


		
		## Validation of common information
###############################################################################
# Some information has to be the same for each file or else no mosaic can     #
# be made for example two files with not the same projection type can't be    #
# merged together. ***IMPROVE*** Maybe in the future we could transform file  #
# so that they have the same projection or some other thing.                  #
###############################################################################

		# List of parameters to check to ensure that they are the same
		paramMustSim = ["Projection","ProjParams","SphereCode"]
		# Dictionary that will keep all the information about every file
		paramMustSimDict = {}

		for grid in gridlist:
			# Verification of a grid

			first = True			# Variable that will enable the construction of the dict
			paramdict = {}		# Dictionary that keep the actual value that have to be the same
			
			for attOfF in latt:
				# Verification of a file
				bigG = attOfF.getgridbyname(grid)		# Getting all the attributes in the grid of a file
				if bigG is not None:
					# If the grid exists in that file
					if first :
						# If this is the first time that a file is check for that grid
						first = False
						for p in paramMustSim:
							# Checking every parameters that must be the same
							paramdict[p] = bigG.variable[p]
							
						# Validation of same Dtype for each datafield
						go = bigG.GROUP["DataField"].OBJECT
						for r in go:
							paramdict[go[r].variable["DataFieldName"]]=go[r].variable["DataType"]
					else:
						# If it's not the first time that a file is check for that grid
						for p in paramMustSim:
							# Checking every parameters that must be the same
							if not paramdict[p]==bigG.variable[p]:
								# Stop merging and return error ***IMPROVE*** 
								# Maybe do only the things that can be done ***IMPROVE***
								log.log('e',Nom,"Error dataset are not compatible")
								
						# Validation of same Dtype for each datafield
						go=bigG.GROUP["DataField"].OBJECT
						for r in go:
							if not paramdict[go[r].variable["DataFieldName"]]==go[r].variable["DataType"]:
								# Stop merging and return error ***IMPROVE*** 
								# Maybe do only the things that can be done ***IMPROVE***
								log.log('e',Nom,"Error dataset are not compatible")
								
			# Keep all this info for later it's going to be useful
			paramMustSimDict[grid]=paramdict
				
				



		## Determination of new informations
###############################################################################
# Some properties have to be calculated in order to merge. This section is    #
# doing just that                                                             #
###############################################################################

		gridResolX={}			# Getting the RESOLUTION in the X direction for each grid
		gridResolY={}			# Getting the RESOLUTION in the Y direction for each grid
		extremeup={}			# Getting the UPPER coordinates for each grid
		extremedown={}			# Getting the LOWEST coordinates for each grid
		extremeleft={}			# Getting the LEFTMOST coordinates for each grid
		extremeright={}			# Getting the RIGHTMOST coordinates for each grid
		gridDimX={}				# Getting the DIMENSIONS of X direction for each grid
		gridDimY={}				# Getting the DIMENSIONS of Y direction for each grid
		NoValueDS={}			# Getting the fill value of each dataset
		dtypeDS={}				# Getting the DTYPE for each dataset
		dstogrid={}				# Knowing wich is the GRID for each dataset
		filGridULC={}			# Getting the upper left corner of each file for each grid

		for grid in gridlist:
			# For each grid
			filGridULC[grid]={}			# Adding a dictionary for each grid that will contain information on every file
			for attOfF in latt:
				### Determination of resolution of each grid
				# ***IMPROVE*** Should check if bigd is none
				bigG=attOfF.getgridbyname(grid)				# Getting all the attributes in the grid of a file
				
				# Get extreme grid point
				ulp=eval(bigG.variable["UpperLeftPointMtrs"])
				lrp=eval(bigG.variable["LowerRightMtrs"])
				
				# Get grid dimension
				dimx=int(bigG.variable["XDim"])
				dimy=int(bigG.variable["YDim"])
				
				# Calculate grid resolution
				gridResolX[grid]=(lrp[0]-ulp[0])/dimx
				gridResolY[grid]=(ulp[1]-lrp[1])/dimy
				
				### Determination of new extreme coordinates for each grid
				# up
				try:
					if extremeup[grid]< ulp[1]:
						extremeup[grid]=ulp[1]
				except KeyError:
					extremeup[grid]=ulp[1]
				# down
				try:
					if extremedown[grid]> lrp[1]:
						extremedown[grid]=lrp[1]
				except KeyError:
					extremedown[grid]=lrp[1]
				# left
				try:
					if extremeleft[grid]> ulp[0]:
						extremeleft[grid]=ulp[0]
				except KeyError:
					extremeleft[grid]=ulp[0]
				# right
				try:
					if extremeright[grid]< lrp[0]:
						extremeright[grid]=lrp[0]
				except KeyError:
					extremeright[grid]=lrp[0]
				### Determination of dataset to grid name
				if bigG is not None:
					go=bigG.GROUP["DataField"].OBJECT
					for r in go:
						dstogrid[ go[r].variable["DataFieldName"] ] = grid
				## Determination of ULC for each grid in each file
				filGridULC[grid][attOfF.name] = ulp
			## determination of new dimension for each grid
			gridDimY[grid] = int((extremeup[grid]-extremedown[grid])/gridResolY[grid])
			gridDimX[grid] = int((extremeright[grid]-extremeleft[grid])/gridResolX[grid])

		for ds in dslist:
			# For each dataset
			for sd in lfHDF:
				# For each hdf file
				
				# Try opening dataset
				try:
					sds = sd.select(eval(ds))
					# Get fill value
					NoValueDS[ds] = sds.getfillvalue()
					# Get dtype
					dtypeDS[ds] = sds.info()[3]
				except:
					log.log('e',Nom,"no dataset")



		## Start creating new file
###############################################################################
# This is the actual part where stuff happens                                 #
###############################################################################

		# This part is the same for every file in any circumstances
		########## absolute ########################
		
		# Open new file
		try:
			hdf = HDF(newfilename, HC.WRITE  | HC.CREATE  |HC.TRUNC)
			sd  =  SD(newfilename, SDC.WRITE | SDC.CREATE )
		except TypeError:
			hdf = HDF(newfilename.encode('ascii','ignore'), HC.WRITE  | HC.CREATE  |HC.TRUNC)
			sd  =  SD(newfilename.encode('ascii','ignore'), SDC.WRITE | SDC.CREATE )
		
		v=hdf.vgstart()
		vg={}
		vg1={}
		vg2={}
		
		## rewrite the gridlist
		gridlist = []
		for ds in dslist:
			if dstogrid[ds] not in gridlist:
				gridlist.append(dstogrid[ds])
				
		for grid in gridlist:
			vg[grid]=v.attach(-1,write=1)
			vg[grid]._class="GRID"
			vg[grid]._name=eval(grid)
			vg1[grid]=v.attach(-1,write=1)
			vg2[grid]=v.attach(-1,write=1)
			vg1[grid]._class="GRID Vgroup"
			vg1[grid]._name="Data Fields"
			vg2[grid]._class="GRID Vgroup"
			vg2[grid]._name="Grid Attributes"
			vg[grid].insert(vg1[grid])
			vg[grid].insert(vg2[grid])
		########## absolute ########################


		# Create dataset with the right size
		for ds in dslist:
			theGrid=dstogrid[ds]
			# Get grid name of data set
			sds = sd.create(eval(ds),dtypeDS[ds],(gridDimY[theGrid],gridDimX[theGrid]))
			
			# Set fill value
			fv=NoValueDS[ds]
			try:
				sds.setfillvalue(NoValueDS[ds])
			except OverflowError:
				log.log('e',Nom,"setfillvalue")
				sds.setfillvalue(0)
			## write real data
			for fil in range(len(latt)):
				try:
					# Determine where the data will be written
					ulc = filGridULC[theGrid][latt[fil].name]
					# Determine the position on the grid
					y = (extremeup[theGrid]-ulc[1])/(extremeup[theGrid]-extremedown[theGrid])
					x = (ulc[0]-extremeleft[theGrid])/(extremeright[theGrid]-extremeleft[theGrid])
					y = int(y*gridDimY[theGrid])
					x = int(x*gridDimX[theGrid])
					# read data from files
					osds = lfHDF[fil].select(eval(ds))
					sh = osds[:].shape
					sds[y:y+sh[0],x:x+sh[1]] = osds[:]
					osds.endaccess()
				except:
					pass
			# Close sds
			vg1[dstogrid[ds]].add(HC.DFTAG_NDG,sds.ref())
			sds.endaccess()


    
		for g in vg1:
			vg1[g].detach()
			vg2[g].detach()
			vg[g].detach()

		# Create attribute table for the file
		attstr="GROUP=GridStructure\n"
		gridcount=1
		for gr in gridlist:
			# Start group grid
			attstr+="\tGROUP=GRID_%i\n"%gridcount
			# Add grid name
			attstr+="\t\tGridName=%s\n"%gr
			# Add dimension
			attstr+="\t\tXDim=%i\n"%gridDimX[gr]
			attstr+="\t\tYDim=%i\n"%gridDimY[gr]
			# Add UpperLeftPointMtrs
			attstr+="\t\tUpperLeftPointMtrs=(%f,%f)\n"%(extremeleft[gr],extremeup[gr])
			# Add lrp
			attstr+="\t\tLowerRightMtrs=(%f,%f)\n"%(extremeright[gr],extremedown[gr])
			# Add projection
			attstr+="\t\tProjection=%s\n"%paramMustSimDict[gr]["Projection"]
			# ProjParams
			attstr+="\t\tProjParams=%s\n"%paramMustSimDict[gr]["ProjParams"]
			# SphereCode
			attstr+="\t\tSphereCode=%s\n"%paramMustSimDict[gr]["SphereCode"]

			
			attstr+="""\t\tGROUP=Dimension
		\t\tEND_GROUP=Dimension
		\t\tGROUP=DataField\n"""

			## Add data sets
			# create list of ds for current grid
			lsdsgr=[]
			dsnum=1
			for ds in dslist:
				if dstogrid[ds] == gr:
					# Add object
					attstr+="\t\t\tOBJECT=DataField_%i\n"%dsnum
					# datafield name
					attstr+="\t\t\t\tDataFieldName=%s\n"%ds
					# datatype
					attstr+="\t\t\t\tDataType=%s\n"%paramMustSimDict[gr][ds]
					# dim
					attstr+='\t\t\t\tDimList=("YDim","XDim")\n'
					attstr+="\t\t\tEND_OBJECT=DataField_%i\n"%dsnum
					dsnum+=1
			attstr+="\t\tEND_GROUP=DataField\n"
			attstr+="""\t\tGROUP=MergedFields
		\t\tEND_GROUP=MergedFields\n"""
			attstr+="\tEND_GROUP=GRID_%i\n"%gridcount
			gridcount+=1
		attstr+="""END_GROUP=GridStructure
		GROUP=PointStructure
		END_GROUP=PointStructure
		END"""
		# adding attribute to new file
		att=sd.attr('StructMetadata.0')
		att.set(SDC.CHAR,attstr)
		sd.end()
		hdf.close()
		
		# This should return something somehow
	elif len(arg)>1:
		afile = arg[0]				# This is the list of the NAME of the files to merge
		newfilename = arg[1]			# This is the final file NAME
		# Create a copy
		from shutil import copyfile
		copyfile(afile,newfilename)
Example #18
#----------------------------------------------------------------------------------------#
# Read HDF Files (VD data) Latitude & Longitude

f = HDF(file_path + file_name, SDC.READ)
vs = f.vstart()

Latitude = vs.attach('Latitude')
Longitude = vs.attach('Longitude')

a = Latitude[:]

Latitude.detach()
Longitude.detach()
vs.end()
f.close()

#----------------------------------------------------------------------------------------#
# SDS Data

file = SD(file_path + file_name, SDC.READ)

file_info = file.info()
print(file_info)  # number of sds and metadata

print('---------- CloudLayerBase ----------')

sds_obj = file.select('CloudLayerBase')  # select sds

CloudLayerBase = sds_obj.get()
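
CloudLayerBase typically carries a fill value; a hedged continuation (the '_FillValue' attribute name is an assumption about this product, and numpy is assumed to be imported as np) could mask it before use:

attrs = sds_obj.attributes()
fill = attrs.get('_FillValue')      # assumption: the fill value is stored under this name
if fill is not None:
    CloudLayerBase = np.where(CloudLayerBase == fill, np.nan, CloudLayerBase)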
Example #19
for fieldName in vd._fields:     # loop over all field names
    try:
        # instantiate field and obtain value of attribute 'unit'
        v = vd.field(fieldName).unit
        print("%s: %s" % (fieldName, v), end=' ')
    except:                      # no 'unit' attribute: ignore
        pass
print("")
print("")

# Display table header.
header = "%-7s %-12s %3s %4s %8s" % tuple(vd._fields)
print("-" * len(header))
print(header)
print("-" * len(header))

# Loop over the vdata records, displaying each record as a table row.
# Current record position is 0 after attaching the vdata.
while True:
    try:
        rec = vd.read()          # read next record
        # equivalent to:
        # rec = vd[vd.tell()]
        print("%-7s %-12s %3d %4.1f %8.2f" % tuple(rec[0]))
    except HDF4Error:             # end of vdata reached
        break

vd.detach()               # "close" the vdata
vs.end()                  # terminate the vdata interface
f.close()                 # close the HDF file
Example #20
def PFlux(year1, month1, day1):
    filename = external_dir + "swepam_data_1day.hdf"
    hdf = HDF(filename)

    #Initialize the V interface on the HDF file.
    v = hdf.vgstart()
    vs = hdf.vstart()
    #Scan all vgroups in the file
    ref = -1
    refnum = v.getid(ref)
    vg = v.attach(refnum)
    #print "----------------"
    #print "name:", vg._name, "class:",vg._class, "tag,ref:",
    #print vg._tag, vg._refnum
    # Show the number of members of each main object type.
    #print "members: ", vg._nmembers,
    #print "datasets:", vg.nrefs(HC.DFTAG_NDG),
    #print "vdataset:  ", vg.nrefs(HC.DFTAG_VH),
    #print "vgroups: ", vg.nrefs(HC.DFTAG_VG)

    # Read the contents of the vgroup.
    members = vg.tagrefs()
    # Display info about each member.
    index = -1
    for tag, ref in members:
        index += 1
        #    print "member index", index
        # Vdata tag
        if tag == HC.DFTAG_VH:
            vd = vs.attach(ref)
            nrecs, intmode, fields, size, name = vd.inquire()
            #      print "  vdata:",name, "tag,ref:",tag, ref
            #      print "    fields:",fields
            #      print "    nrecs:",nrecs
            vd.detach()
    # SDS tag
        elif tag == HC.DFTAG_NDG:
            sds = sd.select(sd.reftoindex(ref))
            name, rank, dims, type, nattrs = sds.info()
            #      print "  dataset:",name, "tag,ref:", tag, ref
            #      print "    dims:",dims
            #      print "    type:",type
            sds.endaccess()

    # VS tag
        elif tag == HC.DFTAG_VG:
            vg0 = v.attach(ref)
            #      print "  vgroup:", vg0._name, "tag,ref:", tag, ref
            vg0.detach()

    # Unhandled tag
        else:
            print("unhandled tag,ref", tag, ref)

    # Close vgroup

    members = vg.tagrefs()
    # NOTE: the membership test below is a no-op; tag and ref still hold the
    # values from the last iteration of the loop above.
    (tag, ref) in members
    vd = vs.attach(ref)
    nrecs, intmode, fields, size, name = vd.inquire()
    alldata = vd.read(nrecs)
    vd.detach()
    vg.detach()
    v.end()
    hdf.close()

    data = np.array(alldata)

    # input
    #  (y,m,d) = (1998,3,1)
    year = data[:, 0]
    day = data[:, 1]
    hr = data[:, 2]
    min = data[:, 3]
    sec = data[:, 4]
    fp_year = data[:, 5]
    fp_doy = data[:, 6]
    pdensity = data[:, 8]
    speed = data[:, 11]

    start = datetime.date(int(year[0]), 1,
                          1) + datetime.timedelta(int(day[0]) - 1)
    end = datetime.date(int(year[-1]), 1,
                        1) + datetime.timedelta(int(day[-1]) - 1)
    #  print "star Date and End Date:", start, end

    delta1 = datetime.date(year1, month1, day1) - start
    index = delta1.days
    #  print "index ....", index
    if index >= (nrecs - 1):
        print("ERROR: the input time is too new")
#  break
    elif index < 0:
        print("ERROR: the input time is too old")
#  break
    else:
        avePF = 0
        num = 0
        for i in range(0, 90):
            j = index - i
            if j < 0:
                print("ERROR: the index is out of the array")
                break
            else:
                if pdensity[j] > 0 and speed[j] > 0:
                    avePF = avePF + pdensity[j] * speed[j]
                    num = num + 1
        avePF = avePF / num


#    print "the 90 days average proton flux is:",avePF, num
    return avePF
Example #21
def genCloudSatFigure(filepath, data_field, title, label, plot_type, cmap):
    """Ingest CloudSat overpasses (HDF) and plot
    parameters of interest like snowfall rates as
    observed by the CPR.
    """

    # Load and unpack HDF
    f = HDF(filepath) 
    vs = f.vstart() 
    Latitude = vs.attach('Latitude')
    Longitude = vs.attach('Longitude')
    Time = vs.attach('Profile_time')
    UTC = vs.attach('UTC_start')
    
    if plot_type == PlotType.SFCS:
        snowfall_rate_sfc = vs.attach('snowfall_rate_sfc')
        c = snowfall_rate_sfc[:]
        c = flatten(c)
        snowfall_rate_sfc.detach()
    
    if plot_type == PlotType.TEMP:
        EC_Height = vs.attach('EC_height')
        b = EC_Height[:]
        b = flatten(b)
        EC_Height.detach()

    a = Time[:]
    a = flatten(a)
    d = Longitude[:]
    d = flatten(d)
    utc_start = UTC[0][0]
    
    Latitude.detach() # "close" the vdata
    Longitude.detach() # "close" the vdata
    Time.detach() # "close" the vdata
    UTC.detach() # "close" the vdata
    vs.end() # terminate the vdata interface
    f.close() 

    #---------- Read HDF Files ----------#

    cpr_2b_geoprof = SD(filepath, SDC.READ)
    offset = 28000 # Position along granule
    span = 1000 # Plot length

    if plot_type != PlotType.SFCS:
        if plot_type == PlotType.REFL or plot_type == PlotType.MASK or plot_type == PlotType.SNOW:
            cpr_2b_geoprof_height = cpr_2b_geoprof.select('Height')
            cpr_2b_geoprof_height_data = cpr_2b_geoprof_height.get()

        cpr_2b_geoprof_radar_reflectivity = cpr_2b_geoprof.select(data_field)
        cpr_2b_geoprof_radar_reflectivity_data = cpr_2b_geoprof_radar_reflectivity.get()

        if plot_type == PlotType.TEMP or plot_type == PlotType.SNOW:
            cpr_2b_geoprof_radar_reflectivity_data[cpr_2b_geoprof_radar_reflectivity_data < 0] = math.nan
        elif plot_type == PlotType.POWR:
            cpr_2b_geoprof_radar_reflectivity_data[cpr_2b_geoprof_radar_reflectivity_data < 0] = math.nan

        fillvalue = 15360
        missing = -8888

        img = np.zeros((span,125))

        if plot_type == PlotType.REFL:
            img.fill(-30)

        factor = 1
        if plot_type == PlotType.REFL:
            factor = 0.01

        for i in np.arange(span):
            for j in np.arange(125):
                if plot_type == PlotType.TEMP:
                    k = int( 125 * (b[j] + 5000) / 35000 )
                else:
                    k = int( 125 * (cpr_2b_geoprof_height_data[i+offset,j] + 5000) / 35000 )

                if cpr_2b_geoprof_radar_reflectivity_data[i+offset,j] > -3000 and \
                    cpr_2b_geoprof_radar_reflectivity_data[i+offset,j] < 2100:
                    img[i,k] = cpr_2b_geoprof_radar_reflectivity_data[i+offset,j] * factor

        # Begin plotting granule
        fig = plt.figure(figsize=(18, 6))
        ax = plt.subplot(111)
        im = ax.imshow(img.T, interpolation='bilinear', cmap=cmap, origin='lower', extent=[0,200,-10,60])

        plt.title(title)
        plt.ylabel('Height (km)')
        plt.xlabel('Time')
        plt.ylim(0,20)
        pylab.yticks([0,5,10,15,20],[0,5,10,15,20])
        position_tick_labels = [str(round(a[offset]+utc_start, 3)), str(round(a[offset+200]+utc_start, 3)), str(round(a[offset+400]+utc_start, 3)), str(round(a[offset+600]+utc_start, 3)), str(round(a[offset+800]+utc_start, 3)), str(round(a[offset+1000]+utc_start, 3))]
        pylab.xticks([0,40,80,120,160, 200], position_tick_labels)
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="3%", pad=0.10)

        if plot_type == PlotType.MASK:
            plt.colorbar(im, cax=cax, boundaries=[-10,0,10,20,30,40], ticks=[-10,0,10,20,30,40], label=label)
        elif plot_type == PlotType.TEMP:
            plt.colorbar(im, cax=cax, label=label, boundaries=[200, 220, 240, 260, 280], ticks=[200, 220, 240, 260, 280])
        else:
            plt.colorbar(im, cax=cax, label=label)

        plt.savefig("cloudsat_radar_reflectivity.png")
        plt.show()
    else:
        fig = plt.figure(figsize=(15.5, 2.4))
        c = c[offset:offset+span]
        index = np.arange(len(c))
        plt.plot(index, c, color="#6699ff")
        plt.fill(index, c, color="#6699ff")
        plt.grid()
        plt.title("CloudSat Surface Snowfall")
        plt.xlabel("Time")
        plt.ylabel("Rate (mm / hr)")
        plt.ylim(0,3)
        plt.xlim(0,span)
        
        position_tick_labels = [str(round(a[offset]+utc_start, 3)), str(round(a[offset+200]+utc_start, 3)), str(round(a[offset+400]+utc_start, 3)), str(round(a[offset+600]+utc_start, 3)), str(round(a[offset+800]+utc_start, 3)), str(round(a[offset+1000]+utc_start, 3))]
        pylab.xticks([0,200,400,600,800,1000], position_tick_labels)
        
        plt.show()
Example #22
def compress(*arg, **karg):
    # This function compresses the file with a given compression parameter
    # compress(file, compression parameter)
    oldfile = arg[0]
    newfilename = arg[0] + "t"

    try:
        log = karg["log"]
    except KeyError:
        log = logMod.Log("", nolog=True)

    keepall = True

    # Should eventually check if files exists and can be read ***IMPROVE***
    try:
        HDFF = SD(oldfile, SDC.READ)  # This is the list of the FILES to merge
    except TypeError:
        HDFF = SD(oldfile.encode('ascii', 'ignore'),
                  SDC.READ)  # This is the list of the FILES to merge
    # This is the list of the ATTRIBUTE "StructMetadata.0" of the files
    attOfF = atribute(HDFF.attributes()["StructMetadata.0"],
                      oldfile,
                      dsi=[
                          0,
                      ])

    ## Listing all the GRIDS that the new file will have

    gridlist = attOfF.listgridname()[
        1]  # listgridname return a list of all the grids name

    ## Listing all the DATASETS that the new file will have

    # Should check if any grid ***IMPROVE***
    dslist = attOfF.orderedDS()

    ## Validation of common information
    ###############################################################################
    # Some information has to be the same for each file or else no mosaic can     #
    # be made for example two files with not the same projection type can't be    #
    # merged together. ***IMPROVE*** Maybe in the future we could transform file  #
    # so that they have the same projection or some other thing.                  #
    ###############################################################################

    # List of parameters to check to ensure that they are the same
    paramMustSim = ["Projection", "ProjParams", "SphereCode"]
    # Dictionary that will keep all the information about every file
    paramMustSimDict = {}

    for grid in gridlist:
        # Verification of a grid

        paramdict = {
        }  # Dictionary that keep the actual value that have to be the same

        bigG = attOfF.getgridbyname(
            grid)  # Getting all the attributes in the grid of a file
        if bigG is not None:
            # If the grid exists in that file

            for p in paramMustSim:
                # Checking every parameters that must be the same
                paramdict[p] = bigG.variable[p]

            # Validation of same Dtype for each datafield
            go = bigG.GROUP["DataField"].OBJECT
            for r in go:
                paramdict[go[r].variable["DataFieldName"]] = go[r].variable[
                    "DataType"]

        # Keep all this info for later it's going to be useful
        paramMustSimDict[grid] = paramdict


## Determination of new informations
###############################################################################
# Some properties have to be calculated in order to merge. This section is    #
# doing just that                                                             #
###############################################################################

    gridResolX = {}  # Getting the RESOLUTION in the X direction for each grid
    gridResolY = {}  # Getting the RESOLUTION in the Y direction for each grid
    extremeup = {}  # Getting the UPPER coordinates for each grid
    extremedown = {}  # Getting the LOWEST coordinates for each grid
    extremeleft = {}  # Getting the LEFTMOST coordinates for each grid
    extremeright = {}  # Getting the RIGHTMOST coordinates for each grid
    gridDimX = {}  # Getting the DIMENSIONS of X direction for each grid
    gridDimY = {}  # Getting the DIMENSIONS of Y direction for each grid
    NoValueDS = {}  # Getting the fill value of each dataset
    dtypeDS = {}  # Getting the DTYPE for each dataset
    dstogrid = {}  # Knowing which is the GRID for each dataset
    filGridULC = {}  # Getting the upper left corner of each file for each grid

    for grid in gridlist:
        # For each grid
        filGridULC[grid] = {
        }  # Adding a dictionary for each grid that will contain information on every file

        ### Determination of resolution of each grid
        # ***IMPROVE*** Should check if bigd is none

        bigG = attOfF.getgridbyname(
            grid)  # Getting all the attributes in the grid of a file

        # Get extreme grid point
        ulp = eval(bigG.variable["UpperLeftPointMtrs"])
        lrp = eval(bigG.variable["LowerRightMtrs"])

        # Get grid dimension (PIXEL)
        dimx = int(bigG.variable["XDim"])
        dimy = int(bigG.variable["YDim"])

        # Calculate grid resolution
        gridResolX[grid] = (lrp[0] - ulp[0]) / dimx
        gridResolY[grid] = (ulp[1] - lrp[1]) / dimy

        ### Determination of new extreme coordinates for each grid
        # up
        extremeup[grid] = ulp[1]
        # down
        extremedown[grid] = lrp[1]
        # left
        extremeleft[grid] = ulp[0]
        # right
        extremeright[grid] = lrp[0]
        #print("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",int(LOC - UPC),int(RMC - LMC),"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n")

        ### Determination of dataset to grid name
        if bigG is not None:
            go = bigG.GROUP["DataField"].OBJECT
            for r in go:
                dstogrid[go[r].variable["DataFieldName"]] = grid

        ### Determination of new extreme coordinates for each grid

        ## Determination of ULC for each grid in each file
        filGridULC[grid][attOfF.name] = ulp
        ## determination of new dimension for each grid (pixel)
        gridDimY[grid] = int(dimy)
        gridDimX[grid] = int(dimx)

    for ds in dslist:
        # For each dataset

        # For each hdf file

        # Try opening dataset
        try:
            sds = HDFF.select(eval(ds))
            # Get fill value
            NoValueDS[ds] = sds.getfillvalue()
            # Get dtype
            dtypeDS[ds] = sds.info()[3]
        except:
            log.log('e', Nom, "no dataset")

    ## Start creating new file


###############################################################################
# This is the actual part where stuff happens                                 #
###############################################################################

# This part is the same for every file in any circumstances
########## absolute ########################

# Open new file
    try:
        hdf = HDF(newfilename, HC.WRITE | HC.CREATE | HC.TRUNC)
        sd = SD(newfilename, SDC.WRITE | SDC.CREATE)
    except TypeError:
        hdf = HDF(newfilename.encode('ascii', 'ignore'),
                  HC.WRITE | HC.CREATE | HC.TRUNC)
        sd = SD(newfilename.encode('ascii', 'ignore'), SDC.WRITE | SDC.CREATE)
    v = hdf.vgstart()
    vg = {}
    vg1 = {}
    vg2 = {}

    ## rewrite the gridlist
    gridlist = []
    for ds in dslist:
        if dstogrid[ds] not in gridlist:
            gridlist.append(dstogrid[ds])

    for grid in gridlist:
        vg[grid] = v.attach(-1, write=1)
        vg[grid]._class = "GRID"
        vg[grid]._name = eval(grid)
        vg1[grid] = v.attach(-1, write=1)
        vg2[grid] = v.attach(-1, write=1)
        vg1[grid]._class = "GRID Vgroup"
        vg1[grid]._name = "Data Fields"
        vg2[grid]._class = "GRID Vgroup"
        vg2[grid]._name = "Grid Attributes"
        vg[grid].insert(vg1[grid])
        vg[grid].insert(vg2[grid])
    ########## absolute ########################

    # Create dataset with the right size
    for ds in dslist:
        theGrid = dstogrid[ds]
        # Get grid name of data set
        # Make sure that the set is asked to be there
        sds = sd.create(eval(ds), dtypeDS[ds],
                        (gridDimY[theGrid], gridDimX[theGrid]))

        # Set fill value
        fv = NoValueDS[ds]

        try:
            sds.setfillvalue(NoValueDS[ds])
        except OverflowError:
            log.log('e', Nom, "dataset fillvalue")
            sds.setfillvalue(0)
        # Set compression
        try:
            sds.setcompress(*arg[1])  # args depend on compression type
        except HDF4Error as msg:
            log.log('e', Nom, "Error compressing the dataset")
            sds.endaccess()
            sd.end()
            return
        ## write real data

        try:
            # read data from files
            osds = HDFF.select(eval(ds))
            # And write it in the new sds
            sds[:, :] = osds[:, :]

            osds.endaccess()
        except:
            pass
        # Close sds
        vg1[dstogrid[ds]].add(HC.DFTAG_NDG, sds.ref())
        sds.endaccess()

    for g in vg1:
        vg1[g].detach()
        vg2[g].detach()
        vg[g].detach()

    # Create attribute table for the file
    attstr = "GROUP=GridStructure\n"
    gridcount = 1
    for gr in gridlist:
        # Start group grid
        attstr += "\tGROUP=GRID_%i\n" % gridcount
        # Add grid name
        attstr += "\t\tGridName=%s\n" % gr
        # Add dimention
        attstr += "\t\tXDim=%i\n" % gridDimX[gr]
        attstr += "\t\tYDim=%i\n" % gridDimY[gr]
        # Add UpperLeftPointMtrs
        attstr += "\t\tUpperLeftPointMtrs=(%f,%f)\n" % (extremeleft[gr],
                                                        extremeup[gr])
        # Add lrp
        attstr += "\t\tLowerRightMtrs=(%f,%f)\n" % (extremeright[gr],
                                                    extremedown[gr])
        # Add projection
        attstr += "\t\tProjection=%s\n" % paramMustSimDict[gr]["Projection"]
        # ProjParams
        attstr += "\t\tProjParams=%s\n" % paramMustSimDict[gr]["ProjParams"]
        # SphereCode
        attstr += "\t\tSphereCode=%s\n" % paramMustSimDict[gr]["SphereCode"]

        attstr += """\t\tGROUP=Dimension
	\t\tEND_GROUP=Dimension
	\t\tGROUP=DataField\n"""

        ## Add data sets
        # create list of ds for current grid
        lsdsgr = []
        dsnum = 1
        for ds in dslist:
            if dstogrid[ds] == gr:
                # Add object
                attstr += "\t\t\tOBJECT=DataField_%i\n" % dsnum
                # datafield name
                attstr += "\t\t\t\tDataFieldName=%s\n" % ds
                # datatype
                attstr += "\t\t\t\tDataType=%s\n" % paramMustSimDict[gr][ds]
                # dim
                attstr += '\t\t\t\tDimList=("YDim","XDim")\n'
                attstr += "\t\t\tEND_OBJECT=DataField_%i\n" % dsnum
                dsnum += 1
        attstr += "\t\tEND_GROUP=DataField\n"
        attstr += """\t\tGROUP=MergedFields
	\t\tEND_GROUP=MergedFields\n"""
        attstr += "\tEND_GROUP=GRID_%i\n" % gridcount
        gridcount += 1
    attstr += """END_GROUP=GridStructure
	GROUP=PointStructure
	END_GROUP=PointStructure
	END"""
    # adding attribute to new file
    att = sd.attr('StructMetadata.0')
    att.set(SDC.CHAR, attstr)
    sd.end()
    hdf.close()

    # delete old file
    os.remove(oldfile)
    # rename new file to old file name
    os.rename(newfilename, oldfile)