Example #1 (score: 0)
    def hdf4lookup(self, path, swath):
        """Build a {name: [tag, ref]} lookup table for one swath of an HDF4 file.

        Walks the vgroup named *swath*, gathers its sub-vgroups, and for every
        SDS (DFTAG_NDG) or Vdata (DFTAG_VH) member records the member name
        together with its [tag, ref] pair.

        Parameters
        ----------
        path : str
            Path to the HDF4 file.
        swath : str
            Name of the swath vgroup to inspect.

        Returns
        -------
        dict
            Maps dataset/vdata name -> [tag, ref].
        """
        hdf = HDF(path)
        sd = SD(path)
        vs = hdf.vstart()
        v = hdf.vgstart()

        vg = v.attach(swath)
        vg_members = vg.tagrefs()
        vg0_members = {}
        for tag, ref in vg_members:
            vg0 = v.attach(ref)
            if tag == HC.DFTAG_VG:
                vg0_members[vg0._name] = vg0.tagrefs()
            # BUG FIX: the original referenced vg0.detach / vg.detach without
            # calling them, so the vgroups were never released.
            vg0.detach()
        vg.detach()

        lookup_dict = {}
        for key in vg0_members.keys():
            for tag, ref in vg0_members[key]:
                if tag == HC.DFTAG_NDG:
                    sds = sd.select(sd.reftoindex(ref))
                    name = sds.info()[0]
                    lookup_dict[name] = [tag, ref]
                    sds.endaccess()
                elif tag == HC.DFTAG_VH:
                    vd = vs.attach(ref)
                    nrecs, intmode, fields, size, name = vd.inquire()
                    lookup_dict[name] = [tag, ref]
                    # BUG FIX: detach the vdata so the VS interface can close.
                    vd.detach()
        v.end()
        vs.end()
        sd.end()
        return lookup_dict
def h4lookup(path, swath="Earth UV-2 Swath"):
    """Map SDS dataset names to their HDF4 refs for one swath.

    Only scientific datasets (DFTAG_NDG) are looked up; vdata members such
    as "WavelengthReferenceColumn" are ignored.

    Parameters
    ----------
    path : str
        Path to the HDF4 file.
    swath : str
        Name of the swath vgroup (default "Earth UV-2 Swath").

    Returns
    -------
    dict
        Maps dataset name -> ref number.
    """
    hdf = HDF(path)
    v = hdf.vgstart()
    s2_vg = v.attach(swath)
    # First member is the "Geolocation Fields" vgroup, second the
    # "Data Fields" vgroup; call tagrefs() once instead of twice.
    members = s2_vg.tagrefs()
    geo_tag, geo_ref = members[0]
    dat_tag, dat_ref = members[1]
    s2_vg.detach()
    #--------------------------------------------
    # found geoloaction & data fields
    #--------------------------------------------
    geo_vgs = v.attach(geo_ref)
    dat_vgs = v.attach(dat_ref)
    tagrefs_list = geo_vgs.tagrefs() + dat_vgs.tagrefs()
    geo_vgs.detach()
    dat_vgs.detach()
    refs_dict = {}
    #--------------------------------------------
    # create dict in which keys are names in hdf and values are refs
    #--------------------------------------------
    sd = SD(path)
    for tag, ref in tagrefs_list:
        if tag == HC.DFTAG_NDG:
            sds = sd.select(sd.reftoindex(ref))
            refs_dict[sds.info()[0]] = ref
            # BUG FIX: end access per dataset; the original released only the
            # last one and raised NameError when no dataset matched.
            sds.endaccess()
    sd.end()
    v.end()
    hdf.close()
    return refs_dict
Exemple #3
0
def proc(indir, outdir, inname, outname):
    """Extract the monthly NDVI grid from a MODIS CMG HDF4 file and write it
    as a global 0.05-degree GeoTIFF.

    Parameters
    ----------
    indir, inname : str
        Directory and file name of the input HDF4 file.
    outdir, outname : str
        Directory and base name of the output GeoTIFF (".tif" is appended).

    Raises
    ------
    ValueError
        If the NDVI dataset is not found in the file.
    """
    path = indir + "/" + inname
    hdf = HDF(path)
    sd = SD(path)
    vs = hdf.vstart()
    v = hdf.vgstart()

    # Attach the grid vgroup once (the original attached it twice and never
    # detached the first handle).
    mod_vg = v.attach("MOD_Grid_monthly_CMG_VI")
    tag, ref = mod_vg.tagrefs()[0]
    mod_vg.detach()

    vg0 = v.attach(ref)
    tagrefs = vg0.tagrefs()
    ndvi = None
    evi = None
    for tag, ref in tagrefs:
        if tag == HC.DFTAG_NDG:
            sds = sd.select(sd.reftoindex(ref))
            name = sds.info()[0]
            # BUG FIX: reuse the already-open SD/SDS instead of re-opening
            # the file for every matching dataset (the original leaked SD
            # handles and never closed vs/hdf).
            if name == "CMG 0.05 Deg Monthly NDVI":
                ndvi = np.float64(sds.get())
            elif name == "CMG 0.05 Deg Monthly EVI":
                # NOTE(review): EVI is read but unused, as in the original.
                evi = np.float64(sds.get())
            sds.endaccess()
    vg0.detach()
    sd.end()
    v.end()
    vs.end()
    hdf.close()

    if ndvi is None:
        # Explicit error instead of the original's NameError at "data = ndvi".
        raise ValueError("CMG 0.05 Deg Monthly NDVI dataset not found in " + path)

    data = ndvi
    name = outdir + "/" + outname + ".tif"
    # Fixed CMG grid geometry: 7200x3600 cells of 0.05 degrees, north-up.
    cols = 7200
    rows = 3600
    originX = -180.0
    originY = 90.0
    pixelWidth = 0.05
    pixelHeight = -0.05

    driver = gdal.GetDriverByName('GTiff')
    outRaster = driver.Create(name, cols, rows, 1, gdal.GDT_Float32)
    outRaster.SetGeoTransform(
        (originX, pixelWidth, 0, originY, 0, pixelHeight))
    outband = outRaster.GetRasterBand(1)
    outband.WriteArray(data)
    outRasterSRS = osr.SpatialReference()
    outRasterSRS.ImportFromEPSG(4326)  # WGS84 geographic
    outRaster.SetProjection(outRasterSRS.ExportToWkt())
    outband.FlushCache()
def read_data_from_hdf(inputfile):
    """Read hyperspectral up-/downwelling casts from an HDF4 'Profiler' vgroup.

    Each member of the vgroup must be a Vdata (DFTAG_VH). Wavelengths and
    depth are taken from the "ED_hyperspectral_downcast" record; the first
    three fields of every record are metadata and are skipped.

    Returns
    -------
    tuple
        (wavelengths, depth, downwelling_downcast, downwelling_upcast,
        upwelling_downcast, upwelling_upcast); entries remain empty lists
        when the corresponding vdata is absent.
    """
    wavelengths = []
    depth = []
    downwelling_downcast = []
    downwelling_upcast = []
    upwelling_downcast = []
    upwelling_upcast = []

    # open the hdf file read-only
    # Initialize the V and VS interfaces on the file.
    hdf = HDF(inputfile)
    vs = hdf.vstart()
    v  = hdf.vgstart()

    # Attach and read the contents of the Profiler vgroup
    vg = v.attach(v.find('Profiler'))

    for tag, ref in vg.tagrefs():
        assert(tag == HC.DFTAG_VH)
        vd = vs.attach(ref)
        nrecs, intmode, fields, size, name = vd.inquire()

        if name == "ED_hyperspectral_downcast":
            x = vd.read(nrecs)
            downwelling_downcast = np.asarray([i[3:] for i in x])
            # BUG FIX: the comprehension variable was also named "x", which
            # clobbered the record list under Python 2 before "depth" was
            # built from it below.
            wavelengths = np.asarray([float(w) for w in fields[3:]])
            depth = np.asarray([i[2] for i in x])
        elif name == "ED_hyperspectral_upcast":
            downwelling_upcast = np.asarray([i[3:] for i in vd.read(nrecs)])
        elif name == "LU_hyperspectral_downcast":
            upwelling_downcast = np.asarray([i[3:] for i in vd.read(nrecs)])
        elif name == "LU_hyperspectral_upcast":
            upwelling_upcast = np.asarray([i[3:] for i in vd.read(nrecs)])

        vd.detach()

    # Close vgroup
    vg.detach()

    # clean up
    v.end()
    vs.end()
    hdf.close()

    return wavelengths, depth, downwelling_downcast, downwelling_upcast, \
            upwelling_downcast, upwelling_upcast
def HDFread(filename, variable, Class=None):
    """Extract a named Vdata field from an HDF4 file via the V/VS interface.

    Parameters
    ----------
    filename : str
        Path to the HDF4 file.
    variable : str
        Name of the Vdata to read.
    Class : str, optional
        Vgroup class to search; defaults to 'SWATH Vgroup'.

    Returns
    -------
    numpy.ndarray
        Flattened array of the Vdata records.
    """
    hdf = HDF(filename, HC.READ)

    # Initialize the SD, V and VS interfaces on the file.
    sd = SD(filename)
    vs = hdf.vstart()
    v = hdf.vgstart()

    # Locate the vgroup holding the requested fields.
    if Class is None:  # FIX: identity test instead of "== None"
        ref = v.findclass('SWATH Vgroup')
    else:
        ref = v.findclass(Class)

    # Open all data of the class
    vg = v.attach(ref)
    # All fields in the class (assumed to be Vdata members).
    members = vg.tagrefs()

    nrecs = []
    names = []
    for tag, ref in members:
        vd = vs.attach(ref)
        # inquire() -> (nrecs, intmode, fields, size, name); call it once
        # instead of twice as the original did.
        info = vd.inquire()
        nrecs.append(info[0])   # number of records of the Vdata
        names.append(info[-1])  # name of the Vdata
        vd.detach()

    idx = names.index(variable)
    var = vs.attach(members[idx][1])
    V = var.read(nrecs[idx])
    var.detach()
    # FIX: detach the vgroup before terminating the interfaces.
    vg.detach()
    # Terminate V, VS and SD interfaces.
    v.end()
    vs.end()
    sd.end()
    # Close HDF file.
    hdf.close()

    return np.array(V).ravel()
Example #6 (score: 0)
    def showHDFinfo(self):
        """Describe every vgroup in the first HDF file of self.dirLst.

        Relies on a module-level describevg(ref, v, vs) helper defined
        elsewhere in this file.
        """
        # BUG FIX: the original assigned the whole list to drs when dirLst
        # held a single entry; HDF() needs one file path either way.
        drs = self.dirLst[0]

        hdf = HDF(drs, HC.READ)
        sd = SD(drs)

        vs = hdf.vstart()
        v = hdf.vgstart()

        # Scan all vgroups in the file.
        ref = -1
        while True:
            try:
                ref = v.getid(ref)
                describevg(ref, v, vs)
            # Python-3-compatible syntax (was "except HDF4Error, msg").
            except HDF4Error as msg:  # no more vgroups
                break
Example #7 (score: 0)
    # Create a simple 3x3 float array.
    sds = sd.create(name, SDC.FLOAT32, (3, 3))
    # Initialize array
    sds[:] = ((0, 1, 2), (3, 4, 5), (6, 7, 8))
    # "close" dataset.
    sds.endaccess()


# Create HDF file
# NOTE(review): vdatacreate() and sdscreate() are helper functions defined
# earlier in this example (their definitions are truncated in this dump).
filename = 'inventory.hdf'
hdf = HDF(filename, HC.WRITE | HC.CREATE)

# Initialize the SD, V and VS interfaces on the file.
sd = SD(filename, SDC.WRITE)  # SD interface
vs = hdf.vstart()  # vdata interface
v = hdf.vgstart()  # vgroup interface

# Create vdata named 'INVENTORY'.
vdatacreate(vs, 'INVENTORY')
# Create dataset named "ARR_3x3"
sdscreate(sd, 'ARR_3x3')

# Attach the vdata and the dataset.
vd = vs.attach('INVENTORY')
sds = sd.select('ARR_3x3')

# Create vgroup named 'TOTAL'.
vg = v.create('TOTAL')

# Add vdata to the vgroup
vg.insert(vd)
def mosaic(*arg,**args):
	"""Mosaic several HDF-EOS grid files into one output file.

	Usage: mosaic(file1[, file2, ...], endfile). With more than one input
	file the grids are merged onto a common extent; with a single input the
	result is a plain file copy. An optional ``log`` keyword supplies a
	logMod.Log instance.

	NOTE(review): ``Nom`` used in the log calls below is not defined inside
	this function — presumably a module-level constant; verify before
	relying on error logging. ``atribute`` is a project helper that parses
	the "StructMetadata.0" attribute.
	"""
	# This function will take files tranfered in *arg and will mosaic them together and produce a new file
	# mosaic(file1,[file2,file3,...],endfile)
	# If only file1 and endfile is given, the function will only produce a copy without any other modification
	try:
		log=args["log"]
	except KeyError:
		log=logMod.Log("",nolog=True)
	if len(arg)>2:
		lfile = arg[:-1]				# This is the list of the NAME of the files to merge
		newfilename = arg[-1]			# This is the final file NAME
		# Should eventually check if files exists and can be read ***IMPROVE***
		lfHDF = []						# This is the list of the FILES to merge
		latt = []							# This is the list of the ATTRIBUTE "StructMetadata.0" of the files
		for fil in lfile:
			try:
				a=SD(fil,SDC.READ)
			except TypeError:
				# Some SD builds require byte paths; retry with ascii-encoded name.
				a=SD(fil.encode('ascii','ignore'),SDC.READ)
			lfHDF.append(a)
			latt.append(atribute(lfHDF[-1].attributes()["StructMetadata.0"],fil,dsi=[0,]))
			
		
		
		## Listing all the GRIDS that the new file will have
		gridlist = []						# This is the list of GRIDS to include in the final file

		for attOfF in latt:
			# Should check if any grid ***IMPROVE***
			gridlist += attOfF.listgridname()[1]		# listgridname return a list of all the grids name

		# remove double entry
		gridlist = list(set(gridlist))


		## Listing all the DATASETS that the new file will have
		dslist = []						# This is the list of DATASETS to include in the final file
		for attOfF in latt:
			# Should check if any grid ***IMPROVE***
			# NOTE(review): "=" (not "+=") keeps only the LAST file's dataset
			# list — presumably all files share the same datasets; confirm.
			dslist = attOfF.orderedDS()
			
		# remove double entry
		# dslist = list(set(dslist))


		
		## Validation of commoun information
###############################################################################
# Some informations have to be the same for each file or else no mosaic can   #
# be made for exemple two files with not the same projection type can't be    #
# merged together. ***IMPROVE*** Maybe in the future we could transform file  #
# so that they have the same projection or some other thing.                  #
###############################################################################

		# List of parameter to check to insure that they are the same
		paramMustSim = ["Projection","ProjParams","SphereCode"]
		# Dictionary that will keep all the informations about every file
		paramMustSimDict = {}

		for grid in gridlist:
			# Verification of a grid

			first = True			# Variable that will enable the construction of the dict
			paramdict = {}		# Dictionary that keep the actual value that have to be the same
			
			for attOfF in latt:
				# Verification of a file
				bigG = attOfF.getgridbyname(grid)		# Getting all the attributes in the grid of a file
				if bigG is not None:
					# If the grid exists in that file
					if first :
						# If this is the first time that a file is check for that grid
						first = False
						for p in paramMustSim:
							# Checking every parameters that must be the same
							paramdict[p] = bigG.variable[p]
							
						# Validation of same Dtype for each datafield
						go = bigG.GROUP["DataField"].OBJECT
						for r in go:
							paramdict[go[r].variable["DataFieldName"]]=go[r].variable["DataType"]
					else:
						# If it's not the first time that a file is check for that grid
						for p in paramMustSim:
							# Checking every parameters that must be the same
							if not paramdict[p]==bigG.variable[p]:
								# Stop merging and return error ***IMPROVE*** 
								# Maybe do only the things that can be done ***IMPROVE***
								log.log('e',Nom,"Error dataset are not compatible")
								
						# Validation of same Dtype for each datafield
						go=bigG.GROUP["DataField"].OBJECT
						for r in go:
							if not paramdict[go[r].variable["DataFieldName"]]==go[r].variable["DataType"]:
								# Stop merging and return error ***IMPROVE*** 
								# Maybe do only the things that can be done ***IMPROVE***
								log.log('e',Nom,"Error dataset are not compatible")
								
			# Keep all this info for later it's going to be useful
			paramMustSimDict[grid]=paramdict
				
				



		## Determination of new informations
###############################################################################
# Some properties have to be calculated in order to merge. This section is    #
# doing just that                                                             #
###############################################################################

		gridResolX={}			# Getting the RESOLUTION in the X direction for each grid
		gridResolY={}			# Getting the RESOLUTION in the Y direction for each grid
		extremeup={}			# Getting the UPPER coordinates for each grid
		extremedown={}			# Getting the LOWEST coordinates for each grid
		extremeleft={}			# Getting the LEFTMOST coordinates for each grid
		extremeright={}			# Getting the RIGHTMOST coordinates for each grid
		gridDimX={}				# Getting the DIMENSIONS of X direction for each grid
		gridDimY={}				# Getting the DIMENSIONS of Y direction for each grid
		NoValueDS={}			# Getting the fill value of each dataset
		dtypeDS={}				# Getting the DTYPE for each dataset
		dstogrid={}				# Knowing wich is the GRID for each dataset
		filGridULC={}			# Getting the upper left corner of each file for each grid

		for grid in gridlist:
			# For each grid
			filGridULC[grid]={}			# Adding a dictionary for each grid that will contain information on every file
			for attOfF in latt:
				### Determination of resolution of each grid
				# ***IMPROVE*** Should check if bigd is none
				bigG=attOfF.getgridbyname(grid)				# Getting all the attributes in the grid of a file
				
				# Get extreme grid point
				# NOTE(review): eval() on metadata strings — trusted input only.
				ulp=eval(bigG.variable["UpperLeftPointMtrs"])
				lrp=eval(bigG.variable["LowerRightMtrs"])
				
				# Get grid dimmension
				dimx=int(bigG.variable["XDim"])
				dimy=int(bigG.variable["YDim"])
				
				# Calculate grid resolution
				gridResolX[grid]=(lrp[0]-ulp[0])/dimx
				gridResolY[grid]=(ulp[1]-lrp[1])/dimy
				
				### Determination of new extreme coordinates for each grid
				# up
				try:
					if extremeup[grid]< ulp[1]:
						extremeup[grid]=ulp[1]
				except KeyError:
					extremeup[grid]=ulp[1]
				# down
				try:
					if extremedown[grid]> lrp[1]:
						extremedown[grid]=lrp[1]
				except KeyError:
					extremedown[grid]=lrp[1]
				# left
				try:
					if extremeleft[grid]> ulp[0]:
						extremeleft[grid]=ulp[0]
				except KeyError:
					extremeleft[grid]=ulp[0]
				# right
				try:
					if extremeright[grid]< lrp[0]:
						extremeright[grid]=lrp[0]
				except KeyError:
					extremeright[grid]=lrp[0]
				### Detetermination of dataset to grid name
				if bigG is not None:
					go=bigG.GROUP["DataField"].OBJECT
					for r in go:
						dstogrid[ go[r].variable["DataFieldName"] ] = grid
				## Determination of ULC for each grid in each file
				filGridULC[grid][attOfF.name] = ulp
			## determination of new dimension for each grid
			gridDimY[grid] = int((extremeup[grid]-extremedown[grid])/gridResolY[grid])
			gridDimX[grid] = int((extremeright[grid]-extremeleft[grid])/gridResolX[grid])

		for ds in dslist:
			# For each dataset
			for sd in lfHDF:
				# For each hdf file
				
				# Try opening dataset
				try:
					sds = sd.select(eval(ds))
					# Get fill value
					NoValueDS[ds] = sds.getfillvalue()
					# Get dtype
					dtypeDS[ds] = sds.info()[3]
				# NOTE(review): bare except hides real errors; only "missing
				# dataset" is expected here.
				except:
					log.log('e',Nom,"no dataset")



		## Start creating new file
###############################################################################
# This is the actual part were stuf appens                                    #
###############################################################################

		# This part is the same for every file in any circumstances
		########## absolute ########################
		
		# Open new file
		try:
			hdf = HDF(newfilename, HC.WRITE  | HC.CREATE  |HC.TRUNC)
			sd  =  SD(newfilename, SDC.WRITE | SDC.CREATE )
		except TypeError:
			hdf = HDF(newfilename.encode('ascii','ignore'), HC.WRITE  | HC.CREATE  |HC.TRUNC)
			sd  =  SD(newfilename.encode('ascii','ignore'), SDC.WRITE | SDC.CREATE )
		
		v=hdf.vgstart()
		vg={}
		vg1={}
		vg2={}
		
		## rewrite the gridlist
		gridlist = []
		for ds in dslist:
			if dstogrid[ds] not in gridlist:
				gridlist.append(dstogrid[ds])
				
		for grid in gridlist:
			# Recreate the HDF-EOS vgroup layout: GRID -> Data Fields / Grid Attributes.
			vg[grid]=v.attach(-1,write=1)
			vg[grid]._class="GRID"
			vg[grid]._name=eval(grid)
			vg1[grid]=v.attach(-1,write=1)
			vg2[grid]=v.attach(-1,write=1)
			vg1[grid]._class="GRID Vgroup"
			vg1[grid]._name="Data Fields"
			vg2[grid]._class="GRID Vgroup"
			vg2[grid]._name="Grid Attributes"
			vg[grid].insert(vg1[grid])
			vg[grid].insert(vg2[grid])
		########## absolute ########################


		# Create dataset with the right size
		for ds in dslist:
			theGrid=dstogrid[ds]
			# Get grid name of data set
			sds = sd.create(eval(ds),dtypeDS[ds],(gridDimY[theGrid],gridDimX[theGrid]))
			
			# Set fill value
			fv=NoValueDS[ds]
			try:
				sds.setfillvalue(NoValueDS[ds])
			except OverflowError:
				log.log('e',Nom,"setfillvalue")
				sds.setfillvalue(0)
			## write real data
			for fil in range(len(latt)):
				try:
					# Determine were the data will be writen
					ulc = filGridULC[theGrid][latt[fil].name]
					# Determine the position on the grid
					y = (extremeup[theGrid]-ulc[1])/(extremeup[theGrid]-extremedown[theGrid])
					x = (ulc[0]-extremeleft[theGrid])/(extremeright[theGrid]-extremeleft[theGrid])
					y = int(y*gridDimY[theGrid])
					x = int(x*gridDimX[theGrid])
					# read data from files
					osds = lfHDF[fil].select(eval(ds))
					sh = osds[:].shape
					sds[y:y+sh[0],x:x+sh[1]] = osds[:]
					osds.endaccess()
				# NOTE(review): silently skips files missing this dataset.
				except:
					pass
			# Close sds
			vg1[dstogrid[ds]].add(HC.DFTAG_NDG,sds.ref())
			sds.endaccess()


    
		for g in vg1:
			vg1[g].detach()
			vg2[g].detach()
			vg[g].detach()

		# Create attribute table for the file
		attstr="GROUP=GridStructure\n"
		gridcount=1
		for gr in gridlist:
			# Start group grid
			attstr+="\tGROUP=GRID_%i\n"%gridcount
			# Add grid name
			attstr+="\t\tGridName=%s\n"%gr
			# Add dimention
			attstr+="\t\tXDim=%i\n"%gridDimX[gr]
			attstr+="\t\tYDim=%i\n"%gridDimY[gr]
			# Add UpperLeftPointMtrs
			attstr+="\t\tUpperLeftPointMtrs=(%f,%f)\n"%(extremeleft[gr],extremeup[gr])
			# Add lrp
			attstr+="\t\tLowerRightMtrs=(%f,%f)\n"%(extremeright[gr],extremedown[gr])
			# Add projection
			attstr+="\t\tProjection=%s\n"%paramMustSimDict[gr]["Projection"]
			# ProjParams
			attstr+="\t\tProjParams=%s\n"%paramMustSimDict[gr]["ProjParams"]
			# SphereCode
			attstr+="\t\tSphereCode=%s\n"%paramMustSimDict[gr]["SphereCode"]

			
			attstr+="""\t\tGROUP=Dimension
		\t\tEND_GROUP=Dimension
		\t\tGROUP=DataField\n"""

			## Add data sets
			# create list of ds for current grid
			lsdsgr=[]
			dsnum=1
			for ds in dslist:
				if dstogrid[ds] == gr:
					# Add object
					attstr+="\t\t\tOBJECT=DataField_%i\n"%dsnum
					# datafield name
					attstr+="\t\t\t\tDataFieldName=%s\n"%ds
					# datatype
					attstr+="\t\t\t\tDataType=%s\n"%paramMustSimDict[gr][ds]
					# dim
					attstr+='\t\t\t\tDimList=("YDim","XDim")\n'
					attstr+="\t\t\tEND_OBJECT=DataField_%i\n"%dsnum
					dsnum+=1
			attstr+="\t\tEND_GROUP=DataField\n"
			attstr+="""\t\tGROUP=MergedFields
		\t\tEND_GROUP=MergedFields\n"""
			attstr+="\tEND_GROUP=GRID_%i\n"%gridcount
			gridcount+=1
		attstr+="""END_GROUP=GridStructure
		GROUP=PointStructure
		END_GROUP=PointStructure
		END"""
		# adding attribute to new file
		att=sd.attr('StructMetadata.0')
		att.set(SDC.CHAR,attstr)
		sd.end()
		hdf.close()
		
		# This should return something somehow
	elif len(arg)>1:
		afile = arg[0]				# This is the list of the NAME of the files to merge
		newfilename = arg[1]			# This is the final file NAME
		# Create a copy
		from shutil import copyfile
		copyfile(afile,newfilename)
Example #9 (score: 0)
      # Unhandled tag
      else:
          print "unhandled tag,ref",tag,ref
    
    # Close vgroup
    vg.detach()

# Open HDF file in readonly mode.
# Scans every vgroup of the file named on the command line and describes it
# via the describevg() helper defined earlier in this example.
filename = sys.argv[1]
hdf = HDF(filename)

# Initialize the SD, V and VS interfaces on the file.
sd = SD(filename)
vs = hdf.vstart()
v  = hdf.vgstart()

# Scan all vgroups in the file.
ref = -1
while True:
    try:
        ref = v.getid(ref)
    # Python-3-compatible syntax (was "except HDF4Error,msg").
    except HDF4Error as msg:    # no more vgroup
        break
    describevg(ref)

# Terminate V, VS and SD interfaces.
v.end()
vs.end()
sd.end()
Example #10 (score: 0)
        # Unhandled tag
        else:
            print "unhandled tag,ref", tag, ref

    # Close vgroup
    vg.detach()


# Open HDF file in readonly mode.
# Scans every vgroup of the file named on the command line and describes it
# via the describevg() helper defined earlier in this example.
filename = sys.argv[1]
hdf = HDF(filename)

# Initialize the SD, V and VS interfaces on the file.
sd = SD(filename)
vs = hdf.vstart()
v = hdf.vgstart()

# Scan all vgroups in the file.
ref = -1
while True:
    try:
        ref = v.getid(ref)
    # Python-3-compatible syntax (was "except HDF4Error, msg").
    except HDF4Error as msg:  # no more vgroup
        break
    describevg(ref)

# Terminate V, VS and SD interfaces.
v.end()
vs.end()
sd.end()
Example #11 (score: 0)
    # Create a simple 3x3 float array.
    sds = sd.create(name, SDC.FLOAT32, (3,3))
    # Initialize array
    sds[:] = ((0,1,2),(3,4,5),(6,7,8))
    # "close" dataset.
    sds.endaccess()

# Create HDF file
# NOTE(review): vdatacreate() and sdscreate() are helper functions defined
# earlier in this example (their definitions are truncated in this dump).
filename = 'inventory.hdf'
hdf = HDF(filename, HC.WRITE|HC.CREATE)

# Initialize the SD, V and VS interfaces on the file.
sd = SD(filename, SDC.WRITE)  # SD interface
vs = hdf.vstart()             # vdata interface
v  = hdf.vgstart()            # vgroup interface

# Create vdata named 'INVENTORY'.
vdatacreate(vs, 'INVENTORY')
# Create dataset named "ARR_3x3"
sdscreate(sd, 'ARR_3x3')

# Attach the vdata and the dataset.
vd = vs.attach('INVENTORY')
sds = sd.select('ARR_3x3')

# Create vgroup named 'TOTAL'.
vg = v.create('TOTAL')

# Add vdata to the vgroup
vg.insert(vd)
def run(FILE_NAME):
    """Plot the 'Blue Radiance/RDQI' field of a MISR HDF4 file on a map.

    Reads the 3-D radiance dataset, shifts out the 2-bit RDQI flags, masks
    fill and flag values, applies a hard-coded scale factor, reads lat/lon
    from the companion AGP file (located via $HDFEOS_ZOO_DIR), and saves
    the plot as "<basename>.py.agp.png".
    """
    # Identify the data field.
    DATAFIELD_NAME = 'Blue Radiance/RDQI'

    hdf = SD(FILE_NAME, SDC.READ)

    # Read dataset.
    data3D = hdf.select(DATAFIELD_NAME)
    data = data3D[:,:,:]

    # Read attributes.
    attrs = data3D.attributes(full=1)
    fva=attrs["_FillValue"]
    _FillValue = fva[0]


    # Read geolocation dataset from another file.
    GEO_FILE_NAME = 'MISR_AM1_AGP_P117_F01_24.hdf'
    GEO_FILE_NAME = os.path.join(os.environ['HDFEOS_ZOO_DIR'], 
                                 GEO_FILE_NAME)

    hdf_geo = SD(GEO_FILE_NAME, SDC.READ)

    # Read geolocation dataset.
    lat3D = hdf_geo.select('GeoLatitude')
    lat = lat3D[:,:,:]

    lon3D = hdf_geo.select('GeoLongitude')
    lon = lon3D[:,:,:]
        

    # Read scale factor attribute.
    # NOTE(review): vgroup ref 8 is file-specific — confirm for other
    # MISR products before reuse.
    f = HDF(FILE_NAME, HC.READ)
    v = f.vgstart()
    vg = v.attach(8)

    # PyHDF cannot read attributes from Vgroup properly.
    # sfa = vg.attr('Scale Factor')
    # scale_factor = sfa.get()

    vg.detach()
    v.end()

    # Set it manually using HDFView.
    scale_factor = 0.047203224152326584



    # We need to shift bits for "RDQI" to get "Blue Band "only. 
    # See the page 84 of "MISR Data Products Specifications (rev. S)".
    # The document is available at [1].
    datas = np.right_shift(data, 2);
    dataf = datas.astype(np.double)

    # Apply the fill value.
    dataf[data == _FillValue] = np.nan

    # Filter out values (> 16376) used for "Flag Data".
    # See Table 1.2 in "Level 1 Radiance Scaling and Conditioning
    # Algorithm  Theoretical Basis" document [2].
    dataf[datas > 16376] = np.nan
    datam = np.ma.masked_array(dataf, mask=np.isnan(dataf))

    # Apply scale facotr.
    datam = scale_factor * datam;

    # Collapse the per-block dimension so the data can be plotted as one 2-D grid.
    nblocks = data.shape[0]
    ydimsize = data.shape[1]
    xdimsize = data.shape[2]

    datam = datam.reshape(nblocks*ydimsize, xdimsize)
    lat = lat.reshape(nblocks*ydimsize, xdimsize)
    lon = lon.reshape(nblocks*ydimsize, xdimsize)


    # Set the limit for the plot.
    m = Basemap(projection='cyl', resolution='h',
                llcrnrlat=np.min(lat), urcrnrlat = np.max(lat),
                llcrnrlon=np.min(lon), urcrnrlon = np.max(lon))
    m.drawcoastlines(linewidth=0.5)
    m.drawparallels(np.arange(-90., 120., 30.), labels=[1, 0, 0, 0])
    m.drawmeridians(np.arange(-180., 181., 45.), labels=[0, 0, 0, 1])
    m.pcolormesh(lon, lat, datam, latlon=True)
    cb = m.colorbar()
    cb.set_label(r'$Wm^{-2}sr^{-1}{\mu}m^{-1}$')

    basename = os.path.basename(FILE_NAME)
    plt.title('{0}\n{1}'.format(basename, 'Blue Radiance'))
    fig = plt.gcf()
    # plt.show()
    pngfile = "{0}.py.agp.png".format(basename)
    fig.savefig(pngfile)
Example #13 (score: 0)
def compress(*arg, **karg):
    # This function compress the file with a given compress parameter
    # compress(file,comression parameter)
    oldfile = arg[0]
    newfilename = arg[0] + "t"

    try:
        log = karg["log"]
    except KeyError:
        log = logMod.Log("", nolog=True)

    keepall = True

    # Should eventually check if files exists and can be read ***IMPROVE***
    try:
        HDFF = SD(oldfile, SDC.READ)  # This is the list of the FILES to merge
    except TypeError:
        HDFF = SD(oldfile.encode('ascii', 'ignore'),
                  SDC.READ)  # This is the list of the FILES to merge
    # This is the list of the ATTRIBUTE "StructMetadata.0" of the files
    attOfF = atribute(HDFF.attributes()["StructMetadata.0"],
                      oldfile,
                      dsi=[
                          0,
                      ])

    ## Listing all the GRIDS that the new file will have

    gridlist = attOfF.listgridname()[
        1]  # listgridname return a list of all the grids name

    ## Listing all the DATASETS that the new file will have

    # Should check if any grid ***IMPROVE***
    dslist = attOfF.orderedDS()

    ## Validation of commoun information
    ###############################################################################
    # Some informations have to be the same for each file or else no mosaic can   #
    # be made for exemple two files with not the same projection type can't be    #
    # merged together. ***IMPROVE*** Maybe in the future we could transform file  #
    # so that they have the same projection or some other thing.                  #
    ###############################################################################

    # List of parameter to check to insure that they are the same
    paramMustSim = ["Projection", "ProjParams", "SphereCode"]
    # Dictionary that will keep all the informations about every file
    paramMustSimDict = {}

    for grid in gridlist:
        # Verification of a grid

        paramdict = {
        }  # Dictionary that keep the actual value that have to be the same

        bigG = attOfF.getgridbyname(
            grid)  # Getting all the attributes in the grid of a file
        if bigG is not None:
            # If the grid exists in that file

            for p in paramMustSim:
                # Checking every parameters that must be the same
                paramdict[p] = bigG.variable[p]

            # Validation of same Dtype for each datafield
            go = bigG.GROUP["DataField"].OBJECT
            for r in go:
                paramdict[go[r].variable["DataFieldName"]] = go[r].variable[
                    "DataType"]

        # Keep all this info for later it's going to be useful
        paramMustSimDict[grid] = paramdict

# LAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA

## Determination of new informations
###############################################################################
# Some properties have to be calculated in order to merge. This section is    #
# doing just that                                                             #
###############################################################################

    gridResolX = {}  # Getting the RESOLUTION in the X direction for each grid
    gridResolY = {}  # Getting the RESOLUTION in the Y direction for each grid
    extremeup = {}  # Getting the UPPER coordinates for each grid
    extremedown = {}  # Getting the LOWEST coordinates for each grid
    extremeleft = {}  # Getting the LEFTMOST coordinates for each grid
    extremeright = {}  # Getting the RIGHTMOST coordinates for each grid
    gridDimX = {}  # Getting the DIMENSIONS of X direction for each grid
    gridDimY = {}  # Getting the DIMENSIONS of Y direction for each grid
    NoValueDS = {}  # Getting the fill value of each dataset
    dtypeDS = {}  # Getting the DTYPE for each dataset
    dstogrid = {}  # Knowing wich is the GRID for each dataset
    filGridULC = {}  # Getting the upper left corner of each file for each grid

    for grid in gridlist:
        # For each grid
        filGridULC[grid] = {
        }  # Adding a dictionary for each grid that will contain information on every file

        ### Determination of resolution of each grid
        # ***IMPROVE*** Should check if bigd is none

        bigG = attOfF.getgridbyname(
            grid)  # Getting all the attributes in the grid of a file

        # Get extreme grid point
        ulp = eval(bigG.variable["UpperLeftPointMtrs"])
        lrp = eval(bigG.variable["LowerRightMtrs"])

        # Get grid dimmension (PIXEL)
        dimx = int(bigG.variable["XDim"])
        dimy = int(bigG.variable["YDim"])

        # Calculate grid resolution
        gridResolX[grid] = (lrp[0] - ulp[0]) / dimx
        gridResolY[grid] = (ulp[1] - lrp[1]) / dimy

        ### Determination of new extreme coordinates for each grid
        # up
        extremeup[grid] = ulp[1]
        # down
        extremedown[grid] = lrp[1]
        # left
        extremeleft[grid] = ulp[0]
        # right
        extremeright[grid] = lrp[0]
        #print("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",int(LOC - UPC),int(RMC - LMC),"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n")

        ### Detetermination of dataset to grid name
        if bigG is not None:
            go = bigG.GROUP["DataField"].OBJECT
            for r in go:
                dstogrid[go[r].variable["DataFieldName"]] = grid

        ### Determination of new extreme coordinates for each grid

        ## Determination of ULC for each grid in each file
        filGridULC[grid][attOfF.name] = ulp
        ## determination of new dimension for each grid (pixel)
        gridDimY[grid] = int(dimy)
        gridDimX[grid] = int(dimx)

    for ds in dslist:
        # For each dataset

        # For each hdf file

        # Try opening dataset
        try:
            sds = HDFF.select(eval(ds))
            # Get fill value
            NoValueDS[ds] = sds.getfillvalue()
            # Get dtype
            dtypeDS[ds] = sds.info()[3]
        except:
            log.log('e', Nom, "no dataset")

    ## Start creating new file


###############################################################################
# This is the actual part were stuf appens                                    #
###############################################################################

# This part is the same for every file in any circumstances
########## absolute ########################

# Open new file
    try:
        hdf = HDF(newfilename, HC.WRITE | HC.CREATE | HC.TRUNC)
        sd = SD(newfilename, SDC.WRITE | SDC.CREATE)
    except TypeError:
        hdf = HDF(newfilename.encode('ascii', 'ignore'),
                  HC.WRITE | HC.CREATE | HC.TRUNC)
        sd = SD(newfilename.encode('ascii', 'ignore'), SDC.WRITE | SDC.CREATE)
    v = hdf.vgstart()
    vg = {}
    vg1 = {}
    vg2 = {}

    ## rewrite the gridlist
    gridlist = []
    for ds in dslist:
        if dstogrid[ds] not in gridlist:
            gridlist.append(dstogrid[ds])

    for grid in gridlist:
        vg[grid] = v.attach(-1, write=1)
        vg[grid]._class = "GRID"
        vg[grid]._name = eval(grid)
        vg1[grid] = v.attach(-1, write=1)
        vg2[grid] = v.attach(-1, write=1)
        vg1[grid]._class = "GRID Vgroup"
        vg1[grid]._name = "Data Fields"
        vg2[grid]._class = "GRID Vgroup"
        vg2[grid]._name = "Grid Attributes"
        vg[grid].insert(vg1[grid])
        vg[grid].insert(vg2[grid])
    ########## absolute ########################

    # Create dataset with the right size
    for ds in dslist:
        theGrid = dstogrid[ds]
        # Get grid name of data set
        # Make sure that the set is asked to be there
        sds = sd.create(eval(ds), dtypeDS[ds],
                        (gridDimY[theGrid], gridDimX[theGrid]))

        # Set fill value
        fv = NoValueDS[ds]

        try:
            sds.setfillvalue(NoValueDS[ds])
        except OverflowError:
            log.log('e', Nom, "dataset fillvaluet")
            sds.setfillvalue(0)
        # Set compression
        try:
            sds.setcompress(*arg[1])  # args depend on compression type
        except HDF4Error as msg:
            log.log('e', Nom, "Error compressing the dataset")
            sds_id.endaccess()
            sd_id.end()
            return
        ## write real data

        try:
            # read data from files
            osds = HDFF.select(eval(ds))
            # And write it in the new sds
            sds[:, :] = osds[:, :]

            osds.endaccess()
        except:
            pass
        # Close sds
        vg1[dstogrid[ds]].add(HC.DFTAG_NDG, sds.ref())
        sds.endaccess()

    for g in vg1:
        vg1[g].detach()
        vg2[g].detach()
        vg[g].detach()

    # Create attribute table for the file
    attstr = "GROUP=GridStructure\n"
    gridcount = 1
    for gr in gridlist:
        # Start group grid
        attstr += "\tGROUP=GRID_%i\n" % gridcount
        # Add grid name
        attstr += "\t\tGridName=%s\n" % gr
        # Add dimention
        attstr += "\t\tXDim=%i\n" % gridDimX[gr]
        attstr += "\t\tYDim=%i\n" % gridDimY[gr]
        # Add UpperLeftPointMtrs
        attstr += "\t\tUpperLeftPointMtrs=(%f,%f)\n" % (extremeleft[gr],
                                                        extremeup[gr])
        # Add lrp
        attstr += "\t\tLowerRightMtrs=(%f,%f)\n" % (extremeright[gr],
                                                    extremedown[gr])
        # Add projection
        attstr += "\t\tProjection=%s\n" % paramMustSimDict[gr]["Projection"]
        # ProjParams
        attstr += "\t\tProjParams=%s\n" % paramMustSimDict[gr]["ProjParams"]
        # SphereCode
        attstr += "\t\tSphereCode=%s\n" % paramMustSimDict[gr]["SphereCode"]

        attstr += """\t\tGROUP=Dimension
	\t\tEND_GROUP=Dimension
	\t\tGROUP=DataField\n"""

        ## Add data sets
        # create list of ds for current grid
        lsdsgr = []
        dsnum = 1
        for ds in dslist:
            if dstogrid[ds] == gr:
                # Add object
                attstr += "\t\t\tOBJECT=DataField_%i\n" % dsnum
                # datafield name
                attstr += "\t\t\t\tDataFieldName=%s\n" % ds
                # datatype
                attstr += "\t\t\t\tDataType=%s\n" % paramMustSimDict[gr][ds]
                # dim
                attstr += '\t\t\t\tDimList=("YDim","XDim")\n'
                attstr += "\t\t\tEND_OBJECT=DataField_%i\n" % dsnum
                dsnum += 1
        attstr += "\t\tEND_GROUP=DataField\n"
        attstr += """\t\tGROUP=MergedFields
	\t\tEND_GROUP=MergedFields\n"""
        attstr += "\tEND_GROUP=GRID_%i\n" % gridcount
        gridcount += 1
    attstr += """END_GROUP=GridStructure
	GROUP=PointStructure
	END_GROUP=PointStructure
	END"""
    # adding attribute to new file
    att = sd.attr('StructMetadata.0')
    att.set(SDC.CHAR, attstr)
    sd.end()
    hdf.close()

    # delete old file
    os.remove(oldfile)
    # rename new file to old file name
    os.rename(newfilename, oldfile)
Exemple #14
0
    def hdfget(self, id=False, section=False, group=False):
        """Collect vdata records from every HDF4 file in ``self.dirLst``.

        For each file, scan the top-level vgroups; inside the vgroup whose
        name equals *section*, read every vdata whose name equals *group*
        and append its fields, record count and data (as a numpy array) to
        ``self.HDF`` under the keys ``id + '_fields'``, ``id + '_nrecs'``
        and ``id + '_data'``.

        Parameters
        ----------
        id : str
            Key prefix for ``self.HDF``.  NOTE(review): the default
            ``False`` would crash on ``id + '_fields'`` — callers
            presumably always pass a string; confirm before tightening.
        section : str
            Name of the vgroup to scan.
        group : str
            Name of the vdata(s) to extract.

        Fixes over the previous version: each attached vgroup is detached,
        and the V/VS/SD/HDF interfaces are closed for every file, so
        handles are no longer leaked across a long directory list; the
        HDF4Error catch is narrowed to the end-of-vgroups condition
        instead of silently swallowing any error in the scan body.
        """
        for drs in self.dirLst[:]:
            hdf = HDF(drs, HC.READ)
            sd = SD(drs)
            vs = hdf.vstart()
            v = hdf.vgstart()
            try:
                # Walk all top-level vgroups.  v.getid raises HDF4Error
                # when there are no more vgroups — that is the loop exit.
                ref = -1
                while True:
                    try:
                        ref = v.getid(ref)
                    except HDF4Error:  # no more vgroups in this file
                        break
                    vg = v.attach(ref)
                    try:
                        if vg._name == section:
                            for tag, ref2 in vg.tagrefs():
                                if tag == HC.DFTAG_VH:
                                    # Vdata member: keep it if its name matches.
                                    vd = vs.attach(ref2)
                                    nrecs, intmode, fields, size, name = \
                                        vd.inquire()
                                    if vd._name == group:
                                        self.HDF.setdefault(
                                            id + '_fields', []).append(fields)
                                        self.HDF.setdefault(
                                            id + '_nrecs', []).append(nrecs)
                                        self.HDF.setdefault(
                                            id + '_data', []).append(
                                                np.asarray(vd[:]))
                                    vd.detach()
                                elif tag == HC.DFTAG_NDG:
                                    # SDS member: touch and release (not
                                    # collected, kept for parity with the
                                    # original scan).
                                    sds = sd.select(sd.reftoindex(ref2))
                                    sds.info()
                                    sds.endaccess()
                                elif tag == HC.DFTAG_VG:
                                    # Nested vgroup: attach/detach only.
                                    vg0 = v.attach(ref2)
                                    vg0.detach()
                                else:
                                    print("unhandled tag,ref", tag, ref2)
                    finally:
                        vg.detach()  # previously leaked on every iteration
            finally:
                # Always release the per-file interfaces (previously leaked).
                v.end()
                vs.end()
                sd.end()
                hdf.close()
Exemple #15
0
def PFlux(year1, month1, day1):
    """Return the proton flux averaged over the 90 days ending at the
    given date, from the ACE SWEPAM daily-average HDF4 file.

    The flux for a day is ``pdensity * speed``; days with non-positive
    density or speed are skipped.

    Parameters
    ----------
    year1, month1, day1 : int
        Calendar date of the last day of the 90-day window.

    Returns
    -------
    float or None
        The average flux, or ``None`` when the date is outside the data
        range or no valid samples exist in the window (the previous
        version raised NameError in those branches by returning an
        undefined ``avePF``).
    """
    filename = external_dir + "swepam_data_1day.hdf"
    hdf = HDF(filename)

    # Initialize the V/VS interfaces on the HDF file.
    v = hdf.vgstart()
    vs = hdf.vstart()

    # Attach the first top-level vgroup.
    vg = v.attach(v.getid(-1))

    # The record table is the last member of the vgroup.  The previous
    # version relied on leftover loop variables from a dead inspection
    # loop (and referenced an undefined `sd`) to get the same ref.
    tag, ref = vg.tagrefs()[-1]
    vd = vs.attach(ref)
    nrecs, intmode, fields, size, name = vd.inquire()
    alldata = vd.read(nrecs)
    vd.detach()
    vg.detach()
    vs.end()  # previously never called
    v.end()
    hdf.close()

    data = np.array(alldata)

    # Columns of the swepam daily table (presumed layout — matches the
    # original indexing; confirm against the SWEPAM file description).
    year = data[:, 0]
    day = data[:, 1]          # day of year
    pdensity = data[:, 8]     # proton density
    speed = data[:, 11]       # solar-wind speed

    start = datetime.date(int(year[0]), 1,
                          1) + datetime.timedelta(int(day[0]) - 1)

    # Offset (in days) of the requested date from the first record.
    delta1 = datetime.date(year1, month1, day1) - start
    index = delta1.days
    if index >= (nrecs - 1):
        print("ERROR: the input time is too new")
        return None
    if index < 0:
        print("ERROR: the input time is too old")
        return None

    avePF = 0.0
    num = 0
    for i in range(0, 90):
        j = index - i
        if j < 0:
            print("ERROR: the index is out of the array")
            break
        if pdensity[j] > 0 and speed[j] > 0:
            avePF += pdensity[j] * speed[j]
            num += 1
    if num == 0:
        # Guard against ZeroDivisionError when no valid samples exist.
        print("ERROR: no valid samples in the 90-day window")
        return None
    return avePF / num