Example #1
def compress(*args, **kwargs):
    # Compress every dataset in the file with the given compression parameters.
    # usage: compress(file, compression_parameters)

    fileName = args[0]

    try:
        log = kwargs["log"]
    except KeyError:
        log = logMod.Log("", nolog=True)

    # Open HDF file
    try:
        sd_id = SD(fileName, SDC.WRITE)
    except TypeError:
        sd_id = SD(fileName.encode('ascii', 'ignore'), SDC.WRITE)

    # Open every dataset
    for dsname in list(sd_id.datasets().keys()):
        sds_id = sd_id.select(dsname)
        data = np.copy(sds_id[:])
        sds_id[:] = 0
        try:
            sds_id.setcompress(*args[1])  # args depend on compression type
        except HDF4Error as msg:
            log.log('e', Nom, "Error compressing the dataset")  # Nom is defined elsewhere in the source module
            sds_id.endaccess()
            sd_id.end()
            return
        sds_id[:] = data
        sds_id.endaccess()

    # Close hdf file to flush compressed data.
    sd_id.end()
    return None
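A minimal usage sketch (the file name is hypothetical; the second positional argument is unpacked into SDS.setcompress, so for DEFLATE it is (compression type, level)):

from pyhdf.SD import SDC

# Hypothetical call: DEFLATE-compress every dataset at level 6.
compress('granule.hdf', (SDC.COMP_DEFLATE, 6))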
Example #2
def Open_Datasets(rad_file,tau_file):
    
    f1 = SD.SD('Rad/{0}'.format(rad_file))
    g1 = f1.select('EV_250_RefSB')
    Radiance_620_d = g1.get()[0,:,:]
    scale = g1.attributes()['reflectance_scales'][0]
    offset = g1.attributes()['reflectance_offsets'][0]
    Radiance_620 = scale*(Radiance_620_d-offset)
    
    LAT = f1.select('Latitude').get()
    LON = f1.select('Longitude').get()
    
    
    f2 = SD.SD('Tau/{0}'.format(tau_file)) 
    COT = access_dataset('Cloud_Optical_Thickness_37',f2) 
    COTERR = access_dataset('Cloud_Optical_Thickness_Uncertainty_37',f2) #Percentage Uncertainty in COT_data
    SZA = access_dataset('Solar_Zenith',f2)
    SA = access_dataset('Solar_Azimuth',f2)
    VZA = access_dataset('Sensor_Zenith',f2)
    VA = access_dataset('Sensor_Azimuth',f2)
    REFF = access_dataset('Cloud_Effective_Radius_37',f2) #check - tick
    CTH = access_dataset('Cloud_Top_Height',f2) #CHECK - tick
    CWV = access_dataset('Above_Cloud_Water_Vapor_094',f2)
    #CloudMask = access_dataset('Cloud_Mask_1km',f2)
    
    RAZ = interpolation_fix(SA)-interpolation_fix(VA)
    
    return [VZA,LAT,LON,COT,COTERR,SZA,RAZ,REFF,CTH,CWV,Radiance_620]
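access_dataset and interpolation_fix are helpers defined elsewhere in the source project. A plausible minimal sketch of access_dataset, assuming it mirrors the scale/offset handling of the EV_250_RefSB block above (the attribute names are assumptions):

def access_dataset(name, f):
    # Hypothetical helper: select a dataset and apply scale/offset attributes if present.
    sds = f.select(name)
    data = sds.get().astype('float64')
    attrs = sds.attributes()
    scale = attrs.get('scale_factor', 1.0)
    offset = attrs.get('add_offset', 0.0)
    return scale * (data - offset)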
Example #3
 def __init__(self, path, mode='r'):
     if mode == 'r':
         self._hdf = SD.SD(path, SD.SDC.READ)
     elif mode == 'w':
         self._hdf = SD.SD(path, SD.SDC.WRITE)
     else:
         raise ValueError("Bad mode=%s" % mode)
     self._datasets = list(self._hdf.datasets().keys())
Example #4
def proc(indir, outdir, inname, outname):
    path = indir + "/" + inname
    hdf = HDF(path)
    sd = SD(path)
    vs = hdf.vstart()
    v = hdf.vgstart()
    mod_vg = v.attach("MOD_Grid_monthly_CMG_VI")
    vg_members = mod_vg.tagrefs()
    # print vg_members
    tag, ref = vg_members[0]
    # print tag, ref
    vg0 = v.attach(ref)
    # print vg0._name
    tagrefs = vg0.tagrefs()
    # print tagrefs
    for tag, ref in tagrefs:
        if tag == HC.DFTAG_NDG:
            sds = sd.select(sd.reftoindex(ref))
            name = sds.info()[0]
            # print name
            if name == "CMG 0.05 Deg Monthly NDVI":
                sd = SD(path)
                sds = sd.select(sd.reftoindex(ref))
                ndvi = np.float64(sds.get())
                sds.endaccess()
            elif name == "CMG 0.05 Deg Monthly EVI":
                sd = SD(path)
                sds = sd.select(sd.reftoindex(ref))
                evi = np.float64(sds.get())
                sds.endaccess()
    sd.end()
    v.end()
    vs.end()
    hdf.close()

    data = ndvi
    name = outdir + "/" + outname + ".tif"
    cols = 7200
    rows = 3600
    originX = -180.0
    originY = 90.0
    pixelWidth = 0.05
    pixelHeight = -0.05

    driver = gdal.GetDriverByName('GTiff')
    newRasterfn = name
    outRaster = driver.Create(newRasterfn, cols, rows, 1, gdal.GDT_Float32)
    outRaster.SetGeoTransform(
        (originX, pixelWidth, 0, originY, 0, pixelHeight))
    outband = outRaster.GetRasterBand(1)
    outband.WriteArray(data)
    outRasterSRS = osr.SpatialReference()
    outRasterSRS.ImportFromEPSG(4326)
    outRaster.SetProjection(outRasterSRS.ExportToWkt())
    outband.FlushCache()
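A hypothetical invocation, assuming a MOD13C2 (0.05-degree monthly CMG VI) granule; the paths and file names are illustrative:

proc('/data/in', '/data/out', 'MOD13C2.A2020001.061.hdf', 'ndvi_202001')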
Example #5
def get_data(filename, fieldname, SD_field_rawData):
    '''
    INPUT
          filename:         string - hdf file filepath
          fieldname:        string - name of desired dataset
          SD_field_rawData: int    - 0 returns the SD, 1 returns the field, 2 returns the raw data
    RETURN SD / field / raw dataset
    '''
    if SD_field_rawData==0:
        return SD(filename) #science data set
    elif SD_field_rawData==1:
        return SD(filename).select(fieldname) #field
    else:
        return SD(filename).select(fieldname).get() #raw data
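Hypothetical usage of the three modes ('Latitude' is an illustrative field name):

sd_file = get_data('granule.hdf', '', 0)        # pyhdf SD handle
field = get_data('granule.hdf', 'Latitude', 1)  # SDS object
raw = get_data('granule.hdf', 'Latitude', 2)    # numpy array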
Example #6
def btd1(mod, rad):
    from pyhdf import SD
    f = SD.SD(mod)
    sds = f.select('Brightness_Temperature')
    # Note: the array shape here is (406, 270); it can differ depending on the file type.
    btd = np.repeat(np.repeat((sds.get()[:, 0:406, 0:270] + 15000) * 0.01, 5, axis=1), 5, axis=2).reshape(7, 2030, 1350)
    #data is over sampled
    #TODO include over sampling
    diff = btd[0] - btd[1]
    sds = f.select('cloud_top_temperature_1km')
    ctt = (sds.get() + 15000) * 0.01
    sds = f.select('Cloud_Phase_Optical_Properties')
    phase1 = sds.get()
    sds = f.select('Cloud_Phase_Infrared_1km')
    phase2 = sds.get()
    f = SD.SD(rad)
    lat=np.repeat(np.repeat(f.select('Latitude').get(),5,axis=0),5,axis=1)
    lon = np.repeat(np.repeat(f.select('Longitude').get(), 5, axis=0), 5, axis=1)

    sds = f.select('EV_250_Aggr1km_RefSB')

    red = (sds.get()[0]+sds.attributes()['reflectance_offsets'][0]) * sds.attributes()['reflectance_scales'][0]
    sds = f.select('EV_1KM_RefSB')
    _800 = (sds.get()[-1]+sds.attributes()['reflectance_offsets'][-1]) * sds.attributes()['reflectance_scales'][-1]

    blue = (sds.get()[2]+sds.attributes()['reflectance_offsets'][2]) * sds.attributes()['reflectance_scales'][2]
    grn = (sds.get()[3]+sds.attributes()['reflectance_offsets'][3]) * sds.attributes()['reflectance_scales'][3]
    sds=f.select('EV_500_Aggr1km_RefSB')
    _2105_band = (sds.get()[-1] + sds.attributes()['reflectance_offsets'][-1]) * sds.attributes()['reflectance_scales'][-1]
    _1600_band = (sds.get()[-2] + sds.attributes()['reflectance_offsets'][-2]) * sds.attributes()['reflectance_scales'][-2]
    _1200_band = (sds.get()[-3] + sds.attributes()['reflectance_offsets'][-3]) * sds.attributes()['reflectance_scales'][-3]

    # TODO: note in log book that the plots were updated with a linear filter to amplify colour (commented out below)
    colour=np.zeros([3,2030,1354])
    colour[0]=red
    colour[1]=grn
    colour[2]=blue
    #c=np.where(colour>0.15)
    #a1=0.15
    #a2=0.37
    #max_val=colour[:,:,:].max()
    #c2=(a2-a1)/(max_val-a1)
    #c1=(1-c2)*a1
    #colour[c]=c1+c2*colour[c]
    return colour, phase1,phase2,diff,_2105_band,_1600_band,_1200_band,_800,ctt,lat,lon
Example #7
def h4lookup(path, swath="Earth UV-2 Swath"):
    '''
    Only looks up datasets; vdata are ignored
    ("WavelengthReferenceColumn" is one such vdata).
    '''
    hdf = HDF(path)
    v = hdf.vgstart()
    s2_vg = v.attach(swath)
    geo_tag, geo_ref = s2_vg.tagrefs()[0]
    dat_tag, dat_ref = s2_vg.tagrefs()[1]
    s2_vg.detach()
    #--------------------------------------------
    # geolocation & data fields found
    #--------------------------------------------
    geo_vgs = v.attach(geo_ref); dat_vgs = v.attach(dat_ref)
    gvg_tagrefs = geo_vgs.tagrefs(); dvg_tagrefs = dat_vgs.tagrefs()
    geo_vgs.detach(); dat_vgs.detach()
    tagrefs_list = gvg_tagrefs + dvg_tagrefs
    refs_dict = {}
    #--------------------------------------------
    # create dict in which keys are names in hdf and values are refs
    #--------------------------------------------
    sd = SD(path)
    for tr in tagrefs_list:
        tag, ref = tr
        if tag == HC.DFTAG_NDG:
            sds = sd.select(sd.reftoindex(ref))
            refs_dict[sds.info()[0]] = ref
    sds.endaccess(); sd.end(); v.end(); hdf.close()
    return refs_dict
Example #8
def read_hdf4(file_name, array_name):
    """
    Read 2-D tomographic data from hdf4 file.
    Opens ``file_name`` and reads the contents
    of the array specified by ``array_name`` in
    the specified group of the HDF file.
    Parameters
    ----------
    file_name : str
    Input HDF file.
    array_name : str
    Name of the array to be read at exchange group.
    x_start, x_end, x_step : scalar, optional
    Values of the start, end and step of the
    slicing for the whole ndarray.
    y_start, y_end, y_step : scalar, optional
    Values of the start, end and step of the
    slicing for the whole ndarray.
    Returns
    -------
    out : ndarray
    Returns the data as a matrix.
    """
    # Read data from file.
    f = SD.SD(file_name)
    sds = f.select(array_name)
    hdfdata = sds.get()
    sds.endaccess()
    f.end()

    return hdfdata
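Hypothetical usage (file and array names are illustrative):

proj = read_hdf4('sinogram.hdf', 'projections')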
Example #9
 def __init__(self, file_path):
     self.file_path = file_path
     data_file = hdf.SD(file_path, hdf.SDC.READ)
     lat_data = pd.DataFrame(data_file.select('Latitude').get())
     long_data = pd.DataFrame(data_file.select('Longitude').get())
     self.bounding_box = self.__calculate_bounds__(lat_data, long_data)
     self.timestamp = dt.datetime.now()
Example #10
def btd2(mod):
    from pyhdf import SD
    f = SD.SD(mod)
    sds = f.select('Brightness_Temperature')
    btd = np.repeat(np.repeat((sds.get()[:, 0:406, 0:270] + 15000) * 0.01, 5, axis=1), 5, axis=2).reshape(7, 2030, 1350)
    diff = btd[0] - btd[1]
    sds = f.select('cloud_top_temperature_1km')
    ctt = (sds.get() + 15000) * 0.01
    sds = f.select('Cloud_Phase_Optical_Properties')
    phase1 = sds.get()
    sds = f.select('Cloud_Phase_Infrared_1km')
    phase2 = sds.get()
    # f=SD.SD(rad)

    # sds=f.select('EV_250_Aggr1km_RefSB')

    # band_600_r=sds.get()[0]*sds.attributes()['reflectance_scales'][0]
    # band_800_r=sds.get()[1]*sds.attributes()['reflectance_scales'][1]

    # sds=f.select('EV_500_Aggr1km_RefSB')

    # plot the brightness temperature differences too.
    # sds=f.select('EV_500_Aggr1km_RefSB')
    # band_6_r=sds.get()[-1]*sds.attributes()['reflectance_scales'][-1]

    # sds=f.select('EV_1KM_Emissive')
    # band_7_r=(sds.get()[-5]+sds.attributes()['radiance_offsets'][-5])*sds.attributes()['radiance_scales'][-5]
    return diff, ctt
Example #11
def btd(mod):
    from pyhdf import SD
    f=SD.SD(mod)
    sds=f.select('Brightness_Temperature')
    btd=np.repeat(np.repeat((sds.get()[:,0:406,:]+15000)*0.01,5,axis=1),5,axis=2).reshape(7,2030,15)
    """find a way to improve the phase discrimination at 1km"""
    diff=btd[0]-btd[1]
    sds=f.select('cloud_top_temperature_1km')
    ctt=(sds.get()+15000)*0.01
    sds=f.select('Cloud_Phase_Optical_Properties')
    phase1=sds.get()
    sds=f.select('Cloud_Phase_Infrared_1km')
    phase2=sds.get()
    sds=f.select('Latitude')
    lat=np.repeat(np.repeat(sds.get(),5,axis=0),5,axis=1)
    sds=f.select('Longitude')
    lon=np.repeat(np.repeat(sds.get(),5,axis=0),5,axis=1)
    sds=f.select('cloud_top_height_1km')
    cth=sds.get()
    sds=f.select('Cloud_Optical_Thickness')
    tau=sds.get()*0.009999999776482582  # dataset scale factor (~0.01)
    

    #sds=f.select('EV_1KM_Emissive')
    #band_7_r=(sds.get()[-5]+sds.attributes()['radiance_offsets'][-5])*sds.attributes()['radiance_scales'][-5]
    return diff,ctt,phase1,phase2,cth,lat,lon,tau
Example #12
File: hdf2txt.py  Project: sjl421/code-2
def hdf2txt(hdfFile, txtFile):
    try:
        hdf = SD(hdfFile)        # open the HDF file
        attr = hdf.attributes()  # get global attribute dictionary
        dsets = hdf.datasets()   # get dataset dictionary
    except HDF4Error as msg:
        print("HDF4Error", msg)
Example #13
def get_l2hdf_prod(ifile):
#---------------------------------------------------------------    
    master_prod_list = ['angstrom','aot_862','aot_865','aot_869','cdom_index','chlor_a','ipar','Kd_490','nflh','par','pic','poc',
                        'Rrs_410','Rrs_412','Rrs_413','Rrs_443','Rrs_486','Rrs_488','Rrs_490','Rrs_510','Rrs_531','Rrs_547','Rrs_551',
                        'Rrs_555','Rrs_560','Rrs_620','Rrs_665','Rrs_667','Rrs_670','Rrs_671','Rrs_681','Rrs_645','Rrs_859','Rrs_482','Rrs_561','Rrs_655','adg_giop',
                        'adg_gsm','adg_qaa','aph_giop','aph_gsm','aph_qaa','arp','a_giop','a_gsm','a_qaa','bbp_giop','bbp_gsm','bbp_qaa',
                        'bb_giop','bb_gsm','bb_qaa','BT','calcite_2b','calcite_3b','cfe','chlor_oc2','chlor_oc3','chlor_oc4','chl_clark','chl_ocx',
                        'chl_gsm','chl_octsc','evi','flh','ipar','Kd_lee','Kd_morel','Kd_mueller','Kd_obpg','KPAR_lee','KPAR_morel','ndvi',
                        'poc_clark','poc_stramski_490','tsm_clark','Zeu_morel','Zhl_morel','Zphotic_lee','Zsd_morel', 'chl_oc2', 'sst','sst4']
                        
    prod_list = []
    
      
    ftype= hdf_cdf_version(ifile)
        
    
    if ftype == 'hdf4':
   
        f= SD(ifile,SDC.READ)
        dsets= f.datasets()
        dsNames = sorted(dsets.keys())
        f.end()
        
        full_var_name= np.asarray(dsNames)         
        
        bad_names= np.asarray(['elat','slat','clat','elon','slon','clon','k_no2','cntl_pt_cols', \
                   'k_oz','tilt','cntl_pt_rows','latitude','vcal_gain','csol_z','longitude', \
                   'vcal_offset','day','msec','wavelength','detnum','mside','year','l2_flags','F0', 'Tau_r', 'aw', 'bbw', \
                   'scan_ell','sen_mat', 'sun_ref', 'tilt_flags', 'tilt_ranges','nflag','ntilts','orb_vec','alt_ang','att_ang'])
        
        for vn in full_var_name:
            test_index= np.where(bad_names == vn)
            if len(bad_names[test_index]) == 0:
               prod_list.append(vn)                                             
         
        print ( '\nfull prod list inside of hdf4 get_l2hdf_prod...  ' )
        print ( prod_list )
        
        return prod_list  
            
              
    if ftype == 'hdf5':               
    
        f = Dataset(ifile, 'r') 
        group_names= f.groups.keys()
  
        var_name= f.groups['geophysical_data'].variables.keys()
        var_name= sorted(var_name)
        var_name= np.asarray(var_name)
         
        full_list_indx= np.where(var_name != 'l2_flags')
        prod_list= var_name[full_list_indx]     
                                                                         
        f.close()
        
        print ( '\nfull prod list inside of hdf5 get_l2hdf_prod...  ' )
        print ( prod_list )
       
        return prod_list
Example #14
 def __init__(self, hdfFn):
     self.epsgID = None  # 4326
     self.proj4Str = None  # '+proj=longlat +datum=WGS84 +no_defs'
     self.ulX = None
     self.ulY = None
     self.resolution = None
     self.sd = pyhdfSD.SD(hdfFn, pyhdfSD.SDC.WRITE | pyhdfSD.SDC.CREATE)
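A minimal sketch of what the WRITE | CREATE handle permits, using pyhdf's SD.create API (the file name, dataset name, and shape are illustrative):

import numpy as np
from pyhdf import SD as pyhdfSD

sd = pyhdfSD.SD('new.hdf', pyhdfSD.SDC.WRITE | pyhdfSD.SDC.CREATE)  # create file if absent
sds = sd.create('band1', pyhdfSD.SDC.FLOAT32, (3, 3))               # new 3x3 float dataset
sds[:] = np.zeros((3, 3), dtype=np.float32)
sds.endaccess()
sd.end()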
Example #15
def read_airs_hdfeos(fname, fidx=None, **kwargs):
	'''
	2016.04.11, Walter Sessions
	Moving to IRIS, need to read in radiances for retrieval.

	fname	str,	full path to hdf-eos file

	Wait... I should be generating files and making new collocation deals
	'''
	from pyhdf.SD import SD, SDC
	from numpy import arange, recarray, ndarray

	#_
	hdf = SD(fname, SDC.READ)

	#_define record array
	dtype = [	('AIRS_radiance', ndarray),
				('AIRS_epoch', ndarray),
				('AIRS_latitude', ndarray),
				('AIRS_longitude', ndarray) ]
	airs = recarray((0,), dtype)
	setattr(airs, 'fname', fname)

	#_	
	idx = arange(hdf.select('Time')[:].size) if fidx is None else fidx	
Example #16
 def getHDFData(self, sds):
     # open the hdf file for reading
     hdf=SD.SD(self.filename)
     # read the sds data
     sds=hdf.select(sds)
     data=sds.get()
     return data
Example #17
def get_hdf_SD_file_variables(filename):
    """
    Get all the variables from an HDF SD file

    :param str filename: The filename of the file to get the variables from
    :returns: An OrderedDict containing the variables from the file
    """
    if not SD:
        raise ImportError(
            "HDF support was not installed, please reinstall with pyhdf to read HDF files."
        )

    variables = None

    try:
        # Open the file.
        datafile = SD.SD(filename)
        # List of required variable names.
        variables = datafile.datasets()
        # Close the file
        datafile.end()
    except Exception:
        logging.exception("Error while reading SD data")

    return variables
Example #18
 def get_hdf_data(self, sds_name):
     # open the hdf file for reading
     hdf = SD.SD(self.filename)
     # read the sds data
     sds_obj = hdf.select(sds_name)
     data = sds_obj.get()
     return data
Example #19
def read_hdf4(srcPath, varName, Slice=None, verbose=True):
    h4 = SD.SD(srcPath)

    if Slice is None: Slice = slice(None, None, None)
    '''
    h4Var   = h4.select(varName)
    print dir(h4Var)
    print  h4Var.dimensions()

    sys.exit()
    '''

    try:
        h4Var = h4.select(varName)
        aOut = h4Var[:][Slice]

    except Exception:
        print('!' * 80)
        print('I/O Error')
        print('Blank File? %s' % srcPath)
        print('Blank array will be returned [ %s ]' % varName)
        print(Slice)
        print('!' * 80)
        aOut = np.zeros(0)  # the promised blank array (np assumed imported at module level)

    if verbose:
        print('\t[READ_HDF4] %s [%s] -> %s' % (srcPath, varName, aOut.shape))
    # print '\t[READ_HDF4] %s %s -> %s'%( srcPath, h4Var.dimensions(), aOut.shape)

    h4.end()  # SD files are closed with end()

    return aOut
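A hypothetical call, reading only the first slice of a variable (file and variable names are illustrative):

a = read_hdf4('granule.hdf', 'precip', Slice=slice(0, 1))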
Example #20
def extract_hdf_radiance(misr_file, shrink_shape=False):
"""
	Extract radiance data from the misr_file given and
	shrink it if shrink_shape is false
"""

	hdf = SD(misr_file)

	# Pull radiance data from the hdf file and put it into a list
	color_band_ds = [hdf.select('Red Radiance/RDQI'),hdf.select('Green Radiance/RDQI'),hdf.select('Blue Radiance/RDQI')]

	# Pull solar zentih data
	solar_zenith = hdf.select('SolarZenith')[:,:,:]

	# Realign width and height if shrink_shape is true
	# (shrinked and shrink_tuple are module-level values defined elsewhere in the source project)
	if shrink_shape:
		expand_val = shrinked[1]
		expand_tuple = shrink_tuple[1]
	else:
		expand_val = shrinked[0]
		expand_tuple = shrink_tuple[0]

	red = color_band_ds[0][:,:,:]
	green = color_band_ds[1][:,:,:]
	blue = color_band_ds[2][:,:,:]

	# Get rid of the additional 2 bits on the right of the radiance data
	red = (np.int16((np.int16(red) >> 2) & 0x3FFF)).astype(np.float64)
	green = (np.int16((np.int16(green) >> 2) & 0x3FFF)).astype(np.float64)
	blue = (np.int16((np.int16(blue) >> 2) & 0x3FFF)).astype(np.float64)


	# Clear out fill data
	red[red >= 16378.0] = np.nan
	green[green >= 16378.0] = np.nan
	blue[blue >= 16378.0]= np.nan

	solarZenith_e = np.zeros(expand_tuple)

	# Expand the solar zenith data into the current width x height and get rid of data > 90
	for i in range(len(solar_zenith)):
		solarZenith_e[i] = np.array(solar_zenith[i].repeat(expand_val,axis=0).repeat(expand_val,axis=1))
		solarZenith_e[i][solarZenith_e[i] >= 90] = -1
		solarZenith_e[i][solarZenith_e[i] < 0] = np.nan

	if shrink_shape:
		red_s = [0]*len(red)
		green_s = [0]*len(red)
		blue_s = [0]*len(red)

		for i in range(len(red)):
			
			red_s[i] = np.array(shrink(red[i],128,512))
			green_s[i] = np.array(shrink(green[i],128,512))
			blue_s[i] = np.array(shrink(blue[i],128,512))

	if shrink_shape: return np.array([red_s, green_s, blue_s, solarZenith_e])

	# Return the array of tuples that keep the r, g, b and solar zenith data per pixel
	return np.array([red,green,blue,solarZenith_e])
Example #21
def modisread_day_of_year(folder):
    print(' ')
    print('____________Read Modis Day_of_Year_____________')
    print(' ')
    d = {}
    cs_clouds = 1000
    granules = modis_granules(folder)
    # Choose one file
    k = list(granules.keys())[0]
    filename_doy = granules[k]['day_of_year']
    print('read day_of_year: ' + folder + filename_doy)
    f_doy = SD(folder + filename_doy)
    print(' ')
    for key in ['day_of_year', 'Latitude', 'Longitude']:
        d[key] = sp.array(f_doy.select(key).get(), sp.int16)
        if key == 'day_of_year':
            day_of_year = []
            for n in d[key][0].flatten():
                #flag=convert_decimal_to_8bit(n)
                flag = sp.binary_repr(n, width=16)
                day_of_year.append(int(flag))  #0->cloudy, 1->clear
            doy = sp.array(day_of_year).reshape(d[key].shape[1:3])
            d[key] = doy

    print(' done.')
    d['day_of_year'] = d['day_of_year'][:, :-1000 // cs_clouds]
    # remove the right-hand-side edge pixels
    d['Latitude'] = d['Latitude'][:, :-1000 // cs_clouds]
    d['Longitude'] = d['Longitude'][:, :-1000 // cs_clouds]
    print('image shape: ' + str(d['day_of_year'].shape))
    print(' ')
    return d['day_of_year'], d['Longitude'], d['Latitude']
Example #22
def co_locate(cal,mod):
    from calipso_run_updated_to_analyse import Cal2
    try:
        c=Cal2(cal)
        from pyhdf import SD
        #lat=c.coords()[0]
        #lon=c.coords()[1]
        lat,lon,Image2,Image3=c.file_sort_s()
        #c=np.where((lat>-68)&(lat<-48))
        #lat=lat[c]
        #lon=lon[c]
    
        #different co-ordinate resolutions of each product
        #here we are using the r5km resolution
        #btd=c.btd_10()
        #both products are offset by 20 pixels, meaning the temperature products are offset by 20kms. 
    
        #c.close()
        
        f=SD.SD(mod)
        subdataset_name='Latitude'
        sds=f.select(subdataset_name)
            #lat2=np.repeat(np.repeat(sds.get()[:406,:270],5,axis=0),5,axis=1).reshape(2030,1350,order='C')
        #sds=f.select('Longitude')
        #lon2=np.repeat(np.repeat(sds.get()[:406,:270],5,axis=0),5,axis=1).reshape(2030,1350,order='C')
        #x=np.zeros([2030,1354])
        lat2=sds.get()[:406,:3]
        sds=f.select('Longitude')
        lon2=sds.get()[:406,:3]
        #x=np.zeros([2030,1354])
        cal_index=[]
        f.end()
        #x=np.zeros([2030,1354])
        iterr=[]
        iter2=[]
        lat1=[]
        lon1=[]
        coords_x=[]
        coords_y=[]
        for i in range(len(lat)):
            c1=abs(lat2-lat[i])
    
            #print i
            c2=abs(lon2-lon[i])
            c3=np.sqrt(c1**2+c2**2)
            #print c1.min()
            c=np.where((c3==c3.min())&(c3.min()<0.025))
    
            if len(c[0])>0:
                lat1=lat1+[i for i in range(5*i,5*i+5)]
    
                #iter2.append(c[0])
                #btd1.append(btd[i])
                coords_x=coords_x+np.arange(5*c[0],5*c[0]+5).tolist()
                coords_y=coords_y+np.repeat(5*c[1],5).tolist()
        return np.array([coords_x,coords_y]),lat1
    except Exception:
        print('did not work')
        return [], []
Example #23
    def hdf4lookup(self, path, swath):
        hdf = HDF(path)
        sd = SD(path)
        vs = hdf.vstart()
        v = hdf.vgstart()

        vg = v.attach(swath)
        vg_members = vg.tagrefs()
        vg0_members = {}
        for tag, ref in vg_members:
            vg0 = v.attach(ref)
            if tag == HC.DFTAG_VG:
                vg0_members[vg0._name] = vg0.tagrefs()
            vg0.detach()
        vg.detach()

        lookup_dict = {}
        for key in vg0_members.keys():
            for tag, ref in vg0_members[key]:
                if tag == HC.DFTAG_NDG:
                    # f = open(swath + '.txt', 'a'); f.writelines('#' + key + '#' + '\n'); f.close()
                    sds = sd.select(sd.reftoindex(ref))
                    name = sds.info()[0]
                    lookup_dict[name] = [tag, ref]
                    sds.endaccess()
                elif tag == HC.DFTAG_VH:
                    vd = vs.attach(ref)
                    nrecs, intmode, fields, size, name = vd.inquire()
                    lookup_dict[name] = [tag, ref]
        v.end()
        vs.end()
        sd.end()
        hdf.close()
        return lookup_dict
Example #24
def main():

    parser = ArgumentParser(description=__doc__)
    parser.add_argument('colloc_file', help='HDF collocation output file')
    parser.add_argument('shis_file', help='NetCDF SHIS file')
    args = parser.parse_args()

    colloc_sd = SD.SD(args.colloc_file)
    shis_sd = SD.SD(args.shis_file)

    idx_sds = colloc_sd.select('SHIS_Index')
    idx_ini, idx_fin = idx_sds[0], idx_sds[-1]

    t_ini = shis_time(shis_sd, idx_ini)
    t_fin = shis_time(shis_sd, idx_fin)

    print('SHIS.CPL.COLLOC.{0}.{1}.hdf'.format(t_ini, t_fin))
Example #25
def L1_Reading(fpath):
    sd_obj = SD(fpath, SDC.READ)
    hdf_obj = HDF.HDF(fpath)
    Vt_obj = hdf_obj.vstart()
    m_data = Vt_obj.attach('metadata').read()[0]
    Height = np.array(m_data[-2])  # the 583 height bins correspond to actual altitude above sea level
    Lats = sd_obj.select('Latitude').get()
    Lons = sd_obj.select('Longitude').get()
    L_route = np.concatenate([Lats.T, Lons.T]).T
    del Lons
    surface = sd_obj.select('Surface_Elevation').get()
    target_rows = []
    distance_list = []
    min_distance = 9999999
    for location in L_route:
        distance = LonLat_Distance(location, LZU_LatLon)
        if distance < min_distance:
            min_distance = distance
        if distance < 50:
            target_rows.append(True)
        else:
            target_rows.append(False)
        distance_list.append(distance)

    Per532 = np.array(
        sd_obj.select('Perpendicular_Attenuated_Backscatter_532').get())
    Per532 = cv2.GaussianBlur(Per532, (3, 11), 8)

    Per532[Per532 < 0] = 0
    Tol532 = np.array(sd_obj.select('Total_Attenuated_Backscatter_532').get())
    Tol532 = cv2.GaussianBlur(Tol532, (3, 11), 8)

    Tol532[Tol532 < 0] = 0
    Par532 = Tol532 - Per532

    # process Dep data
    Dep532 = np.true_divide(Per532, Par532)
    Dep532[Par532 <= 0.0003] = 0
    Dep532[Par532 <= 0.0000] = 0
    Dep532[Dep532 > 1] = 0
    Dep532 = cv2.blur(Dep532, (3, 11))

    Data_dic = {}
    Data_dic['Tol532'] = Tol532
    Data_dic['Dep532'] = Dep532
    Data_meta = {
        'route': L_route,
        'surface': surface,
        'Lats': Lats,
        'target rows': target_rows,
        'Height': Height,
        'distance': distance_list,
        'min distance': min_distance
    }
    # for key, value in Rd_dic.items():
    # value.columns = Height.values[0]
    sd_obj.end()
    hdf_obj.close()
    return Data_dic, Data_meta
Example #26
    def __init__(self, *fnames):
        """Initialize the HDF group using the first HDF file"""
        N = len(fnames)

        #Get the lon&lat coordinates of the common area of the whole group
        tops = []
        bots = []
        lefs = []
        ryts = []

        for fname in fnames:
            hdf_file = SD.SD(fname)
            lon = hdf_file.select('Longitude').get()
            lat = hdf_file.select('Latitude').get()
            hdf_file.end()

            lon, lat = autoFlip(lon, 'horizontal', lon, lat)
            lon, lat = autoFlip(lat, 'vertical', lon, lat)

            tops.append(lat[0][0])
            bots.append(lat[-1][-1])
            lefs.append(lon[0][0])
            ryts.append(lon[-1][-1])

        #Intersect with Philippine area, 2.5-22.5 deg lat, 115-130 deg lon
        top = min(NP.min([tops]), 22.5)
        bot = max(NP.max([bots]), 2.5)
        lef = max(NP.max([lefs]), 115)
        ryt = min(NP.min([ryts]), 130)

        print("Common area (top, bot)(left, right): " + '(' + str(top) + ',' +
              str(bot) + ')' + '(' + str(lef) + ',' + str(ryt) + ')')

        if top < bot or lef > ryt:
            self.proceed = False
            print("No common area found")
        else:
            self.proceed = True
            print("Processing: 1/" + str(N))
            MXD35L2File.__init__(self, fnames[0])

            self.top = top
            self.bot = bot
            self.lef = lef
            self.ryt = ryt

            self.interpLonLat()
            self.imshowCloudCoast('-1RAW')
            self.computeCloudFrac('1RAW')
            self.cutCloudWaterCoast()
            self.imshowCloudCoast('-2CUT')
            self.computeCloudFrac('2CUT')
            self.interpCloudWaterCoast()
            self.imshowCloudCoast('-3INTERP')
            self.computeCloudFrac('3INTERP')

        self.N = N
        self.members = fnames
Example #27
def check():
	hdf4 = SD.SD(file_path, SD.SDC.READ)
	ds = hdf4.datasets()
	print(ds)
	cp = hdf4.select("Precipitable_Water_Near_Infrared_Clear")
	print(" ".join(cp.dimensions().keys()), cp.attributes(), cp.attributes().get('units'))

	cp.endaccess()
	hdf4.end()
Example #28
def require_SD_info_hdf(file_in):
    '''Print and return SD variable names and dimensions in an HDF-EOS file'''
    #--information from input file--
    f=SD(file_in,SDC.READ)
    var_info=f.datasets()
    print("--Variables in ",file_in,"-->")
    for name,value in var_info.items():
        print("    ",name,": ",value)
    return var_info
Example #29
File: io.py  Project: andreas-h/geodas
def read_hdf4(filename, name=None, coords_only=False, **kwargs):
    import pyhdf.SD as SD
    from pyhdf.error import HDF4Error
    try:
        _file = SD.SD(filename)
    except HDF4Error:
        print("Cannot open file: %s" % filename)
        raise
    # find out which dataset to read
    if name is None:
        datasets = list(_file.datasets().keys())
        variables = []
        for d in datasets:
            var = _file.select(d)
            if len(var.dimensions()) > 1 or var.dim(0).info()[0] != d:
                variables.append(d)
        if len(variables) > 1:
            raise AttributeError("There is more than one non-coordinate "
                                 "variable in the file, and you didn't "
                                 "specify which one you want me to read!")
        name = variables[0]
    # open dataset
    sds = _file.select(name)
    # open the coordinate variables
    dims = sds.dimensions(full=True)
    dimorder = {dims[k][1]: k for k in list(dims.keys())}
    coordinates = OrderedDict()
    for d in range(len(dimorder)):
        coordinates[dimorder[d]] = _file.select(dimorder[d])[:]
    # coordinate slicing
    slices = get_coordinate_slices(coordinates, kwargs)
    # slice the coordinate arrays themselves
    for i, c in enumerate(list(coordinates.keys())):
        coordinates[c] = coordinates[c][slices[i]]
    if coords_only:
        return coordinates
    # read requested slice from disk
    data = sds[slices]
    fill = sds.getfillvalue()
    if fill is not None and not np.isnan(fill):
        data = np.where(data != fill, data, np.nan)
    # make sure latitudes go from S to N
    if coordinates['latitude'][0] > coordinates['latitude'][-1]:
        coordinates['latitude'] = coordinates['latitude'][::-1]
        for i in dimorder.keys():
            if dimorder[i] == "latitude":
                if i == 0:
                    data = np.flipud(data)
                elif i == 1:
                    data = np.fliplr(data)
                else:
                    raise ValueError("flipping data array for ascending "
                                     "coordinates only works with 2d arrays!")
                continue
    out = gridded_array(data, coordinates, name)
    _file.end()
    return out
Example #30
def h4read(path, ref):
    '''
    Only capable of reading datasets; vdata are not supported.
    '''
    sd = SD(path)
    sds = sd.select(sd.reftoindex(ref))
    data = np.float64(sds.get())
    sds.endaccess(); sd.end()
    return data
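h4lookup (Example #7) and h4read pair naturally: the first maps dataset names to refs, the second reads by ref. A hypothetical pairing, assuming an OMI-style L1B file (file and dataset names are illustrative):

refs = h4lookup('OMI-Aura_L1B.he4')
rad = h4read('OMI-Aura_L1B.he4', refs['Radiance'])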