Example No. 1
def getStartEndTime(fileAbsPath):
    hdf = HDF(fileAbsPath)
    vs = hdf.vstart()

    ref = vs.find('start_time')
    vd = vs.attach(ref)
    startTime = vd.read(1)[0][0]
    startDateTime = datetime.datetime(int(startTime[0:4]), int(startTime[4:6]),
                                      int(startTime[6:8]),
                                      int(startTime[8:10]),
                                      int(startTime[10:12]),
                                      int(startTime[12:14]))
    # startDateTime = startTime[0:4] + '-' + startTime[4:6] + '-' + startTime[6:8] + 'T' + startTime[8:10] + ':' + startTime[10:12] + ':' + startTime[12:14] + 'Z'

    ref = vs.find('end_time')
    vd = vs.attach(ref)
    endTime = vd.read(1)[0][0]
    endDateTime = datetime.datetime(int(endTime[0:4]), int(endTime[4:6]),
                                    int(endTime[6:8]), int(endTime[8:10]),
                                    int(endTime[10:12]), int(endTime[12:14]))
    # endDateTime = endTime[0:4] + '-' + endTime[4:6] + '-' + endTime[6:8] + 'T' + endTime[8:10] + ':' + endTime[10:12] + ':' + endTime[12:14] + 'Z'

    vs.end()

    return startDateTime, endDateTime
Example No. 2
def h4lookup(path, swath = "Earth UV-2 Swath"):
    '''
    Only look up datasets; vdata are ignored
    ("WavelengthReferenceColumn" is one such vdata).
    '''
    hdf = HDF(path)
    v = hdf.vgstart()
    s2_vg = v.attach(swath)
    geo_tag, geo_ref = s2_vg.tagrefs()[0]
    dat_tag, dat_ref = s2_vg.tagrefs()[1]
    s2_vg.detach()
    #--------------------------------------------
    # found geolocation & data fields
    #--------------------------------------------
    geo_vgs = v.attach(geo_ref); dat_vgs = v.attach(dat_ref)
    gvg_tagrefs = geo_vgs.tagrefs(); dvg_tagrefs = dat_vgs.tagrefs()
    geo_vgs.detach(); dat_vgs.detach()
    tagrefs_list = gvg_tagrefs + dvg_tagrefs
    refs_dict = {}
    #--------------------------------------------
    # create dict in which keys are names in hdf and values are refs
    #--------------------------------------------
    sd = SD(path)
    for tr in tagrefs_list:
        tag, ref = tr
        if tag == HC.DFTAG_NDG:
            sds = sd.select(sd.reftoindex(ref))
            refs_dict[sds.info()[0]] = ref
    sds.endaccess(); sd.end(); v.end(); hdf.close()
    return refs_dict
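
A quick usage sketch (the file path 'OMI_L2.he4' is a placeholder): the returned dictionary maps dataset names to reference numbers, which can be turned back into SDS handles with SD.reftoindex.

from pyhdf.SD import SD

refs_dict = h4lookup('OMI_L2.he4')
sd = SD('OMI_L2.he4')
for name, ref in refs_dict.items():
    sds = sd.select(sd.reftoindex(ref))   # reopen the dataset by reference number
    print(name, sds.info()[2])            # dataset name and its dimensions
    sds.endaccess()
sd.end()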
Example No. 3
    def hdf4lookup(self, path, swath):
        hdf = HDF(path)
        sd = SD(path)
        vs = hdf.vstart()
        v = hdf.vgstart()

        vg = v.attach(swath)
        vg_members = vg.tagrefs()
        vg0_members = {}
        for tag, ref in vg_members:
            vg0 = v.attach(ref)
            if tag == HC.DFTAG_VG:
                vg0_members[vg0._name] = vg0.tagrefs()
            vg0.detach()
        vg.detach()

        lookup_dict = {}
        for key in vg0_members.keys():
            for tag, ref in vg0_members[key]:
                if tag == HC.DFTAG_NDG:
                    # f = open(swath + '.txt', 'a'); f.writelines('#' + key + '#' + '\n'); f.close()
                    sds = sd.select(sd.reftoindex(ref))
                    name = sds.info()[0]
                    lookup_dict[name] = [tag, ref]
                    sds.endaccess()
                elif tag == HC.DFTAG_VH:
                    vd = vs.attach(ref)
                    nrecs, intmode, fields, size, name = vd.inquire()
                    lookup_dict[name] = [tag, ref]
        v.end()
        vs.end()
        sd.end()
        hdf.close()
        return lookup_dict
Example No. 4
def get_solor_angle(myd1km_filename_str):
    try:  # open hdf
        myd021km_data = SD(myd1km_filename_str)
        # 1.SDS Reading
        zenith_sds = myd021km_data.select('SensorZenith')  # sensor zenith angle
        zenith = zenith_sds.get()
        azimuth_sds = myd021km_data.select('SensorAzimuth')  # sensor azimuth angle
        azimuth = azimuth_sds.get()

        return zenith * 0.01, azimuth * 0.01

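        # NOTE: everything below this point is unreachable because of the return above.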
        f = HDF(myd1km_filename_str, SDC.READ)
        vs = f.vstart()
        data_info_list = vs.vdatainfo()
        # Vset table
        # L1B swath metadata
        L1B_Swath_Matedata_VD = vs.attach('Level 1B Swath Metadata')
        # Read [Swath/scan type]
        sd_info = L1B_Swath_Matedata_VD.inquire()
        all_metadata = L1B_Swath_Matedata_VD.read(sd_info[0])
        L1B_Swath_Matedata_VD.detach()  # __del__ will handle this.
        vs.end()  # __del__ can also do this.
        f.close()
        return all_metadata

    except HDF4Error:
        print("Unexpected error:(get_solor_angle)", sys.exc_info()[0])
        print('READ ERROR......:' + myd1km_filename_str)
        return None
Example No. 5
def read_vdata(FILENAME):
    """Reads all vdata fields in an HDF-EOS file and places those fields 
    in a dictionary.
    
    PARAMETERS: 
    -----------
    FILENAME: Name of HDF-EOS file. 

    OUTPUTS: 
    --------
    vdata_dict: Dictionary of vdata fields.  
    """

    # Prepare to read the data.
    f = HDF(FILENAME, SDC.READ)  # Open the file
    vs = f.vstart()  # Start the vdata interface
    data_info_list = vs.vdatainfo()  # List the vdata fields
    vdata_fieldnames = [a[0] for a in data_info_list]  # Get the names

    # Load the data, place in dictionary
    vdata_dict = {}
    for field in vdata_fieldnames:
        vdata_dict[field] = np.squeeze(np.asarray(vs.attach(field)[:]))

    # terminate the vdata interface, close the file.
    vs.end()
    f.close()

    return vdata_dict
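
A minimal usage sketch for read_vdata (the granule path is a placeholder):

vdata_dict = read_vdata('granule.hdf')
print(sorted(vdata_dict.keys()))   # names of all vdata fields found in the file
lat = vdata_dict.get('Latitude')   # each field comes back as a squeezed numpy array
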
def L1_Reading(fpath):
    sd_obj = SD(fpath, SDC.READ)
    hdf_obj = HDF.HDF(fpath)
    Vt_obj = hdf_obj.vstart()
    m_data = Vt_obj.attach('metadata').read()[0]
    Height = np.array(m_data[-2])  # the 583 height bins correspond to actual altitude (a.s.l.)
    Lats = sd_obj.select('Latitude').get()
    Lons = sd_obj.select('Longitude').get()
    L_route = np.concatenate([Lats.T, Lons.T]).T
    del Lons
    surface = sd_obj.select('Surface_Elevation').get()
    target_rows = []
    distance_list = []
    min_distance = 9999999
    for location in L_route:
        distance = LonLat_Distance(location, LZU_LatLon)
        if distance < min_distance:
            min_distance = distance
        if distance < 50:
            target_rows.append(True)
        else:
            target_rows.append(False)
        distance_list.append(distance)

    Per532 = np.array(
        sd_obj.select('Perpendicular_Attenuated_Backscatter_532').get())
    Per532 = cv2.GaussianBlur(Per532, (3, 11), 8)

    Per532[Per532 < 0] = 0
    Tol532 = np.array(sd_obj.select('Total_Attenuated_Backscatter_532').get())
    Tol532 = cv2.GaussianBlur(Tol532, (3, 11), 8)

    Tol532[Tol532 < 0] = 0
    Par532 = Tol532 - Per532

    # process depolarization (Dep) data
    Dep532 = np.true_divide(Per532, Par532)
    Dep532[Par532 <= 0.0003] = 0
    Dep532[Par532 <= 0.0000] = 0
    Dep532[Dep532 > 1] = 0
    Dep532 = cv2.blur(Dep532, (3, 11))

    Data_dic = {}
    Data_dic['Tol532'] = Tol532
    Data_dic['Dep532'] = Dep532
    Data_meta = {
        'route': L_route,
        'surface': surface,
        'Lats': Lats,
        'target rows': target_rows,
        'Height': Height,
        'distance': distance_list,
        'min distance': min_distance
    }
    # for key, value in Rd_dic.items():
    # value.columns = Height.values[0]
    sd_obj.end()
    Vt_obj.end()
    hdf_obj.close()
    return Data_dic, Data_meta
def require_VD_info_hdf(file_in):
    '''Print and return VD variable names and dimensions in an HDF-EOS file.'''
    #--information from input file--
    f=HDF(file_in)
    vs=f.vstart()
    var_info=vs.vdatainfo()
    print("--Variables in ",file_in,"-->")
    for item in var_info:
        print(item)
    vs.end()
    f.close()
    return var_info
Example No. 8
def proc(indir, outdir, inname, outname):
    path = indir + "/" + inname
    hdf = HDF(path)
    sd = SD(path)
    vs = hdf.vstart()
    v = hdf.vgstart()
    mod_vg = v.attach("MOD_Grid_monthly_CMG_VI")
    vg_members = mod_vg.tagrefs()
    # print vg_members
    tag, ref = vg_members[0]
    # print tag, ref
    vg0 = v.attach(ref)
    # print vg0._name
    tagrefs = vg0.tagrefs()
    # print tagrefs
    for tag, ref in tagrefs:
        if tag == HC.DFTAG_NDG:
            sds = sd.select(sd.reftoindex(ref))
            name = sds.info()[0]
            # print name
            if name == "CMG 0.05 Deg Monthly NDVI":
                sd = SD(path)
                sds = sd.select(sd.reftoindex(ref))
                ndvi = np.float64(sds.get())
                sds.endaccess()
            elif name == "CMG 0.05 Deg Monthly EVI":
                sd = SD(path)
                sds = sd.select(sd.reftoindex(ref))
                evi = np.float64(sds.get())
                sds.endaccess()
    sd.end()
    v.end()
    vs.end()
    hdf.close()

    data = ndvi
    name = outdir + "/" + outname + ".tif"
    cols = 7200
    rows = 3600
    originX = -180.0
    originY = 90.0
    pixelWidth = 0.05
    pixelHeight = -0.05

    driver = gdal.GetDriverByName('GTiff')
    newRasterfn = name
    outRaster = driver.Create(newRasterfn, cols, rows, 1, gdal.GDT_Float32)
    outRaster.SetGeoTransform(
        (originX, pixelWidth, 0, originY, 0, pixelHeight))
    outband = outRaster.GetRasterBand(1)
    outband.WriteArray(data)
    outRasterSRS = osr.SpatialReference()
    outRasterSRS.ImportFromEPSG(4326)
    outRaster.SetProjection(outRasterSRS.ExportToWkt())
    outband.FlushCache()
Example No. 9
def getAdjustmentParams(fileAbsPath):
    hdf = HDF(fileAbsPath)
    vs = hdf.vstart()
    ref = vs.find('Radar_Reflectivity.valid_range')
    vd = vs.attach(ref)
    validRange = vd.read(1)[0][0]
    ref = vs.find('Radar_Reflectivity.factor')
    vd = vs.attach(ref)
    reflectivityFactor = vd.read(1)[0][0]
    vs.end()
    return validRange, reflectivityFactor
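
A short sketch of how the two values are typically applied (the granule path is a placeholder; this assumes a CloudSat-style file where 'Radar_Reflectivity' is a scaled-integer SDS and validRange comes back as a [min, max] pair):

import numpy as np
from pyhdf.SD import SD, SDC

validRange, reflectivityFactor = getAdjustmentParams('2B-GEOPROF_granule.hdf')
sd = SD('2B-GEOPROF_granule.hdf', SDC.READ)
raw = sd.select('Radar_Reflectivity').get().astype(np.float64)
# assumption: validRange unpacks to [min, max]; factor converts scaled counts to dBZ
raw[(raw < validRange[0]) | (raw > validRange[1])] = np.nan
dbz = raw / reflectivityFactor
sd.end()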
def read_vd_hdf(file_in,var_in,dimsz):
    '''Read data stored as a VD variable in an input HDF-EOS file.
    Inputs are the file path and the required variable name.
    Output is the data (numpy array).
    '''

    #--information from input file--
    hdf  = HDF(file_in)
    vs   = hdf.vstart()
    vd   = vs.attach(var_in)
    var_data = np.array(vd.read(int(dimsz)))
    vd.detach()
    vs.end()
    hdf.close()
    return var_data #,var_dimn
Example No. 11
def load_vd(fname, varnames):
    '''Return a list containing the vdata structures specified by the list
    varnames, contained in the HDF4 file with filename fname.'''
    f = HDF(fname)
    data_list = []
    vs = f.vstart()
    for name in varnames:
        vd = vs.attach(name)
        data_list.append(vd[:])
        vd.detach()
    vs.end()
    f.close()
    return data_list
def read_hdf_VD(file_in,var_in):
    '''Read Vdata sets (table, 1D) from the input HDF-EOS file.
    Currently coded for surface variables of an A-Train granule.
    Inputs are the file path and the required variable name.
    Outputs are the data (numpy array) and dimensions (numpy array).
    '''

    #--information from input file--
    f=HDF(file_in)
    vs=f.vstart()
    vd=vs.attach(var_in) #return var data from vs group
    var_data=np.array(vd[:]).ravel() #convert data into flatted ndarray
    #print(var_data.dtype)
    var_dimn=np.array(var_data.shape)
    vd.detach()
    vs.end()
    f.close()
    return var_data,var_dimn
Example No. 13
def read_data_from_hdf(inputfile):
    wavelengths = []
    depth = []
    downwelling_downcast = []
    downwelling_upcast = []
    upwelling_downcast = []
    upwelling_upcast = []

    # open the hdf file read-only
    # Initialize the SD, V and VS interfaces on the file.
    hdf = HDF(inputfile)
    vs = hdf.vstart()
    v  = hdf.vgstart()

    # Attach and read the contents of the Profiler vgroup

    vg = v.attach(v.find('Profiler'))

    for tag, ref in vg.tagrefs():
        assert(tag == HC.DFTAG_VH)
        vd = vs.attach(ref)
        nrecs, intmode, fields, size, name = vd.inquire()

        if name == "ED_hyperspectral_downcast":
            x = vd.read(nrecs)
            downwelling_downcast = np.asarray([i[3:] for i in x])
            wavelengths = np.asarray([float(x) for x in fields[3:]])
            depth = np.asarray([i[2] for i in x])
        elif name == "ED_hyperspectral_upcast":
            downwelling_upcast = np.asarray([i[3:] for i in vd.read(nrecs)])
        elif name == "LU_hyperspectral_downcast":
            upwelling_downcast = np.asarray([i[3:] for i in vd.read(nrecs)])
        elif name == "LU_hyperspectral_upcast":
            upwelling_upcast = np.asarray([i[3:] for i in vd.read(nrecs)])

        vd.detach()

    # Close vgroup
    vg.detach()

    #clean up
    v.end()
    vs.end()
    hdf.close()

    return wavelengths, depth, downwelling_downcast, downwelling_upcast, \
            upwelling_downcast, upwelling_upcast
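
Usage sketch (the cast file name is a placeholder; the vdata names follow the Profiler vgroup layout assumed above):

wl, depth, ed_down, ed_up, lu_down, lu_up = read_data_from_hdf('profiler_cast.hdf')
print(wl.shape, depth.shape, ed_down.shape)   # wavelength grid, depths, downcast Ed spectra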
Example No. 14
def HDFread1D(filename, variable):
    """
    Read HDF file in vs in simple mode
    """
    # Read the file
    f = HDF(filename)
    # Initialize v mode
    vs = f.vstart()
    # extract data
    var = vs.attach(variable)
    Var = np.array(var[:]).ravel()
    # Close the file
    var.detach()
    vs.end()
    f.close()

    return Var
Example No. 15
    def get_train_data(self,
                       index_tuple,
                       band_select=[
                           0, 1, 2, 3, 4, 6, 7, 18, 19, 20, 21, 22, 23, 24, 25,
                           26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37
                       ]):
        '''
        [Verification & night-time elimination]
        :param index_tuple:
        :return:
        '''
        # [band_num, rowsize]
        raw_data = self.get_by_index(index_tuple, verify=False)
        # Verify
        raw_data = raw_data[band_select, ...]
        valid_flag = np.ones([raw_data.shape[1]], dtype=bool)
        for idx in range(raw_data.shape[0]):
            valid_flag = valid_flag & (raw_data[idx, ...] > 32767)
        # Daytime mode
        try:  # open hdf
            f = HDF(self.filename, SDC.READ)
            vs = f.vstart()
            data_info_list = vs.vdatainfo()
            # Vset table
            # L1B swath metadata
            L1B_Swath_Matedata_VD = vs.attach('Level 1B Swath Metadata')
            # Read [Swath/scan type]
            svath_matedata = L1B_Swath_Matedata_VD[:]
            for idx in range(valid_flag.shape[0]):
                if svath_matedata[int((index_tuple[0][idx]) / 10)][2] == 'D   ':
                    valid_flag[idx] = True
                else:
                    valid_flag[idx] = False
            L1B_Swath_Matedata_VD.detach()  # __del__ will handle this.
            vs.end()  # __del__ can also do this.
            f.close()
        except ValueError:
            print("Unexpected error:", sys.exc_info()[0])
            print('READ ERROR......: %s' % self.filename)
            return 0, 0, False
        raw_data = self.get_by_index(index_tuple, verify=False, scaled=True)
        raw_data = raw_data[band_select, ...]
        return raw_data, valid_flag
Example No. 16
def get_swath_metadata(myd1km_filename_str):
    try:  # open hdf
        f = HDF(myd1km_filename_str, SDC.READ)
        vs = f.vstart()
        data_info_list = vs.vdatainfo()
        # Vset table
        # L1B swath metadata
        L1B_Swath_Matedata_VD = vs.attach('Level 1B Swath Metadata')
        # Read [Swath/scan type]
        sd_info = L1B_Swath_Matedata_VD.inquire()
        all_metadata = L1B_Swath_Matedata_VD.read(sd_info[0])
        L1B_Swath_Matedata_VD.detach()  # __del__ will handle this.
        vs.end()  # __del__ can also do this.
        f.close()
        return all_metadata

    except HDF4Error:
        print("Unexpected error:", sys.exc_info()[0])
        print('READ ERROR......:' + myd1km_filename_str)
        return None
Example No. 17
    def showHDFinfo(self):

        if len(self.dirLst) > 1: drs = self.dirLst[0]
        else: drs = self.dirLst

        hdf = HDF(drs, HC.READ)
        sd = SD(drs)

        vs = hdf.vstart()
        v = hdf.vgstart()

        # Scan all vgroups in the file.
        ref = -1
        while 1:
            try:
                ref = v.getid(ref)
                describevg(ref, v, vs)

            except HDF4Error as msg:  # no more vgroups
                break
Example No. 18
def read_qsmr_file(filename, species, index2):
    # Open HDF file:
    index2 = int(index2)
    if filename.split('.')[0].endswith('020'):
        l2p_path = L2P_PATH_2_0
    elif filename.split('.')[0].endswith('021'):
        l2p_path = L2P_PATH_2_1
    elif filename.split('.')[0].endswith('023'):
        l2p_path = L2P_PATH_2_3
    elif filename.split('.')[0].endswith('024'):
        l2p_path = L2P_PATH_2_4
    filename = str(os.path.join(*filename.split('-')))
    hdf = HDF.HDF(os.path.join(l2p_path, filename))
    vs = VS.VS(hdf)

    # Attach and create indexes:
    gloc = vs.attach('Geolocation')
    i_gloc = {x: i for i, x in enumerate(gloc._fields)}
    retr = vs.attach('Retrieval')
    i_retr = {x: i for i, x in enumerate(retr._fields)}
    data = vs.attach('Data')
    i_data = {x: i for i, x in enumerate(data._fields)}

    try:
        # Get the index in the geoloc table associated with the scan:
        index1 = [
            x[i_retr['ID1']] for x in retr[:] if x[i_retr['ID2']] == index2
        ][0]

        # Extract geolocation data to dictionary:
        gloc_dict = {}
        Geolocation = [x for x in gloc[:] if x[i_gloc['ID1']] == index1][0]
        for key in i_gloc:
            gloc_dict[key] = Geolocation[i_gloc[key]]

        # Extract Data to dictionary:
        data_dict = {}
        Data = [x for x in data[:] if x[i_data['ID2']] == index2]
        for key in i_data:
            data_dict[key] = [x[i_data[key]] for x in Data]
    except IndexError:
        gloc_dict = {}
        data_dict = {}

    # Clean up:
    gloc.detach()
    retr.detach()
    data.detach()
    vs.end()
    hdf.close()

    # Return:
    return {'Data': data_dict, "Geolocation": gloc_dict}
Example No. 19
def read_cloudsatcalipso_hdf_file(file, var):
    # var can be 'CloudFraction' etc

    hdf = SD(file, SDC.READ)
    print(hdf.datasets())

    # read height and actual cloud data
    dset = hdf.select('Height')
    height = dset[:, :]

    dset = hdf.select(var)
    data = np.array(dset[:, :], dtype=float)
    #pprint.pprint( dset.attributes() )

    # process cloud data according to valid range and scale factor
    data_at = dset.attributes(full=1)
    data_sf = data_at["factor"][0]
    data_vmin = data_at["valid_range"][0][0]
    data_vmax = data_at["valid_range"][0][1]
    data[data < data_vmin] = np.nan
    data[data > data_vmax] = np.nan
    data = data / data_sf

    # Close dataset
    dset.endaccess()

    # read geolocation data and time
    h = HDF.HDF(file)
    vs = h.vstart()

    xid = vs.find('Latitude')
    latid = vs.attach(xid)
    latid.setfields('Latitude')
    nrecs, _, _, _, _ = latid.inquire()
    latitude = np.array(latid.read(nRec=nrecs))
    latid.detach()

    lonid = vs.attach(vs.find('Longitude'))
    lonid.setfields('Longitude')
    nrecs, _, _, _, _ = lonid.inquire()
    longitude = np.array(lonid.read(nRec=nrecs))
    lonid.detach()

    timeid = vs.attach(vs.find('Profile_time'))
    timeid.setfields('Profile_time')
    nrecs, _, _, _, _ = timeid.inquire()
    time = timeid.read(nRec=nrecs)
    timeid.detach()

    # Close the vdata interface and both file handles
    vs.end()
    h.close()
    hdf.end()

    return data, height, longitude, latitude, time
def read_vd_hdf2(file_in,var_in):
    '''Read data stored as a VD variable in an input HDF-EOS file.
    Inputs are the file path and the required variable name.
    Outputs are the data (numpy array) and the number of records.
    '''

    #--information from input file--
    hdf  = HDF(file_in)      # the HDF file
    vs   = hdf.vstart()      # initialize VS interface on HDF file
    ### vdinfo = vs.vdatainfo() # return info about all vdatas
    vd   = vs.attach(var_in) # open a vdata given its name
    dimsz = vd.inquire()[0]  # return 5 elements: 
                             #   1. # of records
                             #   2. interlace mode
                             #   3. list of vdata field names
                             #   4. size in bytes of the vdata record
                             #   5. name of the vdata
    var_data = np.array(vd.read(dimsz)) #read a number of records
    vd.detach()              # close the vdata
    vs.end()                 # terminate the vdata interface
    hdf.close()              # close the HDF file
    return var_data,dimsz
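Usage sketch (file path and vdata name are placeholders):

var_data, nrecs = read_vd_hdf2('granule.hdf', 'Latitude')
print(nrecs, var_data.shape)   # number of records and the shape of the returned array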
Example No. 21
    def look(self, path, mem_list, lp_list):
        data = {}
        for name in mem_list:  # subdata sets type data
            tag = lp_list[name][0]
            ref = lp_list[name][1]
            if tag == HC.DFTAG_NDG:
                sd = SD(path)
                sds = sd.select(sd.reftoindex(ref))
                data[name] = np.float64(sds.get())
                sds.endaccess()
                sd.end()
            elif tag == HC.DFTAG_VH:  #vd type data
                hdf = HDF(path)
                vs = hdf.vstart()
                vd = vs.attach(ref)
                nrecs, intmode, fields, size, name = vd.inquire()
                data[name] = np.full(nrecs, np.float64(vd.read()[0]))
                vd.detach()
                vs.end()
                hdf.close()

        return data
Example No. 22
    def read(self, file_info, extra_fields=None, mapping=None):
        """Read and parse HDF4 files and load them to an GroupedArrays.

        Args:
            file_info: Path and name of the file as string or FileInfo object.
            extra_fields: Additional field names that you want to extract from
                this file as a list.
            mapping: A dictionary that maps old field names to new field names.
                If given, *extra_fields* must contain the old field names.

        Returns:
            A GroupedArrays object.
        """

        dataset = GroupedArrays(name="CloudSat")

        # The files are in HDF4 format therefore we cannot use the netCDF4
        # module. This code is taken from
        # http://hdfeos.org/zoo/OTHER/2010128055614_21420_CS_2B-GEOPROF_GRANULE_P_R04_E03.hdf.py
        # and adapted by John Mrziglod. A description about all variables in
        # CloudSat dataset can be found in
        # http://www.cloudsat.cira.colostate.edu/data-products/level-2c/2c-ice?term=53.

        file = HDF.HDF(file_info.path)

        try:
            vs = file.vstart()

            # Extract the standard fields:
            dataset["time"] = self._get_time_field(vs, file_info)
            dataset["lat"] = self._get_field(vs, "Latitude")
            dataset["lon"] = self._get_field(vs, "Longitude")
            dataset["scnline"] = Array(
                np.arange(dataset["time"].size), dims=["time_id"]
            )
            dataset["scnpos"] = Array(
                [1 for _ in range(dataset["time"].size)], dims=["time_id"]
            )

            # Get the extra fields:
            if extra_fields is not None:
                for field, dimensions in self.parse_fields(extra_fields):
                    data = self._get_field(vs, field)

                    # Add the field data to the dataset.
                    dataset[field] = self.select(data, dimensions)
        except Exception as e:
            raise e
        finally:
            file.close()

        return dataset
Example No. 23
def get_myd1km_sci_time(myd1km_filename_str):
    try:  # open hdf
        f = HDF(myd1km_filename_str, SDC.READ)
        vs = f.vstart()
        data_info_list = vs.vdatainfo()
        # Vset table
        # L1B swath metadata
        L1B_Swath_Matedata_VD = vs.attach('Level 1B Swath Metadata')
        # Read [Swath/scan type]
        begin = L1B_Swath_Matedata_VD[0]
        begin = begin[4]
        end = L1B_Swath_Matedata_VD[-1]
        end = end[4]
        L1B_Swath_Matedata_VD.detach()  # __del__ will handle this.
        vs.end()  # __del__ can also do this.
        f.close()
        return begin, end, True

    except HDF4Error:
        print("Unexpected error:", sys.exc_info()[0])
        print('READ ERROR......:' + myd1km_filename_str)
        return 0, 0, False
def L2_VFM_Reading(fpath):
    sd_obj = SD(fpath, SDC.READ)
    hdf_obj = HDF.HDF(fpath)
    Vt_obj = hdf_obj.vstart()
    m_data = Vt_obj.attach('metadata').read()[0]
    Height = np.array(m_data[-1])  # the 583 height bins correspond to actual altitude (a.s.l.)
    Lats = sd_obj.select('Latitude').get()
    Lons = sd_obj.select('Longitude').get()
    L_route = np.concatenate([Lats.T, Lons.T]).T
    target_rows = []

    for location in L_route:
        distance = LonLat_Distance(location, LZU_LatLon)
        if distance < 50:
            target_rows.append(True)
        else:
            target_rows.append(False)

    VFM_basic = np.array(sd_obj.select('Feature_Classification_Flags').get())
    VFM_basic = VFM_basic % 8
    VFM_1 = np.reshape(VFM_basic[:, 0:165], (VFM_basic.shape[0] * 3, 55))
    VFM_1 = np.repeat(VFM_1, 5, axis=0)
    VFM_2 = np.reshape(VFM_basic[:, 165:1165], (VFM_basic.shape[0] * 5, 200))
    VFM_2 = np.repeat(VFM_2, 3, axis=0)
    VFM_3 = np.reshape(VFM_basic[:, 1165:5515], (VFM_basic.shape[0] * 15, 290))
    VFM = np.concatenate((VFM_1, VFM_2, VFM_3), axis=1)
    target_rows_VFM = np.repeat(target_rows, 15)
    Rd_dic = {}
    Rd_dic['VFM'] = VFM
    Rd_dic_meta = {
        'route': L_route,
        'Lats': Lats,
        'target rows': target_rows,
        'Height': Height,
        'target rows VFM': target_rows_VFM,
    }
    sd_obj.end()
    Vt_obj.end()
    hdf_obj.close()
    return Rd_dic, Rd_dic_meta
Example No. 25
def getCoordsULLR(fileAbsPath, start, stop):
    # coordinates every 10 pixels
    nodes = range(start, stop, 10)
    hdf = HDF(fileAbsPath)
    vs = hdf.vstart()

    ref = vs.find('Latitude')
    vd = vs.attach(ref)
    nrecs, intmode, fields, size, name = vd.inquire()
    latitude = np.array(vd.read(nrecs))

    ref = vs.find('Longitude')
    vd = vs.attach(ref)
    nrecs, intmode, fields, size, name = vd.inquire()
    longitude = np.array(vd.read(nrecs))

    lat = []
    lon = []
    for n in nodes:
        lat.append(latitude[n][0])
        lon.append(longitude[n][0])

    # Append the last pixel coordinates: it is 9 pixels away from the
    # previous one, not 10 px like the others!
    lat.append(latitude[stop - 1][0])
    lon.append(longitude[stop - 1][0])

    vs.end()
    hdf.close()

    coords = []
    for x, y in zip(lat, lon):
        coords.append(x)
        coords.append(y)

    # Nodes are 101: 100 points with distance 10 px
    # and the last one with distance 9 px from the 990th
    return coords, len(nodes) + 1
Example No. 26
def load_data_from_files(filename):
    if not os.path.exists(filename):
        print("File {} does not exist, cannot load data.".format(filename))
        return
    elif not HDF.ishdf(filename):
        print("File {} is not in hdf4 file format, cannot load data.".format(
            filename))
        return

    f = SD(filename, SDC.READ)
    data_field = None
    for i, d in enumerate(f.datasets()):
        # print("{0}. {1}".format(i+1,d))
        if "NDVI" in d:
            data_field = d

    ndvi_data = f.select(data_field)
    data = np.array(ndvi_data.get())
    f.end()
    return data
Example No. 27
def test_hdf_type(filename):
    """Simple function to return the type of HDF file that is passed to it."""
    filetype = None

    # Check whether the file is an HDF4 file:
    # ishdf returns 1 for an HDF4 file, 0 otherwise.
    hdf4flag = HDF4.ishdf(filename)
    if hdf4flag == 1:
        filetype = 'HDF4'

    # Check whether the file is HDF5 (also covers HDF5-EOS):
    # isHDF5File returns > 0 if True, 0 if False.
    hdf5flag = HDF5.isHDF5File(filename)
    if hdf5flag > 0:
        filetype = 'HDF5'

    return filetype
Example No. 28
def vdata(file):
    from pyhdf import HDF
    from pyhdf import VS
    f = HDF.HDF(file)
    vs = f.vstart()                  # init vdata interface

    ## read Gains table
    vd = vs.attach('Gain Information')
    fields = vd._fields
    gains = {}

    while 1:
        try:
            rec = vd.read()[0]
            gains[rec[0]] = float(rec[1])
        except:
            break
    vd.detach()

    ## read Modes table
    vd = vs.attach('Mode Information')      # attach 'Mode Information' in read mode
    fields = vd._fields

    mode_info = {}
    r=0
    while 1:
        try:
            rec = vd.read()[0]
            cd = {fn:rec[fi] for fi,fn in enumerate(fields)}
            mode_info[r] = cd
            r+=1
        except:
            break
    vd.detach()

    ## close vdata
    vs.end()                  # terminate the vdata interface
    f.close()                 # close the HDF file

    return(gains, mode_info)
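Usage sketch (the file path is a placeholder; the table contents depend on the product):

gains, mode_info = vdata('granule.hdf')
print(gains)          # e.g. {'<band name>': <gain>, ...}
print(mode_info[0])   # first record of the 'Mode Information' table as a field-name dict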
Example No. 29
    def read(self, file_info, fields=None, mapping=None):
        """Read and parse HDF4 files and load them to a xarray.Dataset

        Args:
            file_info: Path and name of the file as string or FileInfo object.
            fields: Field names that you want to extract from this file as a
                list.
            mapping: A dictionary that maps old field names to new field names.
                If given, `fields` must contain the old field names.

        Returns:
            A xarray.Dataset object.
        """

        if fields is None:
            raise NotImplementedError(
                "You have to set field names. Loading the complete file is not"
                " yet implemented!"
            )

        dataset = xr.Dataset()

        # Files in HDF4 format are not very pretty. This code is taken from
        # http://hdfeos.org/zoo/OTHER/2010128055614_21420_CS_2B-GEOPROF_GRANULE_P_R04_E03.hdf.py
        # and adapted by John Mrziglod.

        file = HDF.HDF(file_info.path)

        try:
            vs = file.vstart()

            for field in fields:
                # Add the field data to the dataset.
                dataset[field] = self._get_field(vs, field)
        except Exception as e:
            raise e
        finally:
            file.close()

        return _xarray_rename_fields(dataset, mapping)
Example No. 30
def HDFread(filename, variable, Class=None):
    """
    Extract non-scientific data via the V/VS (vgroup/vdata) interfaces of an HDF file.
    """
    hdf = HDF(filename, HC.READ)

    # Initialize the SD, V and VS interfaces on the file.
    sd = SD(filename)
    vs = hdf.vstart()
    v = hdf.vgstart()

    # Find the reference id of the Geolocation Fields
    if Class is None:
        ref = v.findclass('SWATH Vgroup')
    else:
        ref = v.findclass(Class)

    # Open all data of the class
    vg = v.attach(ref)
    # All fields in the class
    members = vg.tagrefs()

    nrecs = []
    names = []
    for tag, ref in members:
        # Vdata tag
        vd = vs.attach(ref)
        # nrecs, intmode, fields, size, name = vd.inquire()
        nrecs.append(vd.inquire()[0])  # number of records of the Vdata
        names.append(vd.inquire()[-1])  # name of the Vdata
        vd.detach()

    idx = names.index(variable)
    var = vs.attach(members[idx][1])
    V = var.read(nrecs[idx])
    var.detach()
    # Terminate V, VS and SD interfaces.
    v.end()
    vs.end()
    sd.end()
    # Close HDF file.
    hdf.close()

    return np.array(V).ravel()
Example No. 31
from pyhdf.HDF import *
from pyhdf.VS import *

f = HDF('inventory.hdf',         # Open file 'inventory.hdf' in write mode
            HC.WRITE|HC.CREATE)  # creating it if it does not exist
vs = f.vstart()                  # init vdata interface
vd = vs.attach('INVENTORY', 1)   # attach vdata 'INVENTORY' in write mode

# Update the `status' vdata attribute. The attribute length must not
# change. We call the attribute info() method, which returns a list where
# number of values (eg string length) is stored at index 2.
# We then assign a left justified string of exactly that length.
len = vd.attr('status').info()[2]
vd.status = '%-*s' % (len, 'phase 3 done')

# Update record at index 1 (second record)
vd[1]  = ('Z4367', 'surprise', 10, 3.1, 44.5)
# Update record at index 4, and those after
vd[4:] = (
          ('QR231', 'toy', 12, 2.5, 45),
          ('R3389', 'robot', 3, 45, 2000),
          ('R3390', 'robot2', 8, 55, 2050)
         )
vd.detach()               # "close" the vdata
vs.end()                  # terminate the vdata interface
f.close()                 # close the HDF file
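
A short read-back sketch to verify the updates above (same 'inventory.hdf' layout as in this example; opening with no mode flag defaults to read-only):

f = HDF('inventory.hdf')         # reopen in read mode
vs = f.vstart()
vd = vs.attach('INVENTORY')
print(vd.attr('status').get())   # the updated attribute
print(vd[1])                     # the record rewritten at index 1
vd.detach()
vs.end()
f.close()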
Example No. 32
    vd.detach()


def sdscreate(sd, name):

    # Create a simple 3x3 float array.
    sds = sd.create(name, SDC.FLOAT32, (3, 3))
    # Initialize array
    sds[:] = ((0, 1, 2), (3, 4, 5), (6, 7, 8))
    # "close" dataset.
    sds.endaccess()


# Create HDF file
filename = 'inventory.hdf'
hdf = HDF(filename, HC.WRITE | HC.CREATE)

# Initialize the SD, V and VS interfaces on the file.
sd = SD(filename, SDC.WRITE)  # SD interface
vs = hdf.vstart()  # vdata interface
v = hdf.vgstart()  # vgroup interface

# Create vdata named 'INVENTORY'.
vdatacreate(vs, 'INVENTORY')
# Create dataset named "ARR_3x3"
sdscreate(sd, 'ARR_3x3')

# Attach the vdata and the dataset.
vd = vs.attach('INVENTORY')
sds = sd.select('ARR_3x3')
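
The fragment above calls a vdatacreate helper that is not shown. A minimal sketch of what it might look like, assuming the five INVENTORY fields implied by the records in Example No. 31 (part id, description, quantity, weight, price) and the pyhdf VS.create/VD.write API:

def vdatacreate(vs, name):
    # Field names, types and orders are assumptions based on the records in Example No. 31.
    vd = vs.create(name, (('partid', HC.CHAR8, 5),
                          ('description', HC.CHAR8, 10),
                          ('qty', HC.INT16, 1),
                          ('wght', HC.FLOAT32, 1),
                          ('price', HC.FLOAT32, 1)))
    # Store a few initial records.
    vd.write((('Q1234', 'bolt', 12, 0.01, 0.05),
              ('B5432', 'brush', 10, 0.4, 4.25),
              ('S7613', 'scissor', 2, 0.2, 3.75)))
    # "close" the vdata
    vd.detach()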
Example No. 33
def mosaic(*arg,**args):
	# This function takes the files passed in *arg, mosaics them together and produces a new file
	# mosaic(file1,[file2,file3,...],endfile)
	# If only file1 and endfile are given, the function will simply produce a copy without any other modification
	try:
		log=args["log"]
	except KeyError:
		log=logMod.Log("",nolog=True)
	if len(arg)>2:
		lfile = arg[:-1]				# This is the list of the NAME of the files to merge
		newfilename = arg[-1]			# This is the final file NAME
		# Should eventually check if files exist and can be read ***IMPROVE***
		lfHDF = []						# This is the list of the FILES to merge
		latt = []							# This is the list of the ATTRIBUTE "StructMetadata.0" of the files
		for fil in lfile:
			try:
				a=SD(fil,SDC.READ)
			except TypeError:
				a=SD(fil.encode('ascii','ignore'),SDC.READ)
			lfHDF.append(a)
			#print("hoho")
			latt.append(atribute(lfHDF[-1].attributes()["StructMetadata.0"],fil,dsi=[0,]))
			
		
		
		## Listing all the GRIDS that the new file will have
		gridlist = []						# This is the list of GRIDS to include in the final file

		for attOfF in latt:
			# Should check if any grid ***IMPROVE***
			gridlist += attOfF.listgridname()[1]		# listgridname return a list of all the grids name

		# remove double entry
		gridlist = list(set(gridlist))


		## Listing all the DATASETS that the new file will have
		dslist = []						# This is the list of DATASETS to include in the final file
		for attOfF in latt:
			# Should check if any grid ***IMPROVE***
			dslist = attOfF.orderedDS()
			
		# remove double entry
		# dslist = list(set(dslist))


		
		## Validation of common information
###############################################################################
# Some information has to be the same for each file or else no mosaic can be  #
# made; for example, two files that do not have the same projection type      #
# can't be merged together. ***IMPROVE*** Maybe in the future we could        #
# transform the files so that they share the same projection.                 #
###############################################################################

		# List of parameter to check to insure that they are the same
		paramMustSim = ["Projection","ProjParams","SphereCode"]
		# Dictionary that will keep all the informations about every file
		paramMustSimDict = {}

		for grid in gridlist:
			# Verification of a grid

			first = True			# Variable that will enable the construction of the dict
			paramdict = {}		# Dictionary that keep the actual value that have to be the same
			
			for attOfF in latt:
				# Verification of a file
				bigG = attOfF.getgridbyname(grid)		# Getting all the attributes in the grid of a file
				if bigG is not None:
					# If the grid exists in that file
					if first :
						# If this is the first time that a file is checked for that grid
						first = False
						for p in paramMustSim:
							# Checking every parameters that must be the same
							paramdict[p] = bigG.variable[p]
							
						# Validation of same Dtype for each datafield
						go = bigG.GROUP["DataField"].OBJECT
						for r in go:
							paramdict[go[r].variable["DataFieldName"]]=go[r].variable["DataType"]
					else:
						# If it's not the first time that a file is checked for that grid
						for p in paramMustSim:
							# Checking every parameters that must be the same
							if not paramdict[p]==bigG.variable[p]:
								# Stop merging and return error ***IMPROVE*** 
								# Maybe do only the things that can be done ***IMPROVE***
								log.log('e',Nom,"Error dataset are not compatible")
								
						# Validation of same Dtype for each datafield
						go=bigG.GROUP["DataField"].OBJECT
						for r in go:
							if not paramdict[go[r].variable["DataFieldName"]]==go[r].variable["DataType"]:
								# Stop merging and return error ***IMPROVE*** 
								# Maybe do only the things that can be done ***IMPROVE***
								log.log('e',Nom,"Error dataset are not compatible")
								
			# Keep all this info for later it's going to be useful
			paramMustSimDict[grid]=paramdict
				
				



		## Determination of new information
###############################################################################
# Some properties have to be calculated in order to merge. This section is    #
# doing just that                                                             #
###############################################################################

		gridResolX={}			# Getting the RESOLUTION in the X direction for each grid
		gridResolY={}			# Getting the RESOLUTION in the Y direction for each grid
		extremeup={}			# Getting the UPPER coordinates for each grid
		extremedown={}			# Getting the LOWEST coordinates for each grid
		extremeleft={}			# Getting the LEFTMOST coordinates for each grid
		extremeright={}			# Getting the RIGHTMOST coordinates for each grid
		gridDimX={}				# Getting the DIMENSIONS of X direction for each grid
		gridDimY={}				# Getting the DIMENSIONS of Y direction for each grid
		NoValueDS={}			# Getting the fill value of each dataset
		dtypeDS={}				# Getting the DTYPE for each dataset
		dstogrid={}				# Knowing which GRID each dataset belongs to
		filGridULC={}			# Getting the upper left corner of each file for each grid

		for grid in gridlist:
			# For each grid
			filGridULC[grid]={}			# Adding a dictionary for each grid that will contain information on every file
			for attOfF in latt:
				### Determination of resolution of each grid
				# ***IMPROVE*** Should check if bigG is None
				bigG=attOfF.getgridbyname(grid)				# Getting all the attributes in the grid of a file
				
				# Get extreme grid point
				ulp=eval(bigG.variable["UpperLeftPointMtrs"])
				lrp=eval(bigG.variable["LowerRightMtrs"])
				
				# Get grid dimensions
				dimx=int(bigG.variable["XDim"])
				dimy=int(bigG.variable["YDim"])
				
				# Calculate grid resolution
				gridResolX[grid]=(lrp[0]-ulp[0])/dimx
				gridResolY[grid]=(ulp[1]-lrp[1])/dimy
				
				### Determination of new extreme coordinates for each grid
				# up
				try:
					if extremeup[grid]< ulp[1]:
						extremeup[grid]=ulp[1]
				except KeyError:
					extremeup[grid]=ulp[1]
				# down
				try:
					if extremedown[grid]> lrp[1]:
						extremedown[grid]=lrp[1]
				except KeyError:
					extremedown[grid]=lrp[1]
				# left
				try:
					if extremeleft[grid]> ulp[0]:
						extremeleft[grid]=ulp[0]
				except KeyError:
					extremeleft[grid]=ulp[0]
				# right
				try:
					if extremeright[grid]< lrp[0]:
						extremeright[grid]=lrp[0]
				except KeyError:
					extremeright[grid]=lrp[0]
				### Determination of dataset-to-grid mapping
				if bigG is not None:
					go=bigG.GROUP["DataField"].OBJECT
					for r in go:
						dstogrid[ go[r].variable["DataFieldName"] ] = grid
				## Determination of ULC for each grid in each file
				filGridULC[grid][attOfF.name] = ulp
			## determination of new dimension for each grid
			gridDimY[grid] = int((extremeup[grid]-extremedown[grid])/gridResolY[grid])
			gridDimX[grid] = int((extremeright[grid]-extremeleft[grid])/gridResolX[grid])

		for ds in dslist:
			# For each dataset
			for sd in lfHDF:
				# For each hdf file
				
				# Try opening dataset
				try:
					sds = sd.select(eval(ds))
					# Get fill value
					NoValueDS[ds] = sds.getfillvalue()
					# Get dtype
					dtypeDS[ds] = sds.info()[3]
				except:
					log.log('e',Nom,"no dataset")



		## Start creating new file
###############################################################################
# This is the part where the actual work happens                              #
###############################################################################

		# This part is the same for every file in any circumstances
		########## absolute ########################
		
		# Open new file
		try:
			hdf = HDF(newfilename, HC.WRITE  | HC.CREATE  |HC.TRUNC)
			sd  =  SD(newfilename, SDC.WRITE | SDC.CREATE )
		except TypeError:
			hdf = HDF(newfilename.encode('ascii','ignore'), HC.WRITE  | HC.CREATE  |HC.TRUNC)
			sd  =  SD(newfilename.encode('ascii','ignore'), SDC.WRITE | SDC.CREATE )
		
		v=hdf.vgstart()
		vg={}
		vg1={}
		vg2={}
		
		## rewrite the gridlist
		gridlist = []
		for ds in dslist:
			if dstogrid[ds] not in gridlist:
				gridlist.append(dstogrid[ds])
				
		for grid in gridlist:
			vg[grid]=v.attach(-1,write=1)
			vg[grid]._class="GRID"
			vg[grid]._name=eval(grid)
			vg1[grid]=v.attach(-1,write=1)
			vg2[grid]=v.attach(-1,write=1)
			vg1[grid]._class="GRID Vgroup"
			vg1[grid]._name="Data Fields"
			vg2[grid]._class="GRID Vgroup"
			vg2[grid]._name="Grid Attributes"
			vg[grid].insert(vg1[grid])
			vg[grid].insert(vg2[grid])
		########## absolute ########################


		# Create dataset with the right size
		for ds in dslist:
			theGrid=dstogrid[ds]
			# Get grid name of data set
			sds = sd.create(eval(ds),dtypeDS[ds],(gridDimY[theGrid],gridDimX[theGrid]))
			
			# Set fill value
			fv=NoValueDS[ds]
			try:
				sds.setfillvalue(NoValueDS[ds])
			except OverflowError:
				log.log('e',Nom,"setfillvalue")
				sds.setfillvalue(0)
			## write real data
			for fil in range(len(latt)):
				try:
					# Determine where the data will be written
					ulc = filGridULC[theGrid][latt[fil].name]
					# Determine the position on the grid
					y = (extremeup[theGrid]-ulc[1])/(extremeup[theGrid]-extremedown[theGrid])
					x = (ulc[0]-extremeleft[theGrid])/(extremeright[theGrid]-extremeleft[theGrid])
					y = int(y*gridDimY[theGrid])
					x = int(x*gridDimX[theGrid])
					# read data from files
					osds = lfHDF[fil].select(eval(ds))
					sh = osds[:].shape
					sds[y:y+sh[0],x:x+sh[1]] = osds[:]
					osds.endaccess()
				except:
					pass
			# Close sds
			vg1[dstogrid[ds]].add(HC.DFTAG_NDG,sds.ref())
			sds.endaccess()


    
		for g in vg1:
			vg1[g].detach()
			vg2[g].detach()
			vg[g].detach()

		# Create attribute table for the file
		attstr="GROUP=GridStructure\n"
		gridcount=1
		for gr in gridlist:
			# Start group grid
			attstr+="\tGROUP=GRID_%i\n"%gridcount
			# Add grid name
			attstr+="\t\tGridName=%s\n"%gr
			# Add dimensions
			attstr+="\t\tXDim=%i\n"%gridDimX[gr]
			attstr+="\t\tYDim=%i\n"%gridDimY[gr]
			# Add UpperLeftPointMtrs
			attstr+="\t\tUpperLeftPointMtrs=(%f,%f)\n"%(extremeleft[gr],extremeup[gr])
			# Add lrp
			attstr+="\t\tLowerRightMtrs=(%f,%f)\n"%(extremeright[gr],extremedown[gr])
			# Add projection
			attstr+="\t\tProjection=%s\n"%paramMustSimDict[gr]["Projection"]
			# ProjParams
			attstr+="\t\tProjParams=%s\n"%paramMustSimDict[gr]["ProjParams"]
			# SphereCode
			attstr+="\t\tSphereCode=%s\n"%paramMustSimDict[gr]["SphereCode"]

			
			attstr+="""\t\tGROUP=Dimension
		\t\tEND_GROUP=Dimension
		\t\tGROUP=DataField\n"""

			## Add data sets
			# create list of ds for current grid
			lsdsgr=[]
			dsnum=1
			for ds in dslist:
				if dstogrid[ds] == gr:
					# Add object
					attstr+="\t\t\tOBJECT=DataField_%i\n"%dsnum
					# datafield name
					attstr+="\t\t\t\tDataFieldName=%s\n"%ds
					# datatype
					attstr+="\t\t\t\tDataType=%s\n"%paramMustSimDict[gr][ds]
					# dim
					attstr+='\t\t\t\tDimList=("YDim","XDim")\n'
					attstr+="\t\t\tEND_OBJECT=DataField_%i\n"%dsnum
					dsnum+=1
			attstr+="\t\tEND_GROUP=DataField\n"
			attstr+="""\t\tGROUP=MergedFields
		\t\tEND_GROUP=MergedFields\n"""
			attstr+="\tEND_GROUP=GRID_%i\n"%gridcount
			gridcount+=1
		attstr+="""END_GROUP=GridStructure
		GROUP=PointStructure
		END_GROUP=PointStructure
		END"""
		# adding attribute to new file
		att=sd.attr('StructMetadata.0')
		att.set(SDC.CHAR,attstr)
		sd.end()
		hdf.close()
		
		# This should return something somehow
	elif len(arg)>1:
		afile = arg[0]				# This is the NAME of the single input file
		newfilename = arg[1]			# This is the final file NAME
		# Create a copy
		from shutil import copyfile
		copyfile(afile,newfilename)
Example No. 34
        # Vgroup tag
        elif tag == HC.DFTAG_VG:
            vg0 = v.attach(ref)
            print("  vgroup:", vg0._name, "tag,ref:", tag, ref)
            vg0.detach()

        # Unhandled tag
        else:
            print("unhandled tag,ref", tag, ref)

    # Close vgroup
    vg.detach()

# Open HDF file in readonly mode.
filename = sys.argv[1]
hdf = HDF(filename)

# Initialize the SD, V and VS interfaces on the file.
sd = SD(filename)
vs = hdf.vstart()
v  = hdf.vgstart()

# Scan all vgroups in the file.
ref = -1
while 1:
    try:
        ref = v.getid(ref)
    except HDF4Error as msg:    # no more vgroups
        break
    describevg(ref)
Example No. 35
             ))
    # "close" vdata
    vd.detach()

def sdscreate(sd, name):

    # Create a simple 3x3 float array.
    sds = sd.create(name, SDC.FLOAT32, (3,3))
    # Initialize array
    sds[:] = ((0,1,2),(3,4,5),(6,7,8))
    # "close" dataset.
    sds.endaccess()

# Create HDF file
filename = 'inventory.hdf'
hdf = HDF(filename, HC.WRITE|HC.CREATE)

# Initialize the SD, V and VS interfaces on the file.
sd = SD(filename, SDC.WRITE)  # SD interface
vs = hdf.vstart()             # vdata interface
v  = hdf.vgstart()            # vgroup interface

# Create vdata named 'INVENTORY'.
vdatacreate(vs, 'INVENTORY')
# Create dataset named "ARR_3x3"
sdscreate(sd, 'ARR_3x3')

# Attach the vdata and the dataset.
vd = vs.attach('INVENTORY')
sds = sd.select('ARR_3x3')
def run(FILE_NAME):
    
    # Identify the data field.
    DATAFIELD_NAME = 'Blue Radiance/RDQI'

    hdf = SD(FILE_NAME, SDC.READ)

    # Read dataset.
    data3D = hdf.select(DATAFIELD_NAME)
    data = data3D[:,:,:]

    # Read attributes.
    attrs = data3D.attributes(full=1)
    fva=attrs["_FillValue"]
    _FillValue = fva[0]


    # Read geolocation dataset from another file.
    GEO_FILE_NAME = 'MISR_AM1_AGP_P117_F01_24.hdf'
    GEO_FILE_NAME = os.path.join(os.environ['HDFEOS_ZOO_DIR'], 
                                 GEO_FILE_NAME)

    hdf_geo = SD(GEO_FILE_NAME, SDC.READ)

    # Read geolocation dataset.
    lat3D = hdf_geo.select('GeoLatitude')
    lat = lat3D[:,:,:]

    lon3D = hdf_geo.select('GeoLongitude')
    lon = lon3D[:,:,:]
        

    # Read scale factor attribute.
    f = HDF(FILE_NAME, HC.READ)
    v = f.vgstart()
    vg = v.attach(8)

    # PyHDF cannot read attributes from Vgroup properly.
    # sfa = vg.attr('Scale Factor')
    # scale_factor = sfa.get()

    vg.detach()
    v.end()

    # Set it manually using HDFView.
    scale_factor = 0.047203224152326584



    # We need to shift bits for "RDQI" to get the "Blue Band" only.
    # See page 84 of "MISR Data Products Specifications (rev. S)".
    # The document is available at [1].
    datas = np.right_shift(data, 2)
    dataf = datas.astype(np.double)

    # Apply the fill value.
    dataf[data == _FillValue] = np.nan

    # Filter out values (> 16376) used for "Flag Data".
    # See Table 1.2 in "Level 1 Radiance Scaling and Conditioning
    # Algorithm  Theoretical Basis" document [2].
    dataf[datas > 16376] = np.nan
    datam = np.ma.masked_array(dataf, mask=np.isnan(dataf))

    # Apply scale factor.
    datam = scale_factor * datam

    nblocks = data.shape[0]
    ydimsize = data.shape[1]
    xdimsize = data.shape[2]

    datam = datam.reshape(nblocks*ydimsize, xdimsize)
    lat = lat.reshape(nblocks*ydimsize, xdimsize)
    lon = lon.reshape(nblocks*ydimsize, xdimsize)


    # Set the limit for the plot.
    m = Basemap(projection='cyl', resolution='h',
                llcrnrlat=np.min(lat), urcrnrlat = np.max(lat),
                llcrnrlon=np.min(lon), urcrnrlon = np.max(lon))
    m.drawcoastlines(linewidth=0.5)
    m.drawparallels(np.arange(-90., 120., 30.), labels=[1, 0, 0, 0])
    m.drawmeridians(np.arange(-180., 181., 45.), labels=[0, 0, 0, 1])
    m.pcolormesh(lon, lat, datam, latlon=True)
    cb = m.colorbar()
    cb.set_label(r'$Wm^{-2}sr^{-1}{\mu}m^{-1}$')

    basename = os.path.basename(FILE_NAME)
    plt.title('{0}\n{1}'.format(basename, 'Blue Radiance'))
    fig = plt.gcf()
    # plt.show()
    pngfile = "{0}.py.agp.png".format(basename)
    fig.savefig(pngfile)
Example No. 37
  def read_misr_dir(cls, rawdirname, AODdirname, outfile):
    """read_misr_dir(rawdirname, AODdirname, outfile)

    Read in raw MISR data from .hdf files in rawdirname,
    and AOD data from all .hdf files in AODdirname.
    Pickle the result and save it to outfile.
    Note: does NOT update object fields.
    Follow this with a call to readin().
    """

    # Get the meta-information
    #meta = sd.attributes()
    #for val in ['Origin_block.ulc.x',
    #            'Origin_block.ulc.y',
    #            'Local_mode_site_name']:
    #    info[val] = meta[val]

    # Get orbit parameters?

    data = []
    rgbimages = []

    datestr = []
    datestr2 = []
    i = 0

    # Read in the AOD (from local mode) data; this is what we'll analyze
    files = sorted(os.listdir(AODdirname))
    for f in files:
      if fnmatch.fnmatch(f, '*.hdf'):
        print " %d / %d " % (i, len(files)),
        i += 1
      
        filename = AODdirname + f

        # Check that filename exists and is an HDF file
        if HDF.ishdf(filename) != 1:
          print "File %s cannot be found or is not an HDF-4 file." % filename
          continue

        orbit    = int(filename.split('_')[5].split('O')[1])
        thisdate = MISRData.orbit_to_date(orbit)
        print "orbit: %d -> %s " % (orbit, thisdate)
        datestr = datestr + [thisdate]
        
        sd = SD.SD(filename)

        # This is 3 (SOMBlock) x 32 (x) x 128 (y) x 4 (bands)
        dataset  = sd.select('RegBestEstimateSpectralOptDepth')
        dim      = dataset.dimensions()
        # Get all of the data for the green band (band = 1)
        along_track = dim['SOMBlockDim:RegParamsAer'] * dim['XDim:RegParamsAer'] 
        cross_track = dim['YDim:RegParamsAer']
        data_now = dataset.get((0,0,0,1),(dim['SOMBlockDim:RegParamsAer'],
                                          dim['XDim:RegParamsAer'],
                                          dim['YDim:RegParamsAer'],
                                          1)).squeeze()

        # Reshape to concatenate blocks
        nrows    = data_now.shape[0]*data_now.shape[1]
        ncols    = data_now.shape[2]
        data_now = data_now.reshape((nrows, ncols))

        # Set -9999 values to NaN
        naninds = np.equal(data_now, -9999)

        # Visualize this timeslice
        #pylab.imshow(data_now)
        #pylab.title(thisdate)
        #pylab.axis('off')
        #pylab.savefig(filename + '.png')

        # Set -9999 values to NaN
        data_now[naninds] = float('NaN')

        data_now = data_now.reshape((-1, 1))
        #print type(data_now)
        #print data_now.shape
        if data == []:
          data = [data_now]
        else:
          data.append(data_now)

        # Close the file
        sd.end()

        print('.', end=' ')
        sys.stdout.flush()

    data = np.asarray(data).squeeze().T
    print(data.shape)
    
    print()
    # Data is now n x d, where n = # pixels and d = # timepts
    print('Read data set with %d pixels, %d time points.' % data.shape)
    
    # TODO: Add lat/lon coords here
    latlons = ['Unknown'] * data.shape[0]

    # Read in the raw data (for later visualization)
    files = sorted(os.listdir(rawdirname + 'AN/'))
    print "+++++++++++++"
    print len(files)
    iii = 0
    for f in files:
      if fnmatch.fnmatch(f, '*.hdf'):
        filename = rawdirname + 'AN/' + f
        #print filename
        print " %d / %d " % (iii, len(files)),
        iii += 1

        # Check that filename exists and is an HDF file
        if HDF.ishdf(filename) != 1:
          print "File %s cannot be found or is not an HDF-4 file." % filename
          continue

        # We'll assume there's a one-to-one correspondence
        # with the AOD data.  But print it out anyway as a check.
        orbit    = int(filename.split('_')[6].split('O')[1])
        thisdate = MISRData.orbit_to_date(orbit)
        print "orbit: %d -> %s " % (orbit, thisdate)
        datestr2 = datestr2 + [thisdate]
        
        sd = SD.SD(filename)
        
        
        ##################################################################################################################################################################
        dataset  = sd.select('Green Radiance/RDQI')
        dim      = dataset.dimensions()
        data_g = dataset.get((60,0,0),
                             (4, dim['XDim:GreenBand'], dim['YDim:GreenBand']),
                             (1, 1, 1)
                             ).reshape([2048, 2048])
        
        mountains = np.equal(data_g, 65511)
        padding = np.equal(data_g, 65515)
        hlines = np.equal(data_g, 65523)
        
        data_g[data_g == 65515] = 0 # PADDING

        conv_factor_ds = sd.select('GreenConversionFactor')
        dim         = conv_factor_ds.dimensions()
        conv_factor = conv_factor_ds.get((60,0,0),
                                         (4, dim['XDim:BRF Conversion Factors'], dim['YDim:BRF Conversion Factors']),
                                         (1, 1, 1)
                                         ).reshape((32, 32))
        
        conv_factor[conv_factor < 0] = 0
        
        for x in range(0,data_g.shape[0],64):
          for y in range(0,data_g.shape[1],64):
            converted = np.multiply(data_g[x:x+64,y:y+64],
                                       conv_factor[x//64, y//64])
            data_g[x:x+64,y:y+64] = converted
        
        dataset  = sd.select('Red Radiance/RDQI')
        dim      = dataset.dimensions()
        data_r = dataset.get((60,0,0),
                             (4, dim['XDim:RedBand'], dim['YDim:RedBand']),
                             (1, 1, 1)
                             ).reshape([2048, 2048])
        
        data_r[data_r == 65515] = 0 # PADDING
        
        conv_factor_ds = sd.select('RedConversionFactor')
        dim         = conv_factor_ds.dimensions()
        conv_factor = conv_factor_ds.get((60,0,0),
                                         (4, dim['XDim:BRF Conversion Factors'], dim['YDim:BRF Conversion Factors']),
                                         (1, 1, 1)
                                         ).reshape((32, 32))
        conv_factor[conv_factor < 0] = 0
        
        for x in range(0,data_r.shape[0],64):
          for y in range(0,data_r.shape[1],64):
            converted = np.multiply(data_r[x:x+64,y:y+64],
                                       conv_factor[x//64, y//64])
            data_r[x:x+64,y:y+64] = converted
        
        dataset  = sd.select('Blue Radiance/RDQI')
        dim      = dataset.dimensions()
        data_b = dataset.get((60,0,0),
                             (4, dim['XDim:BlueBand'], dim['YDim:BlueBand']),
                             (1, 1, 1)
                             ).reshape([2048, 2048])
        
        data_b[data_b == 65515] = 0 # PADDING
        
        conv_factor_ds = sd.select('BlueConversionFactor')
        dim         = conv_factor_ds.dimensions()
        conv_factor = conv_factor_ds.get((60,0,0),
                                         (4, dim['XDim:BRF Conversion Factors'], dim['YDim:BRF Conversion Factors']),
                                         (1, 1, 1)
                                         ).reshape((32, 32))
        conv_factor[conv_factor < 0] = 0
        
        for x in range(0,data_b.shape[0],64):
          for y in range(0,data_b.shape[1],64):
            converted = np.multiply(data_b[x:x+64,y:y+64],
                                       conv_factor[x//64, y//64])
            data_b[x:x+64,y:y+64] = converted
        
        im = np.zeros([2048, 2048, 3])
        data_r = data_r / float(data_r.max()) * 256
        data_g = data_g / float(data_g.max()) * 256
        data_b = data_b / float(data_b.max()) * 256

        im[...,0] = data_r
        im[...,1] = data_g
        im[...,2] = data_b
        im = im.astype('uint8')
        
        im[np.equal(im, 0)] = 255
        
        
        im[0:512, 64:, :] = im[0:512, :-64, :]
        im[1024:, :-64, :] = im[1024:, 64:, :]
        im[1536:, :-64, :] = im[1536:, 64:, :]
        
        isnotwhite = np.not_equal(im, 255)
        isnotwhiterows = isnotwhite.sum(1)
        isnotwhitecols = isnotwhite.sum(0)
        goodrows = [i for i in range(im.shape[0]) if isnotwhiterows[i, :].sum() > 0]
        goodcols = [i for i in range(im.shape[1]) if isnotwhitecols[i, :].sum() > 0]
        im = im[goodrows[0]:goodrows[-1], goodcols[0]:goodcols[-1], :]
        
        rgbimages.append(im)

        # Close the file
        sd.end()

        print('.', end=' ')
        sys.stdout.flush()
    
    outf = open(outfile, 'wb')
    print(len(datestr))
    
    # Assert that the raw and AOD sequences are corresponding
    for i in range(len(datestr)):
      if datestr[i] != datestr2[i]:
        print "ERROR!  Date sequences do not align."
        print "  detected at index %d: AOD %s, raw %s" % (i, datestr[i], datestr2[i])
    
    pickle.dump((data, rgbimages, along_track, cross_track,
                 latlons, datestr), outf)
    #pickle.dump((data, along_track, cross_track,
    #             latlons, datestr), outf)
    outf.close()