Example No. 1
    def get_metadata(self, family=None):
        if not family:
            family = [i for i in self.datasetDict.keys() if i != 'bperp'][0]
        self.file = self.datasetDict[family]
        self.metadata = read_attribute(self.file.split('.xml')[0],
                                       metafile_ext='.rsc')
        self.length = int(self.metadata['LENGTH'])
        self.width = int(self.metadata['WIDTH'])
        if 'UNIT' in self.metadata.keys():
            self.metadata.pop('UNIT')

        # NOTE: auto-detection of the processor (isce/roipac/gamma/gmtsar)
        # from the metadata or the file extension is disabled here.
        return self.metadata
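
A hedged, self-contained sketch of the default-family selection above; the dictionary contents are made up, only the != 'bperp' filter comes from the snippet:

datasetDict = {'bperp': '/data/bperp', 'slc': '/data/20190101.slc'}
family = [i for i in datasetDict.keys() if i != 'bperp'][0]
assert family == 'slc'   # dicts preserve insertion order in Python 3.7+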
Example No. 2
def check_input_file_info(inps):
    # File basic info
    atr = read_attribute(inps.file)
    msg = 'input file is '
    if not inps.file.endswith(('.h5', '.he5')):
        msg += '{} '.format(atr['PROCESSOR'])
    msg += '{} file: {}'.format(atr['FILE_TYPE'], os.path.abspath(inps.file))
    if 'DATA_TYPE' in atr.keys():
        msg += ' in {} format'.format(atr['DATA_TYPE'])

    vprint('run {} in {}'.format(os.path.basename(__file__),
                                 version.description))
    vprint(msg)

    # File size and name
    inps.length = int(atr['LENGTH'])
    inps.width = int(atr['WIDTH'])
    inps.key = atr['FILE_TYPE']
    inps.fileBase = os.path.splitext(os.path.basename(inps.file))[0]
    inps.fileExt = os.path.splitext(inps.file)[1]
    vprint('file size in y/x: {}'.format((inps.length, inps.width)))

    # File dataset List
    inps.sliceList = get_slice_list(inps.file)

    # Read input list of dataset to display
    inps, atr = read_dataset_input(inps)

    return inps, atr
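
A minimal sketch of the base-name/extension split above, using a hypothetical file path:

import os

f = '/tmp/velocity.h5'                                # made-up input path
fileBase = os.path.splitext(os.path.basename(f))[0]   # 'velocity'
fileExt = os.path.splitext(f)[1]                      # '.h5'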
Example No. 3
def prepare_geometry(geom_dir, metadata=dict(), update_mode=True):
    """Prepare and extract metadata from geometry files"""
    print('prepare .rsc file for geometry files')
    # grab all existing files

    names = ['hgt', 'lat', 'lon', 'los', 'shadowMask', 'incLocal']
    isce_files = [
        os.path.join(os.path.abspath(geom_dir), x + '.rdr.full.xml')
        for x in names
    ]
    # fall back to the multilooked geometry files if no full-resolution ones exist
    if not os.path.exists(isce_files[0]):
        isce_files = [
            os.path.join(os.path.abspath(geom_dir), x + '.rdr.xml')
            for x in names
        ]

    isce_files = [i for i in isce_files if os.path.isfile(i)]
    # write rsc file for each file
    for isce_file in isce_files:
        # prepare metadata for current file
        geom_metadata = read_attribute(isce_file.split('.xml')[0],
                                       metafile_ext='.xml')
        geom_metadata.update(metadata)
        # write .rsc file
        rsc_file = isce_file.split('.xml')[0] + '.rsc'
        writefile.write_roipac_rsc(geom_metadata,
                                   rsc_file,
                                   update_mode=update_mode,
                                   print_msg=True)
    return metadata
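
A self-contained sketch of the path construction above; geom_dir is a made-up directory, the file names and extensions come from the snippet:

import os

geom_dir = '/data/geom_reference'   # hypothetical geometry directory
names = ['hgt', 'lat', 'lon', 'los', 'shadowMask', 'incLocal']
full_res = [os.path.join(os.path.abspath(geom_dir), n + '.rdr.full.xml') for n in names]
multilooked = [os.path.join(os.path.abspath(geom_dir), n + '.rdr.xml') for n in names]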
Example No. 4
    def get_metadata(self, family='slc'):
        self.file = self.datasetDict[family].split('.xml')[0]
        self.metadata = read_attribute(self.file, metafile_ext='.rsc')
        self.length = int(self.metadata['LENGTH'])
        self.width = int(self.metadata['WIDTH'])

        # NOTE: auto-detection of the processor (isce/roipac/gamma/gmtsar)
        # from the metadata or the file extension is disabled here.

        if self.track:
            self.metadata['TRACK'] = self.track

        if self.platform:
            self.metadata['PLATFORM'] = self.platform

        return self.metadata
Example No. 5
    def get_perp_baseline(self, family='slc'):
        self.file = self.datasetDict[family].split('.xml')[0]
        metadata = read_attribute(self.file, metafile_ext='.rsc')
        self.bperp_top = float(metadata['P_BASELINE_TOP_HDR'])
        self.bperp_bottom = float(metadata['P_BASELINE_BOTTOM_HDR'])
        self.bperp = (self.bperp_top + self.bperp_bottom) / 2.0
        return self.bperp
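
A worked example of the baseline average above, with made-up values:

bperp_top, bperp_bottom = 42.1, 38.3       # hypothetical baselines in meters
bperp = (bperp_top + bperp_bottom) / 2.0   # ~40.2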
Example No. 6
    def read(self, family, box=None, datasetName=None):
        fname = self.datasetDict[family].split('.xml')[0]

        # metadata
        dsname4atr = None  # used to determine UNIT
        if isinstance(datasetName, list):
            dsname4atr = datasetName[0].split('-')[0]
        elif isinstance(datasetName, str):
            dsname4atr = datasetName.split('-')[0]
        atr = read_attribute(fname,
                             datasetName=dsname4atr,
                             metafile_ext='.rsc')

        # Read Data
        fext = os.path.splitext(os.path.basename(fname))[1].lower()
        if fext in ['.h5', '.he5']:
            # box
            length, width = int(atr['LENGTH']), int(atr['WIDTH'])
            if not box:
                box = (0, 0, width, length)
            data = readfile.read_hdf5_file(fname,
                                           datasetName=datasetName,
                                           box=box)
            return data
        else:
            # for binary files, return the file name and its metadata instead of the data array
            metadata = read_binary_file(fname, datasetName=datasetName)
            return fname, metadata
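
A short sketch of the dataset-name-to-family reduction used for the UNIT lookup above:

datasetName = 'unwrapPhase-20190101_20190113'   # made-up slice name
dsname4atr = datasetName.split('-')[0]          # 'unwrapPhase'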
Example No. 7
    def get_dataset_data_type(self, dsName):
        slcObj = [v for v in self.pairsDict.values()][0]
        dsFile = slcObj.datasetDict[dsName]
        metadata = read_attribute(dsFile.split('.xml')[0], metafile_ext='.rsc')
        # fall back to the module-level default data type
        dsDataType = dataType
        if 'DATA_TYPE' in metadata.keys():
            dsDataType = dataTypeDict[metadata['DATA_TYPE'].lower()]
        return dsDataType
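
A self-contained sketch of the data-type fallback above; the default value and the mapping entries are assumptions, only the lookup pattern comes from the snippet:

dataType = 'float32'                                        # assumed module-level default
dataTypeDict = {'cfloat': 'complex64', 'float': 'float32'}  # made-up subset of the real dict
metadata = {'DATA_TYPE': 'CFLOAT'}
dsDataType = dataTypeDict.get(metadata['DATA_TYPE'].lower(), dataType)   # 'complex64'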
Example No. 8
    def get_size(self, family=None, box=None, xstep=1, ystep=1):
        if not family:
            family = [i for i in self.datasetDict.keys() if i != 'bperp'][0]
        self.file = self.datasetDict[family]
        metadata = read_attribute(self.file.split('.xml')[0],
                                  metafile_ext='.rsc')
        # update due to subset
        if box:
            length = box[3] - box[1]
            width = box[2] - box[0]
        else:
            length = int(metadata['LENGTH'])
            width = int(metadata['WIDTH'])

        # update due to multilook
        length = length // ystep
        width = width // xstep
        return length, width
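
A worked example of the subset/multilook arithmetic above, using the (x0, y0, x1, y1) box convention implied by the snippet:

box = (100, 200, 1100, 1800)                       # made-up x0, y0, x1, y1
length, width = box[3] - box[1], box[2] - box[0]   # 1600, 1000
ystep, xstep = 3, 3                                # hypothetical multilook factors
length, width = length // ystep, width // xstep    # 533, 333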
Example No. 9
    def __init__(self,
                 name='geometry',
                 processor=None,
                 datasetDict=None,
                 extraMetadata=None):
        self.name = name
        self.processor = processor
        # avoid sharing one mutable default dict across instances
        self.datasetDict = datasetDict if datasetDict is not None else {}
        self.extraMetadata = extraMetadata

        # get extra metadata from geometry file if possible
        self.dsNames = list(self.datasetDict.keys())
        if not self.extraMetadata:
            dsFile = self.datasetDict[self.dsNames[0]]
            metadata = read_attribute(dsFile.split('.xml')[0],
                                      metafile_ext='.rsc')
            if all(i in metadata.keys()
                   for i in ['STARTING_RANGE', 'RANGE_PIXEL_SIZE']):
                self.extraMetadata = metadata
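
A short sketch of the radar-geometry check above: extra metadata is adopted only when both range keys are present (values are made up):

metadata = {'STARTING_RANGE': '800000.0', 'RANGE_PIXEL_SIZE': '2.33'}
has_radar_keys = all(i in metadata for i in ['STARTING_RANGE', 'RANGE_PIXEL_SIZE'])   # True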
Example No. 10
def prepare_stack(inputDir,
                  filePattern,
                  metadata=dict(),
                  baseline_dict=dict(),
                  update_mode=True):
    print('prepare .rsc file for ', filePattern)
    # fall back to the multilooked file pattern if no full-resolution file exists
    # (indexing an empty glob() result would raise IndexError)
    if len(glob.glob(os.path.join(os.path.abspath(inputDir), '*',
                                  filePattern + '.xml'))) == 0:
        filePattern = filePattern.split('.full')[0]
    isce_files = sorted(
        glob.glob(
            os.path.join(os.path.abspath(inputDir), '*',
                         filePattern + '.xml')))
    if len(isce_files) == 0:
        raise FileNotFoundError(
            'no file found in pattern: {}'.format(filePattern))
    slc_dates = np.sort(os.listdir(inputDir))
    # write a .rsc file for each SLC file
    num_file = len(isce_files)
    prog_bar = ptime.progressBar(maxValue=num_file)
    for i in range(num_file):
        isce_file = isce_files[i].split('.xml')[0]
        # prepare metadata for current file
        slc_metadata = read_attribute(isce_file, metafile_ext='.xml')
        slc_metadata.update(metadata)
        dates = [slc_dates[0], os.path.basename(os.path.dirname(isce_file))]
        slc_metadata = add_slc_metadata(slc_metadata, dates, baseline_dict)

        # write .rsc file
        rsc_file = isce_file + '.rsc'
        writefile.write_roipac_rsc(slc_metadata,
                                   rsc_file,
                                   update_mode=update_mode,
                                   print_msg=False)
        prog_bar.update(i + 1, suffix='{}_{}'.format(dates[0], dates[1]))
    prog_bar.close()
    return
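
A self-contained sketch of the date-pair construction above, assuming the hypothetical one-folder-per-date stack layout:

import os

isce_file = '/stack/20190113/20190113.slc.full'   # made-up path with '.xml' stripped
slc_dates = ['20190101', '20190113']              # stand-in for np.sort(os.listdir(inputDir))
dates = [slc_dates[0], os.path.basename(os.path.dirname(isce_file))]
# dates == ['20190101', '20190113']: reference date first, then the file's own date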
Example No. 11
def read_inps_dict2geometry_dict_object(inpsDict):
    # eliminate dsName by processor
    if 'processor' not in inpsDict and 'PROCESSOR' in inpsDict:
        inpsDict['processor'] = inpsDict['PROCESSOR']
    if inpsDict['processor'] in ['isce', 'doris']:
        datasetName2templateKey.pop('azimuthCoord')
        datasetName2templateKey.pop('rangeCoord')
    elif inpsDict['processor'] in ['roipac', 'gamma']:
        datasetName2templateKey.pop('latitude')
        datasetName2templateKey.pop('longitude')
    elif inpsDict['processor'] in ['snap']:
        # check again when there is a SNAP product in radar coordinates
        pass
    else:
        print('Unrecognized InSAR processor: {}'.format(inpsDict['processor']))

    # inpsDict --> dsPathDict
    print('-' * 50)
    print('searching geometry files info')
    print('input data files:')

    maxDigit = max([len(i) for i in list(datasetName2templateKey.keys())])
    dsPathDict = {}
    for dsName in [i for i in geometryDatasetNames
                   if i in datasetName2templateKey.keys()]:
        key = datasetName2templateKey[dsName]
        if key in inpsDict.keys():
            files = sorted(glob.glob(str(inpsDict[key]) + '.xml'))
            files = [item.split('.xml')[0] for item in files]
            if len(files) > 0:
                if dsName == 'bperp':
                    bperpDict = {}
                    for file in files:
                        date = ptime.yyyymmdd(os.path.basename(os.path.dirname(file)))
                        bperpDict[date] = file
                    dsPathDict[dsName] = bperpDict
                    print('{:<{width}}: {path}'.format(dsName,
                                                       width=maxDigit,
                                                       path=inpsDict[key]))
                    print('number of bperp files: {}'.format(len(list(bperpDict.keys()))))
                else:
                    dsPathDict[dsName] = files[0]
                    print('{:<{width}}: {path}'.format(dsName,
                                                       width=maxDigit,
                                                       path=files[0]))

    # Check required dataset
    dsName0 = geometryDatasetNames[0]
    if dsName0 not in dsPathDict.keys():
        print('WARNING: No required {} data files found!'.format(dsName0))

    # metadata
    slcRadarMetadata = None
    slcKey = datasetName2templateKey['slc']
    if slcKey in inpsDict.keys():
        slcFiles = glob.glob(str(inpsDict[slcKey]))
        if len(slcFiles) > 0:
            atr = readfile.read_attribute(slcFiles[0])
            if 'Y_FIRST' not in atr.keys():
                slcRadarMetadata = atr.copy()

    # dsPathDict --> dsGeoPathDict + dsRadarPathDict
    dsNameList = list(dsPathDict.keys())
    dsGeoPathDict = {}
    dsRadarPathDict = {}
    for dsName in dsNameList:
        if dsName == 'bperp':
            atr = readfile.read_attribute(next(iter(dsPathDict[dsName].values())))
        else:
            atr = mut.read_attribute(dsPathDict[dsName].split('.xml')[0], metafile_ext='.xml')
        if 'Y_FIRST' in atr.keys():
            dsGeoPathDict[dsName] = dsPathDict[dsName]
        else:
            dsRadarPathDict[dsName] = dsPathDict[dsName]

    geomRadarObj = None
    geomGeoObj = None

    if len(dsRadarPathDict) > 0:
        geomRadarObj = geometryDict(processor=inpsDict['processor'],
                                    datasetDict=dsRadarPathDict,
                                    extraMetadata=slcRadarMetadata)
    if len(dsGeoPathDict) > 0:
        geomGeoObj = geometryDict(processor=inpsDict['processor'],
                                  datasetDict=dsGeoPathDict,
                                  extraMetadata=None)
    return geomRadarObj, geomGeoObj
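
A minimal sketch of the geo/radar split above: datasets whose metadata carry 'Y_FIRST' are geocoded, the rest stay in radar coordinates (paths and attributes are made up):

dsPathDict = {'height': 'hgt.rdr', 'latitude': 'lat.rdr'}
attrs = {'hgt.rdr': {}, 'lat.rdr': {'Y_FIRST': '0.0'}}   # made-up per-file metadata
dsGeoPathDict = {k: v for k, v in dsPathDict.items() if 'Y_FIRST' in attrs[v]}
dsRadarPathDict = {k: v for k, v in dsPathDict.items() if 'Y_FIRST' not in attrs[v]}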
Example No. 12
def prepare4multi_subplots(inps, metadata):
    """Prepare for multiple subplots:
    1) check multilook to save memory
    2) read existed reference pixel info for unwrapPhase
    3) read dropIfgram info
    4) read and prepare DEM for background
    """
    inps.dsetFamilyList = sorted(list(set(i.split('-')[0] for i in inps.dset)))

    # Update multilook parameters with the new figure row and column numbers
    if inps.multilook and inps.multilook_num == 1:
        # Do not auto multilook mask and lookup table file
        auto_multilook = True
        for dsFamily in inps.dsetFamilyList:
            if any(i in dsFamily.lower() for i in ['mask', 'coord']):
                auto_multilook = False
        if auto_multilook:
            inps.multilook, inps.multilook_num = check_multilook_input(
                inps.pix_box, inps.fig_row_num, inps.fig_col_num)
        if inps.msk is not None:
            inps.msk = multilook_data(inps.msk, inps.multilook_num,
                                      inps.multilook_num)

    # Reference pixel for timeseries and ifgramStack
    # metadata = read_attribute(inps.file)
    inps.file_ref_yx = None
    if inps.key in ['ifgramStack'] and 'REF_Y' in metadata.keys():
        ref_y, ref_x = int(metadata['REF_Y']), int(metadata['REF_X'])
        length, width = int(metadata['LENGTH']), int(metadata['WIDTH'])
        if 0 <= ref_y < length and 0 <= ref_x < width:
            inps.file_ref_yx = [ref_y, ref_x]
            vprint('consider reference pixel in y/x: {}'.format(
                inps.file_ref_yx))

    # shrink the reference-pixel marker as the number of subplots grows;
    # check the larger threshold first so that both branches are reachable
    if inps.dsetNum > 100:
        inps.ref_marker_size /= 20.
        # inps.disp_ref_pixel = False
        # vprint('turn off reference pixel plot for more than 10 datasets to display')
    elif inps.dsetNum > 10:
        inps.ref_marker_size /= 10.

    # Check dropped interferograms
    inps.dropDatasetList = []
    if inps.key == 'ifgramStack' and inps.disp_title:
        obj = ifgramStack(inps.file)
        obj.open(print_msg=False)
        dropDate12List = obj.get_drop_date12_list()
        for i in inps.dsetFamilyList:
            inps.dropDatasetList += [
                '{}-{}'.format(i, j) for j in dropDate12List
            ]
        vprint(
            "mark interferograms with 'dropIfgram=False' in red colored title")

    # Read DEM
    if inps.dem_file:
        dem_metadata = read_attribute(inps.dem_file)
        if all(dem_metadata[i] == metadata[i] for i in ['LENGTH', 'WIDTH']):
            vprint('reading DEM: {} ... '.format(
                os.path.basename(inps.dem_file)))
            dem = read(inps.dem_file,
                       datasetName='height',
                       box=inps.pix_box,
                       print_msg=False)[0]
            if inps.multilook:
                dem = multilook_data(dem, inps.multilook_num,
                                     inps.multilook_num)
            (inps.dem_shade, inps.dem_contour,
             inps.dem_contour_seq) = pp.prepare_dem_background(
                 dem=dem, inps=inps, print_msg=inps.print_msg)
        else:
            inps.dem_file = None
            inps.transparency = 1.0
            vprint(
                'Input DEM file has different size than data file, ignore it.')
    return inps
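
A worked example of the reference-pixel bounds check above, with made-up metadata values:

ref_y, ref_x = 120, 340
length, width = 500, 600
file_ref_yx = [ref_y, ref_x] if (0 <= ref_y < length and 0 <= ref_x < width) else None
# file_ref_yx == [120, 340]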
Example No. 13
def read_dataset_input(inps):
    """Check input / exclude / reference dataset input with file dataset list"""
    # read inps.dset + inps.dsetNumList --> inps.dsetNumList

    if len(inps.dset) > 0 or len(inps.dsetNumList) > 0:
        if inps.key == 'velocity':
            inps.globSearch = False
            vprint('turning glob search OFF for {} file'.format(inps.key))
        inps.dsetNumList = check_dataset_input(inps.sliceList, inps.dset,
                                               inps.dsetNumList,
                                               inps.globSearch)[1]

    else:
        # default dataset to display for certain type of files
        if inps.key == 'geometry':
            inps.dset = geometryDatasetNames
            inps.dset.remove('bperp')
        elif inps.key == 'slc':
            inps.dset = ['slc']
        elif inps.key == 'ifgramStack':
            inps.dset = ['unwrapPhase']
        elif inps.key == 'HDFEOS':
            inps.dset = ['displacement']
        elif inps.key == 'giantTimeseries':
            inps.dset = ['recons']
        elif inps.key == 'giantIfgramStack':
            obj = giantIfgramStack(inps.file)
            obj.open(print_msg=False)
            inps.dset = [obj.sliceList[0].split('-')[0]]
        else:
            inps.dset = inps.sliceList
        inps.dsetNumList = check_dataset_input(inps.sliceList, inps.dset,
                                               inps.dsetNumList,
                                               inps.globSearch)[1]

    # read inps.exDsetList
    inps.exDsetList, inps.exDsetNumList = check_dataset_input(
        inps.sliceList, inps.exDsetList, [], inps.globSearch)

    # get inps.dset
    inps.dsetNumList = sorted(
        list(set(inps.dsetNumList) - set(inps.exDsetNumList)))
    inps.dset = [inps.sliceList[i] for i in inps.dsetNumList]
    inps.dsetNum = len(inps.dset)

    if inps.ref_date:
        if inps.key not in timeseriesKeyNames:
            inps.ref_date = None
        else:
            ref_date = check_dataset_input(inps.sliceList, [inps.ref_date], [],
                                           inps.globSearch)[0][0]
            if not ref_date:
                vprint(
                    'WARNING: input reference date is not included in input file!')
                vprint('input reference date: ' + inps.ref_date)
                inps.ref_date = None
            else:
                inps.ref_date = ref_date

    vprint('num of datasets in file {}: {}'.format(
        os.path.basename(inps.file), len(inps.sliceList)))
    if inps.key in ['ifgramStack']:
        vprint('num of datasets to exclude: {}'.format(len(inps.exDsetList)))
        vprint('num of datasets to display: {}'.format(len(inps.dset)))
    else:
        vprint('datasets to exclude ({}):\n{}'.format(len(inps.exDsetList),
                                                      inps.exDsetList))
        vprint('datasets to display ({}):\n{}'.format(len(inps.dset),
                                                      inps.dset))
    if inps.ref_date and inps.key in timeseriesKeyNames:
        vprint('input reference date: {}'.format(inps.ref_date))

    if inps.dsetNum == 0:
        msg = 'No input dataset found!'
        msg += '\navailable datasets:\n{}'.format(inps.sliceList)
        raise Exception(msg)

    atr = read_attribute(inps.file, datasetName=inps.dset[0].split('-')[0])
    return inps, atr
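
A short sketch of the include/exclude set arithmetic above:

dsetNumList, exDsetNumList = [0, 1, 2, 3], [2]
dsetNumList = sorted(set(dsetNumList) - set(exDsetNumList))   # [0, 1, 3]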
Example No. 14
    def get_size(self, family='slc'):
        self.file = self.datasetDict[family].split('.xml')[0]
        metadata = read_attribute(self.file, metafile_ext='.rsc')
        self.length = int(metadata['LENGTH'])
        self.width = int(metadata['WIDTH'])
        return self.length, self.width