Code example #1
def main(iargs=None):
    inps = cmd_line_parse(iargs)
    start_time = time.time()

    if inps.updateMode:
        print('update mode: ON')
    else:
        print('update mode: OFF')

    # extract metadata
    meta = extract_metadata(inps.unwFile)
    box, meta = read_subset_box(inps.template_file, meta)
    if inps.xstep * inps.ystep > 1:
        meta = attr.update_attribute4multilook(meta,
                                               lks_y=inps.ystep,
                                               lks_x=inps.xstep)

    length = int(meta["LENGTH"])
    width = int(meta["WIDTH"])
    num_pair = int(meta["NUMBER_OF_PAIRS"])

    # prepare output directory
    out_dir = os.path.dirname(inps.outfile[0])
    os.makedirs(out_dir, exist_ok=True)

    ########## output file 1 - ifgramStack
    # define dataset structure for ifgramStack
    dsNameDict = {
        "date": (np.dtype('S8'), (num_pair, 2)),
        "dropIfgram": (np.bool_, (num_pair, )),
        "bperp": (np.float32, (num_pair, )),
        "unwrapPhase": (np.float32, (num_pair, length, width)),
        "coherence": (np.float32, (num_pair, length, width)),
        "connectComponent": (np.int16, (num_pair, length, width)),
    }
    if inps.magFile is not None:
        dsNameDict['magnitude'] = (np.float32, (num_pair, length, width))

    if run_or_skip(inps, dsNameDict, out_file=inps.outfile[0]) == 'run':
        # initialize the HDF5 file with the defined structure
        meta['FILE_TYPE'] = 'ifgramStack'
        writefile.layout_hdf5(inps.outfile[0],
                              dsNameDict,
                              meta,
                              compression=inps.compression)

        # write data to the HDF5 file on disk
        write_ifgram_stack(inps.outfile[0],
                           unwStack=inps.unwFile,
                           cohStack=inps.corFile,
                           connCompStack=inps.connCompFile,
                           ampStack=inps.magFile,
                           box=box,
                           xstep=inps.xstep,
                           ystep=inps.ystep)

    ########## output file 2 - geometryGeo
    # define dataset structure for geometry
    dsNameDict = {
        "height": (np.float32, (length, width)),
        "incidenceAngle": (np.float32, (length, width)),
        "slantRangeDistance": (np.float32, (length, width)),
    }
    if inps.azAngleFile is not None:
        dsNameDict["azimuthAngle"] = (np.float32, (length, width))
    if inps.waterMaskFile is not None:
        dsNameDict["waterMask"] = (np.bool_, (length, width))

    if run_or_skip(inps, dsNameDict, out_file=inps.outfile[1]) == 'run':
        # initialize the HDF5 file with the defined structure
        meta['FILE_TYPE'] = 'geometry'
        writefile.layout_hdf5(inps.outfile[1],
                              dsNameDict,
                              meta,
                              compression=inps.compression)

        # write data to disk
        write_geometry(inps.outfile[1],
                       demFile=inps.demFile,
                       incAngleFile=inps.incAngleFile,
                       azAngleFile=inps.azAngleFile,
                       waterMaskFile=inps.waterMaskFile,
                       box=box,
                       xstep=inps.xstep,
                       ystep=inps.ystep)

    print('-' * 50)

    # time info
    m, s = divmod(time.time() - start_time, 60)
    print('time used: {:02.0f} mins {:02.1f} secs.'.format(m, s))

    return inps.outfile
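
The driver above follows a two-pass pattern: writefile.layout_hdf5 pre-allocates the full-size datasets together with the metadata, and the write_* helpers then fill them block by block. Below is a minimal sketch of that pattern in plain h5py; the file name, dataset name, and shapes are illustrative, not MintPy's API.

import h5py
import numpy as np

num_pair, length, width = 3, 100, 200

# pass 1: pre-allocate the full-size dataset and metadata (the role of layout_hdf5)
with h5py.File('example_stack.h5', 'w') as f:
    f.create_dataset('unwrapPhase', shape=(num_pair, length, width),
                     dtype=np.float32, chunks=True)
    f.attrs['FILE_TYPE'] = 'ifgramStack'

# pass 2: fill one pair at a time so peak memory stays bounded
with h5py.File('example_stack.h5', 'r+') as f:
    for i in range(num_pair):
        data = np.zeros((length, width), dtype=np.float32)  # stands in for a real read
        f['unwrapPhase'][i, :, :] = data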
Code example #2
    def write2hdf5(self,
                   outputFile='ifgramStack.h5',
                   access_mode='w',
                   box=None,
                   xstep=1,
                   ystep=1,
                   compression=None,
                   extra_metadata=None):
        """Save/write an ifgramStackDict object into an HDF5 file with the structure defined in:

        https://mintpy.readthedocs.io/en/latest/api/data_structure/#ifgramstack

        Parameters: outputFile : str, name of the output HDF5 file for the InSAR stack
                    access_mode : str, access mode of the output file, e.g. w, r+
                    box : tuple, subset range in (x0, y0, x1, y1)
                    xstep/ystep : int, number of looks in x/y direction for multilooking
                    compression : str or None, HDF5 dataset compression, e.g. lzf, gzip
                    extra_metadata : dict, extra metadata to be added into the output file
        Returns:    outputFile : str, path of the output HDF5 file
        """
        """

        self.pairs = sorted(self.pairsDict.keys())
        self.dsNames = list(self.pairsDict[self.pairs[0]].datasetDict.keys())
        # keep the canonical dataset order defined in ifgramDatasetNames
        self.dsNames = [i for i in ifgramDatasetNames if i in self.dsNames]
        maxDigit = max(len(i) for i in self.dsNames)
        self.get_size(box=box, xstep=xstep, ystep=ystep)

        self.outputFile = outputFile
        with h5py.File(self.outputFile, access_mode) as f:
            print('create HDF5 file {} with {} mode'.format(
                self.outputFile, access_mode))

            ###############################
            # 3D datasets containing unwrapPhase, magnitude, coherence, connectComponent, wrapPhase, etc.
            for dsName in self.dsNames:
                dsShape = (self.numIfgram, self.length, self.width)
                dsDataType = np.float32
                dsCompression = compression
                if dsName in ['connectComponent']:
                    dsDataType = np.int16
                    dsCompression = 'lzf'

                print(('create dataset /{d:<{w}} of {t:<25} in size of {s}'
                       ' with compression = {c}').format(d=dsName,
                                                         w=maxDigit,
                                                         t=str(dsDataType),
                                                         s=dsShape,
                                                         c=dsCompression))
                ds = f.create_dataset(dsName,
                                      shape=dsShape,
                                      maxshape=(None, dsShape[1], dsShape[2]),
                                      dtype=dsDataType,
                                      chunks=True,
                                      compression=dsCompression)

                prog_bar = ptime.progressBar(maxValue=self.numIfgram)
                for i in range(self.numIfgram):
                    # read
                    ifgramObj = self.pairsDict[self.pairs[i]]
                    data = ifgramObj.read(dsName,
                                          box=box,
                                          xstep=xstep,
                                          ystep=ystep)[0]
                    # write
                    ds[i, :, :] = data
                    prog_bar.update(i + 1,
                                    suffix='{}_{}'.format(
                                        self.pairs[i][0], self.pairs[i][1]))
                prog_bar.close()
                ds.attrs['MODIFICATION_TIME'] = str(time.time())

            ###############################
            # 2D dataset containing reference and secondary dates of all pairs
            dsName = 'date'
            dsDataType = np.string_
            dsShape = (self.numIfgram, 2)
            print('create dataset /{d:<{w}} of {t:<25} in size of {s}'.format(
                d=dsName, w=maxDigit, t=str(dsDataType), s=dsShape))
            data = np.array(self.pairs, dtype=dsDataType)
            f.create_dataset(dsName, data=data)

            ###############################
            # 1D dataset containing perpendicular baseline of all pairs
            dsName = 'bperp'
            dsDataType = np.float32
            dsShape = (self.numIfgram, )
            print('create dataset /{d:<{w}} of {t:<25} in size of {s}'.format(
                d=dsName, w=maxDigit, t=str(dsDataType), s=dsShape))
            # get bperp
            data = np.zeros(self.numIfgram, dtype=dsDataType)
            for i in range(self.numIfgram):
                ifgramObj = self.pairsDict[self.pairs[i]]
                data[i] = ifgramObj.get_perp_baseline(family=self.dsName0)
            # write
            f.create_dataset(dsName, data=data)

            ###############################
            # 1D dataset of bool flags marking whether each interferogram is used (True, the default) or dropped
            dsName = 'dropIfgram'
            dsDataType = np.bool_
            dsShape = (self.numIfgram, )
            print('create dataset /{d:<{w}} of {t:<25} in size of {s}'.format(
                d=dsName, w=maxDigit, t=str(dsDataType), s=dsShape))
            data = np.ones(dsShape, dtype=dsDataType)
            f.create_dataset(dsName, data=data)

            ###############################
            # Attributes
            self.get_metadata()
            if extra_metadata:
                self.metadata.update(extra_metadata)
                print('add extra metadata: {}'.format(extra_metadata))

            # update metadata due to subset
            self.metadata = attr.update_attribute4subset(self.metadata, box)
            # update metadata due to multilook
            if xstep * ystep > 1:
                self.metadata = attr.update_attribute4multilook(
                    self.metadata, ystep, xstep)

            self.metadata['FILE_TYPE'] = self.name
            for key, value in self.metadata.items():
                f.attrs[key] = value

        print('Finished writing to {}'.format(self.outputFile))
        return self.outputFile
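
For reference, here is a minimal sketch of reading back the structure this method writes. The dataset names match the ones created above; dropIfgram is initialized to all True by the np.ones call, i.e. every interferogram is marked as used.

import h5py

with h5py.File('ifgramStack.h5', 'r') as f:
    dates = f['date'][:].astype(str)    # (num_ifgram, 2) reference/secondary dates
    bperp = f['bperp'][:]               # (num_ifgram,) perpendicular baselines in meter
    drop = f['dropIfgram'][:]           # (num_ifgram,) bool flags, all True by default
    phase = f['unwrapPhase'][0, :, :]   # first interferogram, full frame
    print(dates.shape, bperp.dtype, drop.sum(), phase.shape)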
Code example #3
def multilook_file(infile,
                   lks_y,
                   lks_x,
                   outfile=None,
                   method='average',
                   margin=[0, 0, 0, 0],
                   max_memory=4):
    """ Multilook input file
    Parameters: infile - str, path of input file to be multilooked.
                lks_y  - int, number of looks in y / row direction.
                lks_x  - int, number of looks in x / column direction.
                margin - list of 4 int, number of pixels to be skipped during multilooking.
                         useful for offset product, where the marginal pixels are ignored during
                         cross correlation matching.
                outfile - str, path of output file
    Returns:    outfile - str, path of output file
    """
    lks_y = int(lks_y)
    lks_x = int(lks_x)

    # input file info
    atr = readfile.read_attribute(infile)
    length, width = int(atr['LENGTH']), int(atr['WIDTH'])
    k = atr['FILE_TYPE']
    print('multilooking {} {} file: {}'.format(atr['PROCESSOR'], k, infile))
    print('number of looks in y / azimuth direction: %d' % lks_y)
    print('number of looks in x / range   direction: %d' % lks_x)
    print('multilook method: {}'.format(method))

    # margin --> box
    # note: use != rather than "is not", which compares identity and is always True for a list literal
    if margin != [0, 0, 0, 0]:  # top, bottom, left, right
        box = (margin[2], margin[0], width - margin[3], length - margin[1])
        print('number of pixels to skip in top/bottom/left/right boundaries: {}'.format(margin))
    else:
        box = (0, 0, width, length)

    # output file name
    ext = os.path.splitext(infile)[1]
    if not outfile:
        if os.getcwd() == os.path.dirname(os.path.abspath(infile)):
            outfile = '{}_{}alks_{}rlks{}'.format(
                os.path.splitext(infile)[0], lks_y, lks_x, ext)
        else:
            outfile = os.path.basename(infile)

    # update metadata
    atr = attr.update_attribute4multilook(atr, lks_y, lks_x, box=box)

    if ext in ['.h5', '.he5']:
        writefile.layout_hdf5(outfile, metadata=atr, ref_file=infile)

    # read source data and multilooking
    dsNames = readfile.get_dataset_list(infile)
    maxDigit = max([len(i) for i in dsNames])
    dsDict = dict()
    for dsName in dsNames:
        print('multilooking {d:<{w}} from {f} ...'.format(
            d=dsName, w=maxDigit, f=os.path.basename(infile)))

        # split in Y/row direction for IO for HDF5 only
        if ext in ['.h5', '.he5']:
            # calc row step size so that each I/O block stays within max_memory (in GB);
            # ds_size assumes 4 bytes per element, and the extra factor of 4 below
            # leaves headroom for intermediate copies during multilooking
            with h5py.File(infile, 'r') as f:
                ds = f[dsName]
                ds_size = np.prod(ds.shape) * 4
            num_step = int(np.ceil(ds_size * 4 / (max_memory * 1024**3)))
            row_step = int(np.rint(length / num_step / 10) * 10)
            row_step = max(row_step, 10)

        else:
            row_step = box[3] - box[1]

        num_step = int(np.ceil((box[3] - box[1]) / (row_step * lks_y)))
        for i in range(num_step):
            r0 = box[1] + row_step * lks_y * i
            r1 = box[1] + row_step * lks_y * (i + 1)
            r1 = min(r1, box[3])
            # IO box
            box_i = (box[0], r0, box[2], r1)
            box_o = (int((box[0] - box[0]) / lks_x),
                     int((r0 - box[1]) / lks_y),
                     int((box[2] - box[0]) / lks_x),
                     int((r1 - box[1]) / lks_y))
            print('box: {}'.format(box_o))

            # read / multilook
            if method == 'nearest':
                data = readfile.read(infile,
                                     datasetName=dsName,
                                     box=box_i,
                                     xstep=lks_x,
                                     ystep=lks_y,
                                     print_msg=False)[0]

            else:
                data = readfile.read(infile,
                                     datasetName=dsName,
                                     box=box_i,
                                     print_msg=False)[0]

                data = multilook_data(data, lks_y, lks_x)

            # output block
            if data.ndim == 3:
                block = [
                    0, data.shape[0], box_o[1], box_o[3], box_o[0], box_o[2]
                ]
            else:
                block = [box_o[1], box_o[3], box_o[0], box_o[2]]

            # write
            if ext in ['.h5', '.he5']:
                writefile.write_hdf5_block(outfile,
                                           data=data,
                                           datasetName=dsName,
                                           block=block,
                                           print_msg=False)
            else:
                dsDict[dsName] = data

    # for binary file with 2 bands, always use BIL scheme
    if (len(dsDict.keys()) == 2
            and os.path.splitext(infile)[1] not in ['.h5', '.he5']
            and atr.get('scheme', 'BIL').upper() != 'BIL'):
        print('the input binary file has 2 bands with band interleave as: {}'.format(atr['scheme']))
        print('for the output binary file, change the band interleave to BIL as default.')
        atr['scheme'] = 'BIL'

    if ext not in ['.h5', '.he5']:
        writefile.write(dsDict,
                        out_file=outfile,
                        metadata=atr,
                        ref_file=infile)
    return outfile
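
For context, multilook_data (called above for method='average') reduces each lks_y x lks_x window of pixels to a single value. A standalone sketch of that averaging step, assuming a simple crop-and-reshape implementation rather than MintPy's exact code:

import numpy as np

def multilook_mean(data, lks_y, lks_x):
    """Average each lks_y x lks_x block of a 2D array, ignoring NaNs."""
    rows = (data.shape[0] // lks_y) * lks_y   # crop to a multiple of the window
    cols = (data.shape[1] // lks_x) * lks_x
    data = data[:rows, :cols]
    data = data.reshape(rows // lks_y, lks_y, cols // lks_x, lks_x)
    return np.nanmean(data, axis=(1, 3))

data = np.arange(24, dtype=np.float32).reshape(4, 6)
print(multilook_mean(data, 2, 3))   # [[ 4.  7.] [16. 19.]]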
Code example #4
    def write2hdf5(self,
                   outputFile='geometryRadar.h5',
                   access_mode='w',
                   box=None,
                   xstep=1,
                   ystep=1,
                   compression='lzf',
                   extra_metadata=None):
        """Save/write to HDF5 file with structure defined in:
            https://mintpy.readthedocs.io/en/latest/api/data_structure/#geometry
        """
        if len(self.datasetDict) == 0:
            print('No dataset file path in the object, skip HDF5 file writing.')
            return None

        maxDigit = max([len(i) for i in geometryDatasetNames])
        length, width = self.get_size(box=box, xstep=xstep, ystep=ystep)

        self.outputFile = outputFile
        with h5py.File(self.outputFile, access_mode) as f:
            print('create HDF5 file {} with {} mode'.format(
                self.outputFile, access_mode))

            ###############################
            for dsName in self.dsNames:
                # 3D datasets containing bperp
                if dsName == 'bperp':
                    self.dateList = list(self.datasetDict[dsName].keys())
                    dsDataType = np.float32
                    self.numDate = len(self.dateList)
                    dsShape = (self.numDate, length, width)
                    ds = f.create_dataset(dsName,
                                          shape=dsShape,
                                          maxshape=(None, dsShape[1],
                                                    dsShape[2]),
                                          dtype=dsDataType,
                                          chunks=True,
                                          compression=compression)
                    print(
                        ('create dataset /{d:<{w}} of {t:<25} in size of {s}'
                         ' with compression = {c}').format(d=dsName,
                                                           w=maxDigit,
                                                           t=str(dsDataType),
                                                           s=dsShape,
                                                           c=str(compression)))

                    print('read coarse grid baseline files and linearly interpolate to full resolution ...')
                    prog_bar = ptime.progressBar(maxValue=self.numDate)
                    for i in range(self.numDate):
                        fname = self.datasetDict[dsName][self.dateList[i]]
                        data = read_isce_bperp_file(fname=fname,
                                                    full_shape=self.get_size(),
                                                    box=box,
                                                    xstep=xstep,
                                                    ystep=ystep)
                        ds[i, :, :] = data
                        prog_bar.update(i + 1, suffix=self.dateList[i])
                    prog_bar.close()

                    # Write the 1D 'date' dataset accompanying the 3D bperp
                    dsName = 'date'
                    dsShape = (self.numDate, )
                    dsDataType = np.string_
                    print(('create dataset /{d:<{w}} of {t:<25}'
                           ' in size of {s}').format(d=dsName,
                                                     w=maxDigit,
                                                     t=str(dsDataType),
                                                     s=dsShape))
                    data = np.array(self.dateList, dtype=dsDataType)
                    ds = f.create_dataset(dsName, data=data)

                # 2D datasets containing height, latitude/longitude, range/azimuthCoord, incidenceAngle, shadowMask, etc.
                else:
                    dsDataType = np.float32
                    if dsName.lower().endswith('mask'):
                        dsDataType = np.bool_
                    dsShape = (length, width)
                    print(
                        ('create dataset /{d:<{w}} of {t:<25} in size of {s}'
                         ' with compression = {c}').format(d=dsName,
                                                           w=maxDigit,
                                                           t=str(dsDataType),
                                                           s=dsShape,
                                                           c=str(compression)))

                    # read
                    data = np.array(self.read(family=dsName,
                                              box=box,
                                              xstep=xstep,
                                              ystep=ystep)[0],
                                    dtype=dsDataType)

                    # water body: -1 for water and 0 for land
                    # water mask:  0 for water and 1 for land
                    fname = os.path.basename(self.datasetDict[dsName])
                    if fname.startswith('waterBody') or fname.endswith('.wbd'):
                        data = data > -0.5
                        print('    input file "{}" is water body (-1/0 for water/land), '
                              'convert to water mask (0/1 for water/land).'.format(fname))

                    elif dsName == 'height':
                        noDataValueDEM = -32768
                        if np.any(data == noDataValueDEM):
                            data[data == noDataValueDEM] = np.nan
                            print('    convert DEM no-data value {} to NaN.'.format(noDataValueDEM))

                    elif dsName == 'rangeCoord' and xstep != 1:
                        print('    scale value of {:<15} by 1/{} due to multilooking'.format(dsName, xstep))
                        data /= xstep

                    elif dsName == 'azimuthCoord' and ystep != 1:
                        print('    scale value of {:<15} by 1/{} due to multilooking'.format(dsName, ystep))
                        data /= ystep

                    # write
                    ds = f.create_dataset(dsName,
                                          data=data,
                                          chunks=True,
                                          compression=compression)

            ###############################
            # Generate datasets that do not exist in the binary files: incidenceAngle, slantRangeDistance
            for dsName in [i for i in ['incidenceAngle', 'slantRangeDistance']
                           if i not in self.dsNames]:
                # Calculate data
                data = None
                if dsName == 'incidenceAngle':
                    data = self.get_incidence_angle(box=box,
                                                    xstep=xstep,
                                                    ystep=ystep)
                elif dsName == 'slantRangeDistance':
                    data = self.get_slant_range_distance(box=box,
                                                         xstep=xstep,
                                                         ystep=ystep)

                # Write dataset
                if data is not None:
                    dsShape = data.shape
                    dsDataType = np.float32
                    print(
                        ('create dataset /{d:<{w}} of {t:<25} in size of {s}'
                         ' with compression = {c}').format(d=dsName,
                                                           w=maxDigit,
                                                           t=str(dsDataType),
                                                           s=dsShape,
                                                           c=str(compression)))
                    ds = f.create_dataset(dsName,
                                          data=data,
                                          dtype=dsDataType,
                                          chunks=True,
                                          compression=compression)

            ###############################
            # Attributes
            self.get_metadata()
            if extra_metadata:
                self.metadata.update(extra_metadata)
                print('add extra metadata: {}'.format(extra_metadata))

            # update due to subset
            self.metadata = attr.update_attribute4subset(self.metadata, box)
            # update due to multilook
            if xstep * ystep > 1:
                self.metadata = attr.update_attribute4multilook(
                    self.metadata, ystep, xstep)

            self.metadata['FILE_TYPE'] = self.name
            for key, value in self.metadata.items():
                f.attrs[key] = value

        print('Finished writing to {}'.format(self.outputFile))
        return self.outputFile
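
A tiny worked example of the water body to water mask conversion above: water body files encode -1 for water and 0 for land, while the output mask uses 0 for water and 1 for land, so thresholding at -0.5 flips the convention in one step.

import numpy as np

water_body = np.array([[-1, 0], [0, -1]], dtype=np.float32)
water_mask = water_body > -0.5             # same test as in write2hdf5 above
print(water_mask.astype(np.int8))          # [[0 1] [1 0]] -> 0 for water, 1 for land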
Code example #5
    def write2hdf5(self,
                   outputFile='slcStack.h5',
                   access_mode='a',
                   box=None,
                   xstep=1,
                   ystep=1,
                   compression=None,
                   extra_metadata=None):
        '''Save/write an slcStackDict object into an HDF5 file with the structure below:

        /                  Root level
        Attributes         Dictionary for metadata
        /slc               3D array of complex in size of (m, l, w), SLC images
        /date              1D array of string  in size of (m,     ) in YYYYMMDD format, acquisition dates
        /bperp             1D array of float32 in size of (m,     ) in meter, perpendicular baselines

        Parameters: outputFile : str, name of the output HDF5 file for the SLC stack
                    access_mode : str, access mode of the output file, e.g. w, r+, a
                    box : tuple, subset range in (x0, y0, x1, y1)
                    extra_metadata : dict, extra metadata to be added into the output file
        Returns:    outputFile
        '''
        self.outputFile = outputFile
        f = h5py.File(self.outputFile, access_mode)
        print('create HDF5 file {} with {} mode'.format(
            self.outputFile, access_mode))

        self.dates = sorted(self.pairsDict.keys())
        self.dsNames = list(self.pairsDict[self.dates[0]].datasetDict.keys())
        self.dsNames = [i for i in slcDatasetNames if i in self.dsNames]
        maxDigit = max(len(i) for i in self.dsNames)
        self.get_size(box=box, xstep=xstep, ystep=ystep)

        self.bperp = np.zeros(self.numSlc)

        ###############################

        # 3D datasets containing slc.
        for dsName in self.dsNames:
            dsShape = (self.numSlc, self.length, self.width)
            dsDataType = dataType  # module-level default dtype for SLC data (e.g. complex64)
            dsCompression = compression
            if dsName in ['connectComponent']:
                dsDataType = np.int16
                dsCompression = 'lzf'

            print(('create dataset /{d:<{w}} of {t:<25} in size of {s}'
                   ' with compression = {c}').format(d=dsName,
                                                     w=maxDigit,
                                                     t=str(dsDataType),
                                                     s=dsShape,
                                                     c=dsCompression))

            if dsName in f.keys():
                ds = f[dsName]
            else:
                ds = f.create_dataset(dsName,
                                      shape=dsShape,
                                      maxshape=(None, dsShape[1], dsShape[2]),
                                      dtype=dsDataType,
                                      chunks=True,
                                      compression=dsCompression)

                prog_bar = ptime.progressBar(maxValue=self.numSlc)

                for i in range(self.numSlc):
                    slcObj = self.pairsDict[self.dates[i]]
                    fname, metadata = slcObj.read(dsName)

                    if not box:
                        box = (0, 0, self.width, self.length)
                    dsSlc = gdal.Open(fname + '.vrt', gdal.GA_ReadOnly)
                    # ReadAsArray(xoff, yoff, xsize, ysize): read the subset window
                    ds[i, :, :] = dsSlc.GetRasterBand(1).ReadAsArray(
                        int(box[0]), int(box[1]), self.width, self.length)

                    self.bperp[i] = slcObj.get_perp_baseline()
                    prog_bar.update(i + 1,
                                    suffix='{}'.format(self.dates[i][0]))

                prog_bar.close()
            ds.attrs['MODIFICATION_TIME'] = str(time.time())

        ###############################
        # 1D dataset containing dates of all images
        dsName = 'date'
        dsDataType = np.string_
        dsShape = (self.numSlc, )
        print('create dataset /{d:<{w}} of {t:<25} in size of {s}'.format(
            d=dsName, w=maxDigit, t=str(dsDataType), s=dsShape))
        data = np.array(self.dates, dtype=dsDataType)
        if dsName not in f:
            f.create_dataset(dsName, data=data)

        ###############################
        # 1D dataset containing perpendicular baseline of all pairs
        dsName = 'bperp'
        dsDataType = np.float32
        dsShape = (self.numSlc, )
        print('create dataset /{d:<{w}} of {t:<25} in size of {s}'.format(
            d=dsName, w=maxDigit, t=str(dsDataType), s=dsShape))
        data = np.array(self.bperp, dtype=dsDataType)
        if dsName not in f:
            f.create_dataset(dsName, data=data)

        ###############################
        # Attributes
        self.get_metadata()
        if extra_metadata:
            self.metadata.update(extra_metadata)
            print('add extra metadata: {}'.format(extra_metadata))
        # update metadata due to subset
        self.metadata = attr.update_attribute4subset(self.metadata, box)

        # update metadata due to multilook
        if xstep * ystep > 1:
            self.metadata = attr.update_attribute4multilook(
                self.metadata, ystep, xstep)

        self.metadata['FILE_TYPE'] = 'timeseries'  #'slc'
        for key, value in self.metadata.items():
            f.attrs[key] = value

        f.close()
        print('Finished writing to {}'.format(self.outputFile))
        return self.outputFile
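
The windowed read in the loop above relies on GDAL's ReadAsArray(xoff, yoff, win_xsize, win_ysize) signature, so a subset box (x0, y0, x1, y1) maps to pixel offsets (x0, y0) and window sizes (x1 - x0, y1 - y0). A minimal sketch with an illustrative file name:

from osgeo import gdal

box = (100, 200, 600, 700)   # (x0, y0, x1, y1)
ds = gdal.Open('slc_date.vrt', gdal.GA_ReadOnly)
data = ds.GetRasterBand(1).ReadAsArray(
    box[0], box[1], box[2] - box[0], box[3] - box[1])
print(data.shape)            # (500, 500), returned as rows x columns (ysize, xsize)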