Example #1
def multilook_file(infile, lks_y, lks_x, outfile=None):
    lks_y = int(lks_y)
    lks_x = int(lks_x)

    # input file info
    atr = readfile.read_attribute(infile)
    k = atr['FILE_TYPE']
    print('multilooking {} {} file: {}'.format(atr['PROCESSOR'], k, infile))
    print('number of looks in y / azimuth direction: %d' % lks_y)
    print('number of looks in x / range   direction: %d' % lks_x)

    # output file name
    if not outfile:
        if os.getcwd() == os.path.dirname(os.path.abspath(infile)):
            ext = os.path.splitext(infile)[1]
            outfile = os.path.splitext(infile)[0]+'_'+str(lks_y)+'alks_'+str(lks_x)+'rlks'+ext
        else:
            outfile = os.path.basename(infile)
    #print('writing >>> '+outfile)

    # read source data and multilooking
    dsNames = readfile.get_dataset_list(infile)
    maxDigit = max([len(i) for i in dsNames])
    dsDict = dict()
    for dsName in dsNames:
        print('multilooking {d:<{w}} from {f} ...'.format(
            d=dsName, w=maxDigit, f=os.path.basename(infile)))
        data = readfile.read(infile, datasetName=dsName, print_msg=False)[0]
        data = multilook_data(data, lks_y, lks_x)
        dsDict[dsName] = data
    atr = multilook_attribute(atr, lks_y, lks_x)
    writefile.write(dsDict, out_file=outfile, metadata=atr, ref_file=infile)
    return outfile
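A minimal usage sketch (the input file name is illustrative, not from the example above):

# downsample a gridded file by 15 looks in azimuth and 5 looks in range;
# run from the file's own directory so the output name becomes
# 'velocity_15alks_5rlks.h5' automatically
out_file = multilook_file('velocity.h5', lks_y=15, lks_x=5)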
Example #2
def main(iargs=None):
    inps = cmd_line_parse(iargs)

    # read timeseries data
    obj = timeseries(inps.timeseries_file)
    obj.open()
    ts_data = obj.read()
    inps.date_list = list(obj.dateList)

    # read topographic data (DEM)
    dem = read_topographic_data(inps.geom_file, obj.metadata)

    # estimate phase/elevation ratio parameters
    X = estimate_phase_elevation_ratio(dem, ts_data, inps)

    # correct trop delay in timeseries
    trop_data = estimate_tropospheric_delay(dem, X, obj.metadata)
    mask = ts_data == 0.
    ts_data -= trop_data
    ts_data[mask] = 0.

    # write time-series file
    metadata = dict(obj.metadata)
    metadata['mintpy.troposphericDelay.polyOrder'] = str(inps.poly_order)
    if not inps.outfile:
        inps.outfile = '{}_tropHgt.h5'.format(
            os.path.splitext(inps.timeseries_file)[0])
    writefile.write(ts_data,
                    out_file=inps.outfile,
                    metadata=metadata,
                    ref_file=inps.timeseries_file)
    return inps.outfile
Example #3
def write_to_one_file(outfile,
                      dH,
                      dV,
                      atr,
                      dLOS_list,
                      atr_list,
                      ref_file=None):
    """Write all datasets into one HDF5 file"""

    print('write all datasets into {}'.format(outfile))
    dsDict = {}
    for i in range(len(atr_list)):
        # auto dataset name
        atr_i = atr_list[i]
        dsName = sensor.project_name2sensor_name(atr_i['PROJECT_NAME'])[0]
        if atr_i['ORBIT_DIRECTION'].lower().startswith('asc'):
            dsName += 'A'
        else:
            dsName += 'D'
        if 'trackNumber' in atr_i.keys():
            dsName += 'T{}'.format(atr_i['trackNumber'])
        dsName += '_{}'.format(atr_i['DATE12'])

        dsDict[dsName] = dLOS_list[i]
    dsDict['vertical'] = dV
    dsDict['horizontal'] = dH

    writefile.write(dsDict, out_file=outfile, metadata=atr, ref_file=ref_file)
    return outfile
Example #4
def write_to_one_file(outfile, dH, dV, atr, dLOS, atr_list, ref_file=None):
    """Write all datasets into one HDF5 file"""
    from mintpy.objects import sensor

    print('write all datasets into {}'.format(outfile))
    length, width = dH.shape
    dsDict = {}
    for i in range(len(atr_list)):
        # auto dataset name
        # use a distinct name to avoid shadowing the input metadata `atr`,
        # which is written out below as the file-level metadata
        atr_i = atr_list[i]
        dsName = sensor.project_name2sensor_name(atr_i['FILE_PATH'])[0]
        if atr_i['ORBIT_DIRECTION'].lower().startswith('asc'):
            dsName += 'A'
        else:
            dsName += 'D'
        if 'trackNumber' in atr_i.keys():
            dsName += 'T{}'.format(atr_i['trackNumber'])
        dsName += '_{}'.format(atr_i['DATE12'])

        dsDict[dsName] = dLOS[i,:].reshape(length, width)
    dsDict['vertical'] = dV
    dsDict['horizontal'] = dH

    writefile.write(dsDict, out_file=outfile, metadata=atr, ref_file=ref_file)
    return outfile
Example #5
def mask_file(fname, mask_file, out_file, inps=None):
    """ Mask input fname with mask_file
    Inputs:
        fname/mask_file - string, 
        inps_dict - dictionary including the following options:
                    subset_x/y - list of 2 ints, subset in x/y direction
                    threshold - float, threshold/minValue to generate mask
    Output:
        out_file - string
    """
    if not inps:
        inps = cmd_line_parse()

    # read mask_file
    mask = readfile.read(mask_file)[0]
    mask = update_mask_with_inps(mask, inps)

    # masking input file
    atr = readfile.read_attribute(fname)
    dsNames = readfile.get_dataset_list(fname)
    maxDigit = max([len(i) for i in dsNames])
    dsDict = {}
    for dsName in dsNames:
        print('masking {d:<{w}} from {f} ...'.format(d=dsName, w=maxDigit, f=fname))
        data = readfile.read(fname, datasetName=dsName, print_msg=False)[0]
        data = mask_matrix(data, mask, fill_value=inps.fill_value)
        dsDict[dsName] = data

    # default output filename
    if not out_file:
        fbase, fext = os.path.splitext(fname)
        out_file = '{}_msk{}'.format(fbase, fext)

    writefile.write(dsDict, out_file=out_file, ref_file=fname)
    return out_file
Example #6
def add_file(fnames, out_file=None):
    """Generate sum of all input files
    Parameters: fnames : list of str, path/name of input files to be added
                out_file : str, optional, path/name of output file
    Returns:    out_file : str, path/name of output file
    Example:    'mask_all.h5' = add_file(['mask_1.h5','mask_2.h5','mask_3.h5'], 'mask_all.h5')
    """
    # Default output file name
    ext = os.path.splitext(fnames[0])[1]
    if not out_file:
        out_file = os.path.splitext(fnames[0])[0]
        for i in range(1, len(fnames)):
            out_file += '_plus_' + os.path.splitext(os.path.basename(fnames[i]))[0]
        out_file += ext

    atr = readfile.read_attribute(fnames[0])
    dsNames = readfile.get_dataset_list(fnames[0])
    dsDict = {}
    for dsName in dsNames:
        print('adding {} ...'.format(dsName))
        data = readfile.read(fnames[0], datasetName=dsName)[0]
        for i in range(1, len(fnames)):
            d = readfile.read(fnames[i], datasetName=dsName)[0]
            data = add_matrix(data, d)
        dsDict[dsName] = data
    writefile.write(dsDict, out_file=out_file, metadata=atr, ref_file=fnames[0])
    return out_file
Example #7
def main(iargs=None):
    inps = cmd_line_parse(iargs)

    # Calculate incidence angle
    atr = readfile.read_attribute(inps.file)
    dem = None
    if inps.dem_file:
        dem = readfile.read(inps.dem_file, datasetName='height')[0]
    angle = ut.incidence_angle(atr, dem=dem, dimension=2)

    # Geo coord
    if 'Y_FIRST' in atr.keys():
        print('Input file is geocoded, only the center incidence angle is calculated:')
        print(angle)
        length = int(atr['LENGTH'])
        width = int(atr['WIDTH'])
        angle_mat = np.zeros((length, width), np.float32)
        angle_mat[:] = angle
        angle = angle_mat

    atr['FILE_TYPE'] = 'mask'
    atr['UNIT'] = 'degree'
    if 'REF_DATE' in atr.keys():
        atr.pop('REF_DATE')

    if not inps.outfile:
        inps.outfile = 'incidenceAngle.h5'
    writefile.write(angle, out_file=inps.outfile, metadata=atr)
    return inps.outfile
Example #8
    def run_network_modification(self, step_name):
        """Modify network of interferograms before the network inversion."""
        # check the existence of ifgramStack.h5
        stack_file, geom_file = ut.check_loaded_dataset(self.workDir, print_msg=False)[1:3]
        coh_txt = '{}_coherence_spatialAvg.txt'.format(os.path.splitext(os.path.basename(stack_file))[0])
        try:
            net_fig = [i for i in ['Network.pdf', 'pic/Network.pdf'] if os.path.isfile(i)][0]
        except:
            net_fig = None

        # 1) output waterMask.h5 to simplify the detection/use of waterMask
        water_mask_file = 'waterMask.h5'
        if 'waterMask' in readfile.get_dataset_list(geom_file):
            print('generate {} from {} for convenience'.format(water_mask_file, geom_file))
            if ut.run_or_skip(out_file=water_mask_file, in_file=geom_file) == 'run':
                water_mask, atr = readfile.read(geom_file, datasetName='waterMask')
                atr['FILE_TYPE'] = 'waterMask'
                writefile.write(water_mask, out_file=water_mask_file, metadata=atr)

        # 2) modify network
        scp_args = '{} -t {}'.format(stack_file, self.templateFile)
        print('modify_network.py', scp_args)
        mintpy.modify_network.main(scp_args.split())

        # 3) plot network
        scp_args = '{} -t {} --nodisplay'.format(stack_file, self.templateFile)
        print('\nplot_network.py', scp_args)
        if ut.run_or_skip(out_file=net_fig,
                          in_file=[stack_file, coh_txt, self.templateFile],
                          check_readable=False) == 'run':
            mintpy.plot_network.main(scp_args.split())

        # 4) aux files: maskConnComp and avgSpatialCoh
        self.generate_ifgram_aux_file()
        return
Example #9
def prepare_ps_mask(outfile, infile, metadata, box=None):
    print('-' * 50)
    print('preparing PS mask file: {}'.format(outfile))

    # copy metadata to meta
    meta = {key: value for key, value in metadata.items()}
    meta["FILE_TYPE"] = "mask"
    meta["UNIT"] = "1"

    # size info
    if not box:
        box = (0, 0, int(meta['WIDTH']), int(meta['LENGTH']))
    kwargs = dict(xoff=box[0],
                  yoff=box[1],
                  win_xsize=box[2] - box[0],
                  win_ysize=box[3] - box[1])

    # read data using gdal
    ds = gdal.Open(infile, gdal.GA_ReadOnly)
    data = np.array(ds.GetRasterBand(1).ReadAsArray(**kwargs),
                    dtype=np.float32)

    # write to HDF5 file
    writefile.write(data, outfile, metadata=meta)
    return outfile
Example #10
def mask_file(fname, mask_file, out_file, inps=None):
    """ Mask input fname with mask_file
    Parameters: fname     - str, file to be masked
                mask_file - str, mask file
                out_file  - str, output file name
                inps      - namespace object, from cmd_line_parse()
    Returns:    out_file  - str, output file name
    """
    if not inps:
        inps = cmd_line_parse()

    # read mask_file
    mask = readfile.read(mask_file)[0]
    mask = update_mask_with_inps(mask, inps)

    # masking input file
    dsNames = readfile.get_dataset_list(fname)
    maxDigit = max([len(i) for i in dsNames])
    dsDict = {}
    for dsName in dsNames:
        print('masking {d:<{w}} from {f} ...'.format(d=dsName,
                                                     w=maxDigit,
                                                     f=fname))
        data = readfile.read(fname, datasetName=dsName, print_msg=False)[0]
        data = mask_matrix(data, mask, fill_value=inps.fill_value)
        dsDict[dsName] = data

    # default output filename
    if not out_file:
        fbase, fext = os.path.splitext(fname)
        out_file = '{}_msk{}'.format(fbase, fext)

    writefile.write(dsDict, out_file=out_file, ref_file=fname)
    return out_file
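A minimal call sketch (file names are illustrative; inps is the namespace returned by cmd_line_parse(), and passing it explicitly avoids re-parsing the command line):

# mask a velocity file with a temporal-coherence-based mask;
# passing None as out_file defaults to 'velocity_msk.h5'
out_file = mask_file('velocity.h5', 'maskTempCoh.h5', None, inps=inps)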
Example #11
def main(argv):
    try:
        File = argv[0]
        atr = readfile.read_attribute(File)
    except:
        usage()
        sys.exit(1)

    try:
        outFile = argv[1]
    except:
        outFile = 'rangeDistance.h5'

    # Calculate range distance
    range_dis = ut.range_distance(atr, dimension=2)

    # Geo coord
    if 'Y_FIRST' in atr.keys():
        print('Input file is geocoded, only center range distance is calculated: ')
        print(range_dis)
        length = int(atr['LENGTH'])
        width = int(atr['WIDTH'])
        range_dis_mat = np.zeros((length, width), np.float32)
        range_dis_mat[:] = range_dis
        range_dis = range_dis_mat

    print('writing >>> '+outFile)
    atr['FILE_TYPE'] = 'mask'
    atr['UNIT'] = 'm'
    try:
        atr.pop('REF_DATE')
    except:
        pass
    writefile.write(range_dis, out_file=outFile, metadata=atr)
    return outFile
Example #12
File: mask.py  Project: hfattahi/PySAR
def mask_file(fname, mask_file, out_file, inps=None):
    """ Mask input fname with mask_file
    Inputs:
        fname/mask_file - string, 
        inps_dict - dictionary including the following options:
                    subset_x/y - list of 2 ints, subset in x/y direction
                    threshold - float, threshold/minValue to generate mask
    Output:
        out_file - string
    """
    if not inps:
        inps = cmd_line_parse()

    # read mask_file
    mask = readfile.read(mask_file)[0]
    mask = update_mask_with_inps(mask, inps)

    # masking input file
    dsNames = readfile.get_dataset_list(fname)
    maxDigit = max([len(i) for i in dsNames])
    dsDict = {}
    for dsName in dsNames:
        # read every dataset, but skip masking for coherence
        data = readfile.read(fname, datasetName=dsName, print_msg=False)[0]
        if dsName not in ['coherence']:
            print('masking {d:<{w}} from {f} ...'.format(d=dsName, w=maxDigit, f=fname))
            data = mask_matrix(data, mask, fill_value=inps.fill_value)
        dsDict[dsName] = data

    # default output filename
    if not out_file:
        fbase, fext = os.path.splitext(fname)
        out_file = '{}_msk{}'.format(fbase, fext)

    writefile.write(dsDict, out_file=out_file, ref_file=fname)
    return out_file
Example #13
def main(iargs=None):
    inps = cmd_line_parse(iargs)
    if not inps.outfile:
        inps.outfile = os.path.splitext(inps.file)[0] + '.h5'

    if inps.data_type:
        if inps.data_type in ['float', 'float32', 'np.float32']:
            inps.data_type = np.float32
        elif inps.data_type in ['float64', 'np.float64']:
            inps.data_type = np.float64
        elif inps.data_type in ['int', 'int16', 'np.int16']:
            inps.data_type = np.int16
        elif inps.data_type in ['bool', 'np.bool_']:
            inps.data_type = np.bool_
        elif inps.data_type in ['complex', 'np.complex64']:
            inps.data_type = np.complex64
        elif inps.data_type in ['complex128', 'np.complex128']:
            inps.data_type = np.complex128
        else:
            raise ValueError('unrecognized input data type: {}'.format(
                inps.data_type))

    atr = readfile.read_attribute(inps.file)
    if not inps.dset_names:
        inps.dset_names = readfile.get_dataset_list(inps.file)

    dsDict = {}
    for ds_name in inps.dset_names:
        data = readfile.read(inps.file, datasetName=ds_name)[0]
        if inps.data_type:
            data = np.array(data, inps.data_type)
        dsDict[ds_name] = data
    writefile.write(dsDict, out_file=inps.outfile, metadata=atr)
    return inps.outfile
Example #14
def get_nonzero_phase_closure(ifgram_file,
                              out_file=None,
                              thres=0.1,
                              unwDatasetName='unwrapPhase'):
    """Calculate/Read number of non-zero phase closure
    Parameters: ifgram_file : string, path of ifgram stack file
                out_file    : string, path of num non-zero phase closure file
    Returns:    num_nonzero_closure : 2D np.array in size of (length, width)
    """
    if not out_file:
        out_file = 'numNonzeroPhaseClosure_{}.h5'.format(unwDatasetName)
    if os.path.isfile(out_file) and readfile.read_attribute(out_file):
        print('1. read number of nonzero phase closure from file: {}'.format(
            out_file))
        num_nonzero_closure = readfile.read(out_file)[0]
    else:
        obj = ifgramStack(ifgram_file)
        obj.open(print_msg=False)
        length, width = obj.length, obj.width

        ref_phase = obj.get_reference_phase(unwDatasetName=unwDatasetName,
                                            dropIfgram=False)
        C = obj.get_design_matrix4triplet(
            obj.get_date12_list(dropIfgram=False))

        # calculate phase closure line by line to save memory usage
        num_nonzero_closure = np.zeros((length, width), np.float32)
        print(
            '1. calculating phase closure of all pixels from dataset - {} ...'.
            format(unwDatasetName))
        line_step = 10
        num_loop = int(np.ceil(length / line_step))
        prog_bar = ptime.progressBar(maxValue=num_loop)
        for i in range(num_loop):
            # read phase
            i0, i1 = i * line_step, min(length, (i + 1) * line_step)
            box = (0, i0, width, i1)
            pha_data = ifginv.read_unwrap_phase(obj,
                                                box,
                                                ref_phase,
                                                unwDatasetName=unwDatasetName,
                                                dropIfgram=False,
                                                print_msg=False)
            # calculate phase closure
            pha_closure = np.dot(C, pha_data)
            pha_closure = np.abs(pha_closure - ut.wrap(pha_closure))
            # get number of non-zero phase closure
            num_nonzero = np.sum(pha_closure >= thres, axis=0)
            num_nonzero_closure[i0:i1, :] = num_nonzero.reshape(i1 - i0, width)
            prog_bar.update(i + 1,
                            every=1,
                            suffix='{}/{} lines'.format((i + 1) * line_step,
                                                        length))
        prog_bar.close()

        atr = dict(obj.metadata)
        atr['FILE_TYPE'] = 'mask'
        atr['UNIT'] = '1'
        writefile.write(num_nonzero_closure, out_file=out_file, metadata=atr)
    return num_nonzero_closure
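For intuition: for a triplet of interferograms ij, jk and ik, the closure phase phi_ij + phi_jk - phi_ik should be an integer multiple of 2*pi when the unwrapping is consistent, so |x - wrap(x)| is near zero for consistent pixels. A toy check with made-up numbers (the local wrap() stands in for ut.wrap and maps values into [-pi, pi]):

import numpy as np

def wrap(x):
    return x - np.round(x / (2 * np.pi)) * 2 * np.pi

phi_ij, phi_jk, phi_ik = 1.0, 2.0, 3.0 + 2 * np.pi   # one extra 2*pi cycle in phi_ik
closure = phi_ij + phi_jk - phi_ik                    # = -2*pi
print(np.abs(closure - wrap(closure)) >= 0.1)         # True -> counted as non-zero closure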
Example #15
def correct_single_ifgram(dis_file, tropo_file, cor_dis_file):
    print('\n------------------------------------------------------------------------------')
    print('correcting relative delay for input interferogram')

    print('read phase from {}'.format(dis_file))
    data, atr = readfile.read(dis_file, datasetName='phase')
    date1, date2 = ptime.yyyymmdd(atr['DATE12'].split('-'))
    ref_y, ref_x = int(atr['REF_Y']), int(atr['REF_X'])

    print('calc tropospheric delay for {}-{} from {}'.format(date1, date2, tropo_file))
    tropo  = readfile.read(tropo_file, datasetName=date2)[0]
    tropo -= readfile.read(tropo_file, datasetName=date1)[0]
    tropo *= -4. * np.pi / float(atr['WAVELENGTH'])

    # apply the correction and re-referencing
    data -= tropo
    data -= data[ref_y, ref_x]

    print('read magnitude from {}'.format(dis_file))
    mag = readfile.read(dis_file, datasetName='magnitude')[0]

    print('write corrected data to {}'.format(cor_dis_file))
    ds_dict = {'magnitude': mag, 'phase': data}
    writefile.write(ds_dict, cor_dis_file, atr)

    return cor_dis_file
Example #16
def prepare_temporal_coherence(outfile, infile, metadata, box=None):
    print('-' * 50)
    print('preparing temporal coherence file: {}'.format(outfile))

    # copy metadata to meta
    meta = {key: value for key, value in metadata.items()}
    meta["FILE_TYPE"] = "temporalCoherence"
    meta["UNIT"] = "1"

    # size info
    box = box if box else (0, 0, int(meta['WIDTH']), int(meta['LENGTH']))
    kwargs = dict(xoff=box[0],
                  yoff=box[1],
                  win_xsize=box[2] - box[0],
                  win_ysize=box[3] - box[1])

    # read data using gdal
    ds = gdal.Open(infile, gdal.GA_ReadOnly)
    data = np.array(ds.GetRasterBand(1).ReadAsArray(**kwargs),
                    dtype=np.float32)

    print('set all data less than 0 to 0.')
    data[data < 0] = 0

    # write to HDF5 file
    writefile.write(data, outfile, metadata=meta)
    return outfile
Example #17
def subset_data_based_bbox(inps, dataset):
    """return the row_no,sample_no and rows and samples"""
    # metadata
    atr = readfile.read_attribute("".join(inps.file))
    ul_lat = float(atr['Y_FIRST'])
    ul_lon = float(atr['X_FIRST'])
    lat_step = float(atr["Y_STEP"])
    lon_step = float(atr["X_STEP"])
    # bbox
    user_lat0 = float(inps.SNWE[1])
    user_lon0 = float(inps.SNWE[2])
    user_lat1 = float(inps.SNWE[0])
    user_lon1 = float(inps.SNWE[3])
    if user_lat0 < user_lat1:
        parser.print_usage()
        raise Exception('input bounding box error! Wrong latitude order!')
    elif user_lon0 > user_lon1:
        parser.print_usage()
        raise Exception('input bounding box error! Wrong longitude order!')

    row = int((user_lat0 - ul_lat) / lat_step + 0.5)
    sample = int((user_lon0 - ul_lon) / lon_step + 0.5)
    rows = int((user_lat1 - user_lat0) / lat_step + 0.5) + 1
    samples = int((user_lon1 - user_lon0) / lon_step + 0.5) + 1

    # subset data and update size metadata
    data, atr = readfile.read(dataset)
    data = data[row:row + rows, sample:sample + samples]
    atr['LENGTH'] = str(rows)
    atr['WIDTH'] = str(samples)
    writefile.write(data, out_file=dataset, metadata=atr)

    return
Example #18
def timeseries2ifgram(ts_file, ifgram_file, out_file='reconUnwrapIfgram.h5'):
    # read time-series
    atr = readfile.read_attribute(ts_file)
    range2phase = -4.*np.pi / float(atr['WAVELENGTH'])
    print('reading timeseries data from file {} ...'.format(ts_file))
    ts_data = readfile.read(ts_file)[0] * range2phase
    num_date, length, width = ts_data.shape
    ts_data = ts_data.reshape(num_date, -1)

    # reconstruct unwrapPhase
    print('reconstructing the interferograms from timeseries')
    stack_obj = ifgramStack(ifgram_file)
    stack_obj.open(print_msg=False)
    A1 = stack_obj.get_design_matrix4timeseries(stack_obj.get_date12_list(dropIfgram=False))[0]
    num_ifgram = A1.shape[0]
    A0 = -1.*np.ones((num_ifgram, 1))
    A = np.hstack((A0, A1))
    ifgram_est = np.dot(A, ts_data).reshape(num_ifgram, length, width)
    ifgram_est = np.array(ifgram_est, dtype=ts_data.dtype)
    del ts_data

    # write to ifgram file
    dsDict = {}
    dsDict['unwrapPhase'] = ifgram_est
    writefile.write(dsDict, out_file=out_file, ref_file=ifgram_file)
    return out_file
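The hstack of A0 and A1 pads the design matrix back to num_date columns, assuming the MintPy convention that get_design_matrix4timeseries() excludes the reference (first) date column, while ts_data keeps all num_date rows with the first row zero by construction, so the -1 column has no effect on the product. A toy sketch for three dates and two sequential interferograms (values are made up):

import numpy as np

ts = np.array([[0.0], [2.0], [5.0]])   # cumulative phase at dates d0 (ref), d1, d2 for one pixel
A1 = np.array([[ 1., 0.],              # ifgram d0_d1: +phi(d1)
               [-1., 1.]])             # ifgram d1_d2: -phi(d1) + phi(d2)
A0 = -1. * np.ones((2, 1))             # padding column for the reference date d0
A = np.hstack((A0, A1))
print(np.dot(A, ts))                   # [[2.], [3.]] == [phi(d0_d1), phi(d1_d2)]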
Example #19
def prepare_geometry(outfile, geom_dir, box, metadata):
    print('-'*50)
    print('preparing geometry file: {}'.format(outfile))

    # copy metadata to meta
    meta = {key : value for key, value in metadata.items()}
    meta["FILE_TYPE"] = "temporalCoherence"

    fDict = {
        'height'         : os.path.join(geom_dir, 'hgt.rdr.full'),
        'latitude'       : os.path.join(geom_dir, 'lat.rdr.full'),
        'longitude'      : os.path.join(geom_dir, 'lon.rdr.full'),
        'incidenceAngle' : os.path.join(geom_dir, 'los.rdr.full'),
        'azimuthAngle'   : os.path.join(geom_dir, 'los.rdr.full'),
        'shadowMask'     : os.path.join(geom_dir, 'shadowMask.rdr.full'),
    }

    # initiate dsDict
    dsDict = {}
    for dsName, fname in fDict.items():
        dsDict[dsName] = readfile.read(fname, datasetName=dsName, box=box)[0]

    dsDict['slantRangeDistance'] = ut.range_distance(meta, dimension=2)

    # write data to HDF5 file
    writefile.write(dsDict, outfile, metadata=meta)

    return outfile
Example #20
def file_operation(fname, operator, operand, out_file=None):
    """Mathmathic operation of file"""

    # Basic Info
    atr = readfile.read_attribute(fname)
    k = atr['FILE_TYPE']
    print('input is '+k+' file: '+fname)
    print('operation: file %s %f' % (operator, operand))

    # default output filename
    if not out_file:
        if operator in ['+', 'plus',  'add',      'addition']:
            suffix = 'plus'
        elif operator in ['-', 'minus', 'substract', 'substraction']:
            suffix = 'minus'
        elif operator in ['*', 'times', 'multiply', 'multiplication']:
            suffix = 'multiply'
        elif operator in ['/', 'obelus', 'divide',   'division']:
            suffix = 'divide'
        elif operator in ['^', 'pow', 'power']:
            suffix = 'pow'
        else:
            raise ValueError('unrecognized operator: {}'.format(operator))
        out_file = '{}_{}{}{}'.format(os.path.splitext(fname)[0], suffix,
                                      str(operand), os.path.splitext(fname)[1])

    atr = readfile.read_attribute(fname)
    dsNames = readfile.get_dataset_list(fname)
    dsDict = {}
    for dsName in dsNames:
        data = readfile.read(fname, datasetName=dsName)[0]
        data = data_operation(data, operator, operand)
        dsDict[dsName] = data
    writefile.write(dsDict, out_file=out_file, metadata=atr, ref_file=fname)
    return out_file
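A short usage sketch (the file name is illustrative):

# scale a velocity file by 1000, e.g. to convert m/yr to mm/yr;
# the auto output name becomes 'velocity_multiply1000.0.h5'
out_file = file_operation('velocity.h5', '*', 1000.0)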
Example #21
def main(iargs=None):
    inps = cmd_line_parse(iargs)

    (dH, dV, atr, dLOS_list,
     atr_list) = asc_desc_files2horz_vert(inps.file[0],
                                          inps.file[1],
                                          dsname=inps.dsname,
                                          azimuth=inps.azimuth)

    print('---------------------')
    if inps.one_outfile:
        write_to_one_file(inps.one_outfile,
                          dH,
                          dV,
                          atr,
                          dLOS_list,
                          atr_list,
                          ref_file=inps.ref_file)

    else:
        print('writing horizontal component to file: ' + inps.outfile[0])
        writefile.write(dH,
                        out_file=inps.outfile[0],
                        metadata=atr,
                        ref_file=inps.ref_file)

        print('writing   vertical component to file: ' + inps.outfile[1])
        writefile.write(dV,
                        out_file=inps.outfile[1],
                        metadata=atr,
                        ref_file=inps.ref_file)

    print('Done.')
    return inps.outfile
Example #22
    def run_load_data(self, step_name):
        """Load InSAR stacks into HDF5 files in ./inputs folder.
        It 1) copy auxiliary files into work directory (for Unvi of Miami only)
           2) load all interferograms stack files into mintpy/inputs directory.
           3) check loading result
           4) add custom metadata (optional, for HDF-EOS5 format only)
        """
        # 1) copy aux files (optional)
        self._copy_aux_file()

        # 2) loading data
        scp_args = '--template {}'.format(self.templateFile)
        if self.customTemplateFile:
            scp_args += ' {}'.format(self.customTemplateFile)
        if self.projectName:
            scp_args += ' --project {}'.format(self.projectName)
        # run
        print("load_data.py", scp_args)
        mintpy.load_data.main(scp_args.split())
        os.chdir(self.workDir)

        # 3) check loading result
        load_complete, stack_file, geom_file = ut.check_loaded_dataset(self.workDir, print_msg=True)[0:3]

        # 3.1) output waterMask.h5
        water_mask_file = 'waterMask.h5'
        if 'waterMask' in readfile.get_dataset_list(geom_file):
            print('generate {} from {} for convenience'.format(water_mask_file, geom_file))
            if ut.run_or_skip(out_file=water_mask_file, in_file=geom_file) == 'run':
                water_mask, atr = readfile.read(geom_file, datasetName='waterMask')
                atr['FILE_TYPE'] = 'waterMask'
                writefile.write(water_mask, out_file=water_mask_file, metadata=atr)

        # 4) add custom metadata (optional)
        if self.customTemplateFile:
            print('updating {}, {} metadata based on custom template file: {}'.format(
                os.path.basename(stack_file),
                os.path.basename(geom_file),
                os.path.basename(self.customTemplateFile)))
            # use ut.add_attribute() instead of add_attribute.py because of
            # better control of special metadata, such as SUBSET_X/YMIN
            ut.add_attribute(stack_file, self.customTemplate)
            ut.add_attribute(geom_file, self.customTemplate)

        # 5) if not load_complete, plot and raise exception
        if not load_complete:
            # plot result if error occured
            self.plot_result(print_aux=False, plot=plot)

            # go back to original directory
            print('Go back to directory:', self.cwd)
            os.chdir(self.cwd)

            # raise error
            msg = 'step {}: NOT all required dataset found, exit.'.format(step_name)
            raise RuntimeError(msg)
        return
Example #23
def main(iargs=None):
    inps = cmd_line_parse(iargs)

    data, atr, out_file = read_data(inps)

    atr = clean_metadata4roipac(atr)

    writefile.write(data, out_file=out_file, metadata=atr)
    return inps.outfile
Example #24
def mask_filter(inps, dataset):
    """mask data"""
    print("mask {} file".format(dataset))
    maskfile = readfile.read(
        "".join(inps.file),
        datasetName='/HDFEOS/GRIDS/timeseries/quality/mask')[0]
    data, atr = readfile.read(dataset)
    data[maskfile == 0] = np.nan
    writefile.write(data, out_file=dataset, metadata=atr)
Example #25
def multilook_file(infile, lks_y, lks_x, outfile=None, margin=[0,0,0,0]):
    """ Multilook input file
    Parameters: infile - str, path of input file to be multilooked.
                lks_y  - int, number of looks in y / row direction.
                lks_x  - int, number of looks in x / column direction.
                margin - list of 4 int, number of pixels to be skipped during multilooking.
                         useful for offset product, where the marginal pixels are ignored during
                         cross correlation matching.
                outfile - str, path of output file
    Returns:    outfile - str, path of output file
    """
    lks_y = int(lks_y)
    lks_x = int(lks_x)

    # input file info
    atr = readfile.read_attribute(infile)
    length, width = int(atr['LENGTH']), int(atr['WIDTH'])
    k = atr['FILE_TYPE']
    print('multilooking {} {} file: {}'.format(atr['PROCESSOR'], k, infile))
    print('number of looks in y / azimuth direction: %d' % lks_y)
    print('number of looks in x / range   direction: %d' % lks_x)

    # margin --> box
    if margin != [0, 0, 0, 0]:    # top, bottom, left, right
        box = (margin[2], margin[0], width - margin[3], length - margin[1])
        print('number of pixels to skip in top/bottom/left/right boundaries: {}'.format(margin))
    else:
        box = (0, 0, width, length)

    # output file name
    if not outfile:
        if os.getcwd() == os.path.dirname(os.path.abspath(infile)):
            ext = os.path.splitext(infile)[1]
            outfile = os.path.splitext(infile)[0]+'_'+str(lks_y)+'alks_'+str(lks_x)+'rlks'+ext
        else:
            outfile = os.path.basename(infile)
    #print('writing >>> '+outfile)

    # read source data and multilooking
    dsNames = readfile.get_dataset_list(infile)
    maxDigit = max([len(i) for i in dsNames])
    dsDict = dict()
    for dsName in dsNames:
        print('multilooking {d:<{w}} from {f} ...'.format(
            d=dsName, w=maxDigit, f=os.path.basename(infile)))
        data = readfile.read(infile, datasetName=dsName, box=box, print_msg=False)[0]

        # keep timeseries data as 3D matrix when there is only one acquisition
        # because readfile.read() will squeeze it to 2D
        if atr['FILE_TYPE'] == 'timeseries' and len(data.shape) == 2:
            data = np.reshape(data, (1, data.shape[0], data.shape[1]))

        data = multilook_data(data, lks_y, lks_x)
        dsDict[dsName] = data
    atr = multilook_attribute(atr, lks_y, lks_x, box=box)
    writefile.write(dsDict, out_file=outfile, metadata=atr, ref_file=infile)
    return outfile
Example #26
def correct_local_oscilator_drift(fname, rg_dist_file=None, out_file=None):
    print('-'*50)
    print('correct Local Oscillator Drift for Envisat using an empirical model (Marinkovic and Larsen, 2013)')
    print('-'*50)
    atr = readfile.read_attribute(fname)

    # Check Sensor Type
    platform = atr['PLATFORM']
    print('platform: '+platform)
    if not platform.lower() in ['env', 'envisat']:
        print('No need to correct LOD for '+platform)
        return

    # output file name
    if not out_file:
        out_file = '{}_LODcor{}'.format(os.path.splitext(fname)[0], os.path.splitext(fname)[1])

    # Get LOD ramp rate from empirical model
    if not rg_dist_file:
        print('calculate range distance from file metadata')
        rg_dist = get_relative_range_distance(atr)
    else:
        print('read range distance from file: %s' % (rg_dist_file))
        rg_dist = readfile.read(rg_dist_file, datasetName='slantRangeDistance', print_msg=False)[0]
        rg_dist -= rg_dist[int(atr['REF_Y']), int(atr['REF_X'])]
    ramp_rate = np.array(rg_dist * 3.87e-7, np.float32)

    # Correct LOD Ramp for Input fname
    range2phase = -4*np.pi / float(atr['WAVELENGTH'])
    k = atr['FILE_TYPE']
    if k == 'timeseries':
        # read
        obj = timeseries(fname)
        obj.open()
        data = obj.read()

        # correct LOD
        diff_year = np.array(obj.yearList)
        diff_year -= diff_year[obj.refIndex]
        for i in range(data.shape[0]):
            data[i, :, :] -= ramp_rate * diff_year[i]

        # write
        obj_out = timeseries(out_file)
        obj_out.write2hdf5(data, refFile=fname)

    elif k in ['.unw']:
        data, atr = readfile.read(fname)

        dates = ptime.yyyymmdd2years(ptime.yyyymmdd(atr['DATE12'].split('-')))
        dt = dates[1] - dates[0]
        data -= ramp_rate * range2phase * dt

        writefile.write(data, out_file=out_file, metadata=atr)
    else:
        print('No need to correct for LOD for %s file' % (k))
    return out_file
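To get a feel for the size of the empirical ramp used above (3.87e-7 m/yr of drift per meter of relative range distance), a back-of-the-envelope number for a hypothetical pixel:

rg_dist = 10e3                     # [m] pixel 10 km farther in slant range than the reference
ramp_rate = rg_dist * 3.87e-7      # ~3.9e-3 m/yr, i.e. ~3.9 mm/yr of apparent motion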
Example #27
def filter_file(fname, filter_type, filter_par=None, fname_out=None):
    """Filter 2D matrix with selected filter
    Inputs:
        fname       : string, name/path of file to be filtered
        filter_type : string, filter type
        filter_par  : string, optional, parameter for low/high pass filter
                      for low/highpass_avg, it's the kernel size as int
                      for low/highpass_gaussian, it's sigma as float
    Output:
        fname_out   : string, optional, output file name/path
    """
    # Info
    filter_type = filter_type.lower()
    atr = readfile.read_attribute(fname)
    k = atr['FILE_TYPE']
    msg = 'filtering {} file: {} using {} filter'.format(k, fname, filter_type)
    if filter_type.endswith('avg'):
        if not filter_par:
            filter_par = 5
        msg += ' with kernel size of {}'.format(filter_par)
    elif filter_type.endswith('gaussian'):
        if not filter_par:
            filter_par = 3.0
        msg += ' with sigma of {:.1f}'.format(filter_par)
    print(msg)

    # output filename
    if not fname_out:
        fname_out = '{}_{}{}'.format(
            os.path.splitext(fname)[0], filter_type,
            os.path.splitext(fname)[1])

    # filtering file
    dsNames = readfile.get_dataset_list(fname)
    maxDigit = max([len(i) for i in dsNames])
    dsDict = dict()
    for dsName in dsNames:
        msg = 'filtering {d:<{w}} from {f} '.format(d=dsName,
                                                    w=maxDigit,
                                                    f=os.path.basename(fname))
        data = readfile.read(fname, datasetName=dsName, print_msg=False)[0]
        if len(data.shape) == 3:
            num_loop = data.shape[0]
            for i in range(num_loop):
                data[i, :, :] = filter_data(data[i, :, :], filter_type,
                                            filter_par)
                sys.stdout.write('\r{} {}/{} ...'.format(msg, i + 1, num_loop))
                sys.stdout.flush()
            print('')
        else:
            data = filter_data(data, filter_type, filter_par)
        dsDict[dsName] = data
    writefile.write(dsDict, out_file=fname_out, metadata=atr, ref_file=fname)
    return fname_out
Example #28
def stitch_files(fnames,
                 out_file,
                 apply_offset=True,
                 disp_fig=True,
                 no_data_value=None):
    """Stitch all input files into one
    """
    # printout msg
    print('files to be stitched:')
    for fname in fnames:
        print('\t{}'.format(fname))

    # stitching
    print('read data from file: {}'.format(fnames[0]))
    mat, atr = readfile.read(fnames[0])
    if no_data_value is not None:
        print('convert no_data_value from {} to NaN'.format(no_data_value))
        mat[mat == no_data_value] = np.nan

    for i in range(1, len(fnames)):
        fname = fnames[i]
        print('-' * 50)
        print('read data from file: {}'.format(fname))
        mat2, atr2 = readfile.read(fname)
        if no_data_value is not None:
            mat2[mat2 == no_data_value] = np.nan

        print('stitching ...')
        (mat, atr, mat11, mat22,
         mat_diff) = stitch_two_matrices(mat,
                                         atr,
                                         mat2,
                                         atr2,
                                         apply_offset=apply_offset)

        # plot
        if apply_offset:
            print('plot stitching & shifting result ...')
            suffix = '_{}{}'.format(i, i + 1)
            out_fig = '{}{}.png'.format(
                os.path.splitext(out_file)[0], suffix)
            plot_stitch(mat11,
                        mat22,
                        mat,
                        mat_diff,
                        out_fig=out_fig,
                        disp_fig=disp_fig)

    # write output file
    print('-' * 50)
    writefile.write(mat, out_file=out_file, metadata=atr)

    return out_file
Example #29
File: utils1.py  Project: hfattahi/PySAR
def nonzero_mask(File, out_file='maskConnComp.h5', datasetName=None):
    """Generate mask file for non-zero value of input multi-group hdf5 file"""
    atr = readfile.read_attribute(File)
    k = atr['FILE_TYPE']
    if k == 'ifgramStack':
        mask = ifgramStack(File).nonzero_mask(datasetName=datasetName)
    else:
        print('Only ifgramStack file is supported for now, input is '+k)
        return None

    atr['FILE_TYPE'] = 'mask'
    writefile.write(mask, out_file=out_file, metadata=atr)
    return out_file
Example #30
def main(iargs=None):
    inps = cmd_line_parse(iargs)

    temp_coh = calculate_temporal_coherence(ifgram_file=inps.ifgram_file,
                                            timeseries_file=inps.timeseries_file,
                                            ifg_num_file=inps.ifg_num_file)

    # write file
    atr = readfile.read_attribute(inps.timeseries_file)
    atr['FILE_TYPE'] = 'temporalCoherence'
    atr['UNIT'] = '1'
    writefile.write(temp_coh, out_file=inps.outfile, metadata=atr)
    return inps.outfile
Example #31
def write_model_data(model_data_copy, inps):
    """change simulated model data to mintpy format"""
    model_data_copy[inps.mask == False] = np.nan
    outfile = inps.outdir[0] + inps.modelfile.split('.')[0] + '_mintpy.h5'
    writefile.write(model_data_copy, out_file=outfile, metadata=inps.metadata)
    # wrap
    vmin = -float(inps.metadata['WAVELENGTH']) / 4
    vmax = float(inps.metadata['WAVELENGTH']) / 4
    model_data_copy_wrap = vmin + np.mod(model_data_copy - vmin, vmax - vmin)
    outfile = inps.outdir[0] + inps.modelfile.split('.')[0] + '_mintpy_wrap.h5'
    writefile.write(model_data_copy_wrap,
                    out_file=outfile,
                    metadata=inps.metadata)
Example #32
    def run_network_modification(self, step_name, plot=True):
        """Modify network of interferograms before the network inversion."""
        # check the existence of ifgramStack.h5
        stack_file, geom_file = ut.check_loaded_dataset(self.workDir, print_msg=False)[1:3]
        coh_txt = 'coherenceSpatialAvg.txt'
        try:
            net_fig = [i for i in ['network.pdf', 'pic/network.pdf'] if os.path.isfile(i)][0]
        except:
            net_fig = None

        # 1) output waterMask.h5 to simplify the detection/use of waterMask
        water_mask_file = 'waterMask.h5'
        if 'waterMask' in readfile.get_dataset_list(geom_file):
            print('generate {} from {} for convenience'.format(water_mask_file, geom_file))
            if ut.run_or_skip(out_file=water_mask_file, in_file=geom_file) == 'run':
                water_mask, atr = readfile.read(geom_file, datasetName='waterMask')
                # ignore no-data pixels in geometry files
                ds_name_list = readfile.get_dataset_list(geom_file)
                for ds_name in ['latitude','longitude']:
                    if ds_name in ds_name_list:
                        print('set pixels with 0 in {} to 0 in waterMask'.format(ds_name))
                        ds = readfile.read(geom_file, datasetName=ds_name)[0]
                        water_mask[ds == 0] = 0
                atr['FILE_TYPE'] = 'waterMask'
                writefile.write(water_mask, out_file=water_mask_file, metadata=atr)

        # 2) modify network
        iargs = [stack_file, '-t', self.templateFile]
        print('\nmodify_network.py', ' '.join(iargs))
        mintpy.modify_network.main(iargs)

        # 3) plot network
        iargs = [stack_file, '-t', self.templateFile, '--nodisplay']

        dsNames = readfile.get_dataset_list(stack_file)
        if any('phase' in i.lower() for i in dsNames):
            iargs += ['-d', 'coherence', '-v', '0.2', '1.0']
        elif any('offset' in i.lower() for i in dsNames):
            iargs += ['-d', 'offsetSNR', '-v', '0', '20']

        print('\nplot_network.py', ' '.join(iargs))

        # run
        if self.template['mintpy.plot'] and plot:
            if ut.run_or_skip(out_file=net_fig,
                              in_file=[stack_file, coh_txt, self.templateFile],
                              check_readable=False) == 'run':
                mintpy.plot_network.main(iargs)
        else:
            print('mintpy.plot is turned OFF, skip plotting network.')
        return
Example #33
    def run_network_modification(self, step_name, plot=True):
        """Modify network of interferograms before the network inversion."""
        # check the existence of ifgramStack.h5
        stack_file, geom_file = ut.check_loaded_dataset(self.workDir, print_msg=False)[1:3]
        coh_txt = '{}_coherence_spatialAvg.txt'.format(os.path.splitext(os.path.basename(stack_file))[0])
        try:
            net_fig = [i for i in ['Network.pdf', 'pic/Network.pdf'] if os.path.isfile(i)][0]
        except:
            net_fig = None

        # 1) output waterMask.h5 to simplify the detection/use of waterMask
        water_mask_file = 'waterMask.h5'
        if 'waterMask' in readfile.get_dataset_list(geom_file):
            print('generate {} from {} for convenience'.format(water_mask_file, geom_file))
            if ut.run_or_skip(out_file=water_mask_file, in_file=geom_file) == 'run':
                water_mask, atr = readfile.read(geom_file, datasetName='waterMask')
                # ignore no-data pixels in geometry files
                ds_name_list = readfile.get_dataset_list(geom_file)
                for ds_name in ['latitude','longitude']:
                    if ds_name in ds_name_list:
                        print('set pixels with 0 in {} to 0 in waterMask'.format(ds_name))
                        ds = readfile.read(geom_file, datasetName=ds_name)[0]
                        water_mask[ds == 0] = 0
                atr['FILE_TYPE'] = 'waterMask'
                writefile.write(water_mask, out_file=water_mask_file, metadata=atr)

        # 2) modify network
        scp_args = '{} -t {}'.format(stack_file, self.templateFile)
        print('modify_network.py', scp_args)
        mintpy.modify_network.main(scp_args.split())

        # 3) plot network
        if self.template['mintpy.plot'] and plot:
            scp_args = '{} -t {} --nodisplay'.format(stack_file, self.templateFile)

            dsNames = readfile.get_dataset_list(stack_file)
            if any('phase' in i.lower() for i in dsNames):
                scp_args += ' -d coherence -v 0.2 1.0 '
            elif any('offset' in i.lower() for i in dsNames):
                scp_args += ' -d offsetSNR -v 0 20 '

            print('\nplot_network.py', scp_args)
            if ut.run_or_skip(out_file=net_fig,
                              in_file=[stack_file, coh_txt, self.templateFile],
                              check_readable=False) == 'run':
                mintpy.plot_network.main(scp_args.split())

        # 4) aux files: maskConnComp and avgSpatialCoh
        self.generate_ifgram_aux_file()
        return
Example #34
def main(argv):
    try:
        dem_file = argv[1]
        dem_error_file = argv[2]
    except:
        usage()
        sys.exit(1)

    print('Correcting the DEM')

    dem, demrsc = readfile.read(dem_file)
    dem_error = readfile.read(dem_error_file)[0]

    dem_out = dem + dem_error
    writefile.write(dem_out, out_file='DEM_w_error.dem', metadata=demrsc)

    date12_file = open('111111-222222_baseline.rsc', 'w')
    date12_file.write('P_BASELINE_TOP_ODR'+'     '+'000')
    date12_file.close()
    return
Example #35
def ref_date_file(ts_file, ref_date, outfile=None):
    """Change input file reference date to a different one.
    Parameters: ts_file : str, timeseries file to be changed
                ref_date : str, date in YYYYMMDD format
                outfile  : if str, save to a different file
                           if None, modify the data value in the existing input file
    """
    print('-'*50)
    print('change reference date for file: {}'.format(ts_file))
    atr = readfile.read_attribute(ts_file)
    if ref_date == atr['REF_DATE']:
        print('same reference date chosen as existing reference date.')
        if not outfile:
            print('Nothing to be done.')
            return ts_file
        else:
            print('Copy {} to {}'.format(ts_file, outfile))
            shutil.copy2(ts_file, outfile)
            return outfile
    else:
        obj = timeseries(ts_file)
        obj.open(print_msg=False)
        ref_idx = obj.dateList.index(ref_date)
        print('reading data ...')
        ts_data = readfile.read(ts_file)[0]

        ts_data -= np.tile(ts_data[ref_idx, :, :].reshape(1, obj.length, obj.width), (obj.numDate, 1, 1))

        if not outfile:
            print('open {} with r+ mode'.format(ts_file))
            with h5py.File(ts_file, 'r+') as f:
                print("update /timeseries dataset and 'REF_DATE' attribute value")
                f['timeseries'][:] = ts_data
                f.attrs['REF_DATE'] = ref_date
            print('close {}'.format(ts_file))
        else:
            atr['REF_DATE'] = ref_date
            writefile.write(ts_data, outfile, metadata=atr, ref_file=ts_file)
    return outfile
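A minimal call sketch (date and file names are illustrative):

# re-reference the time series to 2018-01-01 and save to a new file,
# leaving the input file untouched
out_file = ref_date_file('timeseries.h5', '20180101', outfile='timeseries_ref20180101.h5')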
Example #36
def main(argv):
    try:
        timeseries_file = argv[0]
    except:
        usage()
        sys.exit(1)

    try:
        out_file = argv[1]
    except:
        out_file = 'sum_'+timeseries_file

    # Read Timeseries
    obj = timeseries(timeseries_file)
    obj.open()
    D = obj.read().reshape(obj.numDate, -1)

    # Calculate Sum
    sumD = np.zeros(D.shape)
    for i in range(obj.numDate):
        sumD[i, :] = np.sum(np.abs(D - D[i, :]), axis=0) / obj.numDate
        sys.stdout.write('\rcalculating epochs sum {}/{} ...'.format(i+1, obj.numDate))
        sys.stdout.flush()
    print('')
    del D

    # Normalize to 0 and 1
    # with high atmosphere equal to 0 and no atmosphere equal to 1
    sumD -= np.max(sumD, 0)
    sumD *= -1
    sumD /= np.max(sumD, 0)
    sumD[np.isnan(sumD)] = 1

    # Write sum epochs file
    sumD = np.reshape(sumD, (obj.numDate, obj.length, obj.width))
    atr = dict(obj.metadata)
    atr['UNIT'] = '1'
    writefile.write(sumD, out_file=out_file, metadata=atr, ref_file=timeseries_file)
    print('Done.')
Example #37
def run_unwrap_error_bridge(ifgram_file, water_mask_file, ramp_type=None, radius=50, 
                            ccName='connectComponent', dsNameIn='unwrapPhase',
                            dsNameOut='unwrapPhase_bridging'):
    """Run unwrapping error correction with bridging
    Parameters: ifgram_file     : str, path of ifgram stack file
                water_mask_file : str, path of water mask file
                ramp_type       : str, name of phase ramp to be removed during the phase jump estimation
                ccName          : str, dataset name of connected components
                dsNameIn        : str, dataset name of unwrap phase to be corrected
                dsNameOut       : str, dataset name of unwrap phase to be saved after correction
    Returns:    ifgram_file     : str, path of ifgram stack file
    """
    print('-'*50)
    print('correct unwrapping error in {} with bridging ...'.format(ifgram_file))
    if ramp_type is not None:
        print('estimate and remove a {} ramp while calculating phase offset'.format(ramp_type))

    # read water mask
    if water_mask_file and os.path.isfile(water_mask_file):
        print('read water mask from file:', water_mask_file)
        water_mask = readfile.read(water_mask_file)[0]
    else:
        water_mask = None

    # file info
    atr = readfile.read_attribute(ifgram_file)
    length, width = int(atr['LENGTH']), int(atr['WIDTH'])
    k = atr['FILE_TYPE']

    # correct unwrap error ifgram by ifgram
    if k == 'ifgramStack':
        date12_list = ifgramStack(ifgram_file).get_date12_list(dropIfgram=False)
        num_ifgram = len(date12_list)
        shape_out = (num_ifgram, length, width)

        # prepare output data writing
        print('open {} with r+ mode'.format(ifgram_file))
        f = h5py.File(ifgram_file, 'r+')
        print('input  dataset:', dsNameIn)
        print('output dataset:', dsNameOut)
        if dsNameOut in f.keys():
            ds = f[dsNameOut]
            print('access /{d} of np.float32 in size of {s}'.format(d=dsNameOut, s=shape_out))
        else:
            ds = f.create_dataset(dsNameOut,
                                  shape_out,
                                  maxshape=(None, None, None),
                                  chunks=True,
                                  compression=None)
            print('create /{d} of np.float32 in size of {s}'.format(d=dsNameOut, s=shape_out))

        # correct unwrap error ifgram by ifgram
        prog_bar = ptime.progressBar(maxValue=num_ifgram)
        for i in range(num_ifgram):
            # read unwrapPhase and connectComponent
            date12 = date12_list[i]
            unw = np.squeeze(f[dsNameIn][i, :, :])
            cc = np.squeeze(f[ccName][i, :, :])
            if water_mask is not None:
                cc[water_mask == 0] = 0

            # bridging
            cc_obj = connectComponent(conncomp=cc, metadata=atr)
            cc_obj.label()
            cc_obj.find_mst_bridge()
            unw_cor = cc_obj.unwrap_conn_comp(unw, ramp_type=ramp_type)

            # write to hdf5 file
            ds[i, :, :] = unw_cor
            prog_bar.update(i+1, suffix=date12)
        prog_bar.close()
        ds.attrs['MODIFICATION_TIME'] = str(time.time())
        f.close()
        print('close {} file.'.format(ifgram_file))

    if k == '.unw':
        # read unwrap phase
        unw = readfile.read(ifgram_file)[0]

        # read connected components
        cc_files0 = [ifgram_file+'.conncomp', os.path.splitext(ifgram_file)[0]+'_snap_connect.byt']
        cc_files = [i for i in cc_files0 if os.path.isfile(i)]
        if len(cc_files) == 0:
            raise FileNotFoundError(cc_files0)
        cc = readfile.read(cc_files[0])[0]
        if water_mask is not None:
            cc[water_mask == 0] = 0

        # bridging
        cc_obj = connectComponent(conncomp=cc, metadata=atr)
        cc_obj.label()
        cc_obj.find_mst_bridge()
        unw_cor = cc_obj.unwrap_conn_comp(unw, ramp_type=ramp_type)

        # write corrected data to a new file
        out_file = '{}_unwCor{}'.format(os.path.splitext(ifgram_file)[0],
                                        os.path.splitext(ifgram_file)[1])
        print('writing >>> {}'.format(out_file))
        writefile.write(unw_cor, out_file=out_file, ref_file=ifgram_file)

    return ifgram_file
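
A hypothetical invocation of the function above (file names are placeholders, not from the source):

run_unwrap_error_bridge('inputs/ifgramStack.h5', 'waterMask.h5',
                        ramp_type='linear',
                        dsNameIn='unwrapPhase',
                        dsNameOut='unwrapPhase_bridging')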
Example #44
def check_inputs(inps):
    parser = create_parser()

    # output directories/files
    atr = dict()
    mintpy_dir = None
    if inps.timeseries_file:
        atr = readfile.read_attribute(inps.timeseries_file)
        mintpy_dir = os.path.dirname(inps.timeseries_file)
        if not inps.outfile:
            fbase = os.path.splitext(inps.timeseries_file)[0]
            inps.outfile = '{}_{}.h5'.format(fbase, inps.trop_model)
    elif inps.geom_file:
        atr = readfile.read_attribute(inps.geom_file)
        mintpy_dir = os.path.join(os.path.dirname(inps.geom_file), '..')
    else:
        mintpy_dir = os.path.abspath(os.getcwd())

    # trop_file
    inps.trop_file = os.path.join(mintpy_dir, 'inputs/{}.h5'.format(inps.trop_model))
    print('output tropospheric delay file: {}'.format(inps.trop_file))

    # hour
    if not inps.hour:
        if 'CENTER_LINE_UTC' in atr.keys():
            inps.hour = ptime.closest_weather_product_time(atr['CENTER_LINE_UTC'], inps.trop_model)
        else:
            parser.print_usage()
            raise Exception('no input for hour')
    print('time of closest available product: {}:00 UTC'.format(inps.hour))

    # date list
    if inps.timeseries_file:
        print('read date list from timeseries file: {}'.format(inps.timeseries_file))
        ts_obj = timeseries(inps.timeseries_file)
        ts_obj.open(print_msg=False)
        inps.date_list = ts_obj.dateList
    elif len(inps.date_list) == 1:
        if os.path.isfile(inps.date_list[0]):
            print('read date list from text file: {}'.format(inps.date_list[0]))
            inps.date_list = ptime.yyyymmdd(np.loadtxt(inps.date_list[0],
                                                       dtype=bytes,
                                                       usecols=(0,)).astype(str).tolist())
        else:
            parser.print_usage()
            raise Exception('ERROR: input date list < 2')

    # Grib data directory
    inps.grib_dir = os.path.join(inps.weather_dir, inps.trop_model)
    if not os.path.isdir(inps.grib_dir):
        os.makedirs(inps.grib_dir)
        print('making directory: '+inps.grib_dir)

    # Date list to grib file list
    inps.grib_file_list = date_list2grib_file(inps.date_list,
                                              inps.hour,
                                              inps.trop_model,
                                              inps.grib_dir)

    if 'REF_Y' in atr.keys():
        inps.ref_yx = [int(atr['REF_Y']), int(atr['REF_X'])]
        print('reference pixel: {}'.format(inps.ref_yx))

    # Coordinate system: geocoded or not
    inps.geocoded = False
    if 'Y_FIRST' in atr.keys():
        inps.geocoded = True
    print('geocoded: {}'.format(inps.geocoded))

    # Prepare DEM, inc_angle, lat/lon file for PyAPS to read
    if inps.geom_file:
        geom_atr = readfile.read_attribute(inps.geom_file)
        print('converting DEM/incAngle for PyAPS to read')
        # DEM
        data = readfile.read(inps.geom_file, datasetName='height', print_msg=False)[0]
        inps.dem_file = 'pyapsDem.hgt'
        writefile.write(data, inps.dem_file, metadata=geom_atr)

        # inc_angle
        inps.inc_angle = readfile.read(inps.geom_file, datasetName='incidenceAngle', print_msg=False)[0]
        inps.inc_angle_file = 'pyapsIncAngle.flt'
        writefile.write(inps.inc_angle, inps.inc_angle_file, metadata=geom_atr)

        # latitude
        try:
            data = readfile.read(inps.geom_file, datasetName='latitude', print_msg=False)[0]
            print('converting lat for PyAPS to read')
            inps.lat_file = 'pyapsLat.flt'
            writefile.write(data, inps.lat_file, metadata=geom_atr)
        except Exception:
            inps.lat_file = None

        # longitude
        try:
            data = readfile.read(inps.geom_file, datasetName='longitude', print_msg=False)[0]
            print('converting lon for PyAPS to read')
            inps.lon_file = 'pyapsLon.flt'
            writefile.write(data, inps.lon_file, metadata=geom_atr)
        except Exception:
            inps.lon_file = None
    return inps, atr
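
A minimal sketch of the date-list parsing used above, assuming a hypothetical text file with one YYYYMMDD date in its first column:

import numpy as np

# dates.txt (hypothetical), one date per line:
# 20141213
# 20141225
date_list = np.loadtxt('dates.txt', dtype=bytes, usecols=(0,)).astype(str).tolist()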
Example #45
File: diff.py Project: hfattahi/PySAR
def diff_file(file1, file2, outFile=None, force=False):
    """Subtraction/difference of two input files"""
    if not outFile:
        fbase, fext = os.path.splitext(file1)
        if len(file2) > 1:
            raise ValueError('Output file name is required when more than two files are input.')
        outFile = '{}_diff_{}{}'.format(fbase, os.path.splitext(os.path.basename(file2[0]))[0], fext)
    print('{} - {} --> {}'.format(file1, file2, outFile))

    # Read basic info
    atr1 = readfile.read_attribute(file1)
    k1 = atr1['FILE_TYPE']
    atr2 = readfile.read_attribute(file2[0])
    k2 = atr2['FILE_TYPE']
    print('input files are: {} and {}'.format(k1, k2))

    if k1 == 'timeseries':
        if k2 not in ['timeseries', 'giantTimeseries']:
            raise Exception('Input multiple dataset files are not the same file type!')
        if len(file2) > 1:
            raise Exception(('Only subtraction between 2 files is supported for time-series file,'
                             ' {} input.'.format(len(file2)+1)))

        obj1 = timeseries(file1)
        obj1.open()
        if k2 == 'timeseries':
            obj2 = timeseries(file2[0])
            unit_fac = 1.
        elif k2 == 'giantTimeseries':
            obj2 = giantTimeseries(file2[0])
            unit_fac = 0.001
        obj2.open()
        ref_date, ref_y, ref_x = _check_reference(obj1.metadata, obj2.metadata)

        # check dates shared by two timeseries files
        dateListShared = [i for i in obj1.dateList if i in obj2.dateList]
        dateShared = np.ones((obj1.numDate), dtype=np.bool_)
        if dateListShared != obj1.dateList:
            print('WARNING: {} does not contain all dates in {}'.format(file2, file1))
            if force:
                dateExcluded = list(set(obj1.dateList) - set(dateListShared))
                print('Continue and enforce the differencing for their shared dates only,')
                print('\twith the following dates ignored for differencing:\n{}'.format(dateExcluded))
                dateShared[np.array([obj1.dateList.index(i) for i in dateExcluded])] = 0
            else:
                raise Exception('To enforce the differencing anyway, use --force option.')

        # consider different reference_date/pixel
        data2 = readfile.read(file2[0], datasetName=dateListShared)[0] * unit_fac
        if ref_date:
            data2 -= np.tile(data2[obj2.dateList.index(ref_date), :, :],
                             (data2.shape[0], 1, 1))
        if ref_y and ref_x:
            data2 -= np.tile(data2[:, ref_y, ref_x].reshape(-1, 1, 1),
                             (1, data2.shape[1], data2.shape[2]))

        data = obj1.read()
        mask = data == 0.
        data[dateShared] -= data2
        data[mask] = 0.               # Do not change zero phase value
        del data2
        writefile.write(data, out_file=outFile, ref_file=file1)

    elif all(i == 'ifgramStack' for i in [k1, k2]):
        obj1 = ifgramStack(file1)
        obj1.open()
        obj2 = ifgramStack(file2[0])
        obj2.open()
        dsNames = list(set(obj1.datasetNames) & set(obj2.datasetNames))
        if len(dsNames) == 0:
            raise ValueError('no common dataset between two files!')
        dsName = [i for i in ifgramDatasetNames if i in dsNames][0]

        # read data
        print('reading {} from file {} ...'.format(dsName, file1))
        data1 = readfile.read(file1, datasetName=dsName)[0]
        print('reading {} from file {} ...'.format(dsName, file2[0]))
        data2 = readfile.read(file2[0], datasetName=dsName)[0]

        # consider reference pixel
        if 'unwrapphase' in dsName.lower():
            print('referencing to pixel ({},{}) ...'.format(obj1.refY, obj1.refX))
            ref1 = data1[:, obj1.refY, obj1.refX]
            ref2 = data2[:, obj2.refY, obj2.refX]
            for i in range(data1.shape[0]):
                data1[i,:][data1[i, :] != 0.] -= ref1[i]
                data2[i,:][data2[i, :] != 0.] -= ref2[i]

        # operation and ignore zero values
        data1[data1 == 0] = np.nan
        data2[data2 == 0] = np.nan
        data = data1 - data2
        del data1, data2
        data[np.isnan(data)] = 0.

        # write to file
        dsDict = {}
        dsDict[dsName] = data
        writefile.write(dsDict, out_file=outFile, ref_file=file1)

    # Single dataset file
    else:
        data1 = readfile.read(file1)[0]
        data = np.array(data1, data1.dtype)
        for fname in file2:
            data2 = readfile.read(fname)[0]
            data = np.array(data, dtype=np.float32) - np.array(data2, dtype=np.float32)
            data = np.array(data, data1.dtype)
        print('writing >>> '+outFile)
        writefile.write(data, out_file=outFile, metadata=atr1)

    return outFile
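
Hypothetical calls to diff_file (file names are placeholders); note that the second argument is a list:

diff_file('timeseries.h5', ['inputs/ERA5.h5'], outFile='timeseries_ERA5.h5')
diff_file('ifgramStack.h5', ['ifgramStackSim.h5'], outFile='ifgramStackDiff.h5')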
Example #46
File: utils1.py Project: hfattahi/PySAR
def temporal_average(File, datasetName='coherence', updateMode=False, outFile=None):
    """Calculate temporal average of multi-temporal dataset, equivalent to stacking
    For ifgramStack/unwrapPhase, return average phase velocity

    Parameters: File : string, file to be averaged in time
                datasetName : string, dataset to be read from input file, for multiple
                    datasets file - ifgramStack - only
                    e.g.: coherence, unwrapPhase
                updateMode : bool
                outFile : string, output filename
                    None for auto output filename
                    False for do not save as output file
    Returns:    dataMean : 2D array
                outFile : string, output file name
    Examples:   avgPhaseVel = ut.temporal_average('ifgramStack.h5', datasetName='unwrapPhase')[0]
                ut.temporal_average('ifgramStack.h5', datasetName='coherence',
                                    outFile='avgSpatialCoh.h5', updateMode=True)
    """
    atr = readfile.read_attribute(File, datasetName=datasetName)
    k = atr['FILE_TYPE']
    if k not in ['ifgramStack', 'timeseries']:
        print('WARNING: input file is not multi-temporal file: {}, return itself.'.format(File))
        data = readfile.read(File)[0]
        return data, File

    # Default output filename
    if outFile is None:
        ext = os.path.splitext(File)[1]
        if k == 'ifgramStack':
            if datasetName == 'coherence':
                outFile = 'avgSpatialCoh.h5'
            elif 'unwrapPhase' in datasetName:
                outFile = 'avgPhaseVelocity.h5'
            else:
                outFile = 'avg{}.h5'.format(datasetName)
        elif k == 'timeseries':
            # e.g. timeseries_demErr.h5 -> avgDisplacement_demErr.h5
            processMark = ''
            if k in File:
                processMark = os.path.basename(File).split('timeseries')[1].split(ext)[0]
            outFile = 'avgDisplacement{}.h5'.format(processMark)

    if updateMode and os.path.isfile(outFile):
        dataMean = readfile.read(outFile)[0]
        return dataMean, outFile

    # Calculate temporal average
    if k == 'ifgramStack':
        dataMean = ifgramStack(File).temporal_average(datasetName=datasetName)
        if 'unwrapPhase' in datasetName:
            atr['FILE_TYPE'] = 'velocity'
            atr['UNIT'] = 'm/year'
        else:
            atr['FILE_TYPE'] = datasetName
    elif k == 'timeseries':
        dataMean = timeseries(File).temporal_average()
        atr['FILE_TYPE'] = 'displacement'

    if outFile:
        writefile.write(dataMean, out_file=outFile, metadata=atr)
    return dataMean, outFile
Example #47
File: utils1.py Project: hfattahi/PySAR
def run_deramp(fname, ramp_type, mask_file=None, out_file=None, datasetName=None):
    """ Remove ramp from each 2D matrix of input file
    Parameters: fname     : str, data file to be derampped
                ramp_type : str, name of ramp to be estimated.
                mask_file : str, file of mask of pixels used for ramp estimation
                out_file  : str, output file name
                datasetName : str, output dataset name, for ifgramStack file type only
    Returns:    out_file  : str, output file name
    """
    print('remove {} ramp from file: {}'.format(ramp_type, fname))
    if not out_file:
        fbase, fext = os.path.splitext(fname)
        out_file = '{}_ramp{}'.format(fbase, fext)

    start_time = time.time()
    atr = readfile.read_attribute(fname)

    # mask
    if mask_file and os.path.isfile(mask_file):
        mask = readfile.read(mask_file, datasetName='mask')[0]
        print('read mask file: '+mask_file)
    else:
        mask = np.ones((int(atr['LENGTH']), int(atr['WIDTH'])))
        print('use mask of the whole area')

    # deramping
    k = atr['FILE_TYPE']
    if k == 'timeseries':
        print('reading data ...')
        data = readfile.read(fname)[0]
        print('estimating phase ramp ...')
        data = deramp(data, mask, ramp_type=ramp_type, metadata=atr)[0]
        writefile.write(data, out_file, ref_file=fname)

    elif k == 'ifgramStack':
        obj = ifgramStack(fname)
        obj.open(print_msg=False)
        if not datasetName:
            datasetName = 'unwrapPhase'
        with h5py.File(fname, 'a') as f:
            ds = f[datasetName]
            dsNameOut = '{}_ramp'.format(datasetName)
            if dsNameOut in f.keys():
                dsOut = f[dsNameOut]
                print('access HDF5 dataset /{}'.format(dsNameOut))
            else:
                dsOut = f.create_dataset(dsNameOut, shape=(obj.numIfgram, obj.length, obj.width),
                                         dtype=np.float32, chunks=True, compression=None)
                print('create HDF5 dataset /{}'.format(dsNameOut))

            prog_bar = ptime.progressBar(maxValue=obj.numIfgram)
            for i in range(obj.numIfgram):
                data = ds[i, :, :]
                data = deramp(data, mask, ramp_type=ramp_type, metadata=atr)[0]
                dsOut[i, :, :] = data
                prog_bar.update(i+1, suffix='{}/{}'.format(i+1, obj.numIfgram))
            prog_bar.close()
            print('finished writing to file: {}'.format(fname))

    # Single Dataset File
    else:
        data = readfile.read(fname)[0]
        data = deramp(data, mask, ramp_type, metadata=atr)[0]
        print('writing >>> {}'.format(out_file))
        writefile.write(data, out_file=out_file, ref_file=fname)

    m, s = divmod(time.time()-start_time, 60)
    print('time used: {:02.0f} mins {:02.1f} secs.'.format(m, s))
    return out_file
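
Hypothetical usage of run_deramp (file names are placeholders; 'linear' and 'quadratic' are typical ramp types):

run_deramp('timeseries.h5', 'linear', mask_file='maskTempCoh.h5')
run_deramp('inputs/ifgramStack.h5', 'quadratic',
           mask_file='waterMask.h5', datasetName='unwrapPhase')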
Example #48
File: subset.py Project: hfattahi/PySAR
def subset_file(fname, subset_dict_input, out_file=None):
    """Subset file with
    Inputs:
        fname        : str, path/name of file
        out_file     : str, path/name of output file
        subset_dict : dict, subset parameters, including the following items:
                      subset_x   : list of 2 int,   subset in x direction,   default=None
                      subset_y   : list of 2 int,   subset in y direction,   default=None
                      subset_lat : list of 2 float, subset in lat direction, default=None
                      subset_lon : list of 2 float, subset in lon direction, default=None
                      fill_value : float, optional. fill value for area outside of data coverage. default=None
                                   None/absent to subset within data coverage only.
                      tight      : bool, tight subset or not, for lookup table file, i.e. geomap*.trans
    Outputs:
        out_file : str, path/name of output file;
                   out_file = 'subset_'+fname, if fname is in the current directory;
                   out_file = os.path.basename(fname), otherwise.
    """

    # Input File Info
    try:
        atr = readfile.read_attribute(fname)
    except Exception:
        return None

    width = int(atr['WIDTH'])
    length = int(atr['LENGTH'])
    k = atr['FILE_TYPE']
    print('subset '+k+' file: '+fname+' ...')

    subset_dict = subset_dict_input.copy()
    # Read Subset Inputs into 4-tuple box in pixel and geo coord
    pix_box, geo_box = subset_input_dict2box(subset_dict, atr)

    coord = ut.coordinate(atr)
    # if fill_value exists and is not None, subset data and fill the assigned value for areas outside its coverage;
    # otherwise, re-check the subset box to keep it within data coverage and initialize the matrix with np.nan
    outfill = bool(subset_dict.get('fill_value'))
    if not outfill:
        pix_box = coord.check_box_within_data_coverage(pix_box)
        subset_dict['fill_value'] = np.nan

    geo_box = coord.box_pixel2geo(pix_box)
    data_box = (0, 0, width, length)
    print('data   range in y/x: '+str(data_box))
    print('subset range in y/x: '+str(pix_box))
    print('data   range in lat/lon: '+str(coord.box_pixel2geo(data_box)))
    print('subset range in lat/lon: '+str(geo_box))

    if pix_box == data_box:
        print('Subset range == data coverage, no need to subset. Skip.')
        return fname

    # Calculate Subset/Overlap Index
    pix_box4data, pix_box4subset = get_box_overlap_index(data_box, pix_box)

    ###########################  Data Read and Write  ######################
    # Output File Name
    if not out_file:
        if os.getcwd() == os.path.dirname(os.path.abspath(fname)):
            if 'tight' in subset_dict.keys() and subset_dict['tight']:
                out_file = '{}_tight{}'.format(os.path.splitext(fname)[0],
                                               os.path.splitext(fname)[1])
            else:
                out_file = 'subset_'+os.path.basename(fname)
        else:
            out_file = os.path.basename(fname)
    print('writing >>> '+out_file)

    # subset datasets one by one
    dsNames = readfile.get_dataset_list(fname)
    maxDigit = max([len(i) for i in dsNames])
    dsDict = dict()
    for dsName in dsNames:
        print('subsetting {d:<{w}} from {f} ...'.format(
            d=dsName, w=maxDigit, f=os.path.basename(fname)))
        data = readfile.read(fname, datasetName=dsName, print_msg=False)[0]

        # subset 2D data
        if len(data.shape) == 2:
            data_overlap = data[pix_box4data[1]:pix_box4data[3],
                                pix_box4data[0]:pix_box4data[2]]
            data = np.ones((pix_box[3] - pix_box[1],
                            pix_box[2] - pix_box[0]), data.dtype) * subset_dict['fill_value']
            data[pix_box4subset[1]:pix_box4subset[3],
                 pix_box4subset[0]:pix_box4subset[2]] = data_overlap

        # subset 3D data
        elif len(data.shape) == 3:
            data_overlap = data[:,
                                pix_box4data[1]:pix_box4data[3],
                                pix_box4data[0]:pix_box4data[2]]
            data = np.ones((data.shape[0],
                            pix_box[3] - pix_box[1],
                            pix_box[2] - pix_box[0]), data.dtype) * subset_dict['fill_value']
            data[:,
                 pix_box4subset[1]:pix_box4subset[3],
                 pix_box4subset[0]:pix_box4subset[2]] = data_overlap

        dsDict[dsName] = data

    atr = ut.subset_attribute(atr, pix_box)
    writefile.write(dsDict, out_file=out_file, metadata=atr, ref_file=fname)
    return out_file
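
A hypothetical call to subset_file, cropping rows 500-1500 and columns 300-1300 and padding areas outside the data coverage with NaN:

import numpy as np

sub_dict = {'subset_y': [500, 1500],
            'subset_x': [300, 1300],
            'fill_value': np.nan}
subset_file('velocity.h5', sub_dict, out_file='subset_velocity.h5')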
Example #49
def detect_unwrap_error(ifgram_file, mask_file, mask_cc_file='maskConnComp.h5', unwDatasetName='unwrapPhase',
                        cutoff=1., min_num_pixel=1e4):
    """Detect unwrapping error based on phase closure and extract coherent conn comps
    based on its histogram distribution

    Check:
    https://en.wikipedia.org/wiki/Otsu%27s_method
    from skimage.filters import threshold_otsu
    
    Parameters: ifgram_file : string, path of ifgram stack file
                mask_file   : string, path of mask file, e.g. waterMask.h5, maskConnComp.h5
                mask_cc_file: string, path of mask file for coherent conn comps
                cutoff : float, cutoff value for the mean number of nonzero phase closure
                    to be selected as coherent conn comps candidate
                min_num_pixel : float, min number of pixels left after morphology operation
                    to be determined as coherent conn comps
    Returns:    mask_cc_file : string, path of mask file for coherent conn comps
    """
    print('-'*50)
    print('detect unwrapping error based on phase closure')
    obj = ifgramStack(ifgram_file)
    obj.open(print_msg=False)
    C = obj.get_design_matrix4triplet(obj.get_date12_list(dropIfgram=False))

    num_nonzero_closure = get_nonzero_phase_closure(ifgram_file, unwDatasetName=unwDatasetName)

    # get histogram of num_nonzero_phase_closure
    mask = readfile.read(mask_file)[0]
    mask *= num_nonzero_closure != 0.

    fig, ax = plt.subplots(nrows=1, ncols=2, figsize=[12, 4])
    num4disp = np.array(num_nonzero_closure, dtype=np.float32)
    num4disp[mask == 0] = np.nan
    im = ax[0].imshow(num4disp)
    ax[0].set_xlabel('Range [pix.]')
    ax[0].set_ylabel('Azimuth [pix.]')
    ax[0] = pp.auto_flip_direction(obj.metadata, ax=ax[0], print_msg=False)
    cbar = fig.colorbar(im, ax=ax[0])
    cbar.set_label('number of non-zero phase closure')

    print('2. extract coherent conn comps with unwrap error based on histogram distribution')
    max_nonzero_closure = int(np.max(num_nonzero_closure[mask]))
    bin_value, bin_edge = ax[1].hist(num_nonzero_closure[mask].flatten(),
                                     range=(0, max_nonzero_closure),
                                     log=True,
                                     bins=max_nonzero_closure)[0:2]
    ax[1].set_xlabel('number of non-zero phase closure')
    ax[1].set_ylabel('number of pixels')

    if 'Closure' not in unwDatasetName:
        print('eliminate pixels with number of nonzero phase closure < 5% of total phase closure number')
        print('\twhich can be corrected using phase closure alone.')
        bin_value[:int(C.shape[0]*0.05)] = 0.
    bin_value_thres = ut.median_abs_deviation_threshold(bin_value, cutoff=cutoff)
    print('median abs deviation cutoff value: {}'.format(cutoff))

    plt.plot([0, max_nonzero_closure], [bin_value_thres, bin_value_thres])
    out_img = 'numUnwErr_stat.png'
    fig.savefig(out_img, bbox_inches='tight', transparent=True, dpi=300)
    print('save unwrap error detection result to {}'.format(out_img))

    # histogram --> candidates of coherence conn comps --> mask_cc
    # find pixel clusters sharing similar number of non-zero phase closure
    print('searching connected components with more than {} pixels'.format(min_num_pixel))
    bin_label, n_bins = ndimage.label(bin_value > bin_value_thres)

    mask_cc = np.zeros(num_nonzero_closure.shape, dtype=np.int16)
    # first conn comp - reference conn comp with zero non-zero phase closure
    num_cc = 1
    mask_cc1 = num_nonzero_closure == 0.
    mask_cc1s = ut.get_all_conn_components(mask_cc1, min_num_pixel=min_num_pixel)
    for mask_cc1 in mask_cc1s:
        mask_cc += mask_cc1

    # other conn comps - target conn comps to be corrected for unwrap error
    for i in range(n_bins):
        idx = np.where(bin_label == i+1)[0]
        mask_cci0 = np.multiply(num_nonzero_closure >= bin_edge[idx[0]],
                                num_nonzero_closure <  bin_edge[idx[-1]+1])
        mask_ccis = ut.get_all_conn_components(mask_cci0, min_num_pixel=min_num_pixel)
        if mask_ccis:
            for mask_cci in mask_ccis:
                num_cc += 1
                mask_cc += mask_cci * num_cc
    
                fig, ax = plt.subplots(nrows=1, ncols=2, figsize=[8, 4])
                im = ax[0].imshow(mask_cci0)
                im = ax[1].imshow(mask_cci)
                fig.savefig('mask_cc{}.png'.format(num_cc),
                            bbox_inches='tight', transparent=True, dpi=300)

    # save to hdf5 file
    num_bridge = num_cc - 1
    atr = dict(obj.metadata)
    atr['FILE_TYPE'] = 'mask'
    atr['UNIT'] = '1'
    writefile.write(mask_cc, out_file=mask_cc_file, metadata=atr)

    # plot and save figure to img file
    out_img = '{}.png'.format(os.path.splitext(mask_cc_file)[0])
    fig, ax = plt.subplots(figsize=[6, 8])
    im = ax.imshow(mask_cc)
    ax = pp.auto_flip_direction(atr, ax=ax, print_msg=False)
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", "3%", pad="3%")
    cbar = plt.colorbar(im, cax=cax, ticks=np.arange(num_bridge+2))
    fig.savefig(out_img, bbox_inches='tight', transparent=True, dpi=300)
    print('save to {}'.format(out_img))

    return mask_cc_file
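
A hypothetical invocation of the detector above (file names are placeholders):

detect_unwrap_error('inputs/ifgramStack.h5', 'waterMask.h5',
                    mask_cc_file='maskConnComp.h5',
                    cutoff=1., min_num_pixel=1e4)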
Example #50
def main(iargs=None):
    inps = cmd_line_parse(iargs)

    # 1. Extract the common area of two input files
    # Basic info
    atr1 = readfile.read_attribute(inps.file[0])
    atr2 = readfile.read_attribute(inps.file[1])
    if any('X_FIRST' not in i for i in [atr1, atr2]):
        raise Exception('ERROR: Not all input files are geocoded.')

    k1 = atr1['FILE_TYPE']
    print('Input 1st file is '+k1)

    # Common AOI in lalo
    west, east, south, north = get_overlap_lalo(atr1, atr2)
    lon_step = float(atr1['X_STEP'])
    lat_step = float(atr1['Y_STEP'])
    width = int(round((east - west) / lon_step))
    length = int(round((south - north) / lat_step))

    # Read data in common AOI: LOS displacement, heading angle, incident angle
    u_los = np.zeros((2, width*length))
    heading = []
    incidence = []
    for i in range(len(inps.file)):
        fname = inps.file[i]
        print('---------------------')
        print('reading '+fname)
        atr = readfile.read_attribute(fname)

        coord = ut.coordinate(atr)
        [x0, x1] = coord.lalo2yx([west, east], coord_type='lon')
        [y0, y1] = coord.lalo2yx([north, south], coord_type='lat')
        V = readfile.read(fname, box=(x0, y0, x1, y1))[0]
        u_los[i, :] = V.flatten()

        heading_angle = float(atr['HEADING'])
        if heading_angle < 0.:
            heading_angle += 360.
        print('heading angle: '+str(heading_angle))
        heading_angle *= np.pi/180.
        heading.append(heading_angle)

        inc_angle = float(ut.incidence_angle(atr, dimension=0))
        inc_angle *= np.pi/180.
        incidence.append(inc_angle)

    # 2. Project displacement from LOS to Horizontal and Vertical components
    # math for 3D: cos(theta)*Uz - cos(alpha)*sin(theta)*Ux + sin(alpha)*sin(theta)*Uy = Ulos
    # math for 2D: cos(theta)*Uv - sin(alpha-az)*sin(theta)*Uh = Ulos   #Uh_perp = 0.0
    # This could be easily modified to support multiple view geometry
    # (e.g. two adjacent tracks from asc & desc) to resolve 3D

    # Design matrix
    A = np.zeros((2, 2))
    for i in range(len(inps.file)):
        A[i, 0] = np.cos(incidence[i])
        A[i, 1] = np.sin(incidence[i]) * np.sin(heading[i]-inps.azimuth)

    A_inv = np.linalg.pinv(A)
    u_vh = np.dot(A_inv, u_los)

    u_v = np.reshape(u_vh[0, :], (length, width))
    u_h = np.reshape(u_vh[1, :], (length, width))

    # 3. Output
    # Attributes
    atr = atr1.copy()
    atr['WIDTH'] = str(width)
    atr['LENGTH'] = str(length)
    atr['X_FIRST'] = str(west)
    atr['Y_FIRST'] = str(north)
    atr['X_STEP'] = str(lon_step)
    atr['Y_STEP'] = str(lat_step)

    print('---------------------')
    outname = inps.outfile[0]
    print('writing   vertical component to file: '+outname)
    writefile.write(u_v, out_file=outname, metadata=atr)

    outname = inps.outfile[1]
    print('writing horizontal component to file: '+outname)
    writefile.write(u_h, out_file=outname, metadata=atr)

    print('Done.')
    return
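
A toy check of the 2D decomposition above: build the 2x2 design matrix from two viewing geometries and invert it for a single pixel (all angles and displacements are made up):

import numpy as np

inc = np.deg2rad([34., 39.])       # incidence angles for asc / desc tracks (hypothetical)
head = np.deg2rad([348., 192.])    # heading angles (hypothetical)
az = 0.                            # azimuth of the horizontal component of interest

A = np.zeros((2, 2))
for i in range(2):
    A[i, 0] = np.cos(inc[i])
    A[i, 1] = np.sin(inc[i]) * np.sin(head[i] - az)

d_los = np.array([0.010, -0.020])            # LOS displacements [m] at one pixel
u_v, u_h = np.linalg.pinv(A).dot(d_los)      # vertical and horizontal components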
Example #51
File: geocode.py Project: hfattahi/PySAR
def run_geocode(inps):
    """geocode all input files"""
    start_time = time.time()

    # Prepare geometry for geocoding
    res_obj = resample(lookupFile=inps.lookupFile,
                       dataFile=inps.file[0],
                       SNWE=inps.SNWE,
                       laloStep=inps.laloStep,
                       processor=inps.processor)
    res_obj.open()

    # resample input files one by one
    for infile in inps.file:
        print('-' * 50+'\nresampling file: {}'.format(infile))
        ext = os.path.splitext(infile)[1]
        atr = readfile.read_attribute(infile, datasetName=inps.dset)
        outfile = auto_output_filename(infile, inps)
        if inps.updateMode and ut.run_or_skip(outfile, in_file=[infile, inps.lookupFile]) == 'skip':
            print('update mode is ON, skip geocoding.')
            continue

        # read source data and resample
        dsNames = readfile.get_dataset_list(infile, datasetName=inps.dset)
        maxDigit = max([len(i) for i in dsNames])
        dsResDict = dict()
        for dsName in dsNames:
            print('reading {d:<{w}} from {f} ...'.format(d=dsName,
                                                         w=maxDigit,
                                                         f=os.path.basename(infile)))
            if ext in ['.h5','.he5']:
                data = readfile.read(infile, datasetName=dsName, print_msg=False)[0]
            else:
                data, atr = readfile.read(infile, datasetName=dsName, print_msg=False)

            # keep timeseries data as 3D matrix when there is only one acquisition
            # because readfile.read() will squeeze it to 2D
            if atr['FILE_TYPE'] == 'timeseries' and len(data.shape) == 2:
                data = np.reshape(data, (1, data.shape[0], data.shape[1]))

            res_data = res_obj.run_resample(src_data=data,
                                            interp_method=inps.interpMethod,
                                            fill_value=inps.fillValue,
                                            nprocs=inps.nprocs,
                                            print_msg=True)
            dsResDict[dsName] = res_data

        # update metadata
        if inps.radar2geo:
            atr = metadata_radar2geo(atr, res_obj)
        else:
            atr = metadata_geo2radar(atr, res_obj)
        #if len(dsNames) == 1 and dsName not in ['timeseries']:
        #    atr['FILE_TYPE'] = dsNames[0]
        #    infile = None

        writefile.write(dsResDict, out_file=outfile, metadata=atr, ref_file=infile)

    m, s = divmod(time.time()-start_time, 60)
    print('time used: {:02.0f} mins {:02.1f} secs.\n'.format(m, s))
    return outfile
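
A minimal sketch of the resampling core used above, reusing the same readfile/resample imports as the snippet; file names and parameter values are hypothetical:

data = readfile.read('velocity.h5')[0]
res_obj = resample(lookupFile='inputs/geometryRadar.h5',
                   dataFile='velocity.h5',
                   SNWE=None, laloStep=None, processor='isce')
res_obj.open()
geo_data = res_obj.run_resample(src_data=data, interp_method='nearest',
                                fill_value=np.nan, nprocs=1)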
Example #52
def reference_file(inps):
    """Seed input file with option from input namespace
    Return output file name if succeed; otherwise, return None
    """
    if not inps:
        inps = cmd_line_parse([''])
    atr = readfile.read_attribute(inps.file)
    if (inps.ref_y and inps.ref_x and 'REF_Y' in atr.keys()
            and inps.ref_y == int(atr['REF_Y']) and inps.ref_x == int(atr['REF_X'])
            and not inps.force):
        print('Same reference pixel is already selected/saved in file, skip updating.')
        return inps.file

    # Get stack and mask
    stack = ut.temporal_average(inps.file, datasetName='unwrapPhase', updateMode=True, outFile=False)[0]
    mask = np.multiply(~np.isnan(stack), stack != 0.)
    if np.nansum(mask) == 0.0:
        raise ValueError('no pixel found with valid phase value in all datasets.')

    if inps.ref_y and inps.ref_x and mask[inps.ref_y, inps.ref_x] == 0.:
        raise ValueError('reference y/x has NaN or zero value in some dataset. Please re-select.')

    # Find reference y/x
    if not inps.ref_y or not inps.ref_x:
        if inps.method == 'maxCoherence':
            inps.ref_y, inps.ref_x = select_max_coherence_yx(coh_file=inps.coherenceFile,
                                                             mask=mask,
                                                             min_coh=inps.minCoherence)
        elif inps.method == 'random':
            inps.ref_y, inps.ref_x = random_select_reference_yx(mask)
        elif inps.method == 'manual':
            inps = manual_select_reference_yx(stack, inps, mask)
    if not inps.ref_y or not inps.ref_x:
        raise ValueError('ERROR: no reference y/x found.')

    # Seeding file with reference y/x
    atrNew = reference_point_attribute(atr, y=inps.ref_y, x=inps.ref_x)
    if not inps.write_data:
        print('Add/update ref_x/y attribute to file: '+inps.file)
        print(atrNew)
        inps.outfile = ut.add_attribute(inps.file, atrNew)

    else:
        if not inps.outfile:
            inps.outfile = '{}_seeded{}'.format(os.path.splitext(inps.file)[0],
                                                os.path.splitext(inps.file)[1])
        k = atr['FILE_TYPE']

        # For ifgramStack file, update data value directly, do not write to new file
        if k == 'ifgramStack':
            f = h5py.File(inps.file, 'r+')
            ds = f[k].get('unwrapPhase')
            for i in range(ds.shape[0]):
                ds[i, :, :] -= ds[i, inps.ref_y, inps.ref_x]
            f[k].attrs.update(atrNew)
            f.close()
            inps.outfile = inps.file

        elif k == 'timeseries':
            data = timeseries(inps.file).read()
            for i in range(data.shape[0]):
                data[i, :, :] -= data[i, inps.ref_y, inps.ref_x]
            obj = timeseries(inps.outfile)
            atr.update(atrNew)
            obj.write2hdf5(data=data, metadata=atr, refFile=inps.file)
            obj.close()
        else:
            print('writing >>> '+inps.outfile)
            data = readfile.read(inps.file)[0]
            data -= data[inps.ref_y, inps.ref_x]
            atr.update(atrNew)
            writefile.write(data, out_file=inps.outfile, metadata=atr)
    ut.touch([inps.coherenceFile, inps.maskFile])
    return inps.outfile
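
The seeding step above reduces to a per-pixel subtraction; a toy equivalent for a single 2D dataset:

import numpy as np

data = np.random.rand(100, 100).astype(np.float32)
ref_y, ref_x = 50, 60              # hypothetical reference pixel
data -= data[ref_y, ref_x]         # value at the reference pixel becomes zero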