Code example #1
File: incidence_angle.py Project: whigg/PySAR
def main(iargs=None):
    inps = cmd_line_parse(iargs)

    # Calculate incidence angle
    atr = readfile.read_attribute(inps.file)
    dem = None
    if inps.dem_file:
        dem = readfile.read(inps.dem_file, datasetName='height')[0]
    angle = ut.incidence_angle(atr, dem=dem, dimension=2)

    # Geo coord
    if 'Y_FIRST' in atr.keys():
        print('Input file is geocoded, only center incidence angle is calculated:')
        print(angle)
        length = int(atr['LENGTH'])
        width = int(atr['WIDTH'])
        angle_mat = np.zeros((length, width), np.float32)
        angle_mat[:] = angle
        angle = angle_mat

    atr['FILE_TYPE'] = 'mask'
    atr['UNIT'] = 'degree'
    if 'REF_DATE' in atr.keys():
        atr.pop('REF_DATE')

    if not inps.outfile:
        inps.outfile = 'incidenceAngle.h5'
    writefile.write(angle, out_file=inps.outfile, metadata=atr)
    return inps.outfile
Code example #2
def timeseries2ifgram(ts_file, ifgram_file, out_file='reconUnwrapIfgram.h5'):
    # read time-series
    atr = readfile.read_attribute(ts_file)
    range2phase = -4. * np.pi / float(atr['WAVELENGTH'])
    print('reading timeseries data from file {} ...'.format(ts_file))
    ts_data = readfile.read(ts_file)[0] * range2phase
    num_date, length, width = ts_data.shape
    ts_data = ts_data.reshape(num_date, -1)

    # reconstruct unwrapPhase
    print('reconstructing the interferograms from timeseries')
    stack_obj = ifgramStack(ifgram_file)
    stack_obj.open(print_msg=False)
    A1 = stack_obj.get_design_matrix4timeseries_estimation(dropIfgram=False)[0]
    num_ifgram = A1.shape[0]
    A0 = -1. * np.ones((num_ifgram, 1))
    A = np.hstack((A0, A1))
    ifgram_est = np.dot(A, ts_data).reshape(num_ifgram, length, width)
    ifgram_est = np.array(ifgram_est, dtype=ts_data.dtype)
    del ts_data

    # write to ifgram file
    dsDict = {}
    dsDict['unwrapPhase'] = ifgram_est
    writefile.write(dsDict, out_file=out_file, ref_file=ifgram_file)
    return out_file
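
The reconstruction above is one matrix multiplication: each interferogram equals the difference between the phases of its two acquisition dates, and the design matrix A = [A0, A1] encodes those +/-1 coefficients (the extra A0 column covers the first date, whose time-series value is zero by referencing). A minimal sketch of the same algebra on toy numbers (the values are made up for illustration):

import numpy as np

# three acquisitions, two interferograms: (date1, date2) and (date2, date3)
ts = np.array([0.0, 1.0, 3.0])       # toy phase time-series; ts[0] = 0 (reference date)

A1 = np.array([[ 1., 0.],            # ifgram (1, 2): +1 at date2
               [-1., 1.]])           # ifgram (2, 3): -1 at date2, +1 at date3
A0 = -1. * np.ones((2, 1))           # column for the first (reference) date
A = np.hstack((A0, A1))

print(A.dot(ts))                     # [1. 2.] = [ts[1]-ts[0], ts[2]-ts[1]]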
Code example #3
def get_nonzero_phase_closure(ifgram_file,
                              out_file=None,
                              thres=0.1,
                              unwDatasetName='unwrapPhase'):
    """Calculate/Read number of non-zero phase closure
    Parameters: ifgram_file : string, path of ifgram stack file
                out_file    : string, path of num non-zero phase closure file
    Returns:    num_nonzero_closure : 2D np.array in size of (length, width)
    """
    if not out_file:
        out_file = 'numNonzeroPhaseClosure_{}.h5'.format(unwDatasetName)
    if os.path.isfile(out_file) and readfile.read_attribute(out_file):
        print('1. read number of nonzero phase closure from file: {}'.format(
            out_file))
        num_nonzero_closure = readfile.read(out_file)[0]
    else:
        obj = ifgramStack(ifgram_file)
        obj.open(print_msg=False)
        length, width = obj.length, obj.width

        ref_phase = obj.get_reference_phase(unwDatasetName=unwDatasetName,
                                            dropIfgram=False)
        C = obj.get_design_matrix4triplet(
            obj.get_date12_list(dropIfgram=False))

        # calculate phase closure line by line to save memory usage
        num_nonzero_closure = np.zeros((length, width), np.float32)
        print(
            '1. calculating phase closure of all pixels from dataset - {} ...'.
            format(unwDatasetName))
        line_step = 10
        num_loop = int(np.ceil(length / line_step))
        prog_bar = ptime.progressBar(maxValue=num_loop)
        for i in range(num_loop):
            # read phase
            i0, i1 = i * line_step, min(length, (i + 1) * line_step)
            box = (0, i0, width, i1)
            pha_data = ifginv.read_unwrap_phase(obj,
                                                box,
                                                ref_phase,
                                                unwDatasetName=unwDatasetName,
                                                dropIfgram=False,
                                                print_msg=False)
            # calculate phase closure
            pha_closure = np.dot(C, pha_data)
            pha_closure = np.abs(pha_closure - ut.wrap(pha_closure))
            # get number of non-zero phase closure
            num_nonzero = np.sum(pha_closure >= thres, axis=0)
            num_nonzero_closure[i0:i1, :] = num_nonzero.reshape(i1 - i0, width)
            prog_bar.update(i + 1,
                            every=1,
                            suffix='{}/{} lines'.format(i1, length))
        prog_bar.close()

        atr = dict(obj.metadata)
        atr['FILE_TYPE'] = 'mask'
        atr['UNIT'] = '1'
        writefile.write(num_nonzero_closure, out_file=out_file, metadata=atr)
    return num_nonzero_closure
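
For each triplet of interferograms (ij, jk, ik), the closure phase pha_ij + pha_jk - pha_ik should wrap to zero; a non-zero integer multiple of 2*pi flags an unwrapping error. That is what the dot product with the triplet design matrix C and the comparison against ut.wrap() detect above. A minimal sketch on one toy triplet (the wrap helper assumes the common [-pi, pi) convention):

import numpy as np

def wrap(pha):
    # map phase into [-pi, pi), a common wrapping convention
    return pha - np.round(pha / (2 * np.pi)) * 2 * np.pi

# one triplet: closure = pha_12 + pha_23 - pha_13
C = np.array([[1., 1., -1.]])
pha = np.array([2.0, 1.5, 3.5 - 2 * np.pi])    # pha_13 carries one 2*pi unwrapping error
closure = C.dot(pha)                           # = 2*pi instead of 0
print(np.abs(closure - wrap(closure)) >= 0.1)  # [ True] -> counted as non-zero closure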
Code example #4
def main(iargs=None):
    inps = cmd_line_parse(iargs)
    if not inps.outfile:
        inps.outfile = os.path.splitext(inps.file)[0]+'.h5'

    if inps.data_type:
        if inps.data_type in ['float', 'float32', 'np.float32']:
            inps.data_type = np.float32
        elif inps.data_type in ['float64', 'np.float64']:
            inps.data_type = np.float64
        elif inps.data_type in ['int', 'int16', 'np.int16']:
            inps.data_type = np.int16
        elif inps.data_type in ['bool', 'np.bool_']:
            inps.data_type = np.bool_
        elif inps.data_type in ['complex', 'np.complex64']:
            inps.data_type = np.complex64
        elif inps.data_type in ['complex128', 'np.complex128']:
            inps.data_type = np.complex128
        else:
            raise ValueError('unrecognized input data type: {}'.format(inps.data_type))

    atr = readfile.read_attribute(inps.file)
    dsNames = readfile.get_dataset_list(inps.file)
    dsDict = {}
    for dsName in dsNames:
        data = readfile.read(inps.file, datasetName=dsName)[0]
        if inps.data_type:
            data = np.array(data, inps.data_type)
        dsDict[dsName] = data
    writefile.write(dsDict, out_file=inps.outfile, metadata=atr)
    return inps.outfile
Code example #5
File: multilook.py Project: whigg/PySAR
def multilook_file(infile, lks_y, lks_x, outfile=None):
    lks_y = int(lks_y)
    lks_x = int(lks_x)

    # input file info
    atr = readfile.read_attribute(infile)
    k = atr['FILE_TYPE']
    print('multilooking {} {} file: {}'.format(atr['PROCESSOR'], k, infile))
    print('number of looks in y / azimuth direction: %d' % lks_y)
    print('number of looks in x / range   direction: %d' % lks_x)

    # output file name
    if not outfile:
        if os.getcwd() == os.path.dirname(os.path.abspath(infile)):
            ext = os.path.splitext(infile)[1]
            outfile = os.path.splitext(infile)[0] + '_' + str(
                lks_y) + 'alks_' + str(lks_x) + 'rlks' + ext
        else:
            outfile = os.path.basename(infile)
    #print('writing >>> '+outfile)

    # read source data and multilooking
    dsNames = readfile.get_dataset_list(infile)
    maxDigit = max([len(i) for i in dsNames])
    dsDict = dict()
    for dsName in dsNames:
        print('multilooking {d:<{w}} from {f} ...'.format(
            d=dsName, w=maxDigit, f=os.path.basename(infile)))
        data = readfile.read(infile, datasetName=dsName, print_msg=False)[0]
        data = multilook_data(data, lks_y, lks_x)
        dsDict[dsName] = data
    atr = multilook_attribute(atr, lks_y, lks_x)
    writefile.write(dsDict, out_file=outfile, metadata=atr, ref_file=infile)
    return outfile
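
multilook_data() itself is a block average: every lks_y x lks_x window of pixels collapses into one. A minimal sketch of that operation for a 2D array (a toy version; the real helper also handles 3D stacks and odd sizes):

import numpy as np

def multilook_data_sketch(data, lks_y, lks_x):
    """Average lks_y x lks_x blocks of a 2D array (toy version)."""
    length_lks = data.shape[0] // lks_y
    width_lks = data.shape[1] // lks_x
    # crop to a multiple of the look numbers, then block-average
    data = data[:length_lks * lks_y, :width_lks * lks_x]
    return data.reshape(length_lks, lks_y, width_lks, lks_x).mean(axis=(1, 3))

print(multilook_data_sketch(np.arange(16.).reshape(4, 4), 2, 2))
# [[ 2.5  4.5]
#  [10.5 12.5]]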
Code example #6
def main(iargs=None):
    inps = cmd_line_parse(iargs)

    # read timeseries data
    obj = timeseries(inps.timeseries_file)
    obj.open()
    ts_data = obj.read()
    inps.date_list = list(obj.dateList)

    # read topographic data (DEM)
    dem = read_topographic_data(inps.geom_file, obj.metadata)

    # estimate phase/elevation ratio parameters
    X = estimate_phase_elevation_ratio(dem, ts_data, inps)

    # correct trop delay in timeseries
    trop_data = estimate_tropospheric_delay(dem, X, obj.metadata)
    mask = ts_data == 0.
    ts_data -= trop_data
    ts_data[mask] = 0.

    # write time-series file
    metadata = dict(obj.metadata)
    metadata['pysar.troposphericDelay.polyOrder'] = str(inps.poly_order)
    if not inps.outfile:
        inps.outfile = '{}_tropHgt.h5'.format(
            os.path.splitext(inps.timeseries_file)[0])
    writefile.write(ts_data,
                    out_file=inps.outfile,
                    metadata=metadata,
                    ref_file=inps.timeseries_file)
    return inps.outfile
Code example #7
def add_file(fnames, out_file=None):
    """Generate sum of all input files
    Parameters: fnames : list of str, path/name of input files to be added
                out_file : str, optional, path/name of output file
    Returns:    out_file : str, path/name of output file
    Example:    'mask_all.h5' = add_file(['mask_1.h5','mask_2.h5','mask_3.h5'], 'mask_all.h5')
    """
    # Default output file name
    ext = os.path.splitext(fnames[0])[1]
    if not out_file:
        out_file = os.path.splitext(fnames[0])[0]
        for i in range(1, len(fnames)):
            out_file += '_plus_' + os.path.splitext(os.path.basename(
                fnames[i]))[0]
        out_file += ext

    atr = readfile.read_attribute(fnames[0])
    dsNames = readfile.get_dataset_list(fnames[0])
    dsDict = {}
    for dsName in dsNames:
        print('adding {} ...'.format(dsName))
        data = readfile.read(fnames[0], datasetName=dsName)[0]
        for i in range(1, len(fnames)):
            d = readfile.read(fnames[i], datasetName=dsName)[0]
            data = add_matrix(data, d)
        dsDict[dsName] = data
    writefile.write(dsDict,
                    out_file=out_file,
                    metadata=atr,
                    ref_file=fnames[0])
    return out_file
Code example #8
File: image_math.py Project: fossabot/PySAR
def file_operation(fname, operator, operand, out_file=None):
    """Mathmathic operation of file"""

    # Basic Info
    atr = readfile.read_attribute(fname)
    k = atr['FILE_TYPE']
    print('input is ' + k + ' file: ' + fname)
    print('operation: file %s %f' % (operator, operand))

    # default output filename
    if not out_file:
        if operator in ['+', 'plus', 'add', 'addition']:
            suffix = 'plus'
        elif operator in ['-', 'minus', 'subtract', 'subtraction']:
            suffix = 'minus'
        elif operator in ['*', 'times', 'multiply', 'multiplication']:
            suffix = 'multiply'
        elif operator in ['/', 'obelus', 'divide', 'division']:
            suffix = 'divide'
        elif operator in ['^', 'pow', 'power']:
            suffix = 'pow'
        else:
            raise ValueError('unrecognized operator: {}'.format(operator))
        out_file = '{}_{}{}{}'.format(
            os.path.splitext(fname)[0], suffix, str(operand),
            os.path.splitext(fname)[1])

    dsNames = readfile.get_dataset_list(fname)
    dsDict = {}
    for dsName in dsNames:
        data = readfile.read(fname, datasetName=dsName)[0]
        data = data_operation(data, operator, operand)
        dsDict[dsName] = data
    writefile.write(dsDict, out_file=out_file, metadata=atr, ref_file=fname)
    return out_file
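
data_operation() applies the scalar operand pixel-wise with the chosen operator. A minimal sketch of one way to implement it with a numpy ufunc lookup (data_operation_sketch is a hypothetical stand-in; the real helper may differ):

import numpy as np

def data_operation_sketch(data, operator, operand):
    """Apply 'data <operator> operand' element-wise (toy version)."""
    ops = {'+': np.add, '-': np.subtract, '*': np.multiply,
           '/': np.divide, '^': np.power}
    if operator not in ops:
        raise ValueError('unsupported operator: {}'.format(operator))
    return ops[operator](data, operand)

print(data_operation_sketch(np.array([1., 2., 4.]), '^', 2))   # [ 1.  4. 16.]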
Code example #9
File: mask.py Project: fossabot/PySAR
def mask_file(fname, mask_file, out_file=None, inps=None):
    """ Mask input fname with mask_file
    Inputs:
        fname/mask_file - string, 
        inps_dict - dictionary including the following options:
                    subset_x/y - list of 2 ints, subset in x/y direction
                    threshold - float, threshold/minValue to generate mask
    Output:
        out_file - string
    """
    if not inps:
        inps = cmd_line_parse()

    if not out_file:
        out_file = '{}_masked{}'.format(
            os.path.splitext(fname)[0],
            os.path.splitext(fname)[1])

    # read mask_file
    mask = readfile.read(mask_file)[0]
    mask = update_mask_with_inps(mask, inps)

    # masking input file
    dsNames = readfile.get_dataset_list(fname)
    maxDigit = max([len(i) for i in dsNames])
    dsDict = {}
    for dsName in dsNames:
        data = readfile.read(fname, datasetName=dsName, print_msg=False)[0]
        if dsName not in ['coherence']:
            print('masking {d:<{w}} from {f} ...'.format(d=dsName,
                                                         w=maxDigit,
                                                         f=fname))
            data = mask_matrix(data, mask, fill_value=inps.fill_value)
        dsDict[dsName] = data
    writefile.write(dsDict, out_file=out_file, ref_file=fname)
    return out_file
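
mask_matrix() keeps pixels where the mask is set and overwrites the rest with the fill value. A minimal sketch of that behavior (assuming a NaN default fill value for float data; the real helper may differ):

import numpy as np

def mask_matrix_sketch(data, mask, fill_value=np.nan):
    """Fill pixels with mask == 0 (toy version)."""
    data = np.array(data, dtype=np.float32)
    data[mask == 0] = fill_value
    return data

print(mask_matrix_sketch([1., 2., 3.], np.array([1, 0, 1])))   # [ 1. nan  3.]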
Code example #10
def run_unwrap_error_closure(inps, dsNameIn='unwrapPhase', dsNameOut='unwrapPhase_phaseClosure', fast_mode=False):
    """Run unwrapping error correction in network of interferograms using phase closure.
    Parameters: inps : Namespace of input arguments including the following:
                    ifgram_file : string, path of ifgram stack file
                    maskFile    : string, path of mask file mark the pixels to be corrected
                dsNameIn  : string, dataset name to read in
                dsNameOut : string, dataset name to write out
                fast_mode : bool, enable fast processing mode or not
    Returns:    inps.ifgram_file : string, path of corrected ifgram stack file
    """
    print('-'*50)
    print('Unwrapping Error Correction based on Phase Closure Consistency (Fattahi, 2015) ...')
    if fast_mode:
        print('fast mode: ON, the following assumption is ignored for fast processing')
        print('\tzero phase jump constraint on ifgrams without unwrap error')
    else:
        print(('this step can be very slow; '
               'you could terminate this and try the --fast option for a quicker correction.'))
    print('-'*50)

    stack_obj = ifgramStack(inps.ifgram_file)
    stack_obj.open()
    ref_phase = stack_obj.get_reference_phase(unwDatasetName=dsNameIn, dropIfgram=False)

    num_nonzero_closure = np.zeros((stack_obj.length, stack_obj.width), dtype=np.int16)
    # split ifgram_file into blocks to save memory
    num_tri = stack_obj.get_design_matrix4ifgram_triangle(dropIfgram=True).shape[0]
    length, width = stack_obj.get_size()[1:3]
    box_list = ifginv.split_into_boxes(dataset_shape=(num_tri, length, width), chunk_size=200e6)
    num_box = len(box_list)
    for i in range(num_box):
        box = box_list[i]
        if num_box > 1:
            print('\n------- Processing Patch {} out of {} --------------'.format(i+1, num_box))

        # estimate/correct ifgram
        unw_cor, num_closure = run_unwrap_error_patch(inps.ifgram_file,
                                                      box=box,
                                                      mask_file=inps.maskFile,
                                                      ref_phase=ref_phase,
                                                      fast_mode=fast_mode,
                                                      dsNameIn=dsNameIn)
        num_nonzero_closure[box[1]:box[3], box[0]:box[2]] = num_closure

        # write ifgram
        write_hdf5_file_patch(inps.ifgram_file,
                              data=unw_cor,
                              box=box,
                              dsName=dsNameOut)

    # write number of nonzero phase closure into file
    num_file = 'numNonzeroPhaseClosure.h5'
    atr = dict(stack_obj.metadata)
    atr['FILE_TYPE'] = 'mask'
    atr['UNIT'] = '1'
    writefile.write(num_nonzero_closure, out_file=num_file, metadata=atr)
    print('writing >>> {}'.format(num_file))

    return inps.ifgram_file
Code example #11
File: geocode.py Project: wchch1010/PySAR
def run_geocode(inps):
    """geocode all input files"""
    start_time = time.time()

    # Prepare geometry for geocoding
    res_obj = resample(lookupFile=inps.lookupFile,
                       dataFile=inps.file[0],
                       SNWE=inps.SNWE,
                       laloStep=inps.laloStep,
                       processor=inps.processor)
    res_obj.open()

    if not inps.nprocs:
        inps.nprocs = multiprocessing.cpu_count()

    # resample input files one by one
    for infile in inps.file:
        print('-' * 50+'\nresampling file: {}'.format(infile))
        atr = readfile.read_attribute(infile, datasetName=inps.dset)
        outfile = auto_output_filename(infile, inps)
        if inps.updateMode and ut.run_or_skip(outfile, in_file=[infile, inps.lookupFile]) == 'skip':
            print('update mode is ON, skip geocoding.')
            continue

        # read source data and resample
        dsNames = readfile.get_dataset_list(infile, datasetName=inps.dset)
        maxDigit = max([len(i) for i in dsNames])
        dsResDict = dict()
        for dsName in dsNames:
            print('reading {d:<{w}} from {f} ...'.format(d=dsName,
                                                         w=maxDigit,
                                                         f=os.path.basename(infile)))
            data = readfile.read(infile,
                                 datasetName=dsName,
                                 print_msg=False)[0]

            if atr['FILE_TYPE'] == 'timeseries' and len(data.shape) == 2:
                data = np.reshape(data, (1, data.shape[0], data.shape[1]))
            res_data = res_obj.run_resample(src_data=data,
                                            interp_method=inps.interpMethod,
                                            fill_value=inps.fillValue,
                                            nprocs=inps.nprocs,
                                            print_msg=True)
            dsResDict[dsName] = res_data

        # update metadata
        if inps.radar2geo:
            atr = metadata_radar2geo(atr, res_obj)
        else:
            atr = metadata_geo2radar(atr, res_obj)
        #if len(dsNames) == 1 and dsName not in ['timeseries']:
        #    atr['FILE_TYPE'] = dsNames[0]
        #    infile = None

        writefile.write(dsResDict, out_file=outfile, metadata=atr, ref_file=infile)

    m, s = divmod(time.time()-start_time, 60)
    print('time used: {:02.0f} mins {:02.1f} secs.\n'.format(m, s))
    return outfile
Code example #12
def main(iargs=None):
    inps = cmd_line_parse(iargs)

    data, atr, out_file = read_data(inps)

    print('writing >>> {}'.format(out_file))
    writefile.write(data, out_file=out_file, metadata=atr)
    return out_file
Code example #13
def main(iargs=None):
    inps = cmd_line_parse(iargs)

    data, atr, out_file = read_data(inps)

    atr = clean_metadata4roipac(atr)

    writefile.write(data, out_file=out_file, metadata=atr)
    return out_file
Code example #14
def correct_local_oscilator_drift(fname, rg_dist_file=None, out_file=None):
    print('-'*50)
    print('correct Local Oscillator Drift for Envisat using an empirical model (Marinkovic and Larsen, 2013)')
    print('-'*50)
    atr = readfile.read_attribute(fname)

    # Check Sensor Type
    platform = atr['PLATFORM']
    print('platform: '+platform)
    if not platform.lower() in ['env', 'envisat']:
        print('No need to correct LOD for '+platform)
        return

    # output file name
    if not out_file:
        out_file = '{}_LODcor{}'.format(os.path.splitext(fname)[0], os.path.splitext(fname)[1])

    # Get LOD ramp rate from empirical model
    if not rg_dist_file:
        print('calculate range distance from file metadata')
        rg_dist = get_relative_range_distance(atr)
    else:
        print('read range distance from file: %s' % (rg_dist_file))
        rg_dist = readfile.read(rg_dist_file, datasetName='slantRangeDistance', print_msg=False)[0]
        rg_dist -= rg_dist[int(atr['REF_Y']), int(atr['REF_X'])]
    ramp_rate = np.array(rg_dist * 3.87e-7, np.float32)

    # Correct LOD Ramp for Input fname
    range2phase = -4*np.pi / float(atr['WAVELENGTH'])
    k = atr['FILE_TYPE']
    if k == 'timeseries':
        # read
        obj = timeseries(fname)
        obj.open()
        data = obj.read()

        # correct LOD
        diff_year = np.array(obj.yearList)
        diff_year -= diff_year[obj.refIndex]
        for i in range(data.shape[0]):
            data[i, :, :] -= ramp_rate * diff_year[i]

        # write
        obj_out = timeseries(out_file)
        obj_out.write2hdf5(data, refFile=fname)

    elif k in ['.unw']:
        data, atr = readfile.read(fname)

        dates = ptime.yyyymmdd2years(ptime.yyyymmdd(atr['DATE12'].split('-')))
        dt = dates[1] - dates[0]
        data -= ramp_rate * range2phase * dt

        writefile.write(data, out_file=out_file, metadata=atr)
    else:
        print('No need to correct for LOD for %s file' % (k))
    return out_file
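
The empirical LOD model above is a linear ramp in relative range distance (3.87e-7 per meter, per year) scaled by the time separation from the reference acquisition. A quick numeric trace of the per-epoch correction applied in the timeseries branch (toy numbers):

import numpy as np

rg_dist = np.array([0., 10000., 20000.])   # relative slant range distance [m]
ramp_rate = rg_dist * 3.87e-7              # drift ramp per year
diff_year = 2.5                            # years since the reference acquisition
print(ramp_rate * diff_year)               # [0.       0.009675 0.01935 ] subtracted per pixel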
Code example #15
File: pysarApp.py Project: wchch1010/PySAR
    def run_load_data(self, step_name):
        """Load InSAR stacks into HDF5 files in ./INPUTS folder.
        It 1) copy auxiliary files into work directory (for Unvi of Miami only)
           2) load all interferograms stack files into PYSAR/INPUTS directory.
           3) check loading result
           4) add custom metadata (optional, for HDF-EOS5 format only)
        """
        # 1) copy aux files (optional)
        self._copy_aux_file()

        # 2) loading data
        scp_args = '--template {}'.format(self.templateFile)
        if self.customTemplateFile:
            scp_args += ' {}'.format(self.customTemplateFile)
        if self.projectName:
            scp_args += ' --project {}'.format(self.projectName)
        # run
        print("load_data.py", scp_args)
        pysar.load_data.main(scp_args.split())
        os.chdir(self.workDir)

        # 3) check loading result
        load_complete, stack_file, geom_file = ut.check_loaded_dataset(self.workDir, print_msg=True)[0:3]

        # 3.1) output waterMask.h5
        water_mask_file = 'waterMask.h5'
        if 'waterMask' in readfile.get_dataset_list(geom_file):
            print('generate {} from {} for convenience'.format(water_mask_file, geom_file))
            if ut.run_or_skip(out_file=water_mask_file, in_file=geom_file) == 'run':
                water_mask, atr = readfile.read(geom_file, datasetName='waterMask')
                atr['FILE_TYPE'] = 'waterMask'
                writefile.write(water_mask, out_file=water_mask_file, metadata=atr)

        # 4) add custom metadata (optional)
        if self.customTemplateFile:
            print('updating {}, {} metadata based on custom template file: {}'.format(
                os.path.basename(stack_file),
                os.path.basename(geom_file),
                os.path.basename(self.customTemplateFile)))
            # use ut.add_attribute() instead of add_attribute.py because of
            # better control of special metadata, such as SUBSET_X/YMIN
            ut.add_attribute(stack_file, self.customTemplate)
            ut.add_attribute(geom_file, self.customTemplate)

        # 5) if not load_complete, plot and raise exception
        if not load_complete:
            # plot result if an error occurred
            self.plot_result(print_aux=False, plot=plot)

            # go back to original directory
            print('Go back to directory:', self.cwd)
            os.chdir(self.cwd)

            # raise error
            msg = 'step {}: NOT all required dataset found, exit.'.format(step_name)
            raise RuntimeError(msg)
        return
Code example #16
def filter_file(fname, filter_type, filter_par=None, fname_out=None):
    """Filter 2D matrix with selected filter
    Inputs:
        fname       : string, name/path of file to be filtered
        filter_type : string, filter type
        filter_par  : string, optional, parameter for low/high pass filter
                      for low/highpass_avg, it's kernel size in int
                      for low/highpass_gaussian, it's sigma in float
    Output:
        fname_out   : string, optional, output file name/path
    """
    # Info
    filter_type = filter_type.lower()
    atr = readfile.read_attribute(fname)
    k = atr['FILE_TYPE']
    msg = 'filtering {} file: {} using {} filter'.format(k, fname, filter_type)
    if filter_type.endswith('avg'):
        if not filter_par:
            filter_par = 5
        msg += ' with kernel size of {}'.format(filter_par)
    elif filter_type.endswith('gaussian'):
        if not filter_par:
            filter_par = 3.0
        msg += ' with sigma of {:.1f}'.format(filter_par)
    print(msg)

    # output filename
    if not fname_out:
        fname_out = '{}_{}{}'.format(
            os.path.splitext(fname)[0], filter_type,
            os.path.splitext(fname)[1])

    # filtering file
    dsNames = readfile.get_dataset_list(fname)
    maxDigit = max([len(i) for i in dsNames])
    dsDict = dict()
    for dsName in dsNames:
        msg = 'filtering {d:<{w}} from {f} '.format(d=dsName,
                                                    w=maxDigit,
                                                    f=os.path.basename(fname))
        data = readfile.read(fname, datasetName=dsName, print_msg=False)[0]
        if len(data.shape) == 3:
            num_loop = data.shape[0]
            for i in range(num_loop):
                data[i, :, :] = filter_data(data[i, :, :], filter_type,
                                            filter_par)
                sys.stdout.write('\r{} {}/{} ...'.format(msg, i + 1, num_loop))
                sys.stdout.flush()
            print('')
        else:
            data = filter_data(data, filter_type, filter_par)
        dsDict[dsName] = data
    writefile.write(dsDict, out_file=fname_out, metadata=atr, ref_file=fname)
    return fname_out
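
filter_data() is applied per 2D slice. One common construction for the two Gaussian variants named above is to build the highpass output as the data minus its lowpass version; a minimal sketch along those lines using scipy (an assumption about the implementation, not PySAR's actual helper):

import numpy as np
from scipy import ndimage

def filter_data_sketch(data, filter_type, filter_par):
    """Toy low/highpass Gaussian filtering of a 2D array."""
    low = ndimage.gaussian_filter(data, sigma=filter_par)
    if filter_type == 'lowpass_gaussian':
        return low
    if filter_type == 'highpass_gaussian':
        return data - low
    raise ValueError('unsupported filter type: {}'.format(filter_type))

data = np.random.rand(100, 100).astype(np.float32)
smooth = filter_data_sketch(data, 'lowpass_gaussian', 3.0)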
Code example #17
def water_mask2conn_comp_mask(water_mask_file,
                              ref_yx,
                              out_file='maskConnComp.h5',
                              min_num_pixel=5e4,
                              display=False):
    """Generate connected component mask file from water mask file
    Parameters: water_mask_file : str, path of water mask file
                ref_yx          : tuple of 2 int, row/col number of reference point
                out_file        : str, filename of output connected components mask
                min_num_pixel   : float, min number of pixels to be identified as conn comp
                display         : bool, display generated conn comp mask
    Returns:    out_file        : str, filename of output connected components mask
    """
    print('-' * 50)
    print('generate connected component mask from water mask file: ',
          water_mask_file)
    water_mask, atr = readfile.read(water_mask_file)

    mask_cc = np.zeros(water_mask.shape, dtype=np.int16)
    # first conn comp - reference conn comp
    num_cc = 1
    label_mask = ndimage.label(water_mask)[0]
    mask_cc += label_mask == label_mask[ref_yx[0], ref_yx[1]]

    # all the other conn comps
    water_mask ^= mask_cc == mask_cc[ref_yx[0], ref_yx[1]]
    mask_ccs = ut.get_all_conn_components(water_mask,
                                          min_num_pixel=min_num_pixel)
    if mask_ccs:
        for mask_cci in mask_ccs:
            num_cc += 1
            mask_cc += mask_cci * num_cc

    # write file
    atr['FILE_TYPE'] = 'mask'
    atr['REF_Y'] = str(ref_yx[0])
    atr['REF_X'] = str(ref_yx[1])
    writefile.write(mask_cc, out_file=out_file, metadata=atr)

    # plot
    out_img = '{}.png'.format(os.path.splitext(out_file)[0])
    fig, ax = plt.subplots(figsize=[6, 8])
    im = ax.imshow(mask_cc)
    # colorbar
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", "3%", pad="3%")
    cbar = plt.colorbar(im, cax=cax, ticks=np.arange(num_cc + 1))
    ax = pp.auto_flip_direction(atr, ax=ax, print_msg=False)
    fig.savefig(out_img, bbox_inches='tight', transparent=True, dpi=300)
    print('save figure to {}'.format(out_img))
    if display:
        plt.show()
    return out_file
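
ndimage.label() is what makes the reference-component trick work: it assigns a distinct integer label to every connected region of non-zero pixels, so indexing the label image at ref_yx isolates the component containing the reference point. A toy illustration:

import numpy as np
from scipy import ndimage

water_mask = np.array([[1, 1, 0, 0],
                       [1, 0, 0, 1],
                       [0, 0, 1, 1]])
labels, num = ndimage.label(water_mask)
print(num)       # 2 connected components
print(labels)    # [[1 1 0 0]
                 #  [1 0 0 2]
                 #  [0 0 2 2]]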
Code example #18
def main(iargs=None):
    inps = cmd_line_parse(iargs)
    if not inps.outfile:
        inps.outfile = os.path.splitext(inps.file)[0] + '.h5'

    atr = readfile.read_attribute(inps.file)
    dsNames = readfile.get_dataset_list(inps.file)
    dsDict = {}
    for dsName in dsNames:
        data = readfile.read(inps.file, datasetName=dsName)[0]
        dsDict[dsName] = data
    writefile.write(dsDict, out_file=inps.outfile, metadata=atr)
    return inps.outfile
Code example #19
File: utils1.py Project: wchch1010/PySAR
def nonzero_mask(File, out_file='maskConnComp.h5', datasetName=None):
    """Generate mask file for non-zero value of input multi-group hdf5 file"""
    atr = readfile.read_attribute(File)
    k = atr['FILE_TYPE']
    if k == 'ifgramStack':
        mask = ifgramStack(File).nonzero_mask(datasetName=datasetName)
    else:
        print('Only ifgramStack file is supported for now, input is '+k)
        return None

    atr['FILE_TYPE'] = 'mask'
    writefile.write(mask, out_file=out_file, metadata=atr)
    return out_file
Code example #20
def main(iargs=None):
    inps = cmd_line_parse(iargs)

    temp_coh = calculate_temporal_coherence(ifgram_file=inps.ifgram_file,
                                            timeseries_file=inps.timeseries_file,
                                            ifg_num_file=inps.ifg_num_file)

    # write file
    atr = readfile.read_attribute(inps.timeseries_file)
    atr['FILE_TYPE'] = 'temporalCoherence'
    atr['UNIT'] = '1'
    writefile.write(temp_coh, out_file=inps.outfile, metadata=atr)
    return inps.outfile
Code example #21
File: generate_mask.py Project: fossabot/PySAR
def create_threshold_mask(inps):
    if inps.dset:
        print('read %s %s' % (inps.file, inps.dset))
    else:
        print('read %s' % (inps.file))
    data, atr = readfile.read(inps.file, datasetName=inps.dset)
    if len(data.shape) > 2:
        print('ERROR: Only 2D dataset is supported for threshold method, input is 3D')
        sys.exit(1)
    length = int(atr['LENGTH'])
    width = int(atr['WIDTH'])

    print('create initial mask with the same size as the input file and all = 1')
    mask = np.ones((length, width), dtype=np.float32)

    if inps.nonzero:
        print('all pixels with zero value = 0')
        mask[data == 0] = 0

    # min threshold
    if inps.vmin is not None:
        mask[data < inps.vmin] = 0
        print('all pixels with value < %s = 0' % str(inps.vmin))

    # max threshold
    if inps.vmax is not None:
        mask[data > inps.vmax] = 0
        print('all pixels with value > %s = 0' % str(inps.vmax))

    # nan value
    mask[np.isnan(data)] = 0
    print('all pixels with nan value = 0')

    # subset in Y
    if inps.subset_y is not None:
        y0, y1 = sorted(inps.subset_y)
        mask[0:y0, :] = 0
        mask[y1:length, :] = 0
        print('all pixels with y OUT of [%d, %d] = 0' % (y0, y1))

    # subset in x
    if inps.subset_x is not None:
        x0, x1 = sorted(inps.subset_x)
        mask[:, 0:x0] = 0
        mask[:, x1:width] = 0
        print('all pixels with x OUT of [%d, %d] = 0' % (x0, x1))

    # Write mask file
    atr['FILE_TYPE'] = 'mask'
    writefile.write(mask, out_file=inps.outfile, metadata=atr)
    return inps.outfile
Code example #22
File: geocode.py Project: fossabot/PySAR
def run_resample(inps):
    """resample all input files"""
    start_time = time.time()

    # Prepare geometry for geocoding
    res_obj = resample(lookupFile=inps.lookupFile, dataFile=inps.file[0],
                       SNWE=inps.SNWE, laloStep=inps.laloStep)
    res_obj.get_geometry_definition()

    inps.nprocs = multiprocessing.cpu_count()

    # resample input files one by one
    for infile in inps.file:
        print('-' * 50+'\nresampling file: {}'.format(infile))
        outfile = auto_output_filename(infile, inps)
        if inps.updateMode and not ut.update_file(outfile, [infile, inps.lookupFile]):
            print('update mode is ON, skip geocoding.')
            continue

        # read source data and resample
        dsNames = readfile.get_dataset_list(infile, datasetName=inps.dset)
        maxDigit = max([len(i) for i in dsNames])
        dsResDict = dict()
        for dsName in dsNames:
            print('resampling {d:<{w}} from {f} using {n} processor cores ...'.format(
                d=dsName, w=maxDigit, f=os.path.basename(infile), n=inps.nprocs))
            data = readfile.read(infile, datasetName=dsName, print_msg=False)[0]
            res_data = resample_data(data, inps, res_obj)
            dsResDict[dsName] = res_data

        # update metadata
        atr = readfile.read_attribute(infile, datasetName=inps.dset)
        if inps.radar2geo:
            atr = metadata_radar2geo(atr, res_obj)
        else:
            atr = metadata_geo2radar(atr, res_obj)
        if len(dsNames) == 1 and dsName not in ['timeseries']:
            atr['FILE_TYPE'] = dsNames[0]
            infile = None

        writefile.write(dsResDict, out_file=outfile, metadata=atr, ref_file=infile)

    m, s = divmod(time.time()-start_time, 60)
    print('\ntime used: {:02.0f} mins {:02.1f} secs\nDone.'.format(m, s))
    return outfile
Code example #23
def prepare_roipac_dem(demFile, geocoded=False):
    print('convert input DEM to ROIPAC format')
    dem, atr = readfile.read(demFile, datasetName='height')
    if geocoded:
        ext = '.dem'
    else:
        ext = '.hgt'
    demFileOut = '{}4pyaps{}'.format(os.path.splitext(demFile)[0], ext)
    demFileOut = writefile.write(dem, out_file=demFileOut, metadata=atr)
    return demFileOut
Code example #24
File: reference_date.py Project: xuubing/PySAR
def ref_date_file(ts_file, ref_date, outfile=None):
    """Change input file reference date to a different one.
    Parameters: ts_file : str, timeseries file to be changed
                ref_date : str, date in YYYYMMDD format
                outfile  : if str, save to a different file
                           if None, modify the data value in the existing input file
    """
    print('-' * 50)
    print('change reference date for file: {}'.format(ts_file))
    atr = readfile.read_attribute(ts_file)
    if ref_date == atr['REF_DATE']:
        print('the chosen reference date is the same as the existing one.')
        if not outfile:
            print('Nothing to be done.')
            return ts_file
        else:
            print('Copy {} to {}'.format(ts_file, outfile))
            shutil.copy2(ts_file, outfile)
            return outfile
    else:
        obj = timeseries(ts_file)
        obj.open(print_msg=False)
        ref_idx = obj.dateList.index(ref_date)
        print('reading data ...')
        ts_data = readfile.read(ts_file)[0]

        ts_data -= np.tile(
            ts_data[ref_idx, :, :].reshape(1, obj.length, obj.width),
            (obj.numDate, 1, 1))

        if not outfile:
            print('open {} with r+ mode'.format(ts_file))
            with h5py.File(ts_file, 'r+') as f:
                print(
                    "update /timeseries dataset and 'REF_DATE' attribute value"
                )
                f['timeseries'][:] = ts_data
                f.attrs['REF_DATE'] = ref_date
            print('close {}'.format(ts_file))
        else:
            atr['REF_DATE'] = ref_date
            writefile.write(ts_data, outfile, metadata=atr, ref_file=ts_file)
    return outfile
Code example #25
File: sum_epochs.py Project: fossabot/PySAR
def main(argv):
    try:
        timeseries_file = argv[0]
    except:
        usage()
        sys.exit(1)

    try:
        out_file = argv[1]
    except:
        out_file = 'sum_' + timeseries_file

    # Read Timeseries
    obj = timeseries(timeseries_file)
    obj.open()
    D = obj.read().reshape(obj.numDate, -1)

    # Calculate Sum
    sumD = np.zeros(D.shape)
    for i in range(obj.numDate):
        sumD[i, :] = np.sum(np.abs(D - D[i, :]), axis=0) / obj.numDate
        sys.stdout.write('\rcalculating epochs sum {}/{} ...'.format(
            i + 1, obj.numDate))
        sys.stdout.flush()
    print('')
    del D

    # Normalize to 0 and 1
    # with high atmosphere equal to 0 and no atmosphere equal to 1
    sumD -= np.max(sumD, 0)
    sumD *= -1
    sumD /= np.max(sumD, 0)
    sumD[np.isnan(sumD)] = 1

    # Write sum epochs file
    sumD = np.reshape(sumD, (obj.numDate, obj.length, obj.width))
    atr = dict(obj.metadata)
    atr['UNIT'] = '1'
    writefile.write(sumD,
                    out_file=out_file,
                    metadata=atr,
                    ref_file=timeseries_file)
    print('Done.')
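
The normalization block is easy to misread, so here is a numeric trace of those three lines for one pixel: the date with the highest sum (most atmosphere) maps to 0 and the date with the lowest maps to 1 (toy values):

import numpy as np

sumD = np.array([2., 5., 4.])   # toy per-date sums for one pixel
sumD -= np.max(sumD, 0)         # [-3.  0. -1.]
sumD *= -1                      # [ 3.  0.  1.]   highest sum -> 0
sumD /= np.max(sumD, 0)         # [ 1.  0.  0.33] lowest sum -> 1
print(sumD)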
Code example #26
File: ifgram_inversion.py Project: fossabot/PySAR
def split_ifgram_file(ifgram_file, chunk_size=100e6):
    stack_obj = ifgramStack(ifgram_file)
    stack_obj.open(print_msg=False)
    metadata = dict(stack_obj.metadata)

    # get reference phase
    ref_phase = get_ifgram_reference_phase(ifgram_file)

    # get list of boxes
    box_list = split_into_boxes(ifgram_file,
                                chunk_size=chunk_size,
                                print_msg=True)
    num_box = len(box_list)

    # read/write each patch file
    outfile_list = []
    for i in range(num_box):
        box = box_list[i]
        outfile = '{}_{:03d}{}'.format(
            os.path.splitext(ifgram_file)[0], i + 1,
            os.path.splitext(ifgram_file)[1])

        # datasets
        print('-' * 50)
        print('reading all datasets in {} from file: {} ...'.format(
            box, ifgram_file))
        dsNames = readfile.get_dataset_list(ifgram_file)
        dsDict = {}
        dsDict['refPhase'] = ref_phase
        for dsName in dsNames:
            data = stack_obj.read(datasetName=dsName, box=box, print_msg=False)
            dsDict[dsName] = data

        # metadata
        metadata['LENGTH'] = box[3] - box[1]
        metadata['WIDTH'] = box[2] - box[0]
        writefile.write(dsDict,
                        out_file=outfile,
                        metadata=metadata,
                        ref_file=ifgram_file)
        outfile_list.append(outfile)
    return outfile_list
Code example #27
def main(argv):
    try:
        dem_file = argv[1]
        dem_error_file = argv[2]
    except:
        usage()
        sys.exit(1)

    print('Correcting the DEM')

    dem, demrsc = readfile.read(dem_file)
    dem_error = readfile.read(dem_error_file)[0]

    dem_out = dem + dem_error
    writefile.write(dem_out, out_file='DEM_w_error.dem', metadata=demrsc)

    with open('111111-222222_baseline.rsc', 'w') as date12_file:
        date12_file.write('P_BASELINE_TOP_ODR' + '     ' + '000')
    return
Code example #28
File: timeseries2velocity.py Project: whigg/PySAR
def estimate_linear_velocity(inps):
    # read time-series data
    print('reading data from file {} ...'.format(inps.timeseries_file))
    ts_data, atr = readfile.read(inps.timeseries_file)
    ts_data = ts_data[inps.dropDate, :, :].reshape(inps.numDate, -1)
    if atr['UNIT'] == 'mm':
        ts_data *= 1. / 1000.
    length, width = int(atr['LENGTH']), int(atr['WIDTH'])

    # The following is equivalent:
    # X = scipy.linalg.lstsq(A, ts_data, cond=1e-15)[0]
    # It is not used because it cannot handle NaN values in ts_data
    A = design_matrix(inps.dateList)
    X = np.dot(np.linalg.pinv(A), ts_data)
    vel = np.array(X[0, :].reshape(length, width), dtype=dataType)

    # velocity STD (Eq. (10), Fattahi and Amelung, 2016)
    ts_diff = ts_data - np.dot(A, X)
    t_diff = A[:, 0] - np.mean(A[:, 0])
    vel_std = np.sqrt(
        np.sum(ts_diff**2, axis=0) / np.sum(t_diff**2) / (inps.numDate - 2))
    vel_std = np.array(vel_std.reshape(length, width), dtype=dataType)

    # prepare attributes
    atr['FILE_TYPE'] = 'velocity'
    atr['UNIT'] = 'm/year'
    atr['START_DATE'] = inps.dateList[0]
    atr['END_DATE'] = inps.dateList[-1]
    atr['DATE12'] = '{}_{}'.format(inps.dateList[0], inps.dateList[-1])
    # config parameter
    print('add/update the following configuration metadata:\n{}'.format(
        configKeys))
    for key in configKeys:
        atr[key_prefix + key] = str(vars(inps)[key])

    # write to HDF5 file
    dsDict = dict()
    dsDict['velocity'] = vel
    dsDict['velocityStd'] = vel_std
    writefile.write(dsDict, out_file=inps.outfile, metadata=atr)
    return inps.outfile
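
The estimation above is an ordinary least-squares line fit per pixel: with a design matrix A whose first column is acquisition time, X[0] is the velocity, and the uncertainty follows Eq. (10) of Fattahi and Amelung (2016), vel_std = sqrt( sum(ts_diff**2) / ((N - 2) * sum(t_diff**2)) ). A toy one-pixel trace of the same steps (design_matrix is assumed to return [t, 1] columns with t in years):

import numpy as np

t = np.array([0.0, 0.5, 1.0, 1.5])           # acquisition times [yr]
d = np.array([0.00, 0.011, 0.019, 0.032])    # toy displacement [m]

A = np.vstack((t, np.ones_like(t))).T        # columns: [t, 1]
X = np.dot(np.linalg.pinv(A), d)             # X[0] = velocity, X[1] = offset
ts_diff = d - np.dot(A, X)
t_diff = t - t.mean()
vel_std = np.sqrt(np.sum(ts_diff**2) / np.sum(t_diff**2) / (len(t) - 2))
print(X[0], vel_std)                         # ~0.0208 m/yr and its standard deviation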
Code example #29
def main(argv):
    try:
        File = argv[0]
        atr = readfile.read_attribute(File)
    except:
        usage()
        sys.exit(1)

    try:
        outFile = argv[1]
    except:
        outFile = 'rangeDistance.h5'

    # Calculate range distance
    range_dis = ut.range_distance(atr, dimension=2)

    # Geo coord
    if 'Y_FIRST' in atr.keys():
        print('Input file is geocoded, only center range distance is calculated:')
        print(range_dis)
        length = int(atr['LENGTH'])
        width = int(atr['WIDTH'])
        range_dis_mat = np.zeros((length, width), np.float32)
        range_dis_mat[:] = range_dis
        range_dis = range_dis_mat

    print('writing >>> ' + outFile)
    atr['FILE_TYPE'] = 'mask'
    atr['UNIT'] = 'm'
    atr.pop('REF_DATE', None)
    writefile.write(range_dis, out_file=outFile, metadata=atr)
    return outFile
Code example #30
def write2hdf5_file(ifgram_file,
                    metadata,
                    ts,
                    temp_coh,
                    ts_std=None,
                    num_inv_ifg=None,
                    suffix='',
                    inps=None):
    stack_obj = ifgramStack(ifgram_file)
    stack_obj.open(print_msg=False)
    date_list = stack_obj.get_date_list(dropIfgram=True)

    # File 1 - timeseries.h5
    ts_file = '{}{}.h5'.format(suffix, os.path.splitext(inps.outfile[0])[0])
    metadata['REF_DATE'] = date_list[0]
    metadata['FILE_TYPE'] = 'timeseries'
    metadata['UNIT'] = 'm'

    print('-' * 50)
    print('converting phase to range')
    phase2range = -1 * float(stack_obj.metadata['WAVELENGTH']) / (4. * np.pi)
    ts *= phase2range

    print('calculating perpendicular baseline timeseries')
    pbase = stack_obj.get_perp_baseline_timeseries(dropIfgram=True)

    ts_obj = timeseries(ts_file)
    ts_obj.write2hdf5(data=ts, dates=date_list, bperp=pbase, metadata=metadata)

    # File 2 - temporalCoherence.h5
    out_file = '{}{}.h5'.format(suffix, os.path.splitext(inps.outfile[1])[0])
    metadata['FILE_TYPE'] = 'temporalCoherence'
    metadata['UNIT'] = '1'
    print('-' * 50)
    writefile.write(temp_coh, out_file=out_file, metadata=metadata)

    # File 3 - timeseriesDecorStd.h5
    if not np.all(ts_std == 0.):
        out_file = 'timeseriesDecorStd{}.h5'.format(suffix)
        metadata['FILE_TYPE'] = 'timeseries'
        metadata['UNIT'] = 'm'
        ts_std *= abs(phase2range)
        print('-' * 50)
        writefile.write(ts_std,
                        out_file=out_file,
                        metadata=metadata,
                        ref_file=ts_file)

    # File 4 - numInvIfgram.h5
    out_file = 'numInvIfgram{}.h5'.format(suffix)
    metadata['FILE_TYPE'] = 'mask'
    metadata['UNIT'] = '1'
    print('-' * 50)
    writefile.write(num_inv_ifg, out_file=out_file, metadata=metadata)
    return
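
phase2range above is the inverse of the range2phase factor in code example #2: one 2*pi phase cycle corresponds to half a wavelength of range change, with the sign convention that positive phase means motion toward the satellite. A quick numeric check (the ~5.6 cm C-band wavelength is an assumed value for illustration):

import numpy as np

wavelength = 0.0566                          # [m], assumed C-band value
phase2range = -1 * wavelength / (4. * np.pi)
print(phase2range * 2 * np.pi)               # -0.0283 = -wavelength / 2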