Code Example #1
File: geocode.py  Project: louisemaubant/PySAR
def main(argv):
    inps = cmdLineParse()
    if inps.template_file:
        inps = read_template2inps(inps.template_file, inps)

    inps.file = ut.get_file_list(inps.file)
    print 'number of files to geocode: '+str(len(inps.file))
    print inps.file
    if len(inps.file) > 1:
        inps.outfile = None
    print 'fill_value: '+str(inps.fill_value)

    ##Check Lookup table
    inps.lookup_file = ut.get_lookup_file(inps.lookup_file)
    if not inps.lookup_file:
        sys.exit('No lookup table found! Cannot geocode without it.')

    # check outfile and parallel option
    if inps.parallel:
        num_cores, inps.parallel, Parallel, delayed = ut.check_parallel(len(inps.file))

    #####
    if inps.parallel:
        Parallel(n_jobs=num_cores)(delayed(geocode_file)\
                                   (fname, inps.lookup_file, inps.outfile, inps) for fname in inps.file)
    else:
        for fname in inps.file:
            geocode_file(fname, inps.lookup_file, inps.outfile, inps)

    print 'Done.'
    return
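Every example on this page obtains its parallel driver from ut.check_parallel(). A minimal sketch of that helper follows, inferred from how its four return values are used in the snippets; it assumes joblib's Parallel/delayed API and is not PySAR's actual implementation.

# Sketch of a check_parallel() helper matching the
# (num_cores, run_parallel, Parallel, delayed) convention used above.
import multiprocessing

def check_parallel(file_num, print_msg=True):
    try:
        from joblib import Parallel, delayed
    except ImportError:
        # fall back to sequential processing when joblib is missing
        print 'joblib is not installed, disabling parallel processing'
        return 1, False, None, None
    num_cores = min(multiprocessing.cpu_count(), file_num)
    run_parallel = num_cores > 1
    if run_parallel and print_msg:
        print 'parallel processing using %d cores ...' % num_cores
    return num_cores, run_parallel, Parallel, delayed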
Code Example #2
File: multilook.py  Project: louisemaubant/PySAR
def main(argv):
    inps = cmdLineParse()
    inps.file = ut.get_file_list(inps.file)

    # check outfile and parallel option
    if inps.parallel:
        num_cores, inps.parallel, Parallel, delayed = ut.check_parallel(
            len(inps.file))

    # multilooking
    if len(inps.file) == 1:
        multilook_file(inps.file[0], inps.lks_y, inps.lks_x, inps.outfile)

    elif inps.parallel:
        #num_cores = min(multiprocessing.cpu_count(), len(inps.file))
        #print 'parallel processing using %d cores ...'%(num_cores)
        Parallel(n_jobs=num_cores)(
            delayed(multilook_file)(file, inps.lks_y, inps.lks_x)
            for file in inps.file)
    else:
        for File in inps.file:
            print '-------------------------------------------'
            multilook_file(File, inps.lks_y, inps.lks_x)

    print 'Done.'
    return
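For context, multilooking is block averaging: each output pixel is the mean of an lks_y by lks_x window of input pixels. A minimal sketch of the core array operation, assuming a simple mean filter; PySAR's multilook_file additionally handles file I/O and metadata updates.

# Average non-overlapping lks_y x lks_x blocks; trailing rows/columns
# that do not fill a complete block are cropped. Illustration only.
import numpy as np

def multilook_data(data, lks_y, lks_x):
    length = (data.shape[0] // lks_y) * lks_y
    width = (data.shape[1] // lks_x) * lks_x
    blocks = data[:length, :width].reshape(length // lks_y, lks_y,
                                           width // lks_x, lks_x)
    return blocks.mean(axis=(1, 3))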
Code Example #3
def main(argv):
    inps = cmdLineParse()
    inps.file = ut.get_file_list(inps.file)
    print 'number of files to geocode: ' + str(len(inps.file))
    print inps.file
    print 'interpolation method: ' + inps.method
    print 'fill_value: ' + str(inps.fill_value)

    # check outfile and parallel option
    if inps.parallel:
        num_cores, inps.parallel, Parallel, delayed = ut.check_parallel(
            len(inps.file))

    #####
    if len(inps.file) == 1:
        geocode_file_with_geo_lut(inps.file[0], inps.lookup_file, inps.method,
                                  inps.fill_value, inps.outfile)
    elif inps.parallel:
        Parallel(n_jobs=num_cores)(delayed(geocode_file_with_geo_lut)\
                                   (fname, inps.lookup_file, inps.method, inps.fill_value) for fname in inps.file)
    else:
        for fname in inps.file:
            geocode_file_with_geo_lut(fname, inps.lookup_file, inps.method,
                                      inps.fill_value)

    print 'Done.'
    return
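The lookup table maps every output geo-grid pixel to a radar-coordinate row/column. A hedged sketch of the nearest-neighbour case is below, with fill_value marking geo pixels that fall outside the radar grid; geocode_file_with_geo_lut in PySAR also supports other interpolation methods and handles file I/O, so this is an illustration only.

# Nearest-neighbour geocoding with precomputed lookup tables:
# az_lut/rg_lut hold the radar row/column for every geo-grid pixel.
import numpy as np

def geocode_nearest(data, az_lut, rg_lut, fill_value=np.nan):
    az = np.rint(az_lut).astype(np.int32)
    rg = np.rint(rg_lut).astype(np.int32)
    valid = (az >= 0) & (az < data.shape[0]) & \
            (rg >= 0) & (rg < data.shape[1])
    geo = np.full(az_lut.shape, fill_value, dtype=np.float32)
    geo[valid] = data[az[valid], rg[valid]]
    return geo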
Code Example #4
def main(argv):

    inps = cmdLineParse()
    #print '\n****************** mask *********************'
    inps.file = ut.get_file_list(inps.file)
    print 'number of files to mask: ' + str(len(inps.file))
    print inps.file

    # check outfile and parallel option
    if inps.parallel:
        num_cores, inps.parallel, Parallel, delayed = ut.check_parallel(
            len(inps.file))

    # masking
    if len(inps.file) == 1:
        mask_file(inps.file[0], inps.mask_file, inps.outfile, vars(inps))

    elif inps.parallel:
        #num_cores = min(multiprocessing.cpu_count(), len(inps.file))
        #print 'parallel processing using %d cores ...'%(num_cores)
        Parallel(n_jobs=num_cores)(
            delayed(mask_file)(File, inps.mask_file, inps_dict=vars(inps))
            for File in inps.file)
    else:
        for File in inps.file:
            print '-------------------------------------------'
            mask_file(File, inps.mask_file, inps_dict=vars(inps))

    print 'Done.'
    return
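Masking here means nulling out pixels where the mask is zero. A minimal sketch of the per-array operation; PySAR's mask_file also reads and writes files and honors the extra options passed in via vars(inps).

# Set masked-out pixels (mask == 0) to a fill value. Illustration only.
import numpy as np

def mask_data(data, mask, fill_value=np.nan):
    out = np.array(data, dtype=np.float32)
    out[mask == 0] = fill_value
    return out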
Code Example #5
File: prep_roipac.py  Project: zyh900908/PySAR
def main(argv):
    inps = cmdLineParse()
    inps.file = ut.get_file_list(inps.file, abspath=True)

    # Check input file type
    ext = os.path.splitext(inps.file[0])[1]
    if ext not in ['.unw', '.cor', '.int']:
        print 'No need to extract attributes for ROI_PAC ' + ext + ' file'
        return

    print 'number of files: ' + str(len(inps.file))

    # check outfile and parallel option
    if inps.parallel:
        num_cores, inps.parallel, Parallel, delayed = ut.check_parallel(
            len(inps.file))

    if len(inps.file) == 1:
        extract_attribute(inps.file[0])
    elif inps.parallel:
        Parallel(n_jobs=num_cores)(delayed(extract_attribute)(file)
                                   for file in inps.file)
    else:
        for File in inps.file:
            extract_attribute(File)

    return
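ROI_PAC stores metadata in a plain-text .rsc file of whitespace-separated KEY VALUE lines next to each data file. A minimal reader is sketched below for illustration; extract_attribute presumably builds on something similar before writing PySAR-specific attributes.

def read_roipac_rsc(fname):
    '''Read a ROI_PAC .rsc metadata file into a dict.'''
    atr = {}
    with open(fname) as f:
        for line in f:
            parts = line.strip().split(None, 1)  # KEY VALUE
            if len(parts) == 2:
                atr[parts[0]] = parts[1]
    return atr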
Code Example #6
def subset_file_list(fileList, inps):
    '''Subset file list'''
    # check outfile and parallel option
    if inps.parallel:
        num_cores, inps.parallel, Parallel, delayed = ut.check_parallel(
            len(fileList))

    ##### Subset files
    if len(fileList) == 1:
        subset_file(fileList[0], vars(inps), inps.outfile)

    elif inps.parallel:
        #num_cores = min(multiprocessing.cpu_count(), len(fileList))
        #print 'parallel processing using %d cores ...'%(num_cores)
        Parallel(n_jobs=num_cores)(delayed(subset_file)(file, vars(inps))
                                   for file in fileList)
    else:
        for File in fileList:
            print '----------------------------------------------------'
            subset_file(File, vars(inps))
    return
Code Example #7
File: prep_gamma.py  Project: louisemaubant/PySAR
def main(argv):
    inps = cmdLineParse()
    inps.file = ut.get_file_list(inps.file, abspath=True)
    print 'number of files: ' + str(len(inps.file))

    # check outfile and parallel option
    if inps.parallel:
        num_cores, inps.parallel, Parallel, delayed = ut.check_parallel(
            len(inps.file))

    ##### multiple datasets files
    ext = os.path.splitext(inps.file[0])[1]
    if ext in ['.unw', '.cor', '.int']:
        if len(inps.file) == 1:
            extract_attribute_interferogram(inps.file[0])
        elif inps.parallel:
            Parallel(n_jobs=num_cores)(
                delayed(extract_attribute_interferogram)(file)
                for file in inps.file)
        else:
            for File in inps.file:
                extract_attribute_interferogram(File)

    ##### Single dataset files
    elif inps.file[0].endswith('.utm.dem'):
        for File in inps.file:
            atr_file = extract_attribute_dem_geo(File)
    elif inps.file[0].endswith(('.rdc.dem', '.hgt_sim')):
        for File in inps.file:
            atr_file = extract_attribute_dem_radar(File)
    elif ext in ['.UTM_TO_RDC']:
        for File in inps.file:
            atr_file = extract_attribute_lookup_table(File)
    else:
        print 'No need to extract attributes for Gamma ' + ext + ' file'

    print 'Done.'
    return
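Gamma keeps its metadata in .par text files of 'key: value' lines. A hedged sketch of a reader for that format; the extract_attribute_* helpers above parse these files plus PySAR-specific naming conventions.

def read_gamma_par(fname):
    '''Read a Gamma .par metadata file into a dict.'''
    par = {}
    with open(fname) as f:
        for line in f:
            if ':' in line:
                key, value = line.split(':', 1)  # 'key: value'
                par[key.strip()] = value.strip()
    return par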
Code Example #8
File: remove_plane.py  Project: louisemaubant/PySAR
def main(argv):

    inps = cmdLineParse()
    inps.file = ut.get_file_list(inps.file)
    print 'number of input files: ' + str(len(inps.file))
    print inps.file

    #print '\n*************** Phase Ramp Removal ***********************'
    atr = readfile.read_attribute(inps.file[0])
    length = int(atr['FILE_LENGTH'])
    width = int(atr['WIDTH'])

    # Read mask file if given
    if inps.mask_file == 'no': inps.mask_file = None
    if inps.mask_file:
        try:
            mask_atr = readfile.read_attribute(inps.mask_file)
        except:
            print 'Cannot open mask file: ' + inps.mask_file
            inps.mask_file = None

    # Update mask for multiple surfaces
    if inps.ysub:
        # Read mask
        if inps.mask_file:
            Mask_temp = readfile.read(inps.mask_file, epoch='mask')[0]
            Mask = np.zeros((length, width), dtype=np.float32)
            Mask[Mask_temp != 0] = 1
        else:
            Mask = np.ones((length, width))

        # Update mask for multiple surfaces from inps.ysub
        mask_multiSurface = np.zeros((length, width), dtype=np.float32)
        surfNum = len(inps.ysub) / 2
        if surfNum == 1:
            mask_multiSurface = Mask
        else:
            # First surface
            mask_multiSurface[inps.ysub[0]:inps.ysub[1], :] = \
                Mask[inps.ysub[0]:inps.ysub[1], :]
            # Remaining surfaces; rows covered by two surfaces are averaged
            for i in range(1, surfNum):
                y0, y1 = inps.ysub[2 * i], inps.ysub[2 * i + 1]
                y_prev = inps.ysub[2 * i - 1]
                if y0 < y_prev:
                    mask_multiSurface[y0:y_prev, :] += Mask[y0:y_prev, :] * (i + 1)
                    mask_multiSurface[y0:y_prev, :] /= 2
                    mask_multiSurface[y_prev:y1, :] = Mask[y_prev:y1, :] * (i + 1)
                else:
                    mask_multiSurface[y0:y1, :] = Mask[y0:y1, :] * (i + 1)

        # Write updated mask for multiple surfaces into file
        outFile = 'mask_' + str(surfNum) + inps.surface_type + '.h5'
        atr['FILE_TYPE'] = 'mask'
        writefile.write(mask_multiSurface, atr, outFile)
        print 'saved mask to ' + outFile

    ############################## Removing Phase Ramp #######################################
    # check outfile and parallel option
    if inps.parallel:
        num_cores, inps.parallel, Parallel, delayed = ut.check_parallel(
            len(inps.file))

    if len(inps.file) == 1:
        rm.remove_surface(inps.file[0], inps.surface_type, inps.mask_file,
                          inps.outfile, inps.ysub)

    elif inps.parallel:
        #num_cores = min(multiprocessing.cpu_count(), len(inps.file))
        #print 'parallel processing using %d cores ...'%(num_cores)
        Parallel(n_jobs=num_cores)(delayed(rm.remove_surface)(file, inps.surface_type, inps.mask_file, ysub=inps.ysub)\
                                   for file in inps.file)

    else:
        for File in inps.file:
            print '------------------------------------------'
            rm.remove_surface(File,
                              inps.surface_type,
                              inps.mask_file,
                              ysub=inps.ysub)

    print 'Done.'
    return
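The simplest surface type is a plane z = a*x + b*y + c, fitted by least squares over the unmasked pixels and subtracted. rm.remove_surface generalizes this to the other surface types and to the per-row subsets given by ysub; the sketch below covers only the plane case, for illustration.

# Fit and remove a best-fit plane from a 2-D array by least squares.
import numpy as np

def remove_plane(data, mask=None):
    length, width = data.shape
    yy, xx = np.mgrid[0:length, 0:width]
    # design matrix columns: x, y, constant
    G = np.column_stack((xx.ravel(), yy.ravel(), np.ones(length * width)))
    d = data.ravel()
    keep = np.isfinite(d)
    if mask is not None:
        keep &= mask.ravel() != 0
    coeff = np.linalg.lstsq(G[keep], d[keep])[0]
    return data - np.dot(G, coeff).reshape(length, width)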
Code Example #9
def main(argv):
    inps = cmdLineParse()
    inps.file = ut.get_file_list(inps.file)

    atr = readfile.read_attribute(inps.file[0])
    length = int(atr['FILE_LENGTH'])
    width = int(atr['WIDTH'])

    if inps.reset:
        print '----------------------------------------------------------------------------'
        for file in inps.file:
            remove_reference_pixel(file)
        return

    ##### Check Input Coordinates
    # Read ref_y/x/lat/lon from reference/template
    # priority: Direct Input > Reference File > Template File
    if inps.template_file:
        print 'reading reference info from template: ' + inps.template_file
        inps = read_seed_template2inps(inps.template_file, inps)
    if inps.reference_file:
        print 'reading reference info from reference: ' + inps.reference_file
        inps = read_seed_reference2inps(inps.reference_file, inps)

    ## Do not use ref_lat/lon input for file in radar-coord
    #if not 'X_FIRST' in atr.keys() and (inps.ref_lat or inps.ref_lon):
    #    print 'Lat/lon reference input is disabled for file in radar coord.'
    #    inps.ref_lat = None
    #    inps.ref_lon = None

    # Convert ref_lat/lon to ref_y/x
    if inps.ref_lat and inps.ref_lon:
        if 'X_FIRST' in atr.keys():
            inps.ref_y = subset.coord_geo2radar(inps.ref_lat, atr, 'lat')
            inps.ref_x = subset.coord_geo2radar(inps.ref_lon, atr, 'lon')
        else:
            # Convert lat/lon to az/rg for radar coord file using geomap*.trans file
            inps.ref_y, inps.ref_x = ut.glob2radar(np.array(inps.ref_lat), np.array(inps.ref_lon),\
                                                   inps.trans_file, atr)[0:2]
        print 'Input reference point in lat/lon: ' + str(
            [inps.ref_lat, inps.ref_lon])
    print 'Input reference point in   y/x  : ' + str([inps.ref_y, inps.ref_x])

    # Do not use ref_y/x outside of data coverage
    if (inps.ref_y and inps.ref_x
            and not (0 <= inps.ref_y <= length and 0 <= inps.ref_x <= width)):
        inps.ref_y = None
        inps.ref_x = None
        print 'WARNING: input reference point is OUT of data coverage!'
        print 'Continue with other method to select reference point.'

    # Do not use ref_y/x in masked out area
    if inps.ref_y and inps.ref_x and inps.mask_file:
        print 'mask: ' + inps.mask_file
        mask = readfile.read(inps.mask_file)[0]
        if mask[inps.ref_y, inps.ref_x] == 0:
            inps.ref_y = None
            inps.ref_x = None
            print 'WARNING: input reference point is in masked OUT area!'
            print 'Continue with other method to select reference point.'

    ##### Select method
    if inps.ref_y and inps.ref_x:
        inps.method = 'input-coord'
    elif inps.coherence_file:
        if os.path.isfile(inps.coherence_file):
            inps.method = 'max-coherence'
        else:
            inps.coherence_file = None

    if inps.method == 'manual':
        inps.parallel = False
        print 'Parallel processing is disabled for manual seeding method.'

    ##### Seeding file by file
    # check outfile and parallel option
    if inps.parallel:
        num_cores, inps.parallel, Parallel, delayed = ut.check_parallel(
            len(inps.file))

    if len(inps.file) == 1:
        seed_file_inps(inps.file[0], inps, inps.outfile)

    elif inps.parallel:
        #num_cores = min(multiprocessing.cpu_count(), len(inps.file))
        #print 'parallel processing using %d cores ...'%(num_cores)
        Parallel(n_jobs=num_cores)(delayed(seed_file_inps)(file, inps)
                                   for file in inps.file)
    else:
        for File in inps.file:
            seed_file_inps(File, inps)

    print 'Done.'
    return
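For the 'max-coherence' method, a natural implementation picks the unmasked pixel with the highest average spatial coherence. The sketch below is an assumption about what seed_file_inps does in that mode, not PySAR's actual code.

# Pick the reference pixel as the coherence maximum outside the mask.
import numpy as np

def select_max_coherence_yx(coh, mask=None):
    if mask is not None:
        coh = np.where(mask != 0, coh, 0.)
    ref_y, ref_x = np.unravel_index(np.argmax(coh), coh.shape)
    return ref_y, ref_x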
Code Example #10
def ifgram_inversion(ifgramFile='unwrapIfgram.h5', coherenceFile='coherence.h5', meta=None):
    '''Implementation of the SBAS algorithm.
    Modified from sbas.py written by Scott Baker, 2012.

    Inputs:
        ifgramFile    - string, HDF5 file name of the interferograms
        coherenceFile - string, HDF5 file name of the coherence
        meta          - dict, including the following options:
                        weight_function
                        chunk_size - float, maximum number of data values
                                     (ifgram_num*row_num*col_num) to read per
                                     loop, to limit memory usage
    Output:
        timeseriesFile - string, HDF5 file name of the output timeseries
        tempCohFile    - string, HDF5 file name of temporal coherence
    Example:
        meta = dict()
        meta['weight_function'] = 'variance'
        meta['chunk_size'] = 0.5e9
        meta['timeseriesFile'] = 'timeseries_var.h5'
        meta['tempCohFile'] = 'temporalCoherence_var.h5'
        ifgram_inversion('unwrapIfgram.h5', 'coherence.h5', meta)
    '''
    if not meta:
        meta = vars(cmdLineParse())
    if 'tempCohFile' not in meta.keys():
        meta['tempCohFile'] = 'temporalCoherence.h5'
    meta['timeseriesStdFile'] = 'timeseriesDecorStd.h5'
    total = time.time()

    if meta['update_mode'] and not ut.update_file(meta['timeseriesFile'], ifgramFile):
        return meta['timeseriesFile'], meta['tempCohFile']

    ##### Basic Info
    # length/width
    atr = readfile.read_attribute(ifgramFile)
    length = int(atr['FILE_LENGTH'])
    width  = int(atr['WIDTH'])
    meta['length'] = length
    meta['width']  = width

    # ifgram_list
    h5ifgram = h5py.File(ifgramFile,'r')
    ifgram_list = sorted(h5ifgram['interferograms'].keys())
    #if meta['weight_function'] in ['no','uniform']:
    #    ifgram_list = ut.check_drop_ifgram(h5ifgram)
    ifgram_list = ut.check_drop_ifgram(h5ifgram)
    meta['ifgram_list'] = ifgram_list
    ifgram_num = len(ifgram_list)

    # date12_list/date8_list/tbase_diff
    date12_list = ptime.list_ifgram2date12(ifgram_list)
    m_dates = [i.split('-')[0] for i in date12_list]
    s_dates = [i.split('-')[1] for i in date12_list]
    date8_list = ptime.yyyymmdd(sorted(list(set(m_dates + s_dates))))
    date_num = len(date8_list)
    meta['date8_list'] = date8_list
    meta['date12_list'] = date12_list

    tbase_list = ptime.date_list2tbase(date8_list)[0]
    tbase_diff = np.diff(tbase_list).reshape((-1,1))
    meta['tbase_diff'] = tbase_diff

    print 'number of interferograms: %d' % (ifgram_num)
    print 'number of acquisitions  : %d' % (date_num)
    print 'number of columns: %d' % (width)
    print 'number of lines  : %d' % (length)

    ##### ref_y/x/value
    try:
        ref_x = int(atr['ref_x'])
        ref_y = int(atr['ref_y'])
        print 'reference pixel in y/x: [%d, %d]' % (ref_y, ref_x)
        ref_value = np.zeros((ifgram_num,1), np.float32)
        for j in range(ifgram_num):
            ifgram = ifgram_list[j]
            dset = h5ifgram['interferograms'][ifgram].get(ifgram)
            ref_value[j] = dset[ref_y,ref_x]
        meta['ref_y'] = ref_y
        meta['ref_x'] = ref_x
        meta['ref_value'] = ref_value
    except:
        if meta['skip_ref']:
            meta['ref_value'] = 0.0
            print 'skip checking reference pixel info - This is for SIMULATION ONLY.'
        else:
            print 'ERROR: No ref_x/y found! Cannot invert interferograms without a reference in space.'
            print 'Run seed_data.py '+ifgramFile+' --mark-attribute for a quick referencing.'
            sys.exit(1)
    h5ifgram.close()

    ##### Rank of Design matrix for weighted inversion
    A, B = ut.design_matrix(ifgramFile, date12_list)
    print '-------------------------------------------------------------------------------'
    if meta['weight_function'] in ['no','uniform']:
        print 'generic least squares inversion with min-norm phase velocity'
        print '    based on Berardino et al. (2002, IEEE-TGRS)'
        print '    OLS for pixels with fully     connected network'
        print '    SVD for pixels with partially connected network'
        if np.linalg.matrix_rank(A) < date_num-1:
            print 'WARNING: singular design matrix! Inversion result can be biased!'
            print 'continue using its SVD solution on all pixels'
    else:
        print 'weighted least squares (WLS) inversion with min-norm phase, pixelwise'
        if np.linalg.matrix_rank(A) < date_num-1:
            print 'ERROR: singular design matrix!'
            print '    Input network of interferograms is not fully connected!'
            print '    Cannot compute the weighted least squares solution.'
            print 'You could try:'
            print '    1) Add more interferograms to make the network fully connected:'
            print '       a.k.a., no multiple subsets nor network islands'
            print "    2) Use '-w no' option for non-weighted SVD solution."
            sys.exit(-1)
    print '-------------------------------------------------------------------------------'


    ##### Invert time-series phase
    ##Check parallel environment
    if meta['weight_function'] in ['no','uniform']:
        meta['parallel'] = False
    if meta['parallel']:
        num_cores, meta['parallel'], Parallel, delayed = ut.check_parallel(1000, print_msg=False)

    ##Split into chunks to reduce memory usage
    r_step = meta['chunk_size']/ifgram_num/width         #split in lines
    if meta['weight_function'] not in ['no','uniform']:  #more memory usage (coherence) for WLS
        r_step /= 2.0
        if meta['parallel']:
            r_step /= num_cores
    r_step = int(ceil_to_1(r_step))
    meta['row_step'] = r_step
    chunk_num = int((length-1)/r_step)+1

    if chunk_num > 1:
        print 'maximum chunk size: %.1E' % (meta['chunk_size'])
        print 'split %d lines into %d patches for processing' % (length, chunk_num)
        print '    with each patch up to %d lines' % (r_step)
        if meta['parallel']:
            print 'parallel processing using %d cores ...' % (min([num_cores,chunk_num]))

    ##Computing the inversion
    box_list = []
    for i in range(chunk_num):
        r0 = i*r_step
        r1 = min([length, r0+r_step])
        box = (0,r0,width,r1)
        box_list.append(box)
    box_num = len(box_list)

    if not meta['parallel']:
        timeseries = np.zeros((date_num, length, width), np.float32)
        timeseriesStd = np.zeros((date_num, length, width), np.float32)
        tempCoh = np.zeros((length, width), np.float32)
        for i in range(box_num):
            if box_num > 1:
                print '\n------- Processing Patch %d out of %d --------------' % (i+1, box_num)
            box = box_list[i]
            ts, tcoh, tsStd = ifgram_inversion_patch(ifgramFile, coherenceFile, meta, box)
            tempCoh[box[1]:box[3],box[0]:box[2]] = tcoh
            timeseries[:,box[1]:box[3],box[0]:box[2]] = ts
            timeseriesStd[:,box[1]:box[3],box[0]:box[2]] = tsStd

    else:
        ##Temp file list
        meta['ftemp_base'] = 'timeseries_temp_'
        temp_file_list = [meta['ftemp_base']+str(i)+'.h5' for i in range(chunk_num)]

        ##Computation
        Parallel(n_jobs=num_cores)(delayed(ifgram_inversion_patch)\
                                   (ifgramFile, coherenceFile, meta, box) for box in box_list)

        ##Concatenate temp files
        print 'concatenating temporary timeseries files ...'
        timeseries = np.zeros((date_num, length, width), np.float32)
        # Std layers are not stored in the temp files, so they stay at zero
        timeseriesStd = np.zeros((date_num, length, width), np.float32)
        tempCoh = np.zeros((length, width), np.float32)
        rmCmd = 'rm'
        for i in range(chunk_num):
            fname = temp_file_list[i]
            box = box_list[i]
            print 'reading '+fname
            h5temp = h5py.File(fname, 'r')
            dset = h5temp['timeseries'].get('timeseries')
            timeseries[:,box[1]:box[3],box[0]:box[2]] = dset[0:-1,:,:]
            tempCoh[box[1]:box[3],box[0]:box[2]] = dset[-1,:,:]
            h5temp.close()
            rmCmd += ' '+fname
        print rmCmd
        os.system(rmCmd)

    print 'converting phase to range'
    phase2range = -1*float(atr['WAVELENGTH'])/(4.*np.pi)
    timeseries *= phase2range
    timeseriesStd *= abs(phase2range)

    ##### Calculate time-series attributes
    print 'calculating perpendicular baseline timeseries'
    pbase, pbase_top, pbase_bottom = ut.perp_baseline_ifgram2timeseries(ifgramFile, ifgram_list)
    pbase = str(pbase.tolist()).translate(None,'[],')  # convert np.array into string separated by white space
    pbase_top = str(pbase_top.tolist()).translate(None,'[],')
    pbase_bottom = str(pbase_bottom.tolist()).translate(None,'[],')
    atr['P_BASELINE_TIMESERIES'] = pbase
    atr['P_BASELINE_TOP_TIMESERIES'] = pbase_top
    atr['P_BASELINE_BOTTOM_TIMESERIES'] = pbase_bottom
    atr['ref_date'] = date8_list[0]
    atr['FILE_TYPE'] = 'timeseries'
    atr['UNIT'] = 'm'

    ##### Output
    ## 1. Write time-series file
    meta['timeseriesFile'] = write_timeseries_hdf5_file(timeseries, date8_list, atr,\
                                                        timeseriesFile=meta['timeseriesFile'])
    if not np.all(timeseriesStd == 0.):
        meta['timeseriesStdFile'] = write_timeseries_hdf5_file(timeseriesStd, date8_list, atr,\
                                                               timeseriesFile=meta['timeseriesStdFile'])

    ## 2. Write Temporal Coherence File
    print 'writing >>> '+meta['tempCohFile']
    atr['FILE_TYPE'] = 'temporal_coherence'
    atr['UNIT'] = '1'
    meta['tempCohFile'] = writefile.write(tempCoh, atr, meta['tempCohFile'])

    print 'Time series inversion took ' + str(time.time()-total) +' secs\nDone.'
    return meta['timeseriesFile'], meta['tempCohFile']
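The design matrix A returned by ut.design_matrix relates the unknown phase at each acquisition date to the observed interferometric phases: each interferogram contributes one row with -1 at its master-date column and +1 at its slave-date column, and the first date is dropped as the temporal reference, which is why a fully connected network has rank date_num-1. A self-contained sketch, assuming 8-digit dates in date12_list; PySAR's version also returns the velocity-form matrix B.

# Build the SBAS design matrix A from date lists. Illustration only.
import numpy as np

def design_matrix_A(date8_list, date12_list):
    A = np.zeros((len(date12_list), len(date8_list)), np.float32)
    for i, date12 in enumerate(date12_list):
        m_date, s_date = date12.split('-')
        A[i, date8_list.index(m_date)] = -1
        A[i, date8_list.index(s_date)] = 1
    return A[:, 1:]   # drop the first date as temporal reference

# e.g. design_matrix_A(['20061020', '20061101', '20061213'],
#                      ['20061020-20061101', '20061101-20061213',
#                       '20061020-20061213'])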