def runSwathMosaic(self): '''mosaic subswaths ''' catalog = isceobj.Catalog.createCatalog(self._insar.procDoc.name) self.updateParamemetersFromUser() referenceTrack = self._insar.loadTrack(reference=True) secondaryTrack = self._insar.loadTrack(reference=False) for i, frameNumber in enumerate(self._insar.referenceFrames): frameDir = 'f{}_{}'.format(i + 1, frameNumber) os.chdir(frameDir) mosaicDir = 'mosaic' os.makedirs(mosaicDir, exist_ok=True) os.chdir(mosaicDir) if not ( ((self._insar.modeCombination == 21) or \ (self._insar.modeCombination == 22) or \ (self._insar.modeCombination == 31) or \ (self._insar.modeCombination == 32)) and (self._insar.endingSwath-self._insar.startingSwath+1 > 1) ): import shutil swathDir = 's{}'.format( referenceTrack.frames[i].swaths[0].swathNumber) if not os.path.isfile(self._insar.interferogram): os.symlink( os.path.join('../', swathDir, self._insar.interferogram), self._insar.interferogram) shutil.copy2( os.path.join('../', swathDir, self._insar.interferogram + '.vrt'), self._insar.interferogram + '.vrt') shutil.copy2( os.path.join('../', swathDir, self._insar.interferogram + '.xml'), self._insar.interferogram + '.xml') if not os.path.isfile(self._insar.amplitude): os.symlink( os.path.join('../', swathDir, self._insar.amplitude), self._insar.amplitude) shutil.copy2( os.path.join('../', swathDir, self._insar.amplitude + '.vrt'), self._insar.amplitude + '.vrt') shutil.copy2( os.path.join('../', swathDir, self._insar.amplitude + '.xml'), self._insar.amplitude + '.xml') # os.rename(os.path.join('../', swathDir, self._insar.interferogram), self._insar.interferogram) # os.rename(os.path.join('../', swathDir, self._insar.interferogram+'.vrt'), self._insar.interferogram+'.vrt') # os.rename(os.path.join('../', swathDir, self._insar.interferogram+'.xml'), self._insar.interferogram+'.xml') # os.rename(os.path.join('../', swathDir, self._insar.amplitude), self._insar.amplitude) # os.rename(os.path.join('../', swathDir, self._insar.amplitude+'.vrt'), self._insar.amplitude+'.vrt') # os.rename(os.path.join('../', swathDir, self._insar.amplitude+'.xml'), self._insar.amplitude+'.xml') #update frame parameters ######################################################### frame = referenceTrack.frames[i] infImg = isceobj.createImage() infImg.load(self._insar.interferogram + '.xml') #mosaic size frame.numberOfSamples = infImg.width frame.numberOfLines = infImg.length #NOTE THAT WE ARE STILL USING SINGLE LOOK PARAMETERS HERE #range parameters frame.startingRange = frame.swaths[0].startingRange frame.rangeSamplingRate = frame.swaths[0].rangeSamplingRate frame.rangePixelSize = frame.swaths[0].rangePixelSize #azimuth parameters frame.sensingStart = frame.swaths[0].sensingStart frame.prf = frame.swaths[0].prf frame.azimuthPixelSize = frame.swaths[0].azimuthPixelSize frame.azimuthLineInterval = frame.swaths[0].azimuthLineInterval #update frame parameters, secondary ######################################################### frame = secondaryTrack.frames[i] #mosaic size frame.numberOfSamples = int(frame.swaths[0].numberOfSamples / self._insar.numberRangeLooks1) frame.numberOfLines = int(frame.swaths[0].numberOfLines / self._insar.numberAzimuthLooks1) #NOTE THAT WE ARE STILL USING SINGLE LOOK PARAMETERS HERE #range parameters frame.startingRange = frame.swaths[0].startingRange frame.rangeSamplingRate = frame.swaths[0].rangeSamplingRate frame.rangePixelSize = frame.swaths[0].rangePixelSize #azimuth parameters frame.sensingStart = frame.swaths[0].sensingStart frame.prf = frame.swaths[0].prf 
frame.azimuthPixelSize = frame.swaths[0].azimuthPixelSize frame.azimuthLineInterval = frame.swaths[0].azimuthLineInterval os.chdir('../') #save parameter file self._insar.saveProduct(referenceTrack.frames[i], self._insar.referenceFrameParameter) self._insar.saveProduct(secondaryTrack.frames[i], self._insar.secondaryFrameParameter) os.chdir('../') continue #choose offsets numberOfFrames = len(referenceTrack.frames) numberOfSwaths = len(referenceTrack.frames[i].swaths) if self.swathOffsetMatching: #no need to do this as the API support 2-d list #rangeOffsets = (np.array(self._insar.swathRangeOffsetMatchingReference)).reshape(numberOfFrames, numberOfSwaths) #azimuthOffsets = (np.array(self._insar.swathAzimuthOffsetMatchingReference)).reshape(numberOfFrames, numberOfSwaths) rangeOffsets = self._insar.swathRangeOffsetMatchingReference azimuthOffsets = self._insar.swathAzimuthOffsetMatchingReference else: #rangeOffsets = (np.array(self._insar.swathRangeOffsetGeometricalReference)).reshape(numberOfFrames, numberOfSwaths) #azimuthOffsets = (np.array(self._insar.swathAzimuthOffsetGeometricalReference)).reshape(numberOfFrames, numberOfSwaths) rangeOffsets = self._insar.swathRangeOffsetGeometricalReference azimuthOffsets = self._insar.swathAzimuthOffsetGeometricalReference rangeOffsets = rangeOffsets[i] azimuthOffsets = azimuthOffsets[i] #list of input files inputInterferograms = [] inputAmplitudes = [] for j, swathNumber in enumerate( range(self._insar.startingSwath, self._insar.endingSwath + 1)): swathDir = 's{}'.format(swathNumber) inputInterferograms.append( os.path.join('../', swathDir, self._insar.interferogram)) inputAmplitudes.append( os.path.join('../', swathDir, self._insar.amplitude)) #note that frame parameters are updated after mosaicking #mosaic amplitudes swathMosaic(referenceTrack.frames[i], inputAmplitudes, self._insar.amplitude, rangeOffsets, azimuthOffsets, self._insar.numberRangeLooks1, self._insar.numberAzimuthLooks1, resamplingMethod=0) #mosaic interferograms swathMosaic(referenceTrack.frames[i], inputInterferograms, self._insar.interferogram, rangeOffsets, azimuthOffsets, self._insar.numberRangeLooks1, self._insar.numberAzimuthLooks1, updateFrame=True, resamplingMethod=1) create_xml(self._insar.amplitude, referenceTrack.frames[i].numberOfSamples, referenceTrack.frames[i].numberOfLines, 'amp') create_xml(self._insar.interferogram, referenceTrack.frames[i].numberOfSamples, referenceTrack.frames[i].numberOfLines, 'int') #update secondary frame parameters here #no matching for secondary, always use geometry rangeOffsets = self._insar.swathRangeOffsetGeometricalSecondary azimuthOffsets = self._insar.swathAzimuthOffsetGeometricalSecondary rangeOffsets = rangeOffsets[i] azimuthOffsets = azimuthOffsets[i] swathMosaicParameters(secondaryTrack.frames[i], rangeOffsets, azimuthOffsets, self._insar.numberRangeLooks1, self._insar.numberAzimuthLooks1) os.chdir('../') #save parameter file self._insar.saveProduct(referenceTrack.frames[i], self._insar.referenceFrameParameter) self._insar.saveProduct(secondaryTrack.frames[i], self._insar.secondaryFrameParameter) os.chdir('../') catalog.printToLog(logger, "runSwathMosaic") self._insar.procDoc.addAllFromCatalog(catalog)
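# --- Illustrative sketch (not part of the original code) ---
# In the single-swath branch above, the swath products are reused directly and the
# (secondary) frame size is derived from the swath size and the number of looks taken.
# A minimal, hypothetical helper showing that bookkeeping:
def multilooked_size(numberOfSamples, numberOfLines, rangeLooks, azimuthLooks):
    '''Return (width, length) of an image after taking looks (integer division).'''
    return (int(numberOfSamples / rangeLooks), int(numberOfLines / azimuthLooks))

# e.g. a 20480 x 36000 single-look swath with 1 range look and 14 azimuth looks
# becomes a 20480 x 2571 multilooked frame
assert multilooked_size(20480, 36000, 1, 14) == (20480, 2571)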
def runRgoffset(self):
    numLocationAcross = self._insar.getNumberLocationAcrossPrf()
    numLocationDown = self._insar.getNumberLocationDownPrf()
    firstAc = self._insar.getFirstSampleAcrossPrf()
    firstDown = self._insar.getFirstSampleDownPrf()

    #Fake amplitude image as a complex image
    imageAmp = self._insar.getResampAmpImage()
    objAmp = isceobj.createImage()
    objAmp.setAccessMode('read')
    objAmp.dataType = 'CFLOAT'
    objAmp.bands = 1
    objAmp.setFilename(imageAmp.filename)
    objAmp.setWidth(imageAmp.width)
    objAmp.createImage()
    widthAmp = objAmp.getWidth()
    intLength = objAmp.getLength()

    imageSim = self._insar.getSimAmpImage()
    objSim = isceobj.createImage()
    objSim.setFilename(imageSim.filename)
    objSim.setWidth(imageSim.width)
    objSim.dataType = 'FLOAT'
    objSim.setAccessMode('read')
    objSim.createImage()
    simWidth = imageSim.getWidth()
    simLength = imageSim.getLength()

    fs1 = self._insar.getMasterFrame().getInstrument().getRangeSamplingRate()  ##check
    delRg1 = CN.SPEED_OF_LIGHT / (2 * fs1)  ## if it's correct

    objAmpcor = Ampcor(name='insarapp_intsim_ampcor')
    objAmpcor.configure()
    objAmpcor.setImageDataType1('real')
    objAmpcor.setImageDataType2('complex')

    ####Adjust first and last values using window sizes
    xMargin = 2 * objAmpcor.searchWindowSizeWidth + objAmpcor.windowSizeWidth
    yMargin = 2 * objAmpcor.searchWindowSizeHeight + objAmpcor.windowSizeHeight

    if not objAmpcor.acrossGrossOffset:
        coarseAcross = 0
    else:
        coarseAcross = objAmpcor.acrossGrossOffset

    if not objAmpcor.downGrossOffset:
        coarseDown = 0
    else:
        coarseDown = objAmpcor.downGrossOffset

    offAc = max(firstAc, -coarseAcross) + xMargin + 1
    offDn = max(firstDown, -coarseDown) + yMargin + 1
    lastAc = int(min(widthAmp, simWidth - offAc) - xMargin - 1)
    lastDn = int(min(intLength, simLength - offDn) - yMargin - 1)

    if not objAmpcor.firstSampleAcross:
        objAmpcor.setFirstSampleAcross(offAc)
    if not objAmpcor.lastSampleAcross:
        objAmpcor.setLastSampleAcross(lastAc)
    if not objAmpcor.numberLocationAcross:
        objAmpcor.setNumberLocationAcross(numLocationAcross)
    if not objAmpcor.firstSampleDown:
        objAmpcor.setFirstSampleDown(offDn)
    if not objAmpcor.lastSampleDown:
        objAmpcor.setLastSampleDown(lastDn)
    if not objAmpcor.numberLocationDown:
        objAmpcor.setNumberLocationDown(numLocationDown)

    #set the tag used in the outfile. each message is preceded by this tag.
    #if the writer is not of "file" type the call has no effect
    self._stdWriter.setFileTag("rgoffset", "log")
    self._stdWriter.setFileTag("rgoffset", "err")
    self._stdWriter.setFileTag("rgoffset", "out")
    objAmpcor.setStdWriter(self._stdWriter)

    prf = self._insar.getMasterFrame().getInstrument().getPulseRepetitionFrequency()
    objAmpcor.setFirstPRF(prf)
    objAmpcor.setSecondPRF(prf)

    if not objAmpcor.acrossGrossOffset:
        objAmpcor.setAcrossGrossOffset(coarseAcross)
    if not objAmpcor.downGrossOffset:
        objAmpcor.setDownGrossOffset(coarseDown)

    objAmpcor.setFirstRangeSpacing(delRg1)
    objAmpcor.setSecondRangeSpacing(delRg1)
    objAmpcor.ampcor(objSim, objAmp)

    # Record the inputs and outputs
    from isceobj.Catalog import recordInputsAndOutputs
    recordInputsAndOutputs(self._insar.procDoc, objAmpcor, "runRgoffset_ampcor",
                           logger, "runRgoffset_ampcor")

    self._insar.setOffsetField(objAmpcor.getOffsetField())
    self._insar.setRefinedOffsetField(objAmpcor.getOffsetField())

    objAmp.finalizeImage()
    objSim.finalizeImage()
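# --- Illustrative sketch (not part of the original code) ---
# runRgoffset keeps every ampcor chip inside both images by padding the first/last
# samples with one full search area plus one matching window. The same arithmetic
# with assumed (not actual) default window sizes:
windowSizeWidth = 64         # assumed matching window width
searchWindowSizeWidth = 20   # assumed search window half width
xMargin = 2 * searchWindowSizeWidth + windowSizeWidth          # = 104

firstAc, coarseAcross = 0, 0
widthAmp, simWidth = 5000, 5000
offAc = max(firstAc, -coarseAcross) + xMargin + 1              # first usable column: 105
lastAc = int(min(widthAmp, simWidth - offAc) - xMargin - 1)    # last usable column: 4790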
def estimateOffsetField(reference, secondary, inps=None): """Estimte offset field using PyCuAmpcor. Parameters: reference - str, path of the reference SLC file secondary - str, path of the secondary SLC file inps - Namespace, input configuration Returns: objOffset - PyCuAmpcor object geomDict - dict, geometry location info of the offset field """ # update file path in xml file if inps.fixImageXml: for fname in [reference, secondary]: fname = os.path.abspath(fname) img = IML.loadImage(fname)[0] img.filename = fname img.setAccessMode('READ') img.renderHdr() if inps.fixImageVrt: for fname in [reference, secondary]: fname = os.path.abspath(fname) img = IML.loadImage(fname)[0] img.renderVRT() ###Loading the secondary image object sim = isceobj.createSlcImage() sim.load(secondary + '.xml') sim.setAccessMode('READ') sim.createImage() ###Loading the reference image object sar = isceobj.createSlcImage() sar.load(reference + '.xml') sar.setAccessMode('READ') sar.createImage() width = sar.getWidth() length = sar.getLength() # create a PyCuAmpcor instance objOffset = PyCuAmpcor() objOffset.algorithm = inps.algorithm objOffset.deviceID = inps.gpuid objOffset.nStreams = inps.nstreams #cudaStreams objOffset.derampMethod = inps.deramp print('deramp method (0 for magnitude, 1 for complex): ', objOffset.derampMethod) objOffset.referenceImageName = reference + '.vrt' objOffset.referenceImageHeight = length objOffset.referenceImageWidth = width objOffset.secondaryImageName = secondary + '.vrt' objOffset.secondaryImageHeight = length objOffset.secondaryImageWidth = width print("image length:", length) print("image width:", width) # if using gross offset, adjust the margin margin = max(inps.margin, abs(inps.azshift), abs(inps.rgshift)) # determine the number of windows down and across # that's also the size of the output offset field objOffset.numberWindowDown = inps.numWinDown if inps.numWinDown > 0 \ else (length-2*margin-2*inps.srchgt-inps.winhgt)//inps.skiphgt objOffset.numberWindowAcross = inps.numWinAcross if inps.numWinAcross > 0 \ else (width-2*margin-2*inps.srcwidth-inps.winwidth)//inps.skipwidth print('the number of windows: {} by {}'.format( objOffset.numberWindowDown, objOffset.numberWindowAcross)) # window size objOffset.windowSizeHeight = inps.winhgt objOffset.windowSizeWidth = inps.winwidth print('window size for cross-correlation: {} by {}'.format( objOffset.windowSizeHeight, objOffset.windowSizeWidth)) # search range objOffset.halfSearchRangeDown = inps.srchgt objOffset.halfSearchRangeAcross = inps.srcwidth print('initial search range: {} by {}'.format(inps.srchgt, inps.srcwidth)) # starting pixel objOffset.referenceStartPixelDownStatic = inps.startpixeldw if inps.startpixeldw != -1 \ else margin + objOffset.halfSearchRangeDown # use margin + halfSearchRange instead objOffset.referenceStartPixelAcrossStatic = inps.startpixelac if inps.startpixelac != -1 \ else margin + objOffset.halfSearchRangeAcross print('the first pixel in reference image is: ({}, {})'.format( objOffset.referenceStartPixelDownStatic, objOffset.referenceStartPixelAcrossStatic)) # skip size objOffset.skipSampleDown = inps.skiphgt objOffset.skipSampleAcross = inps.skipwidth print('search step: {} by {}'.format(inps.skiphgt, inps.skipwidth)) # oversample raw data (SLC) objOffset.rawDataOversamplingFactor = inps.raw_oversample # correlation surface objOffset.corrStatWindowSize = inps.corr_stat_win_size corr_win_size = 2 * inps.corr_srch_size * inps.raw_oversample objOffset.corrSurfaceZoomInWindow = corr_win_size 
print('correlation surface zoom-in window size:', corr_win_size) objOffset.corrSurfaceOverSamplingMethod = inps.corr_oversamplemethod objOffset.corrSurfaceOverSamplingFactor = inps.corr_oversample print('correlation surface oversampling factor:', inps.corr_oversample) # output filenames fbase = '{}{}'.format(inps.outprefix, inps.outsuffix) objOffset.offsetImageName = fbase + '.bip' objOffset.grossOffsetImageName = fbase + '_gross.bip' objOffset.snrImageName = fbase + '_snr.bip' objOffset.covImageName = fbase + '_cov.bip' print("offsetfield: ", objOffset.offsetImageName) print("gross offsetfield: ", objOffset.grossOffsetImageName) print("snr: ", objOffset.snrImageName) print("cov: ", objOffset.covImageName) # whether to include the gross offset in offsetImage objOffset.mergeGrossOffset = inps.merge_gross_offset try: offsetImageName = objOffset.offsetImageName.decode('utf8') grossOffsetImageName = objOffset.grossOffsetImageName.decode('utf8') snrImageName = objOffset.snrImageName.decode('utf8') covImageName = objOffset.covImageName.decode('utf8') except: offsetImageName = objOffset.offsetImageName grossOffsetImageName = objOffset.grossOffsetImageName snrImageName = objOffset.snrImageName covImageName = objOffset.covImageName # generic control objOffset.numberWindowDownInChunk = inps.numWinDownInChunk objOffset.numberWindowAcrossInChunk = inps.numWinAcrossInChunk objOffset.useMmap = inps.usemmap objOffset.mmapSize = inps.mmapsize # setup and check parameters objOffset.setupParams() ## Set Gross Offset ### if inps.gross == 0: # use static grossOffset print('Set constant grossOffset ({}, {})'.format( inps.azshift, inps.rgshift)) objOffset.setConstantGrossOffset(inps.azshift, inps.rgshift) else: # use varying offset print("Set varying grossOffset from file {}".format( inps.gross_offset_file)) grossOffset = np.fromfile(inps.gross_offset_file, dtype=np.int32) numberWindows = objOffset.numberWindowDown * objOffset.numberWindowAcross if grossOffset.size != 2 * numberWindows: print(( 'WARNING: The input gross offsets do not match the number of windows:' ' {} by {} in int32 type').format( objOffset.numberWindowDown, objOffset.numberWindowAcross)) return 0 grossOffset = grossOffset.reshape(numberWindows, 2) grossAzimuthOffset = grossOffset[:, 0] grossRangeOffset = grossOffset[:, 1] # enforce C-contiguous flag grossAzimuthOffset = grossAzimuthOffset.copy(order='C') grossRangeOffset = grossRangeOffset.copy(order='C') # set varying gross offset objOffset.setVaryingGrossOffset(grossAzimuthOffset, grossRangeOffset) # check objOffset.checkPixelInImageRange() # save output geometry location info geomDict = { 'x_start': objOffset.referenceStartPixelAcrossStatic + int(objOffset.windowSizeWidth / 2.), 'y_start': objOffset.referenceStartPixelDownStatic + int(objOffset.windowSizeHeight / 2.), 'x_step': objOffset.skipSampleAcross, 'y_step': objOffset.skipSampleDown, 'x_win_num': objOffset.numberWindowAcross, 'y_win_num': objOffset.numberWindowDown, } # check redo print('redo: ', inps.redo) if not inps.redo: offsetImageName = '{}{}.bip'.format(inps.outprefix, inps.outsuffix) if os.path.exists(offsetImageName): print( 'offset field file: {} exists and w/o redo, skip re-estimation.' 
.format(offsetImageName)) return objOffset, geomDict # Run the code print('Running PyCuAmpcor') objOffset.runAmpcor() print('Finished') sar.finalizeImage() sim.finalizeImage() # Finalize the results # offsetfield outImg = isceobj.createImage() outImg.setDataType('FLOAT') outImg.setFilename(offsetImageName) outImg.setBands(2) outImg.scheme = 'BIP' outImg.setWidth(objOffset.numberWindowAcross) outImg.setLength(objOffset.numberWindowDown) outImg.setAccessMode('read') outImg.renderHdr() # gross offsetfield outImg = isceobj.createImage() outImg.setDataType('FLOAT') outImg.setFilename(grossOffsetImageName) outImg.setBands(2) outImg.scheme = 'BIP' outImg.setWidth(objOffset.numberWindowAcross) outImg.setLength(objOffset.numberWindowDown) outImg.setAccessMode('read') outImg.renderHdr() # snr snrImg = isceobj.createImage() snrImg.setFilename(snrImageName) snrImg.setDataType('FLOAT') snrImg.setBands(1) snrImg.setWidth(objOffset.numberWindowAcross) snrImg.setLength(objOffset.numberWindowDown) snrImg.setAccessMode('read') snrImg.renderHdr() # cov covImg = isceobj.createImage() covImg.setFilename(covImageName) covImg.setDataType('FLOAT') covImg.setBands(3) covImg.scheme = 'BIP' covImg.setWidth(objOffset.numberWindowAcross) covImg.setLength(objOffset.numberWindowDown) covImg.setAccessMode('read') covImg.renderHdr() return objOffset, geomDict
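# --- Illustrative sketch (not part of the original code) ---
# When numWinDown/numWinAcross are not given, the number of windows is derived from the
# image size, the margin, the half search range, the window size and the skip, as in the
# expressions above. A hypothetical worked example of that formula:
length, width = 10000, 5000                        # assumed SLC size
margin, srchgt, winhgt, skiphgt = 50, 20, 64, 128
srcwidth, winwidth, skipwidth = 20, 64, 64

numberWindowDown = (length - 2 * margin - 2 * srchgt - winhgt) // skiphgt          # 76
numberWindowAcross = (width - 2 * margin - 2 * srcwidth - winwidth) // skipwidth   # 74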
def genFinalMask(mName, width):
    print('\nReading and combining masks...')
    with open('tsnbMaskImg.bil', 'rb') as fid:
        arr = np.fromfile(fid, dtype='float32').reshape(-1, width)
    tsnbHist = plt.hist(arr.flatten(), bins=256)[0][1:]  # histogram of values 1-256 in mask
    plt.close()
    tVals = sum(tsnbHist)
    for i in range(1, 256):
        if ((sum(tsnbHist[255 - i:]) / tVals) > 0.8):  # Looking to eliminate first sigma of values (just a guess)
            TSNBthresh = 254 - i  # set threshold
            break
    print('TSNB threshold cutoff set as:', TSNBthresh)
    arr2 = (arr > TSNBthresh).astype(int)

    with open('tvwbMaskImg.bil', 'rb') as fid:
        tarr = np.transpose(np.fromfile(fid, dtype='float32').reshape(-1, len(arr)))
    tvwbHist = plt.hist(tarr.flatten(), bins=100)[0][1:]  # histogram of values 1-100 in mask
    plt.close()
    tVals = sum(tvwbHist)
    for i in range(1, 100):
        if ((sum(tvwbHist[99 - i:]) / tVals) > 0.8):
            TVWBthresh = 98 - i
            break
    print('TVWB threshold cutoff set as:', TVWBthresh)
    tarr2 = (tarr > TVWBthresh).astype(int)

    fArr = arr2 | tarr2  # Combine masks

    print('\nPrinting combined and separate masks to', mName, '...')
    # Mask channels as follows:
    #   CH 1: Final mask used (combined and thresholded TSNB/TVWB masks)
    #   CH 2: TSNB mask pre-threshold
    #   CH 3: TSNB mask thresholded
    #   CH 4: TVWB mask pre-threshold
    #   CH 5: TVWB mask thresholded
    fMaskImg = isceobj.createImage()
    fMaskImg.bands = 5
    fMaskImg.scheme = 'BIL'
    fMaskImg.dataType = 'FLOAT'
    fMaskImg.setWidth(len(fArr[0]))
    fMaskImg.setLength(len(fArr))
    fMaskImg.setFilename(mName)
    with open(mName, 'wb') as fid:
        for i in range(len(fArr)):
            fArr[i].astype(np.float32).tofile(fid)   # CH 1
            arr[i].astype(np.float32).tofile(fid)    # CH 2
            arr2[i].astype(np.float32).tofile(fid)   # CH 3
            tarr[i].astype(np.float32).tofile(fid)   # CH 4
            tarr2[i].astype(np.float32).tofile(fid)  # CH 5
    fMaskImg.renderHdr()
    print('Finished.')

    # finalRFImasks.bil will contain all masks, so no need for these anymore...
    os.remove('tsnbMaskImg.bil')
    os.remove('tsnbMaskImg.bil.xml')
    os.remove('tvwbMaskImg.bil')
    os.remove('tvwbMaskImg.bil.xml')

    return fArr, np.sum(arr2), np.sum(tarr2)
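# --- Illustrative sketch (not part of the original code) ---
# The loops above walk the histogram from the top bin down until more than 80% of the
# non-zero mask values are captured. The same selection, using np.histogram and a
# cumulative sum instead of plt.hist (hypothetical helper; assumes non-zero values exist):
import numpy as np

def histogram_threshold(values, nbins=256, fraction=0.8):
    '''Cutoff such that more than `fraction` of the non-zero-bin samples lie above it.'''
    hist, edges = np.histogram(values, bins=nbins)
    hist = hist[1:]                                   # ignore the zero bin
    cumFromTop = np.cumsum(hist[::-1]) / hist.sum()   # fraction captured, top bin downwards
    nTop = int(np.argmax(cumFromTop > fraction)) + 1  # bins needed from the top
    return edges[-(nTop + 1)]                         # lower edge of the lowest kept bin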
def estimateOffsetField(burst, simfile, offset=0.0):
    '''
    Estimate offset field between burst and simamp.
    '''

    sim = isceobj.createImage()
    sim.load(simfile + '.xml')
    sim.setAccessMode('READ')
    sim.createImage()

    sar = isceobj.createSlcImage()
    sar.load(burst.getImage().filename + '.xml')
    sar.setAccessMode('READ')
    sar.createImage()

    width = sar.getWidth()
    length = sar.getLength()

    objOffset = Ampcor(name='reference_offset')
    objOffset.configure()
    objOffset.setWindowSizeWidth(128)
    objOffset.setWindowSizeHeight(128)
    objOffset.setSearchWindowSizeWidth(16)
    objOffset.setSearchWindowSizeHeight(16)
    margin = 2 * objOffset.searchWindowSizeWidth + objOffset.windowSizeWidth

    nAcross = 40
    nDown = 40

    if not objOffset.firstSampleAcross:
        objOffset.setFirstSampleAcross(margin + 101)
    if not objOffset.lastSampleAcross:
        objOffset.setLastSampleAcross(width - margin - 101)
    if not objOffset.firstSampleDown:
        objOffset.setFirstSampleDown(margin + offset + 101)
    if not objOffset.lastSampleDown:
        objOffset.setLastSampleDown(length - margin - 101)
    if not objOffset.acrossGrossOffset:
        objOffset.setAcrossGrossOffset(0.0)
    if not objOffset.downGrossOffset:
        objOffset.setDownGrossOffset(offset)
    if not objOffset.numberLocationAcross:
        objOffset.setNumberLocationAcross(nAcross)
    if not objOffset.numberLocationDown:
        objOffset.setNumberLocationDown(nDown)

    objOffset.setFirstPRF(1.0)
    objOffset.setSecondPRF(1.0)
    objOffset.setImageDataType1('complex')
    objOffset.setImageDataType2('real')

    objOffset.ampcor(sar, sim)

    sar.finalizeImage()
    sim.finalizeImage()

    result = objOffset.getOffsetField()
    return result
if not os.path.isdir(params['intdir']):
    os.system('mkdir ' + params['intdir'])

msk_filt = cv2.filter2D(gamma0, -1, win)

pair = params['pairs'][0]
for pair in params['pairs']:  # loop through each ifg and save to
    if not os.path.isdir(params['intdir'] + '/' + pair):
        os.system('mkdir ' + params['intdir'] + '/' + pair)
    if not os.path.isfile(params['intdir'] + '/' + pair + '/fine_lk.int'):
        print('working on ' + pair)

        # Open a file to save stuff to
        out = isceobj.createImage()  # Copy the interferogram image from before
        out.dataType = 'CFLOAT'
        out.filename = params['intdir'] + '/' + pair + '/fine_lk.int'
        out.width = params['nxl']
        out.length = params['nyl']
        out.dump(out.filename + '.xml')  # Write out xml
        fid = open(out.filename, "ab+")

        # open a cor file too
        outc = isceobj.createImage()  # Copy the interferogram image from before
        outc.dataType = 'FLOAT'
        outc.filename = params['intdir'] + '/' + pair + '/cor_lk.r4'
        outc.width = params['nxl']
        outc.length = params['nyl']
        outc.dump(outc.filename + '.xml')  # Write out xml
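# --- Illustrative sketch (not part of the original code) ---
# Pattern used above: describe a flat binary raster with an isceobj image object, dump
# the XML header, then stream the pixel values into the file. A minimal self-contained
# example with hypothetical names (demo.int, 100 x 50 CFLOAT):
import numpy as np
import isceobj

data = np.zeros((50, 100), dtype=np.complex64)   # assumed interferogram values

img = isceobj.createImage()
img.dataType = 'CFLOAT'
img.filename = 'demo.int'
img.width = 100
img.length = 50
img.dump(img.filename + '.xml')                  # write the ISCE XML header
data.tofile(img.filename)                        # write the pixel data itself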
def main(iargs=None):
    '''
    '''
    inps = cmdLineParse(iargs)

    # #convert 1-d list to 2-d list
    # if len(inps.masked_areas) % 4 != 0:
    #     raise Exception('each maksed area must have four elements')
    # else:
    #     masked_areas = []
    #     n = np.int32(len(inps.masked_areas)/4)
    #     for i in range(n):
    #         masked_areas.append([inps.masked_areas[i*4+0], inps.masked_areas[i*4+1], inps.masked_areas[i*4+2], inps.masked_areas[i*4+3]])
    #     inps.masked_areas = masked_areas

    ###################################
    #SET PARAMETERS HERE
    #THESE SHOULD BE GOOD ENOUGH, NO NEED TO SET IN setup(self)
    corThresholdAdj = 0.85
    ###################################

    print('computing ionosphere')

    #get files
    lowerUnwfile = inps.lower
    upperUnwfile = inps.upper
    corfile = inps.coherence

    #use image size from lower unwrapped interferogram
    img = isceobj.createImage()
    img.load(lowerUnwfile + '.xml')
    width = img.width
    length = img.length

    lowerUnw = (np.fromfile(lowerUnwfile, dtype=np.float32).reshape(length * 2, width))[1:length * 2:2, :]
    upperUnw = (np.fromfile(upperUnwfile, dtype=np.float32).reshape(length * 2, width))[1:length * 2:2, :]
    #lowerAmp = (np.fromfile(lowerUnwfile, dtype=np.float32).reshape(length*2, width))[0:length*2:2, :]
    #upperAmp = (np.fromfile(upperUnwfile, dtype=np.float32).reshape(length*2, width))[0:length*2:2, :]
    cor = (np.fromfile(corfile, dtype=np.float32).reshape(length * 2, width))[1:length * 2:2, :]
    #amp = np.sqrt(lowerAmp**2+upperAmp**2)
    amp = (np.fromfile(corfile, dtype=np.float32).reshape(length * 2, width))[0:length * 2:2, :]

    #masked out user-specified areas
    if inps.masked_areas != None:
        maskedAreas = reformatMaskedAreas(inps.masked_areas, length, width)
        for area in maskedAreas:
            lowerUnw[area[0]:area[1], area[2]:area[3]] = 0
            upperUnw[area[0]:area[1], area[2]:area[3]] = 0
            cor[area[0]:area[1], area[2]:area[3]] = 0

    ionParamObj = ionParam()
    ionParamObj.configure()

    #compute ionosphere
    fl = SPEED_OF_LIGHT / ionParamObj.radarWavelengthLower
    fu = SPEED_OF_LIGHT / ionParamObj.radarWavelengthUpper
    adjFlag = 1
    ionos = computeIonosphere(lowerUnw, upperUnw, cor, fl, fu, adjFlag, corThresholdAdj, 0)

    #dump ionosphere
    outFilename = inps.ionosphere
    os.makedirs(os.path.dirname(inps.ionosphere), exist_ok=True)
    ion = np.zeros((length * 2, width), dtype=np.float32)
    ion[0:length * 2:2, :] = amp
    ion[1:length * 2:2, :] = ionos
    ion.astype(np.float32).tofile(outFilename)
    img.filename = outFilename
    img.extraFilename = outFilename + '.vrt'
    img.renderHdr()

    #dump coherence
    outFilename = inps.coherence_output
    os.makedirs(os.path.dirname(inps.coherence_output), exist_ok=True)
    ion[1:length * 2:2, :] = cor
    ion.astype(np.float32).tofile(outFilename)
    img.filename = outFilename
    img.extraFilename = outFilename + '.vrt'
    img.renderHdr()
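# --- Illustrative sketch (not part of the original code) ---
# computeIonosphere combines the lower- and upper-band unwrapped phases with the
# standard range split-spectrum relation; the coherence-based adjustment controlled by
# adjFlag/corThresholdAdj is not shown here. The dispersive phase at the full-band
# center frequency f0 is
#     phi_ion(f0) = fl*fu / (f0*(fu**2 - fl**2)) * (fu*phi_lower - fl*phi_upper)
# A minimal numpy version of just that combination (hypothetical helper):
import numpy as np

def split_spectrum_ionosphere(phi_lower, phi_upper, fl, fu, f0):
    '''Dispersive (ionospheric) phase at f0 from sub-band unwrapped phases.'''
    return fl * fu / (f0 * (fu**2 - fl**2)) * (fu * phi_lower - fl * phi_upper)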
def main(iargs=None): '''Compute baseline. ''' inps = cmdLineParse(iargs) from isceobj.Planet.Planet import Planet from isceobj.Util.Poly2D import Poly2D import numpy as np import shelve baselineDir = os.path.dirname(inps.baselineFile) if baselineDir != '': os.makedirs(baselineDir, exist_ok=True) with shelve.open(os.path.join(inps.reference, 'data'), flag='r') as mdb: reference = mdb['frame'] with shelve.open(os.path.join(inps.secondary, 'data'), flag='r') as mdb: secondary = mdb['frame'] # check if the reference and secondary shelf are the same, i.e. it is baseline grid for the reference reference_SensingStart = reference.getSensingStart() secondary_SensingStart = secondary.getSensingStart() if reference_SensingStart == secondary_SensingStart: referenceBaseline = True else: referenceBaseline = False refElp = Planet(pname='Earth').ellipsoid dr = reference.instrument.rangePixelSize dt = 1. / reference.PRF #reference.azimuthTimeInterval mStartingRange = reference.startingRange #min([x.startingRange for x in referenceswaths]) mFarRange = reference.startingRange + dr * ( reference.numberOfSamples - 1 ) #max([x.farRange for x in referenceswaths]) mSensingStart = reference.sensingStart # min([x.sensingStart for x in referenceswaths]) mSensingStop = reference.sensingStop #max([x.sensingStop for x in referenceswaths]) mOrb = getMergedOrbit(reference) nPixels = int(np.round((mFarRange - mStartingRange) / dr)) + 1 nLines = int(np.round( (mSensingStop - mSensingStart).total_seconds() / dt)) + 1 sOrb = getMergedOrbit(secondary) rangeLimits = mFarRange - mStartingRange # To make sure that we have at least 30 points nRange = int(np.max([30, int(np.ceil(rangeLimits / 7000.))])) slantRange = mStartingRange + np.arange(nRange) * rangeLimits / (nRange - 1.0) azimuthLimits = (mSensingStop - mSensingStart).total_seconds() nAzimuth = int(np.max([30, int(np.ceil(azimuthLimits))])) azimuthTime = [ mSensingStart + datetime.timedelta(seconds=x * azimuthLimits / (nAzimuth - 1.0)) for x in range(nAzimuth) ] doppler = Poly2D() doppler.initPoly(azimuthOrder=0, rangeOrder=0, coeffs=[[0.]]) Bperp = np.zeros((nAzimuth, nRange), dtype=np.float32) Bpar = np.zeros((nAzimuth, nRange), dtype=np.float32) fid = open(inps.baselineFile, 'wb') print('Baseline file {0} dims: {1}L x {2}P'.format(inps.baselineFile, nAzimuth, nRange)) if referenceBaseline: Bperp = np.zeros((nAzimuth, nRange), dtype=np.float32) Bperp.tofile(fid) else: for ii, taz in enumerate(azimuthTime): referenceSV = mOrb.interpolate(taz, method='hermite') mxyz = np.array(referenceSV.getPosition()) mvel = np.array(referenceSV.getVelocity()) for jj, rng in enumerate(slantRange): target = mOrb.rdr2geo(taz, rng) targxyz = np.array( refElp.LLH(target[0], target[1], target[2]).ecef().tolist()) slvTime, slvrng = sOrb.geo2rdr(target, doppler=doppler, wvl=0) secondarySV = sOrb.interpolateOrbit(slvTime, method='hermite') sxyz = np.array(secondarySV.getPosition()) aa = np.linalg.norm(sxyz - mxyz) costheta = (rng * rng + aa * aa - slvrng * slvrng) / (2. 
* rng * aa) Bpar[ii, jj] = aa * costheta perp = aa * np.sqrt(1 - costheta * costheta) direction = np.sign( np.dot(np.cross(targxyz - mxyz, sxyz - mxyz), mvel)) Bperp[ii, jj] = direction * perp Bperp.tofile(fid) fid.close() ####Write XML img = isceobj.createImage() img.setFilename(inps.baselineFile) img.bands = 1 img.scheme = 'BIP' img.dataType = 'FLOAT' img.setWidth(nRange) img.setAccessMode('READ') img.setLength(nAzimuth) img.renderHdr() img.renderVRT() ###Create oversampled VRT file cmd = 'gdal_translate -of VRT -ot Float32 -r bilinear -outsize {xsize} {ysize} {infile}.vrt {infile}.full.vrt'.format( xsize=nPixels, ysize=nLines, infile=inps.baselineFile) status = os.system(cmd) if status: raise Exception('cmd: {0} Failed'.format(cmd))
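# --- Illustrative sketch (not part of the original code) ---
# For each grid point, the loop above decomposes the reference-to-secondary baseline
# into parallel and perpendicular components with the law of cosines, using the
# reference slant range (rng), the secondary slant range (slvrng) and the baseline
# length. A standalone example with made-up numbers:
import numpy as np

rng, slvrng, baseline = 850000.0, 850120.0, 200.0            # assumed values, in meters
costheta = (rng**2 + baseline**2 - slvrng**2) / (2.0 * rng * baseline)
Bpar = baseline * costheta                                   # parallel baseline (~-120 m)
Bperp = baseline * np.sqrt(1.0 - costheta**2)                # unsigned perpendicular (~160 m)
# the sign of Bperp comes from the cross/dot product with the velocity, as in the loop above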
def runSlcOffset(self): '''estimate SLC offsets ''' catalog = isceobj.Catalog.createCatalog(self._insar.procDoc.name) self.updateParamemetersFromUser() masterTrack = self._insar.loadTrack(master=True) slaveTrack = self._insar.loadTrack(master=False) demFile = os.path.abspath(self._insar.dem) wbdFile = os.path.abspath(self._insar.wbd) for i, frameNumber in enumerate(self._insar.masterFrames): frameDir = 'f{}_{}'.format(i+1, frameNumber) os.chdir(frameDir) for j, swathNumber in enumerate(range(self._insar.startingSwath, self._insar.endingSwath + 1)): swathDir = 's{}'.format(swathNumber) os.chdir(swathDir) print('estimating offset frame {}, swath {}'.format(frameNumber, swathNumber)) masterSwath = masterTrack.frames[i].swaths[j] slaveSwath = slaveTrack.frames[i].swaths[j] ########################################## #1. set number of matching points ########################################## #set initinial numbers if (self._insar.modeCombination == 21) or (self._insar.modeCombination == 22): numberOfOffsetsRange = 10 numberOfOffsetsAzimuth = 40 else: numberOfOffsetsRange = 20 numberOfOffsetsAzimuth = 20 #change the initial numbers using water body if self.useWbdForNumberOffsets and (self._insar.wbd != None): numberRangeLooks=100 numberAzimuthLooks=100 #compute land ratio using topo module topo(masterSwath, masterTrack, demFile, 'lat.rdr', 'lon.rdr', 'hgt.rdr', losFile='los.rdr', incFile=None, mskFile=None, numberRangeLooks=numberRangeLooks, numberAzimuthLooks=numberAzimuthLooks, multilookTimeOffset=False) waterBodyRadar('lat.rdr', 'lon.rdr', wbdFile, 'wbd.rdr') wbdImg = isceobj.createImage() wbdImg.load('wbd.rdr.xml') width = wbdImg.width length = wbdImg.length wbd = np.fromfile('wbd.rdr', dtype=np.byte).reshape(length, width) landRatio = np.sum(wbd==0) / (length*width) if (landRatio <= 0.00125): print('\n\nWARNING: land too small for estimating slc offsets at frame {}, swath {}'.format(frameNumber, swathNumber)) print('proceed to use geometric offsets for forming interferogram') print('but please consider not using this swath\n\n') catalog.addItem('warning message', 'land too small for estimating slc offsets at frame {}, swath {}, use geometric offsets'.format(frameNumber, swathNumber), 'runSlcOffset') #compute geomtricla offsets geo2rdr(slaveSwath, slaveTrack, 'lat.rdr', 'lon.rdr', 'hgt.rdr', 'rg.rdr', 'az.rdr', numberRangeLooks=numberRangeLooks, numberAzimuthLooks=numberAzimuthLooks, multilookTimeOffset=False) reformatGeometricalOffset('rg.rdr', 'az.rdr', 'cull.off', rangeStep=numberRangeLooks, azimuthStep=numberAzimuthLooks, maximumNumberOfOffsets=2000) os.remove('lat.rdr') os.remove('lat.rdr.vrt') os.remove('lat.rdr.xml') os.remove('lon.rdr') os.remove('lon.rdr.vrt') os.remove('lon.rdr.xml') os.remove('hgt.rdr') os.remove('hgt.rdr.vrt') os.remove('hgt.rdr.xml') os.remove('los.rdr') os.remove('los.rdr.vrt') os.remove('los.rdr.xml') os.remove('wbd.rdr') os.remove('wbd.rdr.vrt') os.remove('wbd.rdr.xml') os.remove('rg.rdr') os.remove('rg.rdr.vrt') os.remove('rg.rdr.xml') os.remove('az.rdr') os.remove('az.rdr.vrt') os.remove('az.rdr.xml') os.chdir('../') continue os.remove('lat.rdr') os.remove('lat.rdr.vrt') os.remove('lat.rdr.xml') os.remove('lon.rdr') os.remove('lon.rdr.vrt') os.remove('lon.rdr.xml') os.remove('hgt.rdr') os.remove('hgt.rdr.vrt') os.remove('hgt.rdr.xml') os.remove('los.rdr') os.remove('los.rdr.vrt') os.remove('los.rdr.xml') os.remove('wbd.rdr') os.remove('wbd.rdr.vrt') os.remove('wbd.rdr.xml') #put the results on a grid with a specified interval interval = 0.2 axisRatio = 
int(np.sqrt(landRatio)/interval)*interval + interval if axisRatio > 1: axisRatio = 1 numberOfOffsetsRange = int(numberOfOffsetsRange/axisRatio) numberOfOffsetsAzimuth = int(numberOfOffsetsAzimuth/axisRatio) else: catalog.addItem('warning message', 'no water mask used to determine number of matching points. frame {} swath {}'.format(frameNumber, swathNumber), 'runSlcOffset') #user's settings if self.numberRangeOffsets != None: numberOfOffsetsRange = self.numberRangeOffsets[i][j] if self.numberAzimuthOffsets != None: numberOfOffsetsAzimuth = self.numberAzimuthOffsets[i][j] catalog.addItem('number of offsets range frame {} swath {}'.format(frameNumber, swathNumber), numberOfOffsetsRange, 'runSlcOffset') catalog.addItem('number of offsets azimuth frame {} swath {}'.format(frameNumber, swathNumber), numberOfOffsetsAzimuth, 'runSlcOffset') ########################################## #2. match using ampcor ########################################## ampcor = Ampcor(name='insarapp_slcs_ampcor') ampcor.configure() mSLC = isceobj.createSlcImage() mSLC.load(self._insar.masterSlc+'.xml') mSLC.setAccessMode('read') mSLC.createImage() sSLC = isceobj.createSlcImage() sSLC.load(self._insar.slaveSlc+'.xml') sSLC.setAccessMode('read') sSLC.createImage() ampcor.setImageDataType1('complex') ampcor.setImageDataType2('complex') ampcor.setMasterSlcImage(mSLC) ampcor.setSlaveSlcImage(sSLC) #MATCH REGION #compute an offset at image center to use rgoff, azoff = computeOffsetFromOrbit(masterSwath, masterTrack, slaveSwath, slaveTrack, masterSwath.numberOfSamples * 0.5, masterSwath.numberOfLines * 0.5) #it seems that we cannot use 0, haven't look into the problem if rgoff == 0: rgoff = 1 if azoff == 0: azoff = 1 firstSample = 1 if rgoff < 0: firstSample = int(35 - rgoff) firstLine = 1 if azoff < 0: firstLine = int(35 - azoff) ampcor.setAcrossGrossOffset(rgoff) ampcor.setDownGrossOffset(azoff) ampcor.setFirstSampleAcross(firstSample) ampcor.setLastSampleAcross(mSLC.width) ampcor.setNumberLocationAcross(numberOfOffsetsRange) ampcor.setFirstSampleDown(firstLine) ampcor.setLastSampleDown(mSLC.length) ampcor.setNumberLocationDown(numberOfOffsetsAzimuth) #MATCH PARAMETERS #full-aperture mode if (self._insar.modeCombination == 21) or \ (self._insar.modeCombination == 22) or \ (self._insar.modeCombination == 31) or \ (self._insar.modeCombination == 32): ampcor.setWindowSizeWidth(64) ampcor.setWindowSizeHeight(512) #note this is the half width/length of search area, number of resulting correlation samples: 32*2+1 ampcor.setSearchWindowSizeWidth(32) ampcor.setSearchWindowSizeHeight(32) #triggering full-aperture mode matching ampcor.setWinsizeFilt(8) ampcor.setOversamplingFactorFilt(64) #regular mode else: ampcor.setWindowSizeWidth(64) ampcor.setWindowSizeHeight(64) ampcor.setSearchWindowSizeWidth(32) ampcor.setSearchWindowSizeHeight(32) #REST OF THE STUFF ampcor.setAcrossLooks(1) ampcor.setDownLooks(1) ampcor.setOversamplingFactor(64) ampcor.setZoomWindowSize(16) #1. The following not set #Matching Scale for Sample/Line Directions (-) = 1. 1. #should add the following in Ampcor.py? #if not set, in this case, Ampcor.py'value is also 1. 1. #ampcor.setScaleFactorX(1.) #ampcor.setScaleFactorY(1.) #MATCH THRESHOLDS AND DEBUG DATA #2. The following not set #in roi_pac the value is set to 0 1 #in isce the value is set to 0.001 1000.0 #SNR and Covariance Thresholds (-) = {s1} {s2} #should add the following in Ampcor? 
#THIS SHOULD BE THE ONLY THING THAT IS DIFFERENT FROM THAT OF ROI_PAC #ampcor.setThresholdSNR(0) #ampcor.setThresholdCov(1) ampcor.setDebugFlag(False) ampcor.setDisplayFlag(False) #in summary, only two things not set which are indicated by 'The following not set' above. #run ampcor ampcor.ampcor() offsets = ampcor.getOffsetField() ampcorOffsetFile = 'ampcor.off' writeOffset(offsets, ampcorOffsetFile) #finalize image, and re-create it #otherwise the file pointer is still at the end of the image mSLC.finalizeImage() sSLC.finalizeImage() ########################################## #3. cull offsets ########################################## refinedOffsets = cullOffsets(offsets) if refinedOffsets == None: print('******************************************************************') print('WARNING: There are not enough offsets left, so we are forced to') print(' use offset without culling. frame {}, swath {}'.format(frameNumber, swathNumber)) print('******************************************************************') catalog.addItem('warning message', 'not enough offsets left, use offset without culling. frame {} swath {}'.format(frameNumber, swathNumber), 'runSlcOffset') refinedOffsets = offsets cullOffsetFile = 'cull.off' writeOffset(refinedOffsets, cullOffsetFile) os.chdir('../') os.chdir('../') catalog.printToLog(logger, "runSlcOffset") self._insar.procDoc.addAllFromCatalog(catalog)
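# --- Illustrative sketch (not part of the original code) ---
# With a water-body file available, the nominal number of matching locations is scaled
# up by the square root of the land fraction, quantized upward to a 0.2 grid (the
# axisRatio logic split across the listing above). A hypothetical worked example:
import numpy as np

landRatio = 0.41                      # assumed fraction of land pixels
interval = 0.2
axisRatio = int(np.sqrt(landRatio) / interval) * interval + interval   # ~0.8
if axisRatio > 1:
    axisRatio = 1
numberOfOffsetsRange = int(20 / axisRatio)       # the nominal 20 locations are increased accordingly
numberOfOffsetsAzimuth = int(20 / axisRatio)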
def geo2RdrGPU(slaveTrack, numberRangeLooks, numberAzimuthLooks, latFile, lonFile, hgtFile, rangeOffsetFile, azimuthOffsetFile): ''' currently we cannot set left/right looking. works for right looking, but left looking probably not supported. ''' import datetime from zerodop.GPUgeo2rdr.GPUgeo2rdr import PyGeo2rdr from isceobj.Planet.Planet import Planet from iscesys import DateTimeUtil as DTU latImage = isceobj.createImage() latImage.load(latFile + '.xml') latImage.setAccessMode('READ') latImage.createImage() lonImage = isceobj.createImage() lonImage.load(lonFile + '.xml') lonImage.setAccessMode('READ') lonImage.createImage() demImage = isceobj.createImage() demImage.load(hgtFile + '.xml') demImage.setAccessMode('READ') demImage.createImage() #####Run Geo2rdr planet = Planet(pname='Earth') grdr = PyGeo2rdr() grdr.setRangePixelSpacing(numberRangeLooks * slaveTrack.rangePixelSize) grdr.setPRF(1.0 / (numberAzimuthLooks * slaveTrack.azimuthLineInterval)) grdr.setRadarWavelength(slaveTrack.radarWavelength) #CHECK IF THIS WORKS!!! grdr.createOrbit(0, len(slaveTrack.orbit.stateVectors.list)) count = 0 for sv in slaveTrack.orbit.stateVectors.list: td = DTU.seconds_since_midnight(sv.getTime()) pos = sv.getPosition() vel = sv.getVelocity() grdr.setOrbitVector(count, td, pos[0], pos[1], pos[2], vel[0], vel[1], vel[2]) count += 1 grdr.setOrbitMethod(0) grdr.setWidth(slaveTrack.numberOfSamples) grdr.setLength(slaveTrack.numberOfLines) grdr.setSensingStart( DTU.seconds_since_midnight(slaveTrack.sensingStart + datetime.timedelta( seconds=(numberAzimuthLooks - 1.0) / 2.0 * slaveTrack.azimuthLineInterval))) grdr.setRangeFirstSample(slaveTrack.startingRange + (numberRangeLooks - 1.0) / 2.0 * slaveTrack.rangePixelSize) grdr.setNumberRangeLooks(1) grdr.setNumberAzimuthLooks(1) grdr.setEllipsoidMajorSemiAxis(planet.ellipsoid.a) grdr.setEllipsoidEccentricitySquared(planet.ellipsoid.e2) grdr.createPoly(0, 0., 1.) grdr.setPolyCoeff(0, 0.) grdr.setDemLength(demImage.getLength()) grdr.setDemWidth(demImage.getWidth()) grdr.setBistaticFlag(0) rangeOffsetImage = isceobj.createImage() rangeOffsetImage.setFilename(rangeOffsetFile) rangeOffsetImage.setAccessMode('write') rangeOffsetImage.setDataType('FLOAT') rangeOffsetImage.setCaster('write', 'DOUBLE') rangeOffsetImage.setWidth(demImage.width) rangeOffsetImage.createImage() azimuthOffsetImage = isceobj.createImage() azimuthOffsetImage.setFilename(azimuthOffsetFile) azimuthOffsetImage.setAccessMode('write') azimuthOffsetImage.setDataType('FLOAT') azimuthOffsetImage.setCaster('write', 'DOUBLE') azimuthOffsetImage.setWidth(demImage.width) azimuthOffsetImage.createImage() grdr.setLatAccessor(latImage.getImagePointer()) grdr.setLonAccessor(lonImage.getImagePointer()) grdr.setHgtAccessor(demImage.getImagePointer()) grdr.setAzAccessor(0) grdr.setRgAccessor(0) grdr.setAzOffAccessor(azimuthOffsetImage.getImagePointer()) grdr.setRgOffAccessor(rangeOffsetImage.getImagePointer()) grdr.geo2rdr() rangeOffsetImage.finalizeImage() rangeOffsetImage.renderHdr() azimuthOffsetImage.finalizeImage() azimuthOffsetImage.renderHdr() latImage.finalizeImage() lonImage.finalizeImage() demImage.finalizeImage() return
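# --- Illustrative sketch (not part of the original code) ---
# geo2RdrGPU centers the multilooked geometry on the looked pixels: the effective
# sensing start moves by (Naz - 1)/2 line intervals and the effective starting range
# by (Nrg - 1)/2 range pixels. A small numeric example with assumed values:
import datetime

numberAzimuthLooks, numberRangeLooks = 14, 2      # assumed look counts
azimuthLineInterval = 0.005                       # assumed seconds per line
rangePixelSize = 4.29                             # assumed meters per sample
sensingStart = datetime.datetime(2020, 1, 1, 0, 0, 0)
startingRange = 850000.0

lookedSensingStart = sensingStart + datetime.timedelta(
    seconds=(numberAzimuthLooks - 1.0) / 2.0 * azimuthLineInterval)   # +0.0325 s
lookedStartingRange = startingRange + \
    (numberRangeLooks - 1.0) / 2.0 * rangePixelSize                   # +2.145 m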
def _run_ampcor(self, firstAc, lastAc, firstDn, lastDn, numAc, numDn,
                firstind, lastind):
    '''
    Individual calls to ampcor.
    '''
    objAmpcor = Ampcor()
    objAmpcor.setWindowSizeWidth(self.windowSizeWidth)
    objAmpcor.setWindowSizeHeight(self.windowSizeHeight)
    objAmpcor.setSearchWindowSizeWidth(self.searchWindowSizeWidth)
    objAmpcor.setSearchWindowSizeHeight(self.searchWindowSizeHeight)
    objAmpcor.setImageDataType1(self.imageDataType1)
    objAmpcor.setImageDataType2(self.imageDataType2)
    objAmpcor.setFirstSampleAcross(firstAc)
    objAmpcor.setLastSampleAcross(lastAc)
    objAmpcor.setNumberLocationAcross(numAc)
    objAmpcor.setFirstSampleDown(firstDn)
    objAmpcor.setLastSampleDown(lastDn)
    objAmpcor.setNumberLocationDown(numDn)
    objAmpcor.setAcrossGrossOffset(self.acrossGrossOffset)
    objAmpcor.setDownGrossOffset(self.downGrossOffset)
    objAmpcor.setFirstPRF(self.prf1)
    objAmpcor.setSecondPRF(self.prf2)
    objAmpcor.setFirstRangeSpacing(self.rangeSpacing1)
    objAmpcor.setSecondRangeSpacing(self.rangeSpacing2)
    objAmpcor.thresholdSNR = 1.0e-6
    objAmpcor.thresholdCov = self.thresholdCov

    mSlc = isceobj.createImage()
    IU.copyAttributes(self.slcImage1, mSlc)
    mSlc.setAccessMode('read')
    mSlc.createImage()

    sSlc = isceobj.createImage()
    IU.copyAttributes(self.slcImage2, sSlc)
    sSlc.setAccessMode('read')
    sSlc.createImage()

    objAmpcor.ampcor(mSlc, sSlc)

    mSlc.finalizeImage()
    sSlc.finalizeImage()

    j = 0
    length = len(objAmpcor.locationDown)
    for i in range(lastind - firstind):
        acInd = firstAc + self.pixLocOffAc + (i % numAc) * self.skipSampleAcross
        downInd = firstDn + self.pixLocOffDn + (i // numAc) * self.skipSampleDown

        if j < length and objAmpcor.locationDown[j] == downInd and objAmpcor.locationAcross[j] == acInd:
            self.locationDown[firstind + i] = objAmpcor.locationDown[j]
            self.locationDownOffset[firstind + i] = objAmpcor.locationDownOffset[j]
            self.locationAcross[firstind + i] = objAmpcor.locationAcross[j]
            self.locationAcrossOffset[firstind + i] = objAmpcor.locationAcrossOffset[j]
            self.snr[firstind + i] = objAmpcor.snrRet[j]
            j += 1
        else:
            self.locationDown[firstind + i] = downInd
            self.locationDownOffset[firstind + i] = -10000.
            self.locationAcross[firstind + i] = acInd
            self.locationAcrossOffset[firstind + i] = -10000.
            self.snr[firstind + i] = 0.

    return
geom['lon_ifg'] = lo
geom['lat_ifg'] = la
geom['hgt_ifg'] = hgt
np.save('geom_2.npy', geom)

params['nxl'] = nxl2
params['nyl'] = nyl2
params['ymin'] = 0
params['ymax'] = nyl2
np.save('params_2.npy', params)

# crop ints
for pair in params['pairs']:
    infile = params['intdir'] + '/' + pair + '/fine_lk.r4'
    outfile = params['intdir'] + '/' + pair + '/fine_lk_crop.r4'
    i = isceobj.createImage()
    i.load(infile + '.xml')
    i1 = i.memMap()[:, :, 0]
    i2 = i1[y1:y2, x1:x2]

    # Write out the xml file for the cropped ifg
    out = i.clone()  # Copy the interferogram image from before
    out.filename = outfile
    out.width = nxl2
    out.length = nyl2
    out.dump(outfile + '.xml')  # Write out xml
    i2.tofile(outfile)
    out.renderHdr()
    out.renderVRT()

# crop cor files
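# --- Illustrative sketch (not part of the original code) ---
# The crop above depends on the new header width/length matching the slice bounds
# exactly; a header that disagrees with the flat file corrupts later reads. A small
# sanity check that could precede the loop (hypothetical bounds):
x1, x2 = 100, 1124
y1, y2 = 50, 850
nxl2 = x2 - x1           # cropped width,  must equal i2.shape[1]
nyl2 = y2 - y1           # cropped length, must equal i2.shape[0]
assert (nxl2, nyl2) == (1024, 800)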
def runFilter(self, filterStrength):
    logger.info("Applying power-spectral filter")

    # Initialize the flattened interferogram
    topoflatIntFilename = self.insar.topophaseFlatFilename
    intImage = isceobj.createIntImage()
    widthInt = self.insar.resampIntImage.width
    intImage.setFilename(topoflatIntFilename)
    intImage.setWidth(widthInt)
    intImage.setAccessMode('read')
    intImage.createImage()

    # Create the filtered interferogram
    filtIntFilename = 'filt_' + topoflatIntFilename
    filtImage = isceobj.createIntImage()
    filtImage.setFilename(filtIntFilename)
    filtImage.setWidth(widthInt)
    filtImage.setAccessMode('write')
    filtImage.createImage()

    objFilter = Filter()
    objFilter.wireInputPort(name='interferogram', object=intImage)
    objFilter.wireOutputPort(name='filtered interferogram', object=filtImage)
    if filterStrength is not None:
        self.insar.filterStrength = filterStrength

    objFilter.goldsteinWerner(alpha=self.insar.filterStrength)

    intImage.finalizeImage()
    filtImage.finalizeImage()
    del filtImage

    # Create phase sigma correlation file here
    filtImage = isceobj.createIntImage()
    filtImage.setFilename(filtIntFilename)
    filtImage.setWidth(widthInt)
    filtImage.setAccessMode('read')
    filtImage.createImage()

    phsigImage = isceobj.createImage()
    phsigImage.dataType = 'FLOAT'
    phsigImage.bands = 1
    phsigImage.setWidth(widthInt)
    phsigImage.setFilename(self.insar.phsigFilename)
    phsigImage.setAccessMode('write')
    phsigImage.setImageType('cor')  # the type in this case is not for mdx.py displaying but for geocoding method
    phsigImage.createImage()

    ampImage = isceobj.createAmpImage()
    IU.copyAttributes(self.insar.resampAmpImage, ampImage)
    ampImage.setAccessMode('read')
    ampImage.createImage()

    icuObj = Icu(name='insarapp_filter_icu')
    icuObj.configure()
    icuObj.unwrappingFlag = False

    icuObj.icu(intImage=filtImage, ampImage=ampImage, phsigImage=phsigImage)

    filtImage.finalizeImage()
    phsigImage.finalizeImage()
    ampImage.finalizeImage()
    phsigImage.renderHdr()

    # Set the filtered image to be the one geocoded
    self.insar.topophaseFlatFilename = filtIntFilename
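# --- Illustrative sketch (not part of the original code) ---
# runFilter delegates to ISCE's Filter module, but the Goldstein-Werner idea itself is
# compact: weight each FFT patch of the interferogram by a smoothed version of its own
# spectral magnitude raised to the power alpha. A numpy illustration for a single patch
# (concept only, not the module's implementation):
import numpy as np

def goldstein_patch(ifg_patch, alpha=0.5, smooth=3):
    '''Filter one complex interferogram patch with the power-spectral method.'''
    spec = np.fft.fft2(ifg_patch)
    mag = np.abs(spec)
    # crude box smoothing of the spectral magnitude (the module uses its own kernel)
    pad = smooth // 2
    padded = np.pad(mag, pad, mode='wrap')
    smoothed = sum(padded[i:i + mag.shape[0], j:j + mag.shape[1]]
                   for i in range(smooth) for j in range(smooth)) / smooth**2
    return np.fft.ifft2(spec * smoothed**alpha)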
def swathMosaic(frame, inputFiles, outputfile, rangeOffsets, azimuthOffsets, numberOfRangeLooks, numberOfAzimuthLooks, updateFrame=False, phaseCompensation=False, phaseDiff=None, phaseDiffFixed=None, snapThreshold=None, pcRangeLooks=1, pcAzimuthLooks=4, filt=False, resamplingMethod=0): ''' mosaic swaths #PART 1. REGULAR INPUT PARAMTERS frame: frame inputFiles: input file list outputfile: output mosaic file rangeOffsets: range offsets azimuthOffsets: azimuth offsets numberOfRangeLooks: number of range looks of the input files numberOfAzimuthLooks: number of azimuth looks of the input files updateFrame: whether update frame parameters #PART 2. PARAMETERS FOR COMPUTING PHASE DIFFERENCE BETWEEN SUBSWATHS phaseCompensation: whether do phase compensation for each swath phaseDiff: pre-computed compensation phase for each swath phaseDiffFixed: if provided, the estimated value will snap to one of these values, which is nearest to the estimated one. snapThreshold: this is used with phaseDiffFixed pcRangeLooks: number of range looks to take when compute swath phase difference pcAzimuthLooks: number of azimuth looks to take when compute swath phase difference filt: whether do filtering when compute swath phase difference #PART 3. RESAMPLING METHOD resamplingMethod: 0: amp resampling. 1: int resampling. ''' from contrib.alos2proc_f.alos2proc_f import rect_with_looks from contrib.alos2proc.alos2proc import mosaicsubswath from isceobj.Alos2Proc.Alos2ProcPublic import multilook from isceobj.Alos2Proc.Alos2ProcPublic import cal_coherence_1 from isceobj.Alos2Proc.Alos2ProcPublic import filterInterferogram numberOfSwaths = len(frame.swaths) swaths = frame.swaths rangeScale = [] azimuthScale = [] rectWidth = [] rectLength = [] for i in range(numberOfSwaths): rangeScale.append(swaths[0].rangePixelSize / swaths[i].rangePixelSize) azimuthScale.append(swaths[0].azimuthLineInterval / swaths[i].azimuthLineInterval) if i == 0: rectWidth.append( int(swaths[i].numberOfSamples / numberOfRangeLooks)) rectLength.append( int(swaths[i].numberOfLines / numberOfAzimuthLooks)) else: rectWidth.append( int(1.0 / rangeScale[i] * int(swaths[i].numberOfSamples / numberOfRangeLooks))) rectLength.append( int(1.0 / azimuthScale[i] * int(swaths[i].numberOfLines / numberOfAzimuthLooks))) #convert original offset to offset for images with looks #use list instead of np.array to make it consistent with the rest of the code rangeOffsets1 = [i / numberOfRangeLooks for i in rangeOffsets] azimuthOffsets1 = [i / numberOfAzimuthLooks for i in azimuthOffsets] #get offset relative to the first frame rangeOffsets2 = [0.0] azimuthOffsets2 = [0.0] for i in range(1, numberOfSwaths): rangeOffsets2.append(0.0) azimuthOffsets2.append(0.0) for j in range(1, i + 1): rangeOffsets2[i] += rangeOffsets1[j] azimuthOffsets2[i] += azimuthOffsets1[j] #resample each swath rinfs = [] for i, inf in enumerate(inputFiles): rinfs.append("{}_{}{}".format( os.path.splitext(os.path.basename(inf))[0], i, os.path.splitext(os.path.basename(inf))[1])) #do not resample first swath if i == 0: if os.path.isfile(rinfs[i]): os.remove(rinfs[i]) os.symlink(inf, rinfs[i]) else: infImg = isceobj.createImage() infImg.load(inf + '.xml') rangeOffsets2Frac = rangeOffsets2[i] - int(rangeOffsets2[i]) azimuthOffsets2Frac = azimuthOffsets2[i] - int(azimuthOffsets2[i]) if resamplingMethod == 0: rect_with_looks(inf, rinfs[i], infImg.width, infImg.length, rectWidth[i], rectLength[i], rangeScale[i], 0.0, 0.0, azimuthScale[i], rangeOffsets2Frac * rangeScale[i], azimuthOffsets2Frac * 
azimuthScale[i], 1, 1, 1, 1, 'COMPLEX', 'Bilinear') elif resamplingMethod == 1: #decompose amplitude and phase phaseFile = 'phase' amplitudeFile = 'amplitude' data = np.fromfile(inf, dtype=np.complex64).reshape( infImg.length, infImg.width) phase = np.exp(np.complex64(1j) * np.angle(data)) phase[np.nonzero(data == 0)] = 0 phase.astype(np.complex64).tofile(phaseFile) amplitude = np.absolute(data) amplitude.astype(np.float32).tofile(amplitudeFile) #resampling phaseRectFile = 'phaseRect' amplitudeRectFile = 'amplitudeRect' rect_with_looks(phaseFile, phaseRectFile, infImg.width, infImg.length, rectWidth[i], rectLength[i], rangeScale[i], 0.0, 0.0, azimuthScale[i], rangeOffsets2Frac * rangeScale[i], azimuthOffsets2Frac * azimuthScale[i], 1, 1, 1, 1, 'COMPLEX', 'Sinc') rect_with_looks(amplitudeFile, amplitudeRectFile, infImg.width, infImg.length, rectWidth[i], rectLength[i], rangeScale[i], 0.0, 0.0, azimuthScale[i], rangeOffsets2Frac * rangeScale[i], azimuthOffsets2Frac * azimuthScale[i], 1, 1, 1, 1, 'REAL', 'Bilinear') #recombine amplitude and phase phase = np.fromfile(phaseRectFile, dtype=np.complex64).reshape( rectLength[i], rectWidth[i]) amplitude = np.fromfile(amplitudeRectFile, dtype=np.float32).reshape( rectLength[i], rectWidth[i]) (phase * amplitude).astype(np.complex64).tofile(rinfs[i]) #tidy up os.remove(phaseFile) os.remove(amplitudeFile) os.remove(phaseRectFile) os.remove(amplitudeRectFile) #determine output width and length #actually no need to calculate in range direction xs = [] xe = [] ys = [] ye = [] for i in range(numberOfSwaths): if i == 0: xs.append(0) xe.append(rectWidth[i] - 1) ys.append(0) ye.append(rectLength[i] - 1) else: xs.append(0 - int(rangeOffsets2[i])) xe.append(rectWidth[i] - 1 - int(rangeOffsets2[i])) ys.append(0 - int(azimuthOffsets2[i])) ye.append(rectLength[i] - 1 - int(azimuthOffsets2[i])) (xmin, xminIndex) = min((v, i) for i, v in enumerate(xs)) (xmax, xmaxIndex) = max((v, i) for i, v in enumerate(xe)) (ymin, yminIndex) = min((v, i) for i, v in enumerate(ys)) (ymax, ymaxIndex) = max((v, i) for i, v in enumerate(ye)) outWidth = xmax - xmin + 1 outLength = ymax - ymin + 1 #prepare offset for mosaicing rangeOffsets3 = [] azimuthOffsets3 = [] for i in range(numberOfSwaths): azimuthOffsets3.append( int(azimuthOffsets2[i]) - int(azimuthOffsets2[yminIndex])) if i != 0: rangeOffsets3.append( int(rangeOffsets2[i]) - int(rangeOffsets2[i - 1])) else: rangeOffsets3.append(0) delta = int(30 / numberOfRangeLooks) #compute compensation phase for each swath diffMean2 = [0.0 for i in range(numberOfSwaths)] phaseDiffEst = [None for i in range(numberOfSwaths)] #True if: # (1) used diff phase from input # (2) used estimated diff phase after snapping to a fixed diff phase provided #False if: # (1) used purely estimated diff phase phaseDiffSource = ['estimated' for i in range(numberOfSwaths)] # 1. 'estimated': estimated from subswath overlap # 2. 'estimated+snap': estimated from subswath overlap and snap to a fixed value # 3. 
'input': pre-computed # confidence level: 3 > 2 > 1 if phaseCompensation: #compute swath phase offset diffMean = [0.0] for i in range(1, numberOfSwaths): #no need to estimate diff phase if provided from input ##################################################################### if phaseDiff != None: if phaseDiff[i] != None: diffMean.append(phaseDiff[i]) phaseDiffSource[i] = 'input' print('using pre-computed phase offset given from input') print('phase offset: subswath{} - subswath{}: {}'.format( frame.swaths[i - 1].swathNumber, frame.swaths[i].swathNumber, phaseDiff[i])) continue ##################################################################### #all indexes start with zero, all the computed start/end sample/line indexes are included. #no need to add edge here, as we are going to find first/last nonzero sample/lines later #edge = delta edge = 0 #image i-1 startSample1 = edge + 0 - int(rangeOffsets2[i]) + int( rangeOffsets2[i - 1]) endSample1 = -edge + rectWidth[i - 1] - 1 startLine1 = edge + max( 0 - int(azimuthOffsets2[i]) + int(azimuthOffsets2[i - 1]), 0) endLine1 = -edge + min( rectLength[i] - 1 - int(azimuthOffsets2[i]) + int(azimuthOffsets2[i - 1]), rectLength[i - 1] - 1) data1 = readImage(rinfs[i - 1], rectWidth[i - 1], rectLength[i - 1], startSample1, endSample1, startLine1, endLine1) #image i startSample2 = edge + 0 endSample2 = -edge + rectWidth[i - 1] - 1 - int( rangeOffsets2[i - 1]) + int(rangeOffsets2[i]) startLine2 = edge + max( 0 - int(azimuthOffsets2[i - 1]) + int(azimuthOffsets2[i]), 0) endLine2 = -edge + min( rectLength[i - 1] - 1 - int(azimuthOffsets2[i - 1]) + int(azimuthOffsets2[i]), rectLength[i] - 1) data2 = readImage(rinfs[i], rectWidth[i], rectLength[i], startSample2, endSample2, startLine2, endLine2) #remove edge due to incomplete covolution in resampling edge = 9 (startLine0, endLine0, startSample0, endSample0) = findNonzero( np.logical_and((data1 != 0), (data2 != 0))) data1 = data1[startLine0 + edge:endLine0 + 1 - edge, startSample0 + edge:endSample0 + 1 - edge] data2 = data2[startLine0 + edge:endLine0 + 1 - edge, startSample0 + edge:endSample0 + 1 - edge] #take looks data1 = multilook(data1, pcAzimuthLooks, pcRangeLooks) data2 = multilook(data2, pcAzimuthLooks, pcRangeLooks) #filter if filt: data1 /= (np.absolute(data1) + (data1 == 0)) data2 /= (np.absolute(data2) + (data2 == 0)) data1 = filterInterferogram(data1, 3.0, 64, 1) data2 = filterInterferogram(data2, 3.0, 64, 1) #get difference dataDiff = data1 * np.conj(data2) cor = cal_coherence_1(dataDiff, win=5) index = np.nonzero(np.logical_and(cor > 0.85, dataDiff != 0)) DEBUG = False if DEBUG: from isceobj.Alos2Proc.Alos2ProcPublic import create_xml (length7, width7) = dataDiff.shape filename = 'diff_ori_s{}-s{}.int'.format( frame.swaths[i - 1].swathNumber, frame.swaths[i].swathNumber) dataDiff.astype(np.complex64).tofile(filename) create_xml(filename, width7, length7, 'int') filename = 'cor_ori_s{}-s{}.cor'.format( frame.swaths[i - 1].swathNumber, frame.swaths[i].swathNumber) cor.astype(np.float32).tofile(filename) create_xml(filename, width7, length7, 'float') print('\ncompute phase difference between subswaths {} and {}'. 
format(frame.swaths[i - 1].swathNumber, frame.swaths[i].swathNumber)) print('number of pixels with coherence > 0.85: {}'.format( index[0].size)) #if already filtered the subswath overlap interferograms (MAI), do not filtered differential interferograms if (filt == False) and (index[0].size < 4000): #coherence too low, filter subswath overlap differential interferogram diffMean0 = 0.0 breakFlag = False for (filterStrength, filterWinSize) in zip([3.0, 9.0], [64, 128]): dataDiff = data1 * np.conj(data2) dataDiff /= (np.absolute(dataDiff) + (dataDiff == 0)) dataDiff = filterInterferogram(dataDiff, filterStrength, filterWinSize, 1) cor = cal_coherence_1(dataDiff, win=7) DEBUG = False if DEBUG: from isceobj.Alos2Proc.Alos2ProcPublic import create_xml (length7, width7) = dataDiff.shape filename = 'diff_filt_s{}-s{}_strength_{}_winsize_{}.int'.format( frame.swaths[i - 1].swathNumber, frame.swaths[i].swathNumber, filterStrength, filterWinSize) dataDiff.astype(np.complex64).tofile(filename) create_xml(filename, width7, length7, 'int') filename = 'cor_filt_s{}-s{}_strength_{}_winsize_{}.cor'.format( frame.swaths[i - 1].swathNumber, frame.swaths[i].swathNumber, filterStrength, filterWinSize) cor.astype(np.float32).tofile(filename) create_xml(filename, width7, length7, 'float') for corth in [0.99999, 0.9999]: index = np.nonzero( np.logical_and(cor > corth, dataDiff != 0)) if index[0].size > 30000: breakFlag = True break if breakFlag: break if index[0].size < 100: diffMean0 = 0.0 print( '\n\nWARNING: too few high coherence pixels for swath phase difference estimation' ) print(' number of high coherence pixels: {}\n\n'. format(index[0].size)) else: print( 'filtered coherence threshold used: {}, number of pixels used: {}' .format(corth, index[0].size)) angle = np.mean(np.angle(dataDiff[index]), dtype=np.float64) diffMean0 += angle data2 *= np.exp(np.complex64(1j) * angle) print( 'phase offset: %15.12f rad with filter strength: %f, window size: %3d' % (diffMean0, filterStrength, filterWinSize)) else: diffMean0 = 0.0 for k in range(30): dataDiff = data1 * np.conj(data2) cor = cal_coherence_1(dataDiff, win=5) if filt: index = np.nonzero( np.logical_and(cor > 0.95, dataDiff != 0)) else: index = np.nonzero( np.logical_and(cor > 0.85, dataDiff != 0)) if index[0].size < 100: diffMean0 = 0.0 print( '\n\nWARNING: too few high coherence pixels for swath phase difference estimation' ) print( ' number of high coherence pixels: {}\n\n'. 
format(index[0].size)) break angle = np.mean(np.angle(dataDiff[index]), dtype=np.float64) diffMean0 += angle data2 *= np.exp(np.complex64(1j) * angle) print('phase offset: %15.12f rad after loop: %3d' % (diffMean0, k)) DEBUG = False if DEBUG and (k == 0): from isceobj.Alos2Proc.Alos2ProcPublic import create_xml (length7, width7) = dataDiff.shape filename = 'diff_ori_s{}-s{}_loop_{}.int'.format( frame.swaths[i - 1].swathNumber, frame.swaths[i].swathNumber, k) dataDiff.astype(np.complex64).tofile(filename) create_xml(filename, width7, length7, 'int') filename = 'cor_ori_s{}-s{}_loop_{}.cor'.format( frame.swaths[i - 1].swathNumber, frame.swaths[i].swathNumber, k) cor.astype(np.float32).tofile(filename) create_xml(filename, width7, length7, 'float') #save purely estimated diff phase phaseDiffEst[i] = diffMean0 #if fixed diff phase provided and the estimated diff phase is close enough to a fixed value, snap to it ############################################################################################################ if phaseDiffFixed != None: phaseDiffTmp = np.absolute( np.absolute(np.array(phaseDiffFixed)) - np.absolute(diffMean0)) phaseDiffTmpMinIndex = np.argmin(phaseDiffTmp) if phaseDiffTmp[phaseDiffTmpMinIndex] < snapThreshold: diffMean0 = np.sign(diffMean0) * np.absolute( phaseDiffFixed[phaseDiffTmpMinIndex]) phaseDiffSource[i] = 'estimated+snap' ############################################################################################################ diffMean.append(diffMean0) print('phase offset: subswath{} - subswath{}: {}'.format( frame.swaths[i - 1].swathNumber, frame.swaths[i].swathNumber, diffMean0)) for i in range(1, numberOfSwaths): for j in range(1, i + 1): diffMean2[i] += diffMean[j] #mosaic swaths diffflag = 1 oflag = [0 for i in range(numberOfSwaths)] mosaicsubswath(outputfile, outWidth, outLength, delta, diffflag, numberOfSwaths, rinfs, rectWidth, rangeOffsets3, azimuthOffsets3, diffMean2, oflag) #remove tmp files for x in rinfs: os.remove(x) #update frame parameters if updateFrame: #mosaic size frame.numberOfSamples = outWidth frame.numberOfLines = outLength #NOTE THAT WE ARE STILL USING SINGLE LOOK PARAMETERS HERE #range parameters frame.startingRange = frame.swaths[0].startingRange frame.rangeSamplingRate = frame.swaths[0].rangeSamplingRate frame.rangePixelSize = frame.swaths[0].rangePixelSize #azimuth parameters azimuthTimeOffset = -max([ int(x) for x in azimuthOffsets2 ]) * numberOfAzimuthLooks * frame.swaths[0].azimuthLineInterval frame.sensingStart = frame.swaths[0].sensingStart + datetime.timedelta( seconds=azimuthTimeOffset) frame.prf = frame.swaths[0].prf frame.azimuthPixelSize = frame.swaths[0].azimuthPixelSize frame.azimuthLineInterval = frame.swaths[0].azimuthLineInterval if phaseCompensation: # estimated phase diff, used phase diff, used phase diff source return (phaseDiffEst, diffMean, phaseDiffSource)
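# A minimal, self-contained sketch of the iterative overlap phase-offset estimation used
# above: form the differential interferogram of the common overlap, keep usable pixels,
# and accumulate the mean residual angle. Synthetic data; the pixel-count threshold and
# loop count are placeholders, and the real code masks with cal_coherence_1() rather than
# this simple nonzero test.
import numpy as np

def estimate_phase_offset(data1, data2, min_pixels=100, max_iter=30):
    diffMean = 0.0
    for _ in range(max_iter):
        dataDiff = data1 * np.conj(data2)
        index = np.nonzero(dataDiff != 0)
        if index[0].size < min_pixels:
            return 0.0
        angle = np.mean(np.angle(dataDiff[index]), dtype=np.float64)
        diffMean += angle
        data2 = data2 * np.exp(1j * angle)   # compensate and re-estimate
    return diffMean

rng = np.random.default_rng(0)
data1 = np.exp(1j * rng.uniform(-0.1, 0.1, (200, 200))).astype(np.complex64)
data2 = data1 * np.exp(-1j * 0.7)            # overlap with a 0.7 rad offset
print(estimate_phase_offset(data1, data2))   # converges to about 0.7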
def runPrepESD(self): ''' Create additional layers for performing ESD. ''' if not self.doESD: return swathList = self._insar.getValidSwathList(self.swaths) for swath in swathList: if self._insar.numberOfCommonBursts[swath-1] < 2: print('Skipping prepesd for swath IW{0}'.format(swath)) continue minBurst, maxBurst = self._insar.commonMasterBurstLimits(swath-1) slaveBurstStart, slaveBurstEnd = self._insar.commonSlaveBurstLimits(swath-1) ####Load full products master = self._insar.loadProduct( os.path.join(self._insar.masterSlcProduct, 'IW{0}.xml'.format(swath))) slave = self._insar.loadProduct( os.path.join(self._insar.slaveSlcProduct, 'IW{0}.xml'.format(swath))) ####Estimate relative shifts relShifts = getRelativeShifts(master, slave, minBurst, maxBurst, slaveBurstStart) maxBurst = maxBurst - 1 ###For overlaps ####Load metadata for burst IFGs ifgTop = self._insar.loadProduct( os.path.join(self._insar.coarseIfgOverlapProduct, 'top_IW{0}.xml'.format(swath))) ifgBottom = self._insar.loadProduct( os.path.join(self._insar.coarseIfgOverlapProduct, 'bottom_IW{0}.xml'.format(swath))) print('Relative shifts for swath {0}:'.format(swath)) pprint.pprint(relShifts) ####Create ESD output directory esddir = self._insar.esdDirname os.makedirs(esddir, exist_ok=True) ####Overlap offsets directory offdir = os.path.join( self._insar.coarseOffsetsDirname, self._insar.overlapsSubDirname, 'IW{0}'.format(swath)) ifglist = [] factorlist = [] offsetlist = [] cohlist = [] for ii in range(minBurst, maxBurst): ind = ii - minBurst ###Index into overlaps sind = slaveBurstStart + ind ###Index into slave topShift = relShifts[sind] botShift = relShifts[sind+1] topBurstIfg = ifgTop.bursts[ind] botBurstIfg = ifgBottom.bursts[ind] ####Double difference interferograms topInt = np.memmap( topBurstIfg.image.filename, dtype=np.complex64, mode='r', shape = (topBurstIfg.numberOfLines, topBurstIfg.numberOfSamples)) botInt = np.memmap( botBurstIfg.image.filename, dtype=np.complex64, mode='r', shape = (botBurstIfg.numberOfLines, botBurstIfg.numberOfSamples)) intName = os.path.join(esddir, 'overlap_IW%d_%02d.int'%(swath,ii+1)) freqName = os.path.join(esddir, 'freq_IW%d_%02d.bin'%(swath,ii+1)) with open(intName, 'wb') as fid: fid.write( topInt * np.conj(botInt)) img = isceobj.createIntImage() img.setFilename(intName) img.setLength(topBurstIfg.numberOfLines) img.setWidth(topBurstIfg.numberOfSamples) img.setAccessMode('READ') img.renderHdr() multIntName= multilook(intName, alks = self.esdAzimuthLooks, rlks=self.esdRangeLooks) ifglist.append(multIntName) ####Estimate coherence of double different interferograms multCor = createCoherence(multIntName) cohlist.append(multCor) ####Estimate the frequency difference azTop = os.path.join(offdir, 'azimuth_top_%02d_%02d.off'%(ii+1,ii+2)) rgTop = os.path.join(offdir, 'range_top_%02d_%02d.off'%(ii+1,ii+2)) azBot = os.path.join(offdir, 'azimuth_bot_%02d_%02d.off'%(ii+1,ii+2)) rgBot = os.path.join(offdir, 'range_bot_%02d_%02d.off'%(ii+1,ii+2)) mFullTop = master.bursts[ii] mFullBot = master.bursts[ii+1] sFullTop = slave.bursts[sind] sFullBot = slave.bursts[sind+1] freqdiff = overlapSpectralSeparation(topBurstIfg, botBurstIfg, mFullTop, mFullBot, sFullTop, sFullBot, azTop, rgTop, azBot, rgBot) with open(freqName, 'wb') as fid: (freqdiff * 2 * np.pi * mFullTop.azimuthTimeInterval).astype(np.float32).tofile(fid) img = isceobj.createImage() img.setFilename(freqName) img.setWidth(topBurstIfg.numberOfSamples) img.setLength(topBurstIfg.numberOfLines) img.setAccessMode('READ') img.bands = 1 img.dataType = 
'FLOAT' img.renderHdr() multConstName = multilook(freqName, alks = self.esdAzimuthLooks, rlks = self.esdRangeLooks) factorlist.append(multConstName)
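# A hedged numpy illustration of the two ESD layers written per burst overlap above: the
# double-difference interferogram and the Doppler spectral separation converted to radians
# of phase per azimuth line. All array values and the azimuth time interval are placeholders.
import numpy as np

rng = np.random.default_rng(1)
shape = (80, 400)                                    # overlap lines x samples (made up)
topInt = np.exp(1j * rng.normal(size=shape)).astype(np.complex64)
botInt = np.exp(1j * rng.normal(size=shape)).astype(np.complex64)

doubleDiff = topInt * np.conj(botInt)                # content of overlap_IW*_??.int
freqdiff = rng.normal(2000.0, 50.0, size=shape)      # Doppler separation in Hz (placeholder)
azimuthTimeInterval = 2.0e-3                         # seconds per azimuth line (placeholder)
freqLayer = (freqdiff * 2 * np.pi * azimuthTimeInterval).astype(np.float32)   # freq_IW*_??.bin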
def run(imageAmp, imageSim, numBand, infos, nstages, scale, stdWriter, catalog=None, sceneid='NO_ID'): logger.info("Running Rgoffset: %s" % sceneid) coarseAcross = 0 coarseDown = 0 firstAc = infos['firstSampleAcross'] firstDown = infos['firstSampleDown'] numLocationAcross = infos['numberLocationAcross'] numLocationDown = infos['numberLocationDown'] slaveWidth = imageAmp.getWidth() slaveLength = imageAmp.getLength() objAmp = isceobj.createSlcImage() objAmp.dataType = 'CFLOAT' objAmp.bands = 1 objAmp.setFilename(imageAmp.getFilename()) objAmp.setAccessMode('read') objAmp.setWidth(slaveWidth) objAmp.createImage() masterWidth = imageSim.getWidth() objSim = isceobj.createImage() objSim.setFilename(imageSim.getFilename()) objSim.dataType = 'FLOAT' objSim.setWidth(masterWidth) objSim.setAccessMode('read') objSim.createImage() masterLength = imageSim.getLength() finalIteration = False for iterNum in xrange(nstages-1,-1,-1): ####Rewind the images try: objAmp.rewind() objSim.rewind() except: print('Issues when rewinding images.') #KK sys.exit? ###### logger.debug('Starting Iteration Stage : %d'%(iterNum)) logger.debug("Gross Across: %s" % (coarseAcross)) logger.debug("Gross Down: %s" % (coarseDown)) ####Clear objs objAmpcor = None objOff = None offField = None objAmpcor = Ampcor() objAmpcor.setImageDataType1('real') objAmpcor.setImageDataType2('complex') ####Dummy values as there is no scale difference at this step objAmpcor.setFirstPRF(1.0) objAmpcor.setSecondPRF(1.0) objAmpcor.setFirstRangeSpacing(1.0) objAmpcor.setSecondRangeSpacing(1.0) #####Scale all the reference and search windows scaleFactor = scale**iterNum objAmpcor.windowSizeWidth *= scaleFactor objAmpcor.windowSizeHeight *= scaleFactor objAmpcor.searchWindowSizeWidth *= scaleFactor objAmpcor.searchWindowSizeHeight *= scaleFactor xMargin = 2*objAmpcor.searchWindowSizeWidth + objAmpcor.windowSizeWidth yMargin = 2*objAmpcor.searchWindowSizeHeight + objAmpcor.windowSizeHeight #####Set image limits for search offAc = max(firstAc,-coarseAcross)+xMargin offDn = max(firstDn,-coarseDown)+yMargin offAcmax = int(coarseAcross) logger.debug("Gross Max Across: %s" % (offAcmax)) lastAc = int(min(masterWidth, slaveWidth-offAcmax) - xMargin) offDnmax = int(coarseDown) logger.debug("Gross Max Down: %s" % (offDnmax)) lastDn = int(min(masterLength, slaveLength-offDnmax) - yMargin) logger.debug("Last Down: %s" %(lastDn)) objAmpcor.setFirstSampleAcross(offAc) objAmpcor.setLastSampleAcross(lastAc) objAmpcor.setFirstSampleDown(offDn) objAmpcor.setLastSampleDown(lastDn) objAmpcor.setAcrossGrossOffset(coarseAcross) objAmpcor.setDownGrossOffset(coarseDown) if (offAc > lastAc) or (offDn > lastDn): print('Search window scale is too large.') print('Skipping Scale: %d'%(iterNum+1)) continue if ((lastAc - offAc) <= (2*xMargin)) or ((lastDn - offDn) <= (2*yMargin)): print('Image not large enough accounting for margins.') print('Skipping Scale: %d'%(iterNum+1)) continue logger.debug('Looks = %d'%scaleFactor) logger.debug('Correlation window sizes: %d %d'%(objAmpcor.windowSizeWidth, objAmpcor.windowSizeHeight)) logger.debug('Search window sizes: %d %d'%(objAmpcor.searchWindowSizeWidth, objAmpcor.searchWindowSizeHeight)) logger.debug(' Across pos: %d %d out of (%d,%d)'%(objAmpcor.firstSampleAcross, objAmpcor.lastSampleAcross, masterWidth, slaveWidth)) logger.debug(' Down pos: %d %d out of (%d,%d)'%(objAmpcor.firstSampleDown, objAmpcor.lastSampleDown, masterLength, slaveLength)) if (iterNum == 0) or finalIteration: if catalog is not None: # Record the inputs 
isceobj.Catalog.recordInputs(catalog, objAmpcor, "runRgoffset.%s" % sceneid, logger, "runRgoffset.%s" % sceneid) objAmpcor.setNumberLocationAcross(numLocationAcross) objAmpcor.setNumberLocationDown(numLocationDown) else: objAmpcor.setNumberLocationAcross(20) objAmpcor.setNumberLocationDown(20) objAmpcor.setAcrossLooks(scaleFactor) objAmpcor.setDownLooks(scaleFactor) objAmpcor.setZoomWindowSize(scale*objAmpcor.zoomWindowSize) objAmpcor.setOversamplingFactor(2) objAmpcor.ampcor(objSim,objAmp) offField = objAmpcor.getOffsetField() if (iterNum == 0) or finalIteration: if catalog is not None: # Record the outputs isceobj.Catalog.recordOutputs(catalog, objAmpcor, "runRgoffset.%s" % sceneid, logger, "runRgoffset.%s" % sceneid) else: objOff = isceobj.createOffoutliers() objOff.wireInputPort(name='offsets', object=offField) objOff.setSNRThreshold(2.0) objOff.setDistance(10) objOff.setStdWriter = stdWriter.set_file_tags("nstage_offoutliers"+str(iterNum), "log", "err", "out") objOff.offoutliers() coarseAcross = int(objOff.averageOffsetAcross) coarseDown = int(objOff.averageOffsetDown) objSim.finalizeImage() objAmp.finalizeImage() objOff = None objAmpcor = None return offField
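# Sketch of the coarse-to-fine bookkeeping in the nstage loop above: every reference and
# search window, and therefore the image margins, scales with scale**iterNum, and matching
# at that stage takes the same factor as looks. The window sizes below are assumed defaults,
# not values read from the Ampcor object.
windowSizeWidth, windowSizeHeight = 64, 32
searchWindowSizeWidth, searchWindowSizeHeight = 20, 20
scale, nstages = 4, 3

for iterNum in range(nstages - 1, -1, -1):
    scaleFactor = scale ** iterNum
    ww = windowSizeWidth * scaleFactor
    wh = windowSizeHeight * scaleFactor
    sw = searchWindowSizeWidth * scaleFactor
    sh = searchWindowSizeHeight * scaleFactor
    xMargin = 2 * sw + ww
    yMargin = 2 * sh + wh
    print('stage %d: looks=%d window=(%d,%d) margins=(%d,%d)'
          % (iterNum, scaleFactor, ww, wh, xMargin, yMargin))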
def runUnwrapSnaphuSd(self): '''unwrap filtered interferogram ''' catalog = isceobj.Catalog.createCatalog(self._insar.procDoc.name) self.updateParamemetersFromUser() masterTrack = self._insar.loadTrack(master=True) #slaveTrack = self._insar.loadTrack(master=False) sdDir = 'sd' if not os.path.exists(sdDir): os.makedirs(sdDir) os.chdir(sdDir) ############################################################ # STEP 1. unwrap interferogram ############################################################ nsd = len(self._insar.filteredInterferogramSd) img = isceobj.createImage() img.load(self._insar.filteredInterferogramSd[0] + '.xml') width = img.width length = img.length if shutil.which('snaphu') != None: print( '\noriginal snaphu program found, use it for unwrapping interferograms' ) useOriginalSnaphu = True #create an amplitude for use # amplitude = os.path.join('../insar', self._insar.amplitude) # amplitudeMultilook = 'tmp.amp' # img = isceobj.createImage() # img.load(amplitude+'.xml') # look(amplitude, amplitudeMultilook, img.width, self._insar.numberRangeLooksSd, self._insar.numberAzimuthLooksSd, 4, 1, 1) else: useOriginalSnaphu = False for sdCoherence, sdInterferogramFilt, sdInterferogramUnwrap in zip( self._insar.multilookCoherenceSd, self._insar.filteredInterferogramSd, self._insar.unwrappedInterferogramSd): if useOriginalSnaphu: amplitudeMultilook = 'tmp.amp' cmd = "imageMath.py -e='sqrt(abs(a));sqrt(abs(a))' --a={} -o {} -t float -s BSQ".format( sdInterferogramFilt, amplitudeMultilook) runCmd(cmd) snaphuUnwrapOriginal(sdInterferogramFilt, sdCoherence, amplitudeMultilook, sdInterferogramUnwrap, costMode='s', initMethod='mcf') os.remove(amplitudeMultilook) os.remove(amplitudeMultilook + '.vrt') os.remove(amplitudeMultilook + '.xml') else: tmid = masterTrack.sensingStart + datetime.timedelta( seconds=(self._insar.numberAzimuthLooks1 - 1.0) / 2.0 * masterTrack.azimuthLineInterval + masterTrack.numberOfLines / 2.0 * self._insar.numberAzimuthLooks1 * masterTrack.azimuthLineInterval) snaphuUnwrap(masterTrack, tmid, sdInterferogramFilt, sdCoherence, sdInterferogramUnwrap, self._insar.numberRangeLooks1 * self._insar.numberRangeLooksSd, self._insar.numberAzimuthLooks1 * self._insar.numberAzimuthLooksSd, costMode='SMOOTH', initMethod='MCF', defomax=2, initOnly=True) #if useOriginalSnaphu: # os.remove(amplitudeMultilook) ############################################################ # STEP 2. mask using connected components ############################################################ for sdInterferogramUnwrap, sdInterferogramUnwrapMasked in zip( self._insar.unwrappedInterferogramSd, self._insar.unwrappedMaskedInterferogramSd): cmd = "imageMath.py -e='a_0*(b>0);a_1*(b>0)' --a={} --b={} -s BIL -t float -o={}".format( sdInterferogramUnwrap, sdInterferogramUnwrap + '.conncomp', sdInterferogramUnwrapMasked) runCmd(cmd) ############################################################ # STEP 3. 
mask using water body ############################################################ if self.waterBodyMaskStartingStepSd == 'unwrap': wbd = np.fromfile(self._insar.multilookWbdOutSd, dtype=np.int8).reshape(length, width) for sdInterferogramUnwrap, sdInterferogramUnwrapMasked in zip( self._insar.unwrappedInterferogramSd, self._insar.unwrappedMaskedInterferogramSd): unw = np.memmap(sdInterferogramUnwrap, dtype='float32', mode='r+', shape=(length * 2, width)) (unw[0:length * 2:2, :])[np.nonzero(wbd == -1)] = 0 (unw[1:length * 2:2, :])[np.nonzero(wbd == -1)] = 0 unw = np.memmap(sdInterferogramUnwrapMasked, dtype='float32', mode='r+', shape=(length * 2, width)) (unw[0:length * 2:2, :])[np.nonzero(wbd == -1)] = 0 (unw[1:length * 2:2, :])[np.nonzero(wbd == -1)] = 0 ############################################################ # STEP 4. convert to azimuth deformation ############################################################ #burst cycle in s burstCycleLength = masterTrack.frames[0].swaths[ 0].burstCycleLength / masterTrack.frames[0].swaths[0].prf #compute azimuth fmrate #stack all azimuth fmrates index = np.array([], dtype=np.float64) ka = np.array([], dtype=np.float64) for frame in masterTrack.frames: for swath in frame.swaths: startingRangeMultilook = masterTrack.frames[0].swaths[0].startingRange + \ (self._insar.numberRangeLooks1*self._insar.numberRangeLooksSd-1.0)/2.0*masterTrack.frames[0].swaths[0].rangePixelSize rangePixelSizeMultilook = self._insar.numberRangeLooks1 * self._insar.numberRangeLooksSd * masterTrack.frames[ 0].swaths[0].rangePixelSize index0 = (swath.startingRange + np.arange(swath.numberOfSamples) * swath.rangePixelSize - startingRangeMultilook) / rangePixelSizeMultilook ka0 = np.polyval(swath.azimuthFmrateVsPixel[::-1], np.arange(swath.numberOfSamples)) index = np.concatenate((index, index0)) ka = np.concatenate((ka, ka0)) p = np.polyfit(index, ka, 3) #new ka ka = np.polyval(p, np.arange(width)) #compute radar beam footprint velocity at middle track tmid = masterTrack.sensingStart + datetime.timedelta( seconds=(self._insar.numberAzimuthLooks1 - 1.0) / 2.0 * masterTrack.azimuthLineInterval + masterTrack.numberOfLines / 2.0 * self._insar.numberAzimuthLooks1 * masterTrack.azimuthLineInterval) svmid = masterTrack.orbit.interpolateOrbit(tmid, method='hermite') #earth radius in meters r = 6371 * 1000.0 #radar footprint velocity veln = np.linalg.norm(svmid.getVelocity()) * r / np.linalg.norm( svmid.getPosition()) print('radar beam footprint velocity at middle track: %8.2f m/s' % veln) #phase to defo factor factor = -1.0 * veln / (2.0 * np.pi * ka * burstCycleLength) #process unwrapped without mask sdunw_out = np.zeros((length * 2, width)) flag = np.zeros((length, width)) wgt = np.zeros((length, width)) for i in range(nsd): sdunw = np.fromfile(self._insar.unwrappedInterferogramSd[i], dtype=np.float32).reshape(length * 2, width) sdunw[1:length * 2:2, :] *= factor[None, :] / (i + 1.0) sdunw.astype(np.float32).tofile(self._insar.azimuthDeformationSd[i]) create_xml(self._insar.azimuthDeformationSd[i], width, length, 'rmg') flag += (sdunw[1:length * 2:2, :] != 0) #since the interferogram is filtered, we only use this light weight wgt0 = (i + 1)**2 wgt += wgt0 * (sdunw[1:length * 2:2, :] != 0) sdunw_out[0:length * 2:2, :] += (sdunw[0:length * 2:2, :])**2 sdunw_out[1:length * 2:2, :] += wgt0 * sdunw[1:length * 2:2, :] #output weighting average index = np.nonzero(flag != 0) (sdunw_out[0:length * 2:2, :])[index] = np.sqrt( (sdunw_out[0:length * 2:2, :])[index] / flag[index]) 
(sdunw_out[1:length * 2:2, :] )[index] = (sdunw_out[1:length * 2:2, :])[index] / wgt[index] if not self.unionSd: (sdunw_out[0:length * 2:2, :])[np.nonzero(flag < nsd)] = 0 (sdunw_out[1:length * 2:2, :])[np.nonzero(flag < nsd)] = 0 sdunw_out.astype(np.float32).tofile(self._insar.azimuthDeformationSd[-1]) create_xml(self._insar.azimuthDeformationSd[-1], width, length, 'rmg') #process unwrapped with mask sdunw_out = np.zeros((length * 2, width)) flag = np.zeros((length, width)) wgt = np.zeros((length, width)) for i in range(nsd): sdunw = np.fromfile(self._insar.unwrappedMaskedInterferogramSd[i], dtype=np.float32).reshape(length * 2, width) sdunw[1:length * 2:2, :] *= factor[None, :] / (i + 1.0) sdunw.astype(np.float32).tofile( self._insar.maskedAzimuthDeformationSd[i]) create_xml(self._insar.maskedAzimuthDeformationSd[i], width, length, 'rmg') flag += (sdunw[1:length * 2:2, :] != 0) #since the interferogram is filtered, we only use this light weight wgt0 = (i + 1)**2 wgt += wgt0 * (sdunw[1:length * 2:2, :] != 0) sdunw_out[0:length * 2:2, :] += (sdunw[0:length * 2:2, :])**2 sdunw_out[1:length * 2:2, :] += wgt0 * sdunw[1:length * 2:2, :] #output weighting average index = np.nonzero(flag != 0) (sdunw_out[0:length * 2:2, :])[index] = np.sqrt( (sdunw_out[0:length * 2:2, :])[index] / flag[index]) (sdunw_out[1:length * 2:2, :] )[index] = (sdunw_out[1:length * 2:2, :])[index] / wgt[index] if not self.unionSd: (sdunw_out[0:length * 2:2, :])[np.nonzero(flag < nsd)] = 0 (sdunw_out[1:length * 2:2, :])[np.nonzero(flag < nsd)] = 0 sdunw_out.astype(np.float32).tofile( self._insar.maskedAzimuthDeformationSd[-1]) create_xml(self._insar.maskedAzimuthDeformationSd[-1], width, length, 'rmg') os.chdir('../') catalog.printToLog(logger, "runUnwrapSnaphuSd") self._insar.procDoc.addAllFromCatalog(catalog)
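# Self-contained sketch of the weighting used when combining the nsd spectral-diversity
# products above: pair i gets weight (i+1)**2, the deformation channel is a weighted mean
# and the amplitude channel a root-mean-square. The (amplitude, deformation) layers here
# are synthetic placeholders.
import numpy as np

nsd, shape = 3, (100, 120)
rng = np.random.default_rng(2)
pairs = [(rng.uniform(0.5, 1.5, shape), rng.normal(0.0, 0.05, shape)) for _ in range(nsd)]

ampOut = np.zeros(shape)
defoOut = np.zeros(shape)
flag = np.zeros(shape)
wgt = np.zeros(shape)
for i, (amp, defo) in enumerate(pairs):
    wgt0 = (i + 1) ** 2                      # heavier weight for larger burst-cycle separation
    flag += (defo != 0)
    wgt += wgt0 * (defo != 0)
    ampOut += amp ** 2
    defoOut += wgt0 * defo

index = np.nonzero(flag != 0)
ampOut[index] = np.sqrt(ampOut[index] / flag[index])    # RMS amplitude
defoOut[index] = defoOut[index] / wgt[index]            # weighted mean deformation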
def mergeBurstsVirtual(frame, referenceFrame, fileList, outfile, validOnly=True): ''' Merging using VRTs. ''' from VRTManager import Swath, VRTConstructor swaths = [Swath(x) for x in frame] refSwaths = [Swath(x) for x in referenceFrame] ###Identify the 4 corners and dimensions #topSwath = min(swaths, key = lambda x: x.sensingStart) #botSwath = max(swaths, key = lambda x: x.sensingStop) #leftSwath = min(swaths, key = lambda x: x.nearRange) #rightSwath = max(swaths, key = lambda x: x.farRange) topSwath = min(refSwaths, key=lambda x: x.sensingStart) botSwath = max(refSwaths, key=lambda x: x.sensingStop) leftSwath = min(refSwaths, key=lambda x: x.nearRange) rightSwath = max(refSwaths, key=lambda x: x.farRange) totalWidth = int( np.round((rightSwath.farRange - leftSwath.nearRange) / leftSwath.dr + 1)) totalLength = int( np.round((botSwath.sensingStop - topSwath.sensingStart).total_seconds() / topSwath.dt + 1)) ###Determine number of bands and type img = isceobj.createImage() img.load(fileList[0][0] + '.xml') bands = img.bands dtype = img.dataType img.filename = outfile #####Start the builder ###Now start building the VRT and then render it builder = VRTConstructor(totalLength, totalWidth) builder.setReferenceTime(topSwath.sensingStart) builder.setReferenceRange(leftSwath.nearRange) builder.setTimeSpacing(topSwath.dt) builder.setRangeSpacing(leftSwath.dr) builder.setDataType(dtype.upper()) builder.initVRT() ####Render XML and default VRT. VRT will be overwritten. img.width = totalWidth img.length = totalLength img.renderHdr() for bnd in range(1, bands + 1): builder.initBand(band=bnd) for ind, swath in enumerate(swaths): ####Relative path relfilelist = [ os.path.relpath(x, os.path.dirname(outfile)) for x in fileList[ind] ] builder.addSwath(swath, relfilelist, band=bnd, validOnly=validOnly) builder.finishBand() builder.finishVRT() with open(outfile + '.vrt', 'w') as fid: fid.write(builder.vrt)
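# The mosaic grid above is sized from the reference swaths only, so repeated runs over a
# stack share one output geometry. A worked example of that arithmetic with made-up
# range/time extents:
import datetime
import numpy as np

nearRange, farRange = 800000.0, 890000.0     # metres (placeholder)
dr = 2.33                                    # range pixel spacing in metres (placeholder)
sensingStart = datetime.datetime(2020, 1, 1, 0, 0, 0)
sensingStop = sensingStart + datetime.timedelta(seconds=3.1)
dt = 2.06e-3                                 # seconds per azimuth line (placeholder)

totalWidth = int(np.round((farRange - nearRange) / dr + 1))
totalLength = int(np.round((sensingStop - sensingStart).total_seconds() / dt + 1))
print(totalWidth, totalLength)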
def runCoregCc(self): '''coregister bursts by cross correlation ''' catalog = isceobj.Catalog.createCatalog(self._insar.procDoc.name) self.updateParamemetersFromUser() referenceTrack = self._insar.loadTrack(reference=True) secondaryTrack = self._insar.loadTrack(reference=False) #demFile = os.path.abspath(self._insar.dem) #wbdFile = os.path.abspath(self._insar.wbd) ############################################################################### self._insar.rangeResidualOffsetCc = [ [] for i in range(len(referenceTrack.frames)) ] self._insar.azimuthResidualOffsetCc = [ [] for i in range(len(referenceTrack.frames)) ] for i, frameNumber in enumerate(self._insar.referenceFrames): frameDir = 'f{}_{}'.format(i + 1, frameNumber) os.chdir(frameDir) for j, swathNumber in enumerate( range(self._insar.startingSwath, self._insar.endingSwath + 1)): swathDir = 's{}'.format(swathNumber) os.chdir(swathDir) print('processing frame {}, swath {}'.format( frameNumber, swathNumber)) referenceSwath = referenceTrack.frames[i].swaths[j] secondarySwath = secondaryTrack.frames[i].swaths[j] ################################################## # estimate cross-correlation offsets ################################################## #compute number of offsets to use wbdImg = isceobj.createImage() wbdImg.load(self._insar.wbdOut + '.xml') width = wbdImg.width length = wbdImg.length #initial number of offsets to use numberOfOffsets = 800 #compute land ratio to further determine the number of offsets to use if self.useWbdForNumberOffsets: wbd = np.memmap(self._insar.wbdOut, dtype='byte', mode='r', shape=(length, width)) landRatio = np.sum(wbd == 0) / length / width del wbd if (landRatio <= 0.00125): print( '\n\nWARNING: land area too small for estimating offsets between reference and secondary magnitudes at frame {}, swath {}' .format(frameNumber, swathNumber)) print('set offsets to zero\n\n') self._insar.rangeResidualOffsetCc[i].append(0.0) self._insar.azimuthResidualOffsetCc[i].append(0.0) catalog.addItem( 'warning message', 'land area too small for estimating offsets between reference and secondary magnitudes at frame {}, swath {}' .format(frameNumber, swathNumber), 'runCoregCc') continue #total number of offsets to use numberOfOffsets /= landRatio #allocate number of offsets in range/azimuth according to image width/length #number of offsets to use in range/azimuth numberOfOffsetsRange = int( np.sqrt(numberOfOffsets * width / length)) numberOfOffsetsAzimuth = int( length / width * np.sqrt(numberOfOffsets * width / length)) #this should be better? 
numberOfOffsetsRange = int(np.sqrt(numberOfOffsets)) numberOfOffsetsAzimuth = int(np.sqrt(numberOfOffsets)) if numberOfOffsetsRange > int(width / 2): numberOfOffsetsRange = int(width / 2) if numberOfOffsetsAzimuth > int(length / 2): numberOfOffsetsAzimuth = int(length / 2) if numberOfOffsetsRange < 10: numberOfOffsetsRange = 10 if numberOfOffsetsAzimuth < 10: numberOfOffsetsAzimuth = 10 #user's settings if self.numberRangeOffsets != None: numberOfOffsetsRange = self.numberRangeOffsets[i][j] if self.numberAzimuthOffsets != None: numberOfOffsetsAzimuth = self.numberAzimuthOffsets[i][j] catalog.addItem( 'number of range offsets at frame {}, swath {}'.format( frameNumber, swathNumber), '{}'.format(numberOfOffsetsRange), 'runCoregCc') catalog.addItem( 'number of azimuth offsets at frame {}, swath {}'.format( frameNumber, swathNumber), '{}'.format(numberOfOffsetsAzimuth), 'runCoregCc') #need to cp to current directory to make it (gdal) work if not os.path.isfile(self._insar.referenceMagnitude): os.symlink( os.path.join(self._insar.referenceBurstPrefix, self._insar.referenceMagnitude), self._insar.referenceMagnitude) #shutil.copy2() can overwrite shutil.copy2( os.path.join(self._insar.referenceBurstPrefix, self._insar.referenceMagnitude + '.vrt'), self._insar.referenceMagnitude + '.vrt') shutil.copy2( os.path.join(self._insar.referenceBurstPrefix, self._insar.referenceMagnitude + '.xml'), self._insar.referenceMagnitude + '.xml') if not os.path.isfile(self._insar.secondaryMagnitude): os.symlink( os.path.join( self._insar.secondaryBurstPrefix + '_1_coreg_geom', self._insar.secondaryMagnitude), self._insar.secondaryMagnitude) #shutil.copy2() can overwrite shutil.copy2( os.path.join( self._insar.secondaryBurstPrefix + '_1_coreg_geom', self._insar.secondaryMagnitude + '.vrt'), self._insar.secondaryMagnitude + '.vrt') shutil.copy2( os.path.join( self._insar.secondaryBurstPrefix + '_1_coreg_geom', self._insar.secondaryMagnitude + '.xml'), self._insar.secondaryMagnitude + '.xml') #matching ampcor = Ampcor(name='insarapp_slcs_ampcor') ampcor.configure() mMag = isceobj.createImage() mMag.load(self._insar.referenceMagnitude + '.xml') mMag.setAccessMode('read') mMag.createImage() sMag = isceobj.createImage() sMag.load(self._insar.secondaryMagnitude + '.xml') sMag.setAccessMode('read') sMag.createImage() ampcor.setImageDataType1('real') ampcor.setImageDataType2('real') ampcor.setReferenceSlcImage(mMag) ampcor.setSecondarySlcImage(sMag) #MATCH REGION rgoff = 0 azoff = 0 #it seems that we cannot use 0, haven't look into the problem if rgoff == 0: rgoff = 1 if azoff == 0: azoff = 1 firstSample = 1 if rgoff < 0: firstSample = int(35 - rgoff) firstLine = 1 if azoff < 0: firstLine = int(35 - azoff) ampcor.setAcrossGrossOffset(rgoff) ampcor.setDownGrossOffset(azoff) ampcor.setFirstSampleAcross(firstSample) ampcor.setLastSampleAcross(mMag.width) ampcor.setNumberLocationAcross(numberOfOffsetsRange) ampcor.setFirstSampleDown(firstLine) ampcor.setLastSampleDown(mMag.length) ampcor.setNumberLocationDown(numberOfOffsetsAzimuth) #MATCH PARAMETERS ampcor.setWindowSizeWidth(64) ampcor.setWindowSizeHeight(64) #note this is the half width/length of search area, so number of resulting correlation samples: 8*2+1 ampcor.setSearchWindowSizeWidth(8) ampcor.setSearchWindowSizeHeight(8) #REST OF THE STUFF ampcor.setAcrossLooks(1) ampcor.setDownLooks(1) ampcor.setOversamplingFactor(64) ampcor.setZoomWindowSize(16) #1. The following not set #Matching Scale for Sample/Line Directions (-) = 1. 1. #should add the following in Ampcor.py? 
#if not set, in this case, Ampcor.py'value is also 1. 1. #ampcor.setScaleFactorX(1.) #ampcor.setScaleFactorY(1.) #MATCH THRESHOLDS AND DEBUG DATA #2. The following not set #in roi_pac the value is set to 0 1 #in isce the value is set to 0.001 1000.0 #SNR and Covariance Thresholds (-) = {s1} {s2} #should add the following in Ampcor? #THIS SHOULD BE THE ONLY THING THAT IS DIFFERENT FROM THAT OF ROI_PAC #ampcor.setThresholdSNR(0) #ampcor.setThresholdCov(1) ampcor.setDebugFlag(False) ampcor.setDisplayFlag(False) #in summary, only two things not set which are indicated by 'The following not set' above. #run ampcor ampcor.ampcor() offsets = ampcor.getOffsetField() refinedOffsets = cullOffsetsRoipac(offsets, numThreshold=50) #finalize image, and re-create it #otherwise the file pointer is still at the end of the image mMag.finalizeImage() sMag.finalizeImage() #clear up os.remove(self._insar.referenceMagnitude) os.remove(self._insar.referenceMagnitude + '.vrt') os.remove(self._insar.referenceMagnitude + '.xml') os.remove(self._insar.secondaryMagnitude) os.remove(self._insar.secondaryMagnitude + '.vrt') os.remove(self._insar.secondaryMagnitude + '.xml') #compute average offsets to use in resampling if refinedOffsets == None: rangeOffset = 0 azimuthOffset = 0 self._insar.rangeResidualOffsetCc[i].append(rangeOffset) self._insar.azimuthResidualOffsetCc[i].append(azimuthOffset) print( '\n\nWARNING: too few offsets left in matching reference and secondary magnitudes at frame {}, swath {}' .format(frameNumber, swathNumber)) print('set offsets to zero\n\n') catalog.addItem( 'warning message', 'too few offsets left in matching reference and secondary magnitudes at frame {}, swath {}' .format(frameNumber, swathNumber), 'runCoregCc') else: rangeOffset, azimuthOffset = meanOffset(refinedOffsets) #for range offset, need to compute from a polynomial #see components/isceobj/Location/Offset.py and components/isceobj/Util/Library/python/Poly2D.py for definations (azimuthPoly, rangePoly) = refinedOffsets.getFitPolynomials(rangeOrder=2, azimuthOrder=2) #make a deep copy, otherwise it also changes original coefficient list of rangePoly, which affects following rangePoly(*, *) computation polyCoeff = copy.deepcopy(rangePoly.getCoeffs()) rgIndex = (np.arange(width) - rangePoly.getMeanRange() ) / rangePoly.getNormRange() azIndex = (np.arange(length) - rangePoly.getMeanAzimuth() ) / rangePoly.getNormAzimuth() rangeOffset = polyCoeff[0][0] + polyCoeff[0][1]*rgIndex[None,:] + polyCoeff[0][2]*rgIndex[None,:]**2 + \ (polyCoeff[1][0] + polyCoeff[1][1]*rgIndex[None,:]) * azIndex[:, None] + \ polyCoeff[2][0] * azIndex[:, None]**2 polyCoeff.append([ rangePoly.getMeanRange(), rangePoly.getNormRange(), rangePoly.getMeanAzimuth(), rangePoly.getNormAzimuth() ]) self._insar.rangeResidualOffsetCc[i].append(polyCoeff) self._insar.azimuthResidualOffsetCc[i].append(azimuthOffset) catalog.addItem( 'range residual offset at {} {} at frame {}, swath {}'. format(0, 0, frameNumber, swathNumber), '{}'.format(rangePoly(0, 0)), 'runCoregCc') catalog.addItem( 'range residual offset at {} {} at frame {}, swath {}'. format(0, width - 1, frameNumber, swathNumber), '{}'.format(rangePoly(0, width - 1)), 'runCoregCc') catalog.addItem( 'range residual offset at {} {} at frame {}, swath {}'. format(length - 1, 0, frameNumber, swathNumber), '{}'.format(rangePoly(length - 1, 0)), 'runCoregCc') catalog.addItem( 'range residual offset at {} {} at frame {}, swath {}'. 
format(length - 1, width - 1, frameNumber, swathNumber), '{}'.format(rangePoly(length - 1, width - 1)), 'runCoregCc') catalog.addItem( 'azimuth residual offset at frame {}, swath {}'.format( frameNumber, swathNumber), '{}'.format(azimuthOffset), 'runCoregCc') DEBUG = False if DEBUG: print('+++++++++++++++++++++++++++++') print(rangeOffset[0, 0], rangePoly(0, 0)) print(rangeOffset[0, width - 1], rangePoly(0, width - 1)) print(rangeOffset[length - 1, 0], rangePoly(length - 1, 0)) print(rangeOffset[length - 1, width - 1], rangePoly(length - 1, width - 1)) print( rangeOffset[int((length - 1) / 2), int((width - 1) / 2)], rangePoly(int((length - 1) / 2), int((width - 1) / 2))) print('+++++++++++++++++++++++++++++') ################################################## # resample bursts ################################################## secondaryBurstResampledDir = self._insar.secondaryBurstPrefix + '_2_coreg_cc' #interferogramDir = self._insar.referenceBurstPrefix + '-' + self._insar.secondaryBurstPrefix + '_coreg_geom' interferogramDir = 'burst_interf_2_coreg_cc' interferogramPrefix = self._insar.referenceBurstPrefix + '-' + self._insar.secondaryBurstPrefix resampleBursts(referenceSwath, secondarySwath, self._insar.referenceBurstPrefix, self._insar.secondaryBurstPrefix, secondaryBurstResampledDir, interferogramDir, self._insar.referenceBurstPrefix, self._insar.secondaryBurstPrefix, self._insar.secondaryBurstPrefix, interferogramPrefix, self._insar.rangeOffset, self._insar.azimuthOffset, rangeOffsetResidual=rangeOffset, azimuthOffsetResidual=azimuthOffset) ################################################## # mosaic burst amplitudes and interferograms ################################################## os.chdir(secondaryBurstResampledDir) mosaicBurstAmplitude(referenceSwath, self._insar.secondaryBurstPrefix, self._insar.secondaryMagnitude, numberOfLooksThreshold=4) os.chdir('../') os.chdir(interferogramDir) mosaicBurstInterferogram(referenceSwath, interferogramPrefix, self._insar.interferogram, numberOfLooksThreshold=4) os.chdir('../') ################################################## # final amplitude and interferogram ################################################## amp = np.zeros((referenceSwath.numberOfLines, 2 * referenceSwath.numberOfSamples), dtype=np.float32) amp[0:, 1:referenceSwath.numberOfSamples*2:2] = np.fromfile(os.path.join(secondaryBurstResampledDir, self._insar.secondaryMagnitude), \ dtype=np.float32).reshape(referenceSwath.numberOfLines, referenceSwath.numberOfSamples) amp[0:, 0:referenceSwath.numberOfSamples*2:2] = np.fromfile(os.path.join(self._insar.referenceBurstPrefix, self._insar.referenceMagnitude), \ dtype=np.float32).reshape(referenceSwath.numberOfLines, referenceSwath.numberOfSamples) amp.astype(np.float32).tofile(self._insar.amplitude) create_xml(self._insar.amplitude, referenceSwath.numberOfSamples, referenceSwath.numberOfLines, 'amp') os.rename( os.path.join(interferogramDir, self._insar.interferogram), self._insar.interferogram) os.rename( os.path.join(interferogramDir, self._insar.interferogram + '.vrt'), self._insar.interferogram + '.vrt') os.rename( os.path.join(interferogramDir, self._insar.interferogram + '.xml'), self._insar.interferogram + '.xml') os.chdir('../') os.chdir('../') ############################################################################### catalog.printToLog(logger, "runCoregCc") self._insar.procDoc.addAllFromCatalog(catalog)
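# Standalone sketch of how the per-pixel range offset is rebuilt from the 2nd-order Ampcor
# fit above: evaluate the coefficients on range/azimuth indices normalized by the fit's
# mean/norm constants. Coefficient values and normalization constants are placeholders.
import numpy as np

width, length = 5000, 4000
polyCoeff = [[0.12, 1.0e-5, -2.0e-9],        # constant, rg, rg**2
             [3.0e-5, -1.0e-9],              # az, az*rg
             [5.0e-10]]                      # az**2
meanRg, normRg = width / 2.0, width / 2.0
meanAz, normAz = length / 2.0, length / 2.0

rgIndex = (np.arange(width) - meanRg) / normRg
azIndex = (np.arange(length) - meanAz) / normAz
rangeOffset = (polyCoeff[0][0]
               + polyCoeff[0][1] * rgIndex[None, :]
               + polyCoeff[0][2] * rgIndex[None, :] ** 2
               + (polyCoeff[1][0] + polyCoeff[1][1] * rgIndex[None, :]) * azIndex[:, None]
               + polyCoeff[2][0] * azIndex[:, None] ** 2)
print(rangeOffset.shape)                     # (length, width)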
def mergeBursts(frame, fileList, outfile, method='top'): ''' Merge burst products into single file. Simple numpy based stitching ''' ###Check against metadata if frame.numberOfBursts != len(fileList): print( 'Warning : Number of burst products does not appear to match number of bursts in metadata' ) t0 = frame.bursts[0].sensingStart dt = frame.bursts[0].azimuthTimeInterval width = frame.bursts[0].numberOfSamples ####### tstart = frame.bursts[0].sensingStart tend = frame.bursts[-1].sensingStop nLines = int(np.round((tend - tstart).total_seconds() / dt)) + 1 print('Expected total nLines: ', nLines) img = isceobj.createImage() img.load(fileList[0] + '.xml') bands = img.bands scheme = img.scheme npType = IML.NUMPY_type(img.dataType) azMasterOff = [] for index in range(frame.numberOfBursts): burst = frame.bursts[index] soff = burst.sensingStart + datetime.timedelta( seconds=(burst.firstValidLine * dt)) start = int(np.round((soff - tstart).total_seconds() / dt)) end = start + burst.numValidLines azMasterOff.append([start, end]) print('Burst: ', index, [start, end]) if index == 0: linecount = start outMap = IML.memmap(outfile, mode='write', nchannels=bands, nxx=width, nyy=nLines, scheme=scheme, dataType=npType) for index in range(frame.numberOfBursts): curBurst = frame.bursts[index] curLimit = azMasterOff[index] curMap = IML.mmapFromISCE(fileList[index], logging) #####If middle burst if index > 0: topBurst = frame.bursts[index - 1] topLimit = azMasterOff[index - 1] topMap = IML.mmapFromISCE(fileList[index - 1], logging) olap = topLimit[1] - curLimit[0] print("olap: ", olap) if olap <= 0: raise Exception('No Burst Overlap') for bb in range(bands): topData = topMap.bands[bb][ topBurst.firstValidLine:topBurst.firstValidLine + topBurst.numValidLines, :] curData = curMap.bands[bb][ curBurst.firstValidLine:curBurst.firstValidLine + curBurst.numValidLines, :] im1 = topData[-olap:, :] im2 = curData[:olap, :] if method == 'avg': data = 0.5 * (im1 + im2) elif method == 'top': data = im1 elif method == 'bot': data = im2 else: raise Exception('Method should be top/bot/avg') outMap.bands[bb][linecount:linecount + olap, :] = data tlim = olap else: tlim = 0 linecount += tlim if index != (frame.numberOfBursts - 1): botBurst = frame.bursts[index + 1] botLimit = azMasterOff[index + 1] olap = curLimit[1] - botLimit[0] if olap < 0: raise Exception('No Burst Overlap') blim = botLimit[0] - curLimit[0] else: blim = curBurst.numValidLines lineout = blim - tlim for bb in range(bands): curData = curMap.bands[bb][ curBurst.firstValidLine:curBurst.firstValidLine + curBurst.numValidLines, :] outMap.bands[bb][linecount:linecount + lineout, :] = curData[tlim:blim, :] linecount += lineout curMap = None topMap = None IML.renderISCEXML(outfile, bands, nLines, width, img.dataType, scheme) oimg = isceobj.createImage() oimg.load(outfile + '.xml') oimg.imageType = img.imageType oimg.renderHdr() try: outMap.bands[0].base.base.flush() except: pass
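# Compact sketch of the seam handling in mergeBursts above for two bursts that overlap by
# a known number of lines: 'top' keeps the earlier burst, 'bot' the later one, 'avg' blends
# them. Synthetic single-band arrays stand in for the memory-mapped burst products.
import numpy as np

def mergeTwoBursts(top, bot, olap, method='top'):
    if olap <= 0:
        raise Exception('No Burst Overlap')
    if method == 'avg':
        seam = 0.5 * (top[-olap:, :] + bot[:olap, :])
    elif method == 'top':
        seam = top[-olap:, :]
    elif method == 'bot':
        seam = bot[:olap, :]
    else:
        raise Exception('Method should be top/bot/avg')
    return np.vstack([top[:-olap, :], seam, bot[olap:, :]])

top = np.ones((100, 50), dtype=np.float32)
bot = 2 * np.ones((90, 50), dtype=np.float32)
print(mergeTwoBursts(top, bot, olap=12, method='avg').shape)   # (178, 50)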
def runGeo2rdrGPU(info, rdict, misreg_az=0.0, misreg_rg=0.0): from zerodop.GPUgeo2rdr.GPUgeo2rdr import PyGeo2rdr from isceobj.Planet.Planet import Planet from iscesys import DateTimeUtil as DTU latImage = isceobj.createImage() latImage.load(rdict['lat'] + '.xml') latImage.setAccessMode('READ') latImage.createImage() lonImage = isceobj.createImage() lonImage.load(rdict['lon'] + '.xml') lonImage.setAccessMode('READ') lonImage.createImage() demImage = isceobj.createImage() demImage.load(rdict['hgt'] + '.xml') demImage.setAccessMode('READ') demImage.createImage() misreg_az = misreg_az * info.azimuthTimeInterval delta = datetime.timedelta(seconds=misreg_az) print('Additional time offset applied in geo2rdr: {0} secs'.format( misreg_az)) print( 'Additional range offset applied in geo2rdr: {0} m'.format(misreg_rg)) #####Run Geo2rdr planet = Planet(pname='Earth') grdr = PyGeo2rdr() grdr.setRangePixelSpacing(info.rangePixelSize) grdr.setPRF(1.0 / info.azimuthTimeInterval) grdr.setRadarWavelength(info.radarWavelength) grdr.createOrbit(0, len(info.orbit.stateVectors.list)) count = 0 for sv in info.orbit.stateVectors.list: td = DTU.seconds_since_midnight(sv.getTime()) pos = sv.getPosition() vel = sv.getVelocity() grdr.setOrbitVector(count, td, pos[0], pos[1], pos[2], vel[0], vel[1], vel[2]) count += 1 grdr.setOrbitMethod(0) grdr.setWidth(info.numberOfSamples) grdr.setLength(info.numberOfLines) grdr.setSensingStart(DTU.seconds_since_midnight(info.sensingStart - delta)) grdr.setRangeFirstSample(info.startingRange - misreg_rg) grdr.setNumberRangeLooks(1) grdr.setNumberAzimuthLooks(1) grdr.setEllipsoidMajorSemiAxis(planet.ellipsoid.a) grdr.setEllipsoidEccentricitySquared(planet.ellipsoid.e2) grdr.createPoly(0, 0., 1.) grdr.setPolyCoeff(0, 0.) grdr.setDemLength(demImage.getLength()) grdr.setDemWidth(demImage.getWidth()) grdr.setBistaticFlag(0) rangeOffsetImage = isceobj.createImage() rangeOffsetImage.setFilename(rdict['rangeOffName']) rangeOffsetImage.setAccessMode('write') rangeOffsetImage.setDataType('FLOAT') rangeOffsetImage.setCaster('write', 'DOUBLE') rangeOffsetImage.setWidth(demImage.width) rangeOffsetImage.createImage() azimuthOffsetImage = isceobj.createImage() azimuthOffsetImage.setFilename(rdict['azOffName']) azimuthOffsetImage.setAccessMode('write') azimuthOffsetImage.setDataType('FLOAT') azimuthOffsetImage.setCaster('write', 'DOUBLE') azimuthOffsetImage.setWidth(demImage.width) azimuthOffsetImage.createImage() grdr.setLatAccessor(latImage.getImagePointer()) grdr.setLonAccessor(lonImage.getImagePointer()) grdr.setHgtAccessor(demImage.getImagePointer()) grdr.setAzAccessor(0) grdr.setRgAccessor(0) grdr.setAzOffAccessor(azimuthOffsetImage.getImagePointer()) grdr.setRgOffAccessor(rangeOffsetImage.getImagePointer()) grdr.geo2rdr() rangeOffsetImage.finalizeImage() rangeOffsetImage.renderHdr() azimuthOffsetImage.finalizeImage() azimuthOffsetImage.renderHdr() latImage.finalizeImage() lonImage.finalizeImage() demImage.finalizeImage() return pass
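# Hedged usage sketch for the GPU geo2rdr wrapper above. The rdict keys are exactly the
# ones the function reads ('lat', 'lon', 'hgt') and writes ('rangeOffName', 'azOffName');
# the file paths and the `burst` metadata object are placeholders assumed to exist. Note
# that misreg_az is given in lines and converted to seconds internally via
# info.azimuthTimeInterval.
rdict = {
    'lat': 'geom/lat.rdr',
    'lon': 'geom/lon.rdr',
    'hgt': 'geom/hgt.rdr',
    'rangeOffName': 'offsets/range.off',
    'azOffName': 'offsets/azimuth.off',
}
runGeo2rdrGPU(burst, rdict, misreg_az=0.0, misreg_rg=0.0)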
def multilook(infile, outname=None, alks=5, rlks=15, multilook_tool="isce", no_data=None): ''' Take looks. ''' if multilook_tool == "gdal": from osgeo import gdal print("multi looking using gdal ...") if outname is None: spl = os.path.splitext(infile) ext = '.{0}alks_{1}rlks'.format(alks, rlks) outname = spl[0] + ext + spl[1] print(infile) ds = gdal.Open(infile + ".vrt", gdal.GA_ReadOnly) xSize = ds.RasterXSize ySize = ds.RasterYSize outXSize = xSize / int(rlks) outYSize = ySize / int(alks) if no_data: gdalTranslateOpts = gdal.TranslateOptions(format="ENVI", width=outXSize, height=outYSize, noData=no_data) else: gdalTranslateOpts = gdal.TranslateOptions(format="ENVI", width=outXSize, height=outYSize) gdal.Translate(outname, ds, options=gdalTranslateOpts) ds = None ds = gdal.Open(outname, gdal.GA_ReadOnly) gdal.Translate(outname + ".vrt", ds, options=gdal.TranslateOptions(format="VRT")) ds = None else: from mroipac.looks.Looks import Looks print('Multilooking {0} ...'.format(infile)) inimg = isceobj.createImage() inimg.load(infile + '.xml') if outname is None: spl = os.path.splitext(inimg.filename) ext = '.{0}alks_{1}rlks'.format(alks, rlks) outname = spl[0] + ext + spl[1] lkObj = Looks() lkObj.setDownLooks(alks) lkObj.setAcrossLooks(rlks) lkObj.setInputImage(inimg) lkObj.setOutputFilename(outname) lkObj.looks() return outname
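# Hedged usage of multilook() above for both backends; the file names are placeholders and
# the input is assumed to carry the usual ISCE .xml/.vrt sidecars. When outname is omitted,
# a '<alks>alks_<rlks>rlks' suffix is inserted into the input file name.
out1 = multilook('filt_topophase.flat', alks=4, rlks=12, multilook_tool='gdal')
out2 = multilook('filt_topophase.flat', outname='filt_topophase_ml.flat',
                 alks=4, rlks=12)            # default mroipac Looks backend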
def topoGPU(referenceTrack, numberRangeLooks, numberAzimuthLooks, demFile, latFile, lonFile, hgtFile, losFile): ''' Try with GPU module. ''' import datetime import numpy as np from isceobj.Planet.Planet import Planet from zerodop.GPUtopozero.GPUtopozero import PyTopozero from isceobj.Util.Poly2D import Poly2D from iscesys import DateTimeUtil as DTU pointingDirection = {'right': -1, 'left' :1} #creat poynomials polyDoppler = Poly2D(name='topsApp_dopplerPoly') polyDoppler.setWidth(referenceTrack.numberOfSamples) polyDoppler.setLength(referenceTrack.numberOfLines) polyDoppler.setNormRange(1.0) polyDoppler.setNormAzimuth(1.0) polyDoppler.setMeanRange(0.0) polyDoppler.setMeanAzimuth(0.0) polyDoppler.initPoly(rangeOrder=0,azimuthOrder=0, coeffs=[[0.]]) polyDoppler.createPoly2D() slantRangeImage = Poly2D() slantRangeImage.setWidth(referenceTrack.numberOfSamples) slantRangeImage.setLength(referenceTrack.numberOfLines) slantRangeImage.setNormRange(1.0) slantRangeImage.setNormAzimuth(1.0) slantRangeImage.setMeanRange(0.) slantRangeImage.setMeanAzimuth(0.) slantRangeImage.initPoly(rangeOrder=1,azimuthOrder=0, coeffs=[[referenceTrack.startingRange + (numberRangeLooks-1.0)/2.0*referenceTrack.rangePixelSize,numberRangeLooks * referenceTrack.rangePixelSize]]) slantRangeImage.createPoly2D() #creat images latImage = isceobj.createImage() latImage.initImage(latFile, 'write', referenceTrack.numberOfSamples, 'DOUBLE') latImage.createImage() lonImage = isceobj.createImage() lonImage.initImage(lonFile, 'write', referenceTrack.numberOfSamples, 'DOUBLE') lonImage.createImage() losImage = isceobj.createImage() losImage.initImage(losFile, 'write', referenceTrack.numberOfSamples, 'FLOAT', bands=2, scheme='BIL') losImage.setCaster('write', 'DOUBLE') losImage.createImage() heightImage = isceobj.createImage() heightImage.initImage(hgtFile, 'write', referenceTrack.numberOfSamples, 'DOUBLE') heightImage.createImage() demImage = isceobj.createDemImage() demImage.load(demFile + '.xml') demImage.setCaster('read', 'FLOAT') demImage.createImage() #compute a few things t0 = referenceTrack.sensingStart + datetime.timedelta(seconds=(numberAzimuthLooks-1.0)/2.0*referenceTrack.azimuthLineInterval) orb = referenceTrack.orbit pegHdg = np.radians( orb.getENUHeading(t0)) elp = Planet(pname='Earth').ellipsoid #call gpu topo topo = PyTopozero() topo.set_firstlat(demImage.getFirstLatitude()) topo.set_firstlon(demImage.getFirstLongitude()) topo.set_deltalat(demImage.getDeltaLatitude()) topo.set_deltalon(demImage.getDeltaLongitude()) topo.set_major(elp.a) topo.set_eccentricitySquared(elp.e2) topo.set_rSpace(numberRangeLooks * referenceTrack.rangePixelSize) topo.set_r0(referenceTrack.startingRange + (numberRangeLooks-1.0)/2.0*referenceTrack.rangePixelSize) topo.set_pegHdg(pegHdg) topo.set_prf(1.0 / (numberAzimuthLooks*referenceTrack.azimuthLineInterval)) topo.set_t0(DTU.seconds_since_midnight(t0)) topo.set_wvl(referenceTrack.radarWavelength) topo.set_thresh(.05) topo.set_demAccessor(demImage.getImagePointer()) topo.set_dopAccessor(polyDoppler.getPointer()) topo.set_slrngAccessor(slantRangeImage.getPointer()) topo.set_latAccessor(latImage.getImagePointer()) topo.set_lonAccessor(lonImage.getImagePointer()) topo.set_losAccessor(losImage.getImagePointer()) topo.set_heightAccessor(heightImage.getImagePointer()) topo.set_incAccessor(0) topo.set_maskAccessor(0) topo.set_numIter(25) topo.set_idemWidth(demImage.getWidth()) topo.set_idemLength(demImage.getLength()) topo.set_ilrl(pointingDirection[referenceTrack.pointingDirection]) 
topo.set_extraIter(10) topo.set_length(referenceTrack.numberOfLines) topo.set_width(referenceTrack.numberOfSamples) topo.set_nRngLooks(1) topo.set_nAzLooks(1) topo.set_demMethod(5) # BIQUINTIC METHOD topo.set_orbitMethod(0) # HERMITE # Need to simplify orbit stuff later nvecs = len(orb._stateVectors) topo.set_orbitNvecs(nvecs) topo.set_orbitBasis(1) # Is this ever different? topo.createOrbit() # Initializes the empty orbit to the right allocated size count = 0 for sv in orb._stateVectors: td = DTU.seconds_since_midnight(sv.getTime()) pos = sv.getPosition() vel = sv.getVelocity() topo.set_orbitVector(count,td,pos[0],pos[1],pos[2],vel[0],vel[1],vel[2]) count += 1 topo.runTopo() #tidy up latImage.addDescription('Pixel-by-pixel latitude in degrees.') latImage.finalizeImage() latImage.renderHdr() lonImage.addDescription('Pixel-by-pixel longitude in degrees.') lonImage.finalizeImage() lonImage.renderHdr() heightImage.addDescription('Pixel-by-pixel height in meters.') heightImage.finalizeImage() heightImage.renderHdr() descr = '''Two channel Line-Of-Sight geometry image (all angles in degrees). Represents vector drawn from target to platform. Channel 1: Incidence angle measured from vertical at target (always +ve). Channel 2: Azimuth angle measured from North in Anti-clockwise direction.''' losImage.setImageType('bil') losImage.addDescription(descr) losImage.finalizeImage() losImage.renderHdr() demImage.finalizeImage() if slantRangeImage: try: slantRangeImage.finalizeImage() except: pass
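# Hedged call sketch for the GPU topo driver above; referenceTrack is assumed to be a
# loaded ISCE track product, the look numbers and all file paths are placeholders.
topoGPU(referenceTrack, numberRangeLooks=1, numberAzimuthLooks=14,
        demFile='dem.wgs84',
        latFile='insar/lat.rdr', lonFile='insar/lon.rdr',
        hgtFile='insar/hgt.rdr', losFile='insar/los.rdr')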
def runCoherence(self, method="phase_gradient"): logger.info("Calculating Coherence") import os resampAmpImage = os.path.join(self.insar.ifgDirname, self.insar.ifgFilename) topoflatIntFilename = os.path.join(self.insar.ifgDirname, self.insar.ifgFilename) if '.flat' in resampAmpImage: resampAmpImage = resampAmpImage.replace('.flat', '.amp') elif '.int' in resampAmpImage: resampAmpImage = resampAmpImage.replace('.int', '.amp') else: resampAmpImage += '.amp' # Initialize the amplitude # resampAmpImage = self.insar.resampAmpImage # ampImage = isceobj.createAmpImage() # IU.copyAttributes(resampAmpImage, ampImage) # ampImage.setAccessMode('read') # ampImage.createImage() # ampImage = self.insar.getResampOnlyAmp().copy(access_mode='read') ampImage = isceobj.createImage() ampImage.load(resampAmpImage + '.xml') ampImage.setAccessMode('READ') ampImage.createImage() # Initialize the flattened inteferogram # topoflatIntFilename = self.insar.topophaseFlatFilename intImage = isceobj.createImage() intImage.load(topoflatIntFilename + '.xml') intImage.setAccessMode('READ') intImage.createImage() # widthInt = self.insar.resampIntImage.getWidth() # intImage.setFilename(topoflatIntFilename) # intImage.setWidth(widthInt) # intImage.setAccessMode('read') # intImage.createImage() # Create the coherence image cohFilename = topoflatIntFilename.replace('.flat', '.cor') cohImage = isceobj.createOffsetImage() cohImage.setFilename(cohFilename) cohImage.setWidth(intImage.width) cohImage.setAccessMode('write') cohImage.createImage() cor = Correlation() cor.configure() cor.wireInputPort(name='interferogram', object=intImage) cor.wireInputPort(name='amplitude', object=ampImage) cor.wireOutputPort(name='correlation', object=cohImage) cohImage.finalizeImage() intImage.finalizeImage() ampImage.finalizeImage() cor.calculateCorrelation() # NEW COMMANDS added by YL --start import subprocess subprocess.getoutput( 'MULTILOOK_FILTER_ISCE.py -a ./interferogram/topophase.amp -c ./interferogram/topophase.cor' ) subprocess.getoutput( 'CROP_ISCE_stripmapApp.py -a ./interferogram/topophase.amp -c ./interferogram/topophase.cor' ) subprocess.getoutput( 'imageMath.py -e="a_0;a_1" --a ./interferogram/topophase.amp -o ./interferogram/resampOnlyImage1.amp -s BIL -t FLOAT' ) self.geocode_list += ['./interferogram/resampOnlyImage1.amp'] # NEW COMMANDS added by YL --end # try: # CORRELATION_METHOD[method](cor) # except KeyError: # print("Unrecognized correlation method") # sys.exit(1) # pass return None
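# Minimal numpy sketch of windowed interferometric coherence, conceptually what the
# Correlation module computes from the flattened interferogram and the two amplitude
# channels. This is the textbook estimator, not necessarily the exact mroipac
# implementation; data below are synthetic.
import numpy as np
from scipy.ndimage import uniform_filter

def coherence(ifg, amp1, amp2, win=5):
    num = np.abs(uniform_filter(ifg.real, win) + 1j * uniform_filter(ifg.imag, win))
    den = np.sqrt(uniform_filter(amp1 ** 2, win) * uniform_filter(amp2 ** 2, win))
    return np.where(den > 0, num / (den + (den == 0)), 0.0)

rng = np.random.default_rng(4)
s1 = rng.normal(size=(64, 64)) + 1j * rng.normal(size=(64, 64))
s2 = s1 + 0.3 * (rng.normal(size=(64, 64)) + 1j * rng.normal(size=(64, 64)))
print(coherence(s1 * np.conj(s2), np.abs(s1), np.abs(s2)).mean())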
def runResampleSlc(self, kind='coarse'): ''' Kind can either be coarse, refined or fine. ''' if kind not in ['coarse', 'refined', 'fine']: raise Exception( 'Unknown operation type {0} in runResampleSlc'.format(kind)) if kind == 'fine': if not (self.doRubbersheetingRange | self.doRubbersheetingAzimuth ): # Modified by V. Brancato 10.10.2019 print('Rubber sheeting not requested, skipping resampling ....') return logger.info("Resampling slave SLC") slaveFrame = self._insar.loadProduct(self._insar.slaveSlcCropProduct) masterFrame = self._insar.loadProduct(self._insar.masterSlcCropProduct) inimg = isceobj.createSlcImage() inimg.load(slaveFrame.getImage().filename + '.xml') inimg.setAccessMode('READ') prf = slaveFrame.PRF doppler = slaveFrame._dopplerVsPixel coeffs = [2 * np.pi * val / prf for val in doppler] dpoly = Poly2D() dpoly.initPoly(rangeOrder=len(coeffs) - 1, azimuthOrder=0, coeffs=[coeffs]) rObj = stdproc.createResamp_slc() rObj.slantRangePixelSpacing = slaveFrame.getInstrument().getRangePixelSize( ) rObj.radarWavelength = slaveFrame.getInstrument().getRadarWavelength() rObj.dopplerPoly = dpoly # for now let's start with None polynomial. Later this should change to # the misregistration polynomial misregFile = os.path.join(self.insar.misregDirname, self.insar.misregFilename) if ((kind in ['refined', 'fine']) and os.path.exists(misregFile + '_az.xml')): azpoly = self._insar.loadProduct(misregFile + '_az.xml') rgpoly = self._insar.loadProduct(misregFile + '_rg.xml') else: print(misregFile, " does not exist.") azpoly = None rgpoly = None rObj.azimuthOffsetsPoly = azpoly rObj.rangeOffsetsPoly = rgpoly rObj.imageIn = inimg #Since the app is based on geometry module we expect pixel-by-pixel offset #field offsetsDir = self.insar.offsetsDirname # Modified by V. Brancato 10.10.2019 #rgname = os.path.join(offsetsDir, self.insar.rangeOffsetFilename) if kind in ['coarse', 'refined']: azname = os.path.join(offsetsDir, self.insar.azimuthOffsetFilename) rgname = os.path.join(offsetsDir, self.insar.rangeOffsetFilename) flatten = True else: azname = os.path.join(offsetsDir, self.insar.azimuthRubbersheetFilename) if self.doRubbersheetingRange: print( 'Rubbersheeting in range is turned on, taking the cross-correlation offsets' ) print('Setting Flattening to False') rgname = os.path.join(offsetsDir, self.insar.rangeRubbersheetFilename) flatten = False else: print( 'Rubbersheeting in range is turned off, taking range geometric offsets' ) rgname = os.path.join(offsetsDir, self.insar.rangeOffsetFilename) flatten = True rngImg = isceobj.createImage() rngImg.load(rgname + '.xml') rngImg.setAccessMode('READ') aziImg = isceobj.createImage() aziImg.load(azname + '.xml') aziImg.setAccessMode('READ') width = rngImg.getWidth() length = rngImg.getLength() # Modified by V. 
Brancato 10.10.2019 #flatten = True rObj.flatten = flatten rObj.outputWidth = width rObj.outputLines = length rObj.residualRangeImage = rngImg rObj.residualAzimuthImage = aziImg if masterFrame is not None: rObj.startingRange = slaveFrame.startingRange rObj.referenceStartingRange = masterFrame.startingRange rObj.referenceSlantRangePixelSpacing = masterFrame.getInstrument( ).getRangePixelSize() rObj.referenceWavelength = masterFrame.getInstrument( ).getRadarWavelength() # preparing the output directory for coregistered slave slc coregDir = self.insar.coregDirname if os.path.isdir(coregDir): logger.info('Geometry directory {0} already exists.'.format(coregDir)) else: os.makedirs(coregDir) # output file name of the coregistered slave slc img = slaveFrame.getImage() if kind == 'coarse': coregFilename = os.path.join(coregDir, self._insar.coarseCoregFilename) elif kind == 'refined': coregFilename = os.path.join(coregDir, self._insar.refinedCoregFilename) elif kind == 'fine': coregFilename = os.path.join(coregDir, self._insar.fineCoregFilename) else: print('Exception: Should not have gotten to this stage') imgOut = isceobj.createSlcImage() imgOut.setWidth(width) imgOut.filename = coregFilename imgOut.setAccessMode('write') rObj.resamp_slc(imageOut=imgOut) imgOut.renderHdr() return
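# Sketch of the Doppler handed to resamp_slc above: the per-range-pixel Doppler polynomial
# in Hz is rescaled to radians of azimuth phase per line by 2*pi/PRF before being wrapped
# in a Poly2D. The PRF and coefficients below are placeholders.
import numpy as np

prf = 1650.0                                 # placeholder PRF in Hz
dopplerVsPixel = [80.0, 1.5e-3, -2.0e-8]     # placeholder Hz coefficients vs range pixel
coeffs = [2 * np.pi * val / prf for val in dopplerVsPixel]
print(coeffs)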
def main(iargs=None): inps = cmdLineParse(iargs) ''' Estimate azimuth misregistration. ''' #catalog = isceobj.Catalog.createCatalog(self._insar.procDoc.name) #master = self._insar.loadProduct( self._insar.masterSlcProduct + '.xml' ) #minBurst, maxBurst = self._insar.commonMasterBurstLimits #slaveBurstStart, slaveBurstEnd = self._insar.commonSlaveBurstLimits esdPath = inps.esdDirname swathList = ut.getSwathList(esdPath) alks = inps.esdAzimuthLooks rlks = inps.esdRangeLooks #esdPath = esdPath.split() val = [] #for esddir in esdPath: for swath in swathList: esddir = os.path.join(esdPath, 'IW{0}'.format(swath)) freqFiles = glob.glob(os.path.join(esddir,'freq_??.bin')) freqFiles.sort() minBurst = int(os.path.basename(freqFiles[0]).split('.')[0][-2:]) maxBurst = int(os.path.basename(freqFiles[-1]).split('.')[0][-2:]) #maxBurst = maxBurst - 1 combIntName = os.path.join(esddir, 'combined.int') combFreqName = os.path.join(esddir, 'combined_freq.bin') combCorName = os.path.join(esddir, 'combined.cor') combOffName = os.path.join(esddir, 'combined.off') for ff in [combIntName, combFreqName, combCorName, combOffName]: if os.path.exists(ff): os.remove(ff) # val = [] lineCount = 0 for ii in range(minBurst, maxBurst): intname = os.path.join(esddir, 'overlap_%02d.%dalks_%drlks.int'%(ii+1, alks,rlks)) freqname = os.path.join(esddir, 'freq_%02d.%dalks_%drlks.bin'%(ii+1,alks,rlks)) corname = os.path.join(esddir, 'overlap_%02d.%dalks_%drlks.cor'%(ii+1, alks, rlks)) img = isceobj.createImage() img.load(intname + '.xml') width = img.getWidth() length = img.getLength() ifg = np.fromfile(intname, dtype=np.complex64).reshape((-1,width)) freq = np.fromfile(freqname, dtype=np.float32).reshape((-1,width)) cor = np.fromfile(corname, dtype=np.float32).reshape((-1,width)) with open(combIntName, 'ab') as fid: ifg.tofile(fid) with open(combFreqName, 'ab') as fid: freq.tofile(fid) with open(combCorName, 'ab') as fid: cor.tofile(fid) off = np.angle(ifg) / freq with open(combOffName, 'ab') as fid: off.astype(np.float32).tofile(fid) lineCount += length mask = (np.abs(ifg) > 0) * (cor > inps.esdCoherenceThreshold) vali = off[mask] val = np.hstack((val, vali)) img = isceobj.createIntImage() img.filename = combIntName img.setWidth(width) img.setAccessMode('READ') img.renderHdr() for fname in [combFreqName, combCorName, combOffName]: img = isceobj.createImage() img.bands = 1 img.scheme = 'BIP' img.dataType = 'FLOAT' img.filename = fname img.setWidth(width) img.setAccessMode('READ') img.renderHdr() if val.size == 0 : raise Exception('Coherence threshold too strict. No points left for reliable ESD estimate') medianval = np.median(val) meanval = np.mean(val) stdval = np.std(val) hist, bins = np.histogram(val, 50, normed=1) center = 0.5*(bins[:-1] + bins[1:]) debugplot = True try: import matplotlib as mpl mpl.use('Agg') import matplotlib.pyplot as plt except: print('Matplotlib could not be imported. 
Skipping debug plot...') debugplot = False if debugplot: ####Plotting plt.figure() plt.bar(center, hist, align='center', width = 0.7*(bins[1] - bins[0])) plt.xlabel('Azimuth shift in pixels') plt.savefig( os.path.join(esddir, 'ESDmisregistration.png')) plt.close() # catalog.addItem('Median', medianval, 'esd') # catalog.addItem('Mean', meanval, 'esd') # catalog.addItem('Std', stdval, 'esd') # catalog.addItem('coherence threshold', self.esdCoherenceThreshold, 'esd') # catalog.addItem('number of coherent points', val.size, 'esd') # catalog.printToLog(logger, "runESD") # self._insar.procDoc.addAllFromCatalog(catalog) # slaveTimingCorrection = medianval * master.bursts[0].azimuthTimeInterval outputDir = os.path.dirname(inps.output) if not os.path.exists(outputDir): os.makedirs(outputDir) with open(inps.output, 'w') as f: f.write('median : '+str(medianval) +'\n') f.write('mean : '+str(meanval)+'\n') f.write('std : '+str(stdval)+'\n') f.write('coherence threshold : '+str(inps.esdCoherenceThreshold)+'\n') f.write('number of coherent points : '+str(len(val))+'\n')
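# Self-contained sketch of the per-pixel azimuth-shift estimate used above: the
# double-difference phase divided by the spectral-separation layer, keeping only coherent
# pixels for the median. Layers are synthetic and the 0.85 threshold stands in for
# inps.esdCoherenceThreshold.
import numpy as np

rng = np.random.default_rng(3)
shape = (60, 300)
trueShift = 0.02                              # azimuth shift in lines (synthetic truth)
freq = rng.normal(5.0, 0.2, shape)            # rad of phase per line of shift (placeholder)
ifg = np.exp(1j * freq * trueShift).astype(np.complex64)
cor = rng.uniform(0.0, 1.0, shape)

off = np.angle(ifg) / freq
mask = (np.abs(ifg) > 0) * (cor > 0.85)
print(np.median(off[mask]))                   # recovers ~0.02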
def runUnwrap(infile, outfile, corfile, config,
              costMode=None, initMethod=None, defomax=None, initOnly=None):

    if costMode is None:
        costMode = 'DEFO'
    if initMethod is None:
        initMethod = 'MST'
    if defomax is None:
        defomax = 4.0
    if initOnly is None:
        initOnly = False

    wrapName = infile
    unwrapName = outfile

    img = isceobj.createImage()
    img.load(infile + '.xml')

    wavelength = config['wavelength']
    width = img.getWidth()
    length = img.getLength()
    earthRadius = config['earthRadius']
    altitude = config['altitude']
    rangeLooks = config['rglooks']
    azimuthLooks = config['azlooks']
    corrLooks = config['corrlooks']
    maxComponents = 20

    snp = Snaphu()
    snp.setInitOnly(initOnly)
    snp.setInput(wrapName)
    snp.setOutput(unwrapName)
    snp.setWidth(width)
    snp.setCostMode(costMode)
    snp.setEarthRadius(earthRadius)
    snp.setWavelength(wavelength)
    snp.setAltitude(altitude)
    snp.setCorrfile(corfile)
    snp.setInitMethod(initMethod)
    snp.setCorrLooks(corrLooks)
    snp.setMaxComponents(maxComponents)
    snp.setDefoMaxCycles(defomax)
    snp.setRangeLooks(rangeLooks)
    snp.setAzimuthLooks(azimuthLooks)
    snp.setCorFileFormat('FLOAT_DATA')
    snp.prepare()
    snp.unwrap()

    ###### Render XML
    outImage = isceobj.Image.createUnwImage()
    outImage.setFilename(unwrapName)
    outImage.setWidth(width)
    outImage.setLength(length)
    outImage.setAccessMode('read')
    #outImage.createImage()
    outImage.renderHdr()
    outImage.renderVRT()
    #outImage.finalizeImage()

    ##### Check if connected components was created
    if snp.dumpConnectedComponents:
        connImage = isceobj.Image.createImage()
        connImage.setFilename(unwrapName + '.conncomp')
        #At least one can query for the name used
        connImage.setWidth(width)
        connImage.setLength(length)
        connImage.setAccessMode('read')
        connImage.setDataType('BYTE')
        # connImage.createImage()
        connImage.renderHdr()
        connImage.renderVRT()
        # connImage.finalizeImage()

    return
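
# Hedged usage sketch (illustrative only; the file names and numbers below are
# made up): runUnwrap() expects ISCE .xml metadata next to the wrapped
# interferogram and coherence files, and a config dict providing the keys read
# at the top of the function ('wavelength', 'earthRadius', 'altitude',
# 'rglooks', 'azlooks', 'corrlooks').
def _run_unwrap_example():
    config = {
        'wavelength': 0.05546576,   # m (example C-band value)
        'earthRadius': 6371000.0,   # m
        'altitude': 700000.0,       # m
        'rglooks': 9,
        'azlooks': 3,
        'corrlooks': 27,
    }
    runUnwrap('filt_topophase.flat', 'filt_topophase.unw', 'phsig.cor',
              config, defomax=2.0)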
def runTopoGPU(info, demImage, dop=None, nativedop=False, legendre=False):

    from isceobj.Planet.Planet import Planet
    from zerodop.GPUtopozero.GPUtopozero import PyTopozero
    from isceobj import Constants as CN
    from isceobj.Util.Poly2D import Poly2D
    from iscesys import DateTimeUtil as DTU

    ## TODO GPU does not support shadow and layover and local inc file generation
    full = False

    if not os.path.isdir(info.outdir):
        os.makedirs(info.outdir)

    # define variables to be used later on
    r0 = info.rangeFirstSample + ((info.numberRangeLooks - 1) / 2) * info.slantRangePixelSpacing
    tbef = info.sensingStart + datetime.timedelta(seconds=((info.numberAzimuthLooks - 1) / 2) / info.prf)
    pegHdg = np.radians(info.orbit.getENUHeading(tbef))
    width = info.width // info.numberRangeLooks
    length = info.length // info.numberAzimuthLooks
    dr = info.slantRangePixelSpacing * info.numberRangeLooks

    # output file names
    latFilename = info.latFilename
    lonFilename = info.lonFilename
    losFilename = info.losFilename
    heightFilename = info.heightFilename
    incFilename = info.incFilename
    maskFilename = info.maskFilename

    # orbit interpolator
    if legendre:
        omethod = 2  # LEGENDRE INTERPOLATION
    else:
        omethod = 0  # HERMITE INTERPOLATION

    # tracking doppler specifications
    if nativedop and (dop is not None):
        try:
            coeffs = dop._coeffs
        except AttributeError:
            coeffs = dop

        polyDoppler = Poly2D()
        polyDoppler.setWidth(width)
        polyDoppler.setLength(length)
        polyDoppler.initPoly(rangeOrder=len(coeffs) - 1, azimuthOrder=0, coeffs=[coeffs])
    else:
        print('Zero doppler')
        polyDoppler = Poly2D(name='stripmapStack_dopplerPoly')
        polyDoppler.setWidth(width)
        polyDoppler.setLength(length)
        polyDoppler.setNormRange(1.0)
        polyDoppler.setNormAzimuth(1.0)
        polyDoppler.setMeanRange(0.0)
        polyDoppler.setMeanAzimuth(0.0)
        polyDoppler.initPoly(rangeOrder=0, azimuthOrder=0, coeffs=[[0.0]])

    polyDoppler.createPoly2D()

    # dem
    demImage.setCaster('read', 'FLOAT')
    demImage.createImage()

    # slant range file
    slantRangeImage = Poly2D()
    slantRangeImage.setWidth(width)
    slantRangeImage.setLength(length)
    slantRangeImage.setNormRange(1.0)
    slantRangeImage.setNormAzimuth(1.0)
    slantRangeImage.setMeanRange(0.0)
    slantRangeImage.setMeanAzimuth(0.0)
    slantRangeImage.initPoly(rangeOrder=1, azimuthOrder=0, coeffs=[[r0, dr]])
    slantRangeImage.createPoly2D()

    # lat file
    latImage = isceobj.createImage()
    accessMode = 'write'
    dataType = 'DOUBLE'
    latImage.initImage(latFilename, accessMode, width, dataType)
    latImage.createImage()

    # lon file
    lonImage = isceobj.createImage()
    lonImage.initImage(lonFilename, accessMode, width, dataType)
    lonImage.createImage()

    # LOS file
    losImage = isceobj.createImage()
    dataType = 'FLOAT'
    bands = 2
    scheme = 'BIL'
    losImage.initImage(losFilename, accessMode, width, dataType, bands=bands, scheme=scheme)
    losImage.setCaster('write', 'DOUBLE')
    losImage.createImage()

    # height file
    heightImage = isceobj.createImage()
    dataType = 'DOUBLE'
    heightImage.initImage(heightFilename, accessMode, width, dataType)
    heightImage.createImage()

    # add inc and mask file if requested
    if full:
        incImage = isceobj.createImage()
        dataType = 'FLOAT'
        incImage.initImage(incFilename, accessMode, width, dataType, bands=bands, scheme=scheme)
        incImage.createImage()
        incImagePtr = incImage.getImagePointer()

        maskImage = isceobj.createImage()
        dataType = 'BYTE'
        bands = 1
        maskImage.initImage(maskFilename, accessMode, width, dataType, bands=bands, scheme=scheme)
        maskImage.createImage()
        maskImagePtr = maskImage.getImagePointer()
    else:
        incImagePtr = 0
        maskImagePtr = 0

    # initialize planet
    elp = Planet(pname='Earth').ellipsoid

    # initialize topo object and fill with parameters
    topo = PyTopozero()
    topo.set_firstlat(demImage.getFirstLatitude())
    topo.set_firstlon(demImage.getFirstLongitude())
    topo.set_deltalat(demImage.getDeltaLatitude())
    topo.set_deltalon(demImage.getDeltaLongitude())
    topo.set_major(elp.a)
    topo.set_eccentricitySquared(elp.e2)
    topo.set_rSpace(info.slantRangePixelSpacing)
    topo.set_r0(r0)
    topo.set_pegHdg(pegHdg)
    topo.set_prf(info.prf)
    topo.set_t0(DTU.seconds_since_midnight(tbef))
    topo.set_wvl(info.radarWavelength)
    topo.set_thresh(.05)
    topo.set_demAccessor(demImage.getImagePointer())
    topo.set_dopAccessor(polyDoppler.getPointer())
    topo.set_slrngAccessor(slantRangeImage.getPointer())
    topo.set_latAccessor(latImage.getImagePointer())
    topo.set_lonAccessor(lonImage.getImagePointer())
    topo.set_losAccessor(losImage.getImagePointer())
    topo.set_heightAccessor(heightImage.getImagePointer())
    topo.set_incAccessor(incImagePtr)
    topo.set_maskAccessor(maskImagePtr)
    topo.set_numIter(25)
    topo.set_idemWidth(demImage.getWidth())
    topo.set_idemLength(demImage.getLength())
    topo.set_ilrl(info.lookSide)
    topo.set_extraIter(10)
    topo.set_length(length)
    topo.set_width(width)
    topo.set_nRngLooks(info.numberRangeLooks)
    topo.set_nAzLooks(info.numberAzimuthLooks)
    topo.set_demMethod(5)  # BIQUINTIC METHOD
    topo.set_orbitMethod(omethod)

    # Need to simplify orbit stuff later
    nvecs = len(info.orbit.stateVectors.list)
    topo.set_orbitNvecs(nvecs)
    topo.set_orbitBasis(1)  # Is this ever different?
    topo.createOrbit()      # Initializes the empty orbit to the right allocated size

    count = 0
    for sv in info.orbit.stateVectors.list:
        td = DTU.seconds_since_midnight(sv.getTime())
        pos = sv.getPosition()
        vel = sv.getVelocity()
        topo.set_orbitVector(count, td, pos[0], pos[1], pos[2], vel[0], vel[1], vel[2])
        count += 1

    # run topo
    topo.runTopo()

    # close the written files and add description etc
    # lat file
    latImage.addDescription('Pixel-by-pixel latitude in degrees.')
    latImage.finalizeImage()
    latImage.renderHdr()

    # lon file
    lonImage.addDescription('Pixel-by-pixel longitude in degrees.')
    lonImage.finalizeImage()
    lonImage.renderHdr()

    # height file
    heightImage.addDescription('Pixel-by-pixel height in meters.')
    heightImage.finalizeImage()
    heightImage.renderHdr()

    # los file
    descr = '''Two channel Line-Of-Sight geometry image (all angles in degrees). Represents vector drawn from target to platform.
        Channel 1: Incidence angle measured from vertical at target (always +ve).
        Channel 2: Azimuth angle measured from North in Anti-clockwise direction.'''
    losImage.setImageType('bil')
    losImage.addDescription(descr)
    losImage.finalizeImage()
    losImage.renderHdr()

    # dem/height file
    demImage.finalizeImage()

    # adding in additional files if requested
    if full:
        descr = '''Two channel angle file.
        Channel 1: Angle between ray to target and the vertical at the sensor
        Channel 2: Local incidence angle accounting for DEM slope at target'''
        incImage.addDescription(descr)
        incImage.finalizeImage()
        incImage.renderHdr()

        descr = 'Radar shadow-layover mask. 1 - Radar Shadow. 2 - Radar Layover. 3 - Both.'
        maskImage.addDescription(descr)
        maskImage.finalizeImage()
        maskImage.renderHdr()

    if slantRangeImage:
        try:
            slantRangeImage.finalizeImage()
        except Exception:
            pass
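
# Hedged illustration (made-up numbers): runTopoGPU() places the multilooked
# grid origin at the centre of the first look window, as computed for r0 and
# tbef above. A quick standalone check of the range part:
def _multilook_origin_sketch():
    rangeFirstSample = 800000.0      # m, hypothetical
    slantRangePixelSpacing = 2.33    # m, hypothetical
    numberRangeLooks = 9
    r0 = rangeFirstSample + ((numberRangeLooks - 1) / 2) * slantRangePixelSpacing
    dr = slantRangePixelSpacing * numberRangeLooks
    return r0, dr   # (800009.32, 20.97): multilooked starting range and range spacing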
def runMultilook(in_dir, out_dir, alks, rlks, in_ext='.rdr', out_ext='.rdr',
                 method='gdal',
                 fbase_list=['hgt', 'incLocal', 'lat', 'lon', 'los', 'shadowMask', 'waterMask']):
    """
    Multilook geometry files.
    """
    from iscesys.Parsers.FileParserFactory import createFileParser
    from mroipac.looks.Looks import Looks

    msg = 'generate multilooked geometry files with alks={} and rlks={}'.format(alks, rlks)
    if method == 'isce':
        msg += ' using mroipac.looks.Looks() ...'
    else:
        msg += ' using gdal.Translate() ...'
    print('-' * 50 + '\n' + msg)

    # create the output directory (e.g. 'geom_master')
    os.makedirs(out_dir, exist_ok=True)

    # multilook files one by one
    for fbase in fbase_list:
        in_file = os.path.join(in_dir, '{}{}'.format(fbase, in_ext))
        out_file = os.path.join(out_dir, '{}{}'.format(fbase, out_ext))

        if all(os.path.isfile(in_file + ext) for ext in ['', '.vrt', '.xml']):
            print('multilook {}'.format(in_file))

            # option 1 - Looks module (isce)
            if method == 'isce':
                xmlProp = createFileParser('xml').parse(in_file + '.xml')[0]
                if ('image_type' in xmlProp and xmlProp['image_type'] == 'dem'):
                    inImage = isceobj.createDemImage()
                else:
                    inImage = isceobj.createImage()

                inImage.load(in_file + '.xml')
                inImage.filename = in_file

                lkObj = Looks()
                lkObj.setDownLooks(alks)
                lkObj.setAcrossLooks(rlks)
                lkObj.setInputImage(inImage)
                lkObj.setOutputFilename(out_file)
                lkObj.looks()

            # option 2 - gdal_translate (gdal)
            elif method == 'gdal':
                ds = gdal.Open(in_file, gdal.GA_ReadOnly)
                in_wid = ds.RasterXSize
                in_len = ds.RasterYSize

                out_wid = int(in_wid / rlks)
                out_len = int(in_len / alks)
                src_wid = out_wid * rlks
                src_len = out_len * alks

                options_str = '-of ENVI -a_nodata 0 -outsize {ox} {oy} -srcwin 0 0 {sx} {sy} '.format(
                    ox=out_wid, oy=out_len, sx=src_wid, sy=src_len)
                gdal.Translate(out_file, ds, options=options_str)

                # generate ISCE .xml file
                if not os.path.isfile(out_file + '.xml'):
                    cmd = 'gdal2isce_xml.py -i {}.vrt'.format(out_file)
                    print(cmd)
                    os.system(cmd)

            else:
                raise ValueError('un-supported multilook method: {}'.format(method))

            # copy the full-resolution xml/vrt files from ./merged/geom_master to ./geom_master
            # so that the number of looks can be extracted later
            # (note: the file path recorded inside the .xml file is not updated, although it should be)
            if in_file != out_file + '.full':
                shutil.copy(in_file + '.xml', out_file + '.full.xml')
                shutil.copy(in_file + '.vrt', out_file + '.full.vrt')

    return out_dir
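
# Hedged illustration: the gdal branch of runMultilook() first crops the input
# to an exact multiple of the look window (srcwin) and then decimates it
# (outsize), so the output is floor(width/rlks) x floor(length/alks) pixels.
# The dimensions below are made up.
def _multilook_size_sketch(in_wid=10245, in_len=7001, rlks=9, alks=3):
    out_wid, out_len = in_wid // rlks, in_len // alks
    src_wid, src_len = out_wid * rlks, out_len * alks
    return out_wid, out_len, src_wid, src_len   # (1138, 2333, 10242, 6999)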
def resampleSlc(self, masterFrame, slaveFrame, imageSlc2, radarWavelength, coregDir,
                azoffname, rgoffname, azpoly=None, rgpoly=None, misreg=False):
    logger.info("Resampling slave SLC")

    imageSlc1 = masterFrame.getImage().filename

    inimg = isceobj.createSlcImage()
    inimg.load(imageSlc2 + '.xml')
    inimg.setAccessMode('READ')

    prf = slaveFrame.PRF

    doppler = slaveFrame._dopplerVsPixel
    factor = 1.0  # this should be zero for zero Doppler SLC.
    coeffs = [factor * 2 * np.pi * val / prf / prf for val in doppler]

    dpoly = Poly2D()
    dpoly.initPoly(rangeOrder=len(coeffs) - 1, azimuthOrder=0, coeffs=[coeffs])

    rObj = stdproc.createResamp_slc()
    rObj.slantRangePixelSpacing = slaveFrame.getInstrument().getRangePixelSize()
    #rObj.radarWavelength = slaveFrame.getInstrument().getRadarWavelength()
    rObj.radarWavelength = radarWavelength
    rObj.dopplerPoly = dpoly

    # for now let's start with None polynomial. Later this should change to
    # the misregistration polynomial
    rObj.azimuthOffsetsPoly = azpoly
    rObj.rangeOffsetsPoly = rgpoly
    rObj.imageIn = inimg

    rngImg = isceobj.createImage()
    rngImg.load(rgoffname + '.xml')
    rngImg.setAccessMode('READ')

    aziImg = isceobj.createImage()
    aziImg.load(azoffname + '.xml')
    aziImg.setAccessMode('READ')

    width = rngImg.getWidth()
    length = rngImg.getLength()

    # Modified by V. Brancato on 10.14.2019
    # (if Rubbersheeting in range is turned on, flatten the interferogram during cross-correlation)
    if not self.doRubbersheetingRange:
        print('Rubber sheeting in range is turned off, flattening the interferogram during resampling')
        flatten = True
        print(flatten)
    else:
        print('Rubber sheeting in range is turned on, flattening the interferogram during interferogram formation')
        flatten = False
        print(flatten)
    # end of Modification

    rObj.flatten = flatten
    rObj.outputWidth = width
    rObj.outputLines = length
    rObj.residualRangeImage = rngImg
    rObj.residualAzimuthImage = aziImg

    if masterFrame is not None:
        rObj.startingRange = slaveFrame.startingRange
        rObj.referenceStartingRange = masterFrame.startingRange
        rObj.referenceSlantRangePixelSpacing = masterFrame.getInstrument().getRangePixelSize()
        rObj.referenceWavelength = radarWavelength

    # preparing the output directory for coregistered slave slc
    #coregDir = self.insar.coregDirname
    os.makedirs(coregDir, exist_ok=True)

    # output file name of the coregistered slave slc
    img = slaveFrame.getImage()
    coregFilename = os.path.join(coregDir, os.path.basename(img.filename))

    imgOut = isceobj.createSlcImage()
    imgOut.setWidth(width)
    imgOut.filename = coregFilename
    imgOut.setAccessMode('write')

    rObj.resamp_slc(imageOut=imgOut)

    imgOut.renderHdr()

    return coregFilename
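
# Hedged illustration (values made up): resampleSlc() rescales the slave
# Doppler-vs-pixel coefficients by factor * 2*pi / prf**2 before building the
# Poly2D handed to resamp_slc, exactly as in the list comprehension above.
def _doppler_poly_sketch():
    prf = 1700.0                      # Hz, hypothetical
    dopplerVsPixel = [60.0, 1.0e-3]   # hypothetical Doppler polynomial coefficients
    factor = 1.0
    return [factor * 2 * np.pi * d / prf / prf for d in dopplerVsPixel]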