Example #1
def formInterferogram(slcReference, slcSecondary, interferogram, amplitude,
                      numberRangeLooks, numberAzimuthLooks):
    import numpy as np
    import isce, isceobj
    from isceobj.Alos2Proc.Alos2ProcPublic import multilook
    from isceobj.Alos2Proc.Alos2ProcPublic import create_xml

    img = isceobj.createImage()
    img.load(slcReference + '.xml')
    width = img.width
    length = img.length

    width2 = int(width / numberRangeLooks)
    length2 = int(length / numberAzimuthLooks)

    fpRef = open(slcReference, 'rb')
    fpSec = open(slcSecondary, 'rb')
    fpInf = open(interferogram, 'wb')
    fpAmp = open(amplitude, 'wb')

    for k in range(length2):
        if (((k + 1) % 200) == 0):
            print("processing line %6d of %6d" % (k + 1, length2),
                  end='\r',
                  flush=True)
        ref = np.fromfile(fpRef,
                          dtype=np.complex64,
                          count=numberAzimuthLooks * width).reshape(
                              numberAzimuthLooks, width)
        sec = np.fromfile(fpSec,
                          dtype=np.complex64,
                          count=numberAzimuthLooks * width).reshape(
                              numberAzimuthLooks, width)
        inf = multilook(ref * np.conjugate(sec),
                        numberAzimuthLooks,
                        numberRangeLooks,
                        mean=False)
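        #pack the multilooked reference amplitude into the real part and the multilooked secondary amplitude into the imaginary part of the amplitude file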
        amp = np.sqrt(multilook(ref.real*ref.real+ref.imag*ref.imag, numberAzimuthLooks, numberRangeLooks, mean=False)) + 1j * \
              np.sqrt(multilook(sec.real*sec.real+sec.imag*sec.imag, numberAzimuthLooks, numberRangeLooks, mean=False))
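        #set the amplitude to zero where either the reference or the secondary has no data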
        index = np.nonzero((np.real(amp) == 0) + (np.imag(amp) == 0))
        amp[index] = 0
        inf.tofile(fpInf)
        amp.tofile(fpAmp)
    print("processing line %6d of %6d" % (length2, length2))
    fpRef.close()
    fpSec.close()
    fpInf.close()
    fpAmp.close()

    create_xml(interferogram, width2, length2, 'int')
    create_xml(amplitude, width2, length2, 'amp')
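
A minimal usage sketch; the file names and look numbers are hypothetical, and the two SLCs are assumed to be coregistered, equally sized complex64 files with ISCE .xml metadata:

#form a multilooked interferogram/amplitude pair from two coregistered SLCs
formInterferogram('reference.slc', 'secondary.slc',
                  'diff_2rlks_4alks.int', 'diff_2rlks_4alks.amp',
                  numberRangeLooks=2, numberAzimuthLooks=4)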
Example #2
def swathMosaic(frame,
                inputFiles,
                outputfile,
                rangeOffsets,
                azimuthOffsets,
                numberOfRangeLooks,
                numberOfAzimuthLooks,
                updateFrame=False,
                phaseCompensation=False,
                pcRangeLooks=1,
                pcAzimuthLooks=4,
                filt=False,
                resamplingMethod=0):
    '''
    mosaic swaths
    
    frame:                 frame
    inputFiles:            input file list
    outputfile:            output mosaic file
    rangeOffsets:          range offsets
    azimuthOffsets:        azimuth offsets
    numberOfRangeLooks:    number of range looks of the input files
    numberOfAzimuthLooks:  number of azimuth looks of the input files
    updateFrame:           whether to update frame parameters
    phaseCompensation:     whether to do phase compensation for each swath
    pcRangeLooks:          number of range looks to take when computing the swath phase difference
    pcAzimuthLooks:        number of azimuth looks to take when computing the swath phase difference
    filt:                  whether to filter when computing the swath phase difference
    resamplingMethod:      0: amp resampling. 1: int resampling.
    '''
    from contrib.alos2proc_f.alos2proc_f import rect_with_looks
    from contrib.alos2proc.alos2proc import mosaicsubswath
    from isceobj.Alos2Proc.Alos2ProcPublic import multilook
    from isceobj.Alos2Proc.Alos2ProcPublic import cal_coherence_1
    from isceobj.Alos2Proc.Alos2ProcPublic import filterInterferogram
    #module-level dependencies assumed by this snippet (readImage and findNonzero are helpers defined in the same module)
    import os
    import datetime
    import numpy as np
    import isceobj

    numberOfSwaths = len(frame.swaths)
    swaths = frame.swaths

    rangeScale = []
    azimuthScale = []
    rectWidth = []
    rectLength = []
    for i in range(numberOfSwaths):
        rangeScale.append(swaths[0].rangePixelSize / swaths[i].rangePixelSize)
        azimuthScale.append(swaths[0].azimuthLineInterval /
                            swaths[i].azimuthLineInterval)
        if i == 0:
            rectWidth.append(
                int(swaths[i].numberOfSamples / numberOfRangeLooks))
            rectLength.append(
                int(swaths[i].numberOfLines / numberOfAzimuthLooks))
        else:
            rectWidth.append(
                int(1.0 / rangeScale[i] *
                    int(swaths[i].numberOfSamples / numberOfRangeLooks)))
            rectLength.append(
                int(1.0 / azimuthScale[i] *
                    int(swaths[i].numberOfLines / numberOfAzimuthLooks)))

    #convert original offset to offset for images with looks
    #use list instead of np.array to make it consistent with the rest of the code
    rangeOffsets1 = [i / numberOfRangeLooks for i in rangeOffsets]
    azimuthOffsets1 = [i / numberOfAzimuthLooks for i in azimuthOffsets]

    #get offset relative to the first swath
    rangeOffsets2 = [0.0]
    azimuthOffsets2 = [0.0]
    for i in range(1, numberOfSwaths):
        rangeOffsets2.append(0.0)
        azimuthOffsets2.append(0.0)
        for j in range(1, i + 1):
            rangeOffsets2[i] += rangeOffsets1[j]
            azimuthOffsets2[i] += azimuthOffsets1[j]

    #resample each swath
    rinfs = []
    for i, inf in enumerate(inputFiles):
        rinfs.append("{}_{}{}".format(
            os.path.splitext(os.path.basename(inf))[0], i,
            os.path.splitext(os.path.basename(inf))[1]))
        #do not resample first swath
        if i == 0:
            if os.path.isfile(rinfs[i]):
                os.remove(rinfs[i])
            os.symlink(inf, rinfs[i])
        else:
            infImg = isceobj.createImage()
            infImg.load(inf + '.xml')
            rangeOffsets2Frac = rangeOffsets2[i] - int(rangeOffsets2[i])
            azimuthOffsets2Frac = azimuthOffsets2[i] - int(azimuthOffsets2[i])

            if resamplingMethod == 0:
                rect_with_looks(inf, rinfs[i], infImg.width, infImg.length,
                                rectWidth[i], rectLength[i], rangeScale[i],
                                0.0, 0.0, azimuthScale[i],
                                rangeOffsets2Frac * rangeScale[i],
                                azimuthOffsets2Frac * azimuthScale[i], 1, 1, 1,
                                1, 'COMPLEX', 'Bilinear')
            elif resamplingMethod == 1:
                #decompose amplitude and phase
                phaseFile = 'phase'
                amplitudeFile = 'amplitude'
                data = np.fromfile(inf, dtype=np.complex64).reshape(
                    infImg.length, infImg.width)
                phase = np.exp(np.complex64(1j) * np.angle(data))
                phase[np.nonzero(data == 0)] = 0
                phase.astype(np.complex64).tofile(phaseFile)
                amplitude = np.absolute(data)
                amplitude.astype(np.float32).tofile(amplitudeFile)

                #resampling
                phaseRectFile = 'phaseRect'
                amplitudeRectFile = 'amplitudeRect'
                rect_with_looks(phaseFile, phaseRectFile, infImg.width,
                                infImg.length, rectWidth[i], rectLength[i],
                                rangeScale[i], 0.0, 0.0, azimuthScale[i],
                                rangeOffsets2Frac * rangeScale[i],
                                azimuthOffsets2Frac * azimuthScale[i], 1, 1, 1,
                                1, 'COMPLEX', 'Sinc')
                rect_with_looks(amplitudeFile, amplitudeRectFile, infImg.width,
                                infImg.length, rectWidth[i], rectLength[i],
                                rangeScale[i], 0.0, 0.0, azimuthScale[i],
                                rangeOffsets2Frac * rangeScale[i],
                                azimuthOffsets2Frac * azimuthScale[i], 1, 1, 1,
                                1, 'REAL', 'Bilinear')

                #recombine amplitude and phase
                phase = np.fromfile(phaseRectFile, dtype=np.complex64).reshape(
                    rectLength[i], rectWidth[i])
                amplitude = np.fromfile(amplitudeRectFile,
                                        dtype=np.float32).reshape(
                                            rectLength[i], rectWidth[i])
                (phase * amplitude).astype(np.complex64).tofile(rinfs[i])

                #tidy up
                os.remove(phaseFile)
                os.remove(amplitudeFile)
                os.remove(phaseRectFile)
                os.remove(amplitudeRectFile)

    #determine output width and length
    #actually no need to calculate in range direction
    xs = []
    xe = []
    ys = []
    ye = []
    for i in range(numberOfSwaths):
        if i == 0:
            xs.append(0)
            xe.append(rectWidth[i] - 1)
            ys.append(0)
            ye.append(rectLength[i] - 1)
        else:
            xs.append(0 - int(rangeOffsets2[i]))
            xe.append(rectWidth[i] - 1 - int(rangeOffsets2[i]))
            ys.append(0 - int(azimuthOffsets2[i]))
            ye.append(rectLength[i] - 1 - int(azimuthOffsets2[i]))

    (xmin, xminIndex) = min((v, i) for i, v in enumerate(xs))
    (xmax, xmaxIndex) = max((v, i) for i, v in enumerate(xe))
    (ymin, yminIndex) = min((v, i) for i, v in enumerate(ys))
    (ymax, ymaxIndex) = max((v, i) for i, v in enumerate(ye))

    outWidth = xmax - xmin + 1
    outLength = ymax - ymin + 1

    #prepare offset for mosaicing
    rangeOffsets3 = []
    azimuthOffsets3 = []
    for i in range(numberOfSwaths):
        azimuthOffsets3.append(
            int(azimuthOffsets2[i]) - int(azimuthOffsets2[yminIndex]))
        if i != 0:
            rangeOffsets3.append(
                int(rangeOffsets2[i]) - int(rangeOffsets2[i - 1]))
        else:
            rangeOffsets3.append(0)

    delta = int(30 / numberOfRangeLooks)

    #compute compensation phase for each swath
    diffMean2 = [0.0 for i in range(numberOfSwaths)]
    if phaseCompensation:
        #compute swath phase offset
        diffMean = [0.0]
        for i in range(1, numberOfSwaths):
            #all indexes start from zero; all computed start/end sample/line indexes are inclusive.

            #no need to add edge here, as we are going to find first/last nonzero sample/lines later
            #edge = delta
            edge = 0

            #image i-1
            startSample1 = edge + 0 - int(rangeOffsets2[i]) + int(
                rangeOffsets2[i - 1])
            endSample1 = -edge + rectWidth[i - 1] - 1
            startLine1 = edge + max(
                0 - int(azimuthOffsets2[i]) + int(azimuthOffsets2[i - 1]), 0)
            endLine1 = -edge + min(
                rectLength[i] - 1 - int(azimuthOffsets2[i]) +
                int(azimuthOffsets2[i - 1]), rectLength[i - 1] - 1)
            data1 = readImage(rinfs[i - 1], rectWidth[i - 1],
                              rectLength[i - 1], startSample1, endSample1,
                              startLine1, endLine1)

            #image i
            startSample2 = edge + 0
            endSample2 = -edge + rectWidth[i - 1] - 1 - int(
                rangeOffsets2[i - 1]) + int(rangeOffsets2[i])
            startLine2 = edge + max(
                0 - int(azimuthOffsets2[i - 1]) + int(azimuthOffsets2[i]), 0)
            endLine2 = -edge + min(
                rectLength[i - 1] - 1 - int(azimuthOffsets2[i - 1]) +
                int(azimuthOffsets2[i]), rectLength[i] - 1)
            data2 = readImage(rinfs[i], rectWidth[i], rectLength[i],
                              startSample2, endSample2, startLine2, endLine2)

            #remove edge due to incomplete convolution in resampling
            edge = 9
            (startLine0, endLine0, startSample0, endSample0) = findNonzero(
                np.logical_and((data1 != 0), (data2 != 0)))
            data1 = data1[startLine0 + edge:endLine0 + 1 - edge,
                          startSample0 + edge:endSample0 + 1 - edge]
            data2 = data2[startLine0 + edge:endLine0 + 1 - edge,
                          startSample0 + edge:endSample0 + 1 - edge]

            #take looks
            data1 = multilook(data1, pcAzimuthLooks, pcRangeLooks)
            data2 = multilook(data2, pcAzimuthLooks, pcRangeLooks)

            #filter
            if filt:
                data1 /= (np.absolute(data1) + (data1 == 0))
                data2 /= (np.absolute(data2) + (data2 == 0))
                data1 = filterInterferogram(data1, 3.0, 64, 1)
                data2 = filterInterferogram(data2, 3.0, 64, 1)

            #get difference
            corth = 0.87
            if filt:
                corth = 0.90
            diffMean0 = 0.0
            for k in range(5):
                dataDiff = data1 * np.conj(data2)
                cor = cal_coherence_1(dataDiff, win=3)
                index = np.nonzero(np.logical_and(cor > corth, dataDiff != 0))
                if index[0].size < 100:
                    diffMean0 = 0.0
                    print(
                        '\n\nWARNING: too few high coherence pixels for swath phase difference estimation between swath {} and {}'
                        .format(i - 1, i))
                    print('       : first swath swath number: 0\n\n')
                    break
                angle = np.mean(np.angle(dataDiff[index]), dtype=np.float64)
                diffMean0 += angle
                data2 *= np.exp(np.complex64(1j) * angle)
                print('phase offset: %15.12f rad after loop: %3d' %
                      (diffMean0, k))

                DEBUG = False
                if DEBUG and (k == 0):
                    from isceobj.Alos2Proc.Alos2ProcPublic import create_xml
                    (lengthxx, widthxx) = dataDiff.shape
                    filtnamePrefix = 'subswath{}_subswath{}_loop{}'.format(
                        frame.swaths[i - 1].swathNumber,
                        frame.swaths[i].swathNumber, k)
                    cor.astype(np.float32).tofile(filtnamePrefix + '.cor')
                    create_xml(filtnamePrefix + '.cor', widthxx, lengthxx,
                               'float')
                    dataDiff.astype(np.complex64).tofile(filtnamePrefix +
                                                         '.int')
                    create_xml(filtnamePrefix + '.int', widthxx, lengthxx,
                               'int')

            diffMean.append(diffMean0)
            print('phase offset: subswath{} - subswath{}: {}'.format(
                frame.swaths[i - 1].swathNumber, frame.swaths[i].swathNumber,
                diffMean0))

        for i in range(1, numberOfSwaths):
            for j in range(1, i + 1):
                diffMean2[i] += diffMean[j]

    #mosaic swaths
    diffflag = 1
    oflag = [0 for i in range(numberOfSwaths)]
    mosaicsubswath(outputfile, outWidth, outLength, delta, diffflag,
                   numberOfSwaths, rinfs, rectWidth, rangeOffsets3,
                   azimuthOffsets3, diffMean2, oflag)
    #remove tmp files
    for x in rinfs:
        os.remove(x)

    #update frame parameters
    if updateFrame:
        #mosaic size
        frame.numberOfSamples = outWidth
        frame.numberOfLines = outLength
        #NOTE THAT WE ARE STILL USING SINGLE LOOK PARAMETERS HERE
        #range parameters
        frame.startingRange = frame.swaths[0].startingRange
        frame.rangeSamplingRate = frame.swaths[0].rangeSamplingRate
        frame.rangePixelSize = frame.swaths[0].rangePixelSize
        #azimuth parameters
        azimuthTimeOffset = -max([
            int(x) for x in azimuthOffsets2
        ]) * numberOfAzimuthLooks * frame.swaths[0].azimuthLineInterval
        frame.sensingStart = frame.swaths[0].sensingStart + datetime.timedelta(
            seconds=azimuthTimeOffset)
        frame.prf = frame.swaths[0].prf
        frame.azimuthPixelSize = frame.swaths[0].azimuthPixelSize
        frame.azimuthLineInterval = frame.swaths[0].azimuthLineInterval
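
A hypothetical call; frame, rangeOffsets, and azimuthOffsets are assumed to come from the surrounding ALOS-2 workflow (e.g. from swath matching as in Example #4 below), inputFiles holds one multilooked subswath interferogram per swath of frame, and the file names and look numbers are placeholders:

inputFiles = ['diff_s{}.int'.format(swath.swathNumber) for swath in frame.swaths]
swathMosaic(frame, inputFiles, 'diff.int',
            rangeOffsets, azimuthOffsets,
            numberOfRangeLooks=1, numberOfAzimuthLooks=14,
            updateFrame=True, phaseCompensation=True,
            pcRangeLooks=1, pcAzimuthLooks=4,
            filt=False, resamplingMethod=1)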
Example #3
def swathMosaic(frame,
                inputFiles,
                outputfile,
                rangeOffsets,
                azimuthOffsets,
                numberOfRangeLooks,
                numberOfAzimuthLooks,
                updateFrame=False,
                phaseCompensation=False,
                phaseDiff=None,
                phaseDiffFixed=None,
                snapThreshold=None,
                snapSwath=None,
                pcRangeLooks=1,
                pcAzimuthLooks=4,
                filt=False,
                resamplingMethod=0):
    '''
    mosaic swaths
    
    #PART 1. REGULAR INPUT PARAMETERS
    frame:                 frame
    inputFiles:            input file list
    outputfile:            output mosaic file
    rangeOffsets:          range offsets
    azimuthOffsets:        azimuth offsets
    numberOfRangeLooks:    number of range looks of the input files
    numberOfAzimuthLooks:  number of azimuth looks of the input files
    updateFrame:           whether to update frame parameters

    #PART 2. PARAMETERS FOR COMPUTING PHASE DIFFERENCE BETWEEN SUBSWATHS
    phaseCompensation:     whether to do phase compensation for each swath
    phaseDiff:             pre-computed compensation phase for each swath
    phaseDiffFixed:        if provided, the estimated value is snapped to whichever of these values is nearest to it
    snapThreshold:         threshold used together with phaseDiffFixed
    snapSwath:             whether to snap each swath phase difference to the fixed values; must be specified if phaseDiffFixed is not None
    pcRangeLooks:          number of range looks to take when computing the swath phase difference
    pcAzimuthLooks:        number of azimuth looks to take when computing the swath phase difference
    filt:                  whether to filter when computing the swath phase difference

    #PART 3. RESAMPLING METHOD
    resamplingMethod:      0: amp resampling. 1: int resampling.
    '''
    from contrib.alos2proc_f.alos2proc_f import rect_with_looks
    from contrib.alos2proc.alos2proc import mosaicsubswath
    from isceobj.Alos2Proc.Alos2ProcPublic import multilook
    from isceobj.Alos2Proc.Alos2ProcPublic import cal_coherence_1
    from isceobj.Alos2Proc.Alos2ProcPublic import filterInterferogram
    from isceobj.Alos2Proc.Alos2ProcPublic import computePhaseDiff
    from isceobj.Alos2Proc.Alos2ProcPublic import snap
    #module-level dependencies assumed by this snippet (readImage and findNonzero are helpers defined in the same module)
    import os
    import datetime
    import numpy as np
    import isceobj

    numberOfSwaths = len(frame.swaths)
    swaths = frame.swaths

    rangeScale = []
    azimuthScale = []
    rectWidth = []
    rectLength = []
    for i in range(numberOfSwaths):
        rangeScale.append(swaths[0].rangePixelSize / swaths[i].rangePixelSize)
        azimuthScale.append(swaths[0].azimuthLineInterval /
                            swaths[i].azimuthLineInterval)
        if i == 0:
            rectWidth.append(
                int(swaths[i].numberOfSamples / numberOfRangeLooks))
            rectLength.append(
                int(swaths[i].numberOfLines / numberOfAzimuthLooks))
        else:
            rectWidth.append(
                round(1.0 / rangeScale[i] *
                      int(swaths[i].numberOfSamples / numberOfRangeLooks)))
            rectLength.append(
                round(1.0 / azimuthScale[i] *
                      int(swaths[i].numberOfLines / numberOfAzimuthLooks)))
            #rectWidth.append( int(1.0 / rangeScale[i] * int(swaths[i].numberOfSamples / numberOfRangeLooks)) )
            #rectLength.append( int(1.0 / azimuthScale[i] * int(swaths[i].numberOfLines / numberOfAzimuthLooks)) )

    #convert original offset to offset for images with looks
    #use list instead of np.array to make it consistent with the rest of the code
    rangeOffsets1 = [i / numberOfRangeLooks for i in rangeOffsets]
    azimuthOffsets1 = [i / numberOfAzimuthLooks for i in azimuthOffsets]

    #get offset relative to the first swath
    rangeOffsets2 = [0.0]
    azimuthOffsets2 = [0.0]
    for i in range(1, numberOfSwaths):
        rangeOffsets2.append(0.0)
        azimuthOffsets2.append(0.0)
        for j in range(1, i + 1):
            rangeOffsets2[i] += rangeOffsets1[j]
            azimuthOffsets2[i] += azimuthOffsets1[j]

    #resample each swath
    rinfs = []
    for i, inf in enumerate(inputFiles):
        rinfs.append("{}_{}{}".format(
            os.path.splitext(os.path.basename(inf))[0], i,
            os.path.splitext(os.path.basename(inf))[1]))
        #do not resample first swath
        if i == 0:
            if os.path.isfile(rinfs[i]):
                os.remove(rinfs[i])
            os.symlink(inf, rinfs[i])
        else:
            #no need to resample
            if (abs(rangeOffsets2[i] - round(rangeOffsets2[i])) < 0.0001) and (
                    abs(azimuthOffsets2[i] - round(azimuthOffsets2[i])) <
                    0.0001):
                if os.path.isfile(rinfs[i]):
                    os.remove(rinfs[i])
                os.symlink(inf, rinfs[i])
                #all subsequent uses of rangeOffsets2/azimuthOffsets2 are inside int(); round here in case
                #a value is something like 4.99999999999...
                rangeOffsets2[i] = round(rangeOffsets2[i])
                azimuthOffsets2[i] = round(azimuthOffsets2[i])
            else:
                infImg = isceobj.createImage()
                infImg.load(inf + '.xml')
                rangeOffsets2Frac = rangeOffsets2[i] - int(rangeOffsets2[i])
                azimuthOffsets2Frac = azimuthOffsets2[i] - int(
                    azimuthOffsets2[i])

                if resamplingMethod == 0:
                    rect_with_looks(inf, rinfs[i], infImg.width, infImg.length,
                                    rectWidth[i], rectLength[i], rangeScale[i],
                                    0.0, 0.0, azimuthScale[i],
                                    rangeOffsets2Frac * rangeScale[i],
                                    azimuthOffsets2Frac * azimuthScale[i], 1,
                                    1, 1, 1, 'COMPLEX', 'Bilinear')
                elif resamplingMethod == 1:
                    #decompose amplitude and phase
                    phaseFile = 'phase'
                    amplitudeFile = 'amplitude'
                    data = np.fromfile(inf, dtype=np.complex64).reshape(
                        infImg.length, infImg.width)
                    phase = np.exp(np.complex64(1j) * np.angle(data))
                    phase[np.nonzero(data == 0)] = 0
                    phase.astype(np.complex64).tofile(phaseFile)
                    amplitude = np.absolute(data)
                    amplitude.astype(np.float32).tofile(amplitudeFile)

                    #resampling
                    phaseRectFile = 'phaseRect'
                    amplitudeRectFile = 'amplitudeRect'
                    rect_with_looks(phaseFile, phaseRectFile, infImg.width,
                                    infImg.length, rectWidth[i], rectLength[i],
                                    rangeScale[i], 0.0, 0.0, azimuthScale[i],
                                    rangeOffsets2Frac * rangeScale[i],
                                    azimuthOffsets2Frac * azimuthScale[i], 1,
                                    1, 1, 1, 'COMPLEX', 'Sinc')
                    rect_with_looks(amplitudeFile, amplitudeRectFile,
                                    infImg.width, infImg.length, rectWidth[i],
                                    rectLength[i], rangeScale[i], 0.0, 0.0,
                                    azimuthScale[i],
                                    rangeOffsets2Frac * rangeScale[i],
                                    azimuthOffsets2Frac * azimuthScale[i], 1,
                                    1, 1, 1, 'REAL', 'Bilinear')

                    #recombine amplitude and phase
                    phase = np.fromfile(phaseRectFile,
                                        dtype=np.complex64).reshape(
                                            rectLength[i], rectWidth[i])
                    amplitude = np.fromfile(amplitudeRectFile,
                                            dtype=np.float32).reshape(
                                                rectLength[i], rectWidth[i])
                    (phase * amplitude).astype(np.complex64).tofile(rinfs[i])

                    #tidy up
                    os.remove(phaseFile)
                    os.remove(amplitudeFile)
                    os.remove(phaseRectFile)
                    os.remove(amplitudeRectFile)

    #determine output width and length
    #actually no need to calculate in range direction
    xs = []
    xe = []
    ys = []
    ye = []
    for i in range(numberOfSwaths):
        if i == 0:
            xs.append(0)
            xe.append(rectWidth[i] - 1)
            ys.append(0)
            ye.append(rectLength[i] - 1)
        else:
            xs.append(0 - int(rangeOffsets2[i]))
            xe.append(rectWidth[i] - 1 - int(rangeOffsets2[i]))
            ys.append(0 - int(azimuthOffsets2[i]))
            ye.append(rectLength[i] - 1 - int(azimuthOffsets2[i]))

    (xmin, xminIndex) = min((v, i) for i, v in enumerate(xs))
    (xmax, xmaxIndex) = max((v, i) for i, v in enumerate(xe))
    (ymin, yminIndex) = min((v, i) for i, v in enumerate(ys))
    (ymax, ymaxIndex) = max((v, i) for i, v in enumerate(ye))

    outWidth = xmax - xmin + 1
    outLength = ymax - ymin + 1

    #prepare offset for mosaicing
    rangeOffsets3 = []
    azimuthOffsets3 = []
    for i in range(numberOfSwaths):
        azimuthOffsets3.append(
            int(azimuthOffsets2[i]) - int(azimuthOffsets2[yminIndex]))
        if i != 0:
            rangeOffsets3.append(
                int(rangeOffsets2[i]) - int(rangeOffsets2[i - 1]))
        else:
            rangeOffsets3.append(0)

    delta = int(30 / numberOfRangeLooks)

    #compute compensation phase for each swath
    diffMean2 = [0.0 for i in range(numberOfSwaths)]
    phaseDiffEst = [None for i in range(numberOfSwaths)]
    #source of the phase difference used for each swath; possible values listed below
    phaseDiffSource = ['estimated' for i in range(numberOfSwaths)]
    # 1. 'estimated': estimated from subswath overlap
    # 2. 'estimated+snap': estimated from subswath overlap and snap to a fixed value
    # 3. 'input': pre-computed
    # confidence level: 3 > 2 > 1
    numberOfValidSamples = [None for i in range(numberOfSwaths)]
    # only record when (filt == False) and (index[0].size >= 4000)
    if phaseCompensation:
        #compute swath phase offset
        diffMean = [0.0]
        for i in range(1, numberOfSwaths):

            #no need to estimate diff phase if provided from input
            #####################################################################
            if phaseDiff != None:
                if phaseDiff[i] != None:
                    diffMean.append(phaseDiff[i])
                    phaseDiffSource[i] = 'input'
                    print('using pre-computed phase offset given from input')
                    print('phase offset: subswath{} - subswath{}: {}'.format(
                        frame.swaths[i - 1].swathNumber,
                        frame.swaths[i].swathNumber, phaseDiff[i]))
                    continue
            #####################################################################

            #all indexes start from zero; all computed start/end sample/line indexes are inclusive.

            #no need to add edge here, as we are going to find first/last nonzero sample/lines later
            #edge = delta
            edge = 0

            #image i-1
            startSample1 = edge + 0 - int(rangeOffsets2[i]) + int(
                rangeOffsets2[i - 1])
            endSample1 = -edge + rectWidth[i - 1] - 1
            startLine1 = edge + max(
                0 - int(azimuthOffsets2[i]) + int(azimuthOffsets2[i - 1]), 0)
            endLine1 = -edge + min(
                rectLength[i] - 1 - int(azimuthOffsets2[i]) +
                int(azimuthOffsets2[i - 1]), rectLength[i - 1] - 1)
            data1 = readImage(rinfs[i - 1], rectWidth[i - 1],
                              rectLength[i - 1], startSample1, endSample1,
                              startLine1, endLine1)

            #image i
            startSample2 = edge + 0
            endSample2 = -edge + rectWidth[i - 1] - 1 - int(
                rangeOffsets2[i - 1]) + int(rangeOffsets2[i])
            startLine2 = edge + max(
                0 - int(azimuthOffsets2[i - 1]) + int(azimuthOffsets2[i]), 0)
            endLine2 = -edge + min(
                rectLength[i - 1] - 1 - int(azimuthOffsets2[i - 1]) +
                int(azimuthOffsets2[i]), rectLength[i] - 1)
            data2 = readImage(rinfs[i], rectWidth[i], rectLength[i],
                              startSample2, endSample2, startLine2, endLine2)

            #remove edge due to incomplete convolution in resampling
            edge = 9
            (startLine0, endLine0, startSample0, endSample0) = findNonzero(
                np.logical_and((data1 != 0), (data2 != 0)))
            data1 = data1[startLine0 + edge:endLine0 + 1 - edge,
                          startSample0 + edge:endSample0 + 1 - edge]
            data2 = data2[startLine0 + edge:endLine0 + 1 - edge,
                          startSample0 + edge:endSample0 + 1 - edge]

            #take looks
            data1 = multilook(data1, pcAzimuthLooks, pcRangeLooks)
            data2 = multilook(data2, pcAzimuthLooks, pcRangeLooks)

            #filter
            if filt:
                data1 /= (np.absolute(data1) + (data1 == 0))
                data2 /= (np.absolute(data2) + (data2 == 0))
                data1 = filterInterferogram(data1, 3.0, 64, 1)
                data2 = filterInterferogram(data2, 3.0, 64, 1)

            #get difference
            dataDiff = data1 * np.conj(data2)
            cor = cal_coherence_1(dataDiff, win=5)
            index = np.nonzero(np.logical_and(cor > 0.85, dataDiff != 0))

            DEBUG = False
            if DEBUG:
                from isceobj.Alos2Proc.Alos2ProcPublic import create_xml
                (length7, width7) = dataDiff.shape
                filename = 'diff_ori_s{}-s{}.int'.format(
                    frame.swaths[i - 1].swathNumber,
                    frame.swaths[i].swathNumber)
                dataDiff.astype(np.complex64).tofile(filename)
                create_xml(filename, width7, length7, 'int')
                filename = 'cor_ori_s{}-s{}.cor'.format(
                    frame.swaths[i - 1].swathNumber,
                    frame.swaths[i].swathNumber)
                cor.astype(np.float32).tofile(filename)
                create_xml(filename, width7, length7, 'float')

            print('\ncompute phase difference between subswaths {} and {}'.
                  format(frame.swaths[i - 1].swathNumber,
                         frame.swaths[i].swathNumber))
            print('number of pixels with coherence > 0.85: {}'.format(
                index[0].size))

            #if the subswath overlap interferograms (MAI) were already filtered, do not filter the differential interferograms
            if (filt == False) and (index[0].size < 4000):
                #coherence too low, filter subswath overlap differential interferogram
                diffMean0 = 0.0
                breakFlag = False
                for (filterStrength, filterWinSize) in zip([3.0, 9.0],
                                                           [64, 128]):
                    dataDiff = data1 * np.conj(data2)
                    dataDiff /= (np.absolute(dataDiff) + (dataDiff == 0))
                    dataDiff = filterInterferogram(dataDiff, filterStrength,
                                                   filterWinSize, 1)
                    cor = cal_coherence_1(dataDiff, win=7)

                    DEBUG = False
                    if DEBUG:
                        from isceobj.Alos2Proc.Alos2ProcPublic import create_xml
                        (length7, width7) = dataDiff.shape
                        filename = 'diff_filt_s{}-s{}_strength_{}_winsize_{}.int'.format(
                            frame.swaths[i - 1].swathNumber,
                            frame.swaths[i].swathNumber, filterStrength,
                            filterWinSize)
                        dataDiff.astype(np.complex64).tofile(filename)
                        create_xml(filename, width7, length7, 'int')
                        filename = 'cor_filt_s{}-s{}_strength_{}_winsize_{}.cor'.format(
                            frame.swaths[i - 1].swathNumber,
                            frame.swaths[i].swathNumber, filterStrength,
                            filterWinSize)
                        cor.astype(np.float32).tofile(filename)
                        create_xml(filename, width7, length7, 'float')

                    for corth in [0.99999, 0.9999]:
                        index = np.nonzero(
                            np.logical_and(cor > corth, dataDiff != 0))
                        if index[0].size > 30000:
                            breakFlag = True
                            break
                    if breakFlag:
                        break

                if index[0].size < 100:
                    diffMean0 = 0.0
                    print(
                        '\n\nWARNING: too few high coherence pixels for swath phase difference estimation'
                    )
                    print('         number of high coherence pixels: {}\n\n'.
                          format(index[0].size))
                else:
                    print(
                        'filtered coherence threshold used: {}, number of pixels used: {}'
                        .format(corth, index[0].size))
                    angle = np.mean(np.angle(dataDiff[index]),
                                    dtype=np.float64)
                    diffMean0 += angle
                    data2 *= np.exp(np.complex64(1j) * angle)
                    print(
                        'phase offset: %15.12f rad with filter strength: %f, window size: %3d'
                        % (diffMean0, filterStrength, filterWinSize))
            else:
                if filt:
                    (diffMean0, numberOfValidSamples[i]) = computePhaseDiff(
                        data1,
                        data2,
                        coherenceWindowSize=5,
                        coherenceThreshold=0.95)
                else:
                    (diffMean0, numberOfValidSamples[i]) = computePhaseDiff(
                        data1,
                        data2,
                        coherenceWindowSize=5,
                        coherenceThreshold=0.85)
                if numberOfValidSamples[i] < 100:
                    diffMean0 = 0.0
                    print(
                        '\n\nWARNING: too few high coherence pixels for swath phase difference estimation'
                    )
                    print('         number of high coherence pixels: {}\n\n'.
                          format(numberOfValidSamples[i]))

                #do not record when filt
                if filt:
                    numberOfValidSamples[i] = None

            #save purely estimated diff phase
            phaseDiffEst[i] = diffMean0

            #if fixed diff phase provided and the estimated diff phase is close enough to a fixed value, snap to it
            if phaseDiffFixed != None:
                if snapSwath[i - 1] == True:
                    (outputValue, snapped) = snap(diffMean0, phaseDiffFixed,
                                                  snapThreshold)
                    if snapped == True:
                        diffMean0 = outputValue
                        phaseDiffSource[i] = 'estimated+snap'

            diffMean.append(diffMean0)
            print('phase offset: subswath{} - subswath{}: {}'.format(
                frame.swaths[i - 1].swathNumber, frame.swaths[i].swathNumber,
                diffMean0))

        for i in range(1, numberOfSwaths):
            for j in range(1, i + 1):
                diffMean2[i] += diffMean[j]

    #mosaic swaths
    diffflag = 1
    oflag = [0 for i in range(numberOfSwaths)]
    mosaicsubswath(outputfile, outWidth, outLength, delta, diffflag,
                   numberOfSwaths, rinfs, rectWidth, rangeOffsets3,
                   azimuthOffsets3, diffMean2, oflag)
    #remove tmp files
    for x in rinfs:
        os.remove(x)

    #update frame parameters
    if updateFrame:
        #mosaic size
        frame.numberOfSamples = outWidth
        frame.numberOfLines = outLength
        #NOTE THAT WE ARE STILL USING SINGLE LOOK PARAMETERS HERE
        #range parameters
        frame.startingRange = frame.swaths[0].startingRange
        frame.rangeSamplingRate = frame.swaths[0].rangeSamplingRate
        frame.rangePixelSize = frame.swaths[0].rangePixelSize
        #azimuth parameters
        azimuthTimeOffset = -max([
            int(x) for x in azimuthOffsets2
        ]) * numberOfAzimuthLooks * frame.swaths[0].azimuthLineInterval
        frame.sensingStart = frame.swaths[0].sensingStart + datetime.timedelta(
            seconds=azimuthTimeOffset)
        frame.prf = frame.swaths[0].prf
        frame.azimuthPixelSize = frame.swaths[0].azimuthPixelSize
        frame.azimuthLineInterval = frame.swaths[0].azimuthLineInterval

    if phaseCompensation:
        # estimated phase diff, used phase diff, used phase diff source
        return (phaseDiffEst, diffMean, phaseDiffSource, numberOfValidSamples)
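
Compared with Example #2, this version can reuse pre-computed phase differences, snap estimates to fixed values, and return the phase-difference bookkeeping when phaseCompensation is enabled. A hypothetical call; the fixed phase values, snap threshold, offsets, and look numbers are placeholders, not calibrated constants:

(phaseDiffEst, phaseDiffUsed, phaseDiffSource, numberOfValidSamples) = swathMosaic(
    frame, inputFiles, 'diff.int',
    rangeOffsets, azimuthOffsets,
    numberOfRangeLooks=1, numberOfAzimuthLooks=14,
    updateFrame=False, phaseCompensation=True,
    phaseDiff=None, phaseDiffFixed=[0.0, 0.47],
    snapThreshold=0.2, snapSwath=[True] * (len(frame.swaths) - 1),
    pcRangeLooks=1, pcAzimuthLooks=4,
    filt=False, resamplingMethod=1)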
Example #4
def estimateSwathOffset(swath1,
                        swath2,
                        image1,
                        image2,
                        rangeScale1=1,
                        azimuthScale1=1,
                        rangeScale2=1,
                        azimuthScale2=1,
                        numberOfAzimuthLooks=10):
    '''
    estimate offset of two adjacent swaths using matching
    '''
    from osgeo import gdal
    import isceobj
    from contrib.alos2proc_f.alos2proc_f import rect_with_looks
    from isceobj.Alos2Proc.Alos2ProcPublic import create_xml
    from isceobj.Alos2Proc.Alos2ProcPublic import cullOffsets
    from isceobj.Alos2Proc.Alos2ProcPublic import meanOffset
    from mroipac.ampcor.Ampcor import Ampcor
    #module-level dependencies assumed by this snippet
    import os
    import numpy as np
    from isceobj.Alos2Proc.Alos2ProcPublic import multilook

    #processing image 1
    rangeOff1 = int(
        (swath2.startingRange - swath1.startingRange) / swath1.rangePixelSize)
    if rangeOff1 < 0:
        rangeOff1 = 0
    numberOfSamples1 = swath1.numberOfSamples - rangeOff1

    numberOfSamplesRect1 = int(numberOfSamples1 / rangeScale1)
    numberOfLinesRect1 = int(swath1.numberOfLines / azimuthScale1)

    numberOfSamplesLook1 = int(numberOfSamplesRect1 / 1)
    numberOfLinesLook1 = int(numberOfLinesRect1 / numberOfAzimuthLooks)

    #get magnitude image whether complex or not
    #ReadAsArray: https://pcjericks.github.io/py-gdalogr-cookbook/raster_layers.html
    ds = gdal.Open(image1 + '.vrt', gdal.GA_ReadOnly)
    data = ds.ReadAsArray(rangeOff1, 0, numberOfSamples1, swath1.numberOfLines)
    ds = None
    (np.absolute(data)).astype(np.float32).tofile('image1.float')

    #rectify
    if rangeScale1 == 1 and azimuthScale1 == 1:
        os.rename('image1.float', 'image1_rect.float')
    else:
        rect_with_looks('image1.float', 'image1_rect.float', numberOfSamples1,
                        swath1.numberOfLines, numberOfSamplesRect1,
                        numberOfLinesRect1, rangeScale1, 0.0, 0.0,
                        azimuthScale1, 0.0, 0.0, 1, 1, 1, 1, 'REAL',
                        'Bilinear')
        os.remove('image1.float')

    #take looks
    if numberOfAzimuthLooks == 1:
        os.rename('image1_rect.float', 'image1_look.float')
    else:
        data1 = np.fromfile('image1_rect.float',
                            dtype=np.float32).reshape(numberOfLinesRect1,
                                                      numberOfSamplesRect1)
        data1 = np.sqrt(multilook(data1**2, numberOfAzimuthLooks, 1))
        data1.astype(np.float32).tofile('image1_look.float')
        os.remove('image1_rect.float')
    create_xml('image1_look.float', numberOfSamplesLook1, numberOfLinesLook1,
               'float')

    #processing image 2
    rangeOff2 = 0
    numberOfSamples2 = int(
        (swath1.startingRange + swath1.rangePixelSize *
         (swath1.numberOfSamples - 1) - swath2.startingRange) /
        swath2.rangePixelSize) + 1
    if numberOfSamples2 > swath2.numberOfSamples:
        numberOfSamples2 = swath2.numberOfSamples

    numberOfSamplesRect2 = int(numberOfSamples2 / rangeScale2)
    numberOfLinesRect2 = int(swath2.numberOfLines / azimuthScale2)

    numberOfSamplesLook2 = int(numberOfSamplesRect2 / 1)
    numberOfLinesLook2 = int(numberOfLinesRect2 / numberOfAzimuthLooks)

    #get magnitude image whether complex or not
    ds = gdal.Open(image2 + '.vrt', gdal.GA_ReadOnly)
    data = ds.ReadAsArray(rangeOff2, 0, numberOfSamples2, swath2.numberOfLines)
    ds = None
    (np.absolute(data)).astype(np.float32).tofile('image2.float')

    #rectify
    if rangeScale2 == 1 and azimuthScale2 == 1:
        os.rename('image2.float', 'image2_rect.float')
    else:
        rect_with_looks('image2.float', 'image2_rect.float', numberOfSamples2,
                        swath2.numberOfLines, numberOfSamplesRect2,
                        numberOfLinesRect2, rangeScale2, 0.0, 0.0,
                        azimuthScale2, 0.0, 0.0, 1, 1, 1, 1, 'REAL',
                        'Bilinear')
        os.remove('image2.float')

    #take looks
    if numberOfAzimuthLooks == 1:
        os.rename('image2_rect.float', 'image2_look.float')
    else:
        data2 = np.fromfile('image2_rect.float',
                            dtype=np.float32).reshape(numberOfLinesRect2,
                                                      numberOfSamplesRect2)
        data2 = np.sqrt(multilook(data2**2, numberOfAzimuthLooks, 1))
        data2.astype(np.float32).tofile('image2_look.float')
        os.remove('image2_rect.float')
    create_xml('image2_look.float', numberOfSamplesLook2, numberOfLinesLook2,
               'float')

    #matching
    ampcor = Ampcor(name='insarapp_slcs_ampcor')
    ampcor.configure()

    mMag = isceobj.createImage()
    mMag.load('image1_look.float.xml')
    mMag.setAccessMode('read')
    mMag.createImage()

    sMag = isceobj.createImage()
    sMag.load('image2_look.float.xml')
    sMag.setAccessMode('read')
    sMag.createImage()

    ampcor.setImageDataType1('real')
    ampcor.setImageDataType2('real')

    ampcor.setReferenceSlcImage(mMag)
    ampcor.setSecondarySlcImage(sMag)

    #MATCH REGION
    rgoff = 0
    azoff = int(
        (swath1.sensingStart - swath2.sensingStart).total_seconds() /
        swath1.azimuthLineInterval / azimuthScale1 / numberOfAzimuthLooks)
    #it seems that we cannot use 0; haven't looked into the problem yet
    if rgoff == 0:
        rgoff = 1
    if azoff == 0:
        azoff = 1
    firstSample = 1
    if rgoff < 0:
        firstSample = int(35 - rgoff)
    firstLine = 1
    if azoff < 0:
        firstLine = int(35 - azoff)
    ampcor.setAcrossGrossOffset(rgoff)
    ampcor.setDownGrossOffset(azoff)
    ampcor.setFirstSampleAcross(firstSample)
    ampcor.setLastSampleAcross(numberOfSamplesLook1)
    ampcor.setNumberLocationAcross(20)
    ampcor.setFirstSampleDown(firstLine)
    ampcor.setLastSampleDown(numberOfLinesLook1)
    ampcor.setNumberLocationDown(100)

    #MATCH PARAMETERS
    ampcor.setWindowSizeWidth(32)
    ampcor.setWindowSizeHeight(32)
    #note this is the half width/length of search area, so number of resulting correlation samples: 8*2+1
    ampcor.setSearchWindowSizeWidth(8)
    ampcor.setSearchWindowSizeHeight(8)

    #REST OF THE STUFF
    ampcor.setAcrossLooks(1)
    ampcor.setDownLooks(1)
    ampcor.setOversamplingFactor(64)
    ampcor.setZoomWindowSize(16)
    #1. The following are not set:
    #Matching Scale for Sample/Line Directions                       (-)    = 1. 1.
    #should the following be added in Ampcor.py?
    #if not set, as in this case, Ampcor.py's value is also 1. 1.
    #ampcor.setScaleFactorX(1.)
    #ampcor.setScaleFactorY(1.)

    #MATCH THRESHOLDS AND DEBUG DATA
    #2. The following are not set:
    #in roi_pac the value is set to 0 1
    #in isce the value is set to 0.001 1000.0
    #SNR and Covariance Thresholds                                   (-)    =  {s1} {s2}
    #should the following be added in Ampcor?
    #THIS SHOULD BE THE ONLY THING THAT IS DIFFERENT FROM THAT OF ROI_PAC
    #ampcor.setThresholdSNR(0)
    #ampcor.setThresholdCov(1)
    ampcor.setDebugFlag(False)
    ampcor.setDisplayFlag(False)

    #in summary, only two things are not set, as indicated by 'The following are not set' above.

    #run ampcor
    ampcor.ampcor()
    offsets = ampcor.getOffsetField()
    refinedOffsets = cullOffsets(offsets)

    #finalize image, and re-create it
    #otherwise the file pointer is still at the end of the image
    mMag.finalizeImage()
    sMag.finalizeImage()

    os.remove('image1_look.float')
    os.remove('image1_look.float.vrt')
    os.remove('image1_look.float.xml')
    os.remove('image2_look.float')
    os.remove('image2_look.float.vrt')
    os.remove('image2_look.float.xml')

    if refinedOffsets != None:
        rangeOffset, azimuthOffset = meanOffset(refinedOffsets)
        rangeOffset -= rangeOff1 / rangeScale1
        azimuthOffset *= numberOfAzimuthLooks
        return (rangeOffset, azimuthOffset)
    else:
        return None
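
A hypothetical use, estimating the range/azimuth offset between two adjacent swaths; each image is assumed to have an accompanying GDAL .vrt file, and the result would typically feed the rangeOffsets/azimuthOffsets arguments of swathMosaic above:

offsets = estimateSwathOffset(frame.swaths[0], frame.swaths[1],
                              'swath1.slc', 'swath2.slc',
                              numberOfAzimuthLooks=10)
if offsets is not None:
    rangeOffset, azimuthOffset = offsets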
Example #5
def spectralDiversity(referenceSwath,
                      interferogramDir,
                      interferogramPrefix,
                      outputList,
                      numberLooksScanSAR=None,
                      numberRangeLooks=20,
                      numberAzimuthLooks=10,
                      coherenceThreshold=0.85,
                      keep=False,
                      filt=False,
                      filtWinSizeRange=5,
                      filtWinSizeAzimuth=5):
    '''
    numberLooksScanSAR: number of looks of the ScanSAR system
    numberRangeLooks:   number of range looks to take
    numberAzimuthLooks: number of azimuth looks to take
    keep:               whether keep intermediate files
    '''
    import os
    import numpy as np
    from isceobj.Alos2Proc.Alos2ProcPublic import create_multi_index
    from isceobj.Alos2Proc.Alos2ProcPublic import create_xml
    from isceobj.Alos2Proc.Alos2ProcPublic import multilook
    from isceobj.Alos2Proc.Alos2ProcPublic import cal_coherence_1

    width = referenceSwath.numberOfSamples
    length = referenceSwath.numberOfLines
    lengthBurst = referenceSwath.burstSlcNumberOfLines
    nBurst = referenceSwath.numberOfBursts
    azsi = referenceSwath.azimuthLineInterval
    tc = referenceSwath.burstCycleLength / referenceSwath.prf

    bursts = [
        os.path.join(interferogramDir,
                     interferogramPrefix + '_%02d.int' % (i + 1))
        for i in range(referenceSwath.numberOfBursts)
    ]

    ####################################################
    #input parameters
    rgl = numberRangeLooks
    azl = numberAzimuthLooks
    cor_th = coherenceThreshold
    nls0 = lengthBurst / (referenceSwath.burstSlcFirstLineOffsets[nBurst - 1] /
                          (nBurst - 1.0))
    print('number of looks of the ScanSAR system: {}'.format(nls0))
    if numberLooksScanSAR != None:
        nls = numberLooksScanSAR
    else:
        nls = int(nls0)
    print('number of looks to be used: {}'.format(nls))
    ####################################################

    #read burst interferograms
    inf = np.zeros((length, width, nls), dtype=np.complex64)
    cnt = np.zeros((length, width), dtype=np.int8)
    for i in range(nBurst):
        if (i + 1) % 5 == 0 or (i + 1) == nBurst:
            print('reading burst %02d' % (i + 1))

        burst = np.fromfile(bursts[i],
                            dtype=np.complex64).reshape(lengthBurst, width)

        #subset for the burst
        cntBurst = cnt[0 +
                       referenceSwath.burstSlcFirstLineOffsets[i]:lengthBurst +
                       referenceSwath.burstSlcFirstLineOffsets[i], :]
        infBurst = inf[0 +
                       referenceSwath.burstSlcFirstLineOffsets[i]:lengthBurst +
                       referenceSwath.burstSlcFirstLineOffsets[i], :, :]

        #set number of non-zero pixels
        cntBurst[np.nonzero(burst)] += 1

        #get index
        index1 = np.nonzero(np.logical_and(burst != 0, cntBurst <= nls))
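        #append the per-pixel look count (minus one) as a third index so each burst sample is written to the next free look layer of inf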
        index2 = index1 + (cntBurst[index1] - 1, )

        #set values
        infBurst[index2] = burst[index1]

    #number of looks for each sample
    if keep:
        nlFile = 'number_of_looks.nl'
        cnt.astype(np.int8).tofile(nlFile)
        create_xml(nlFile, width, length, 'byte')

    if filt:
        import scipy.signal as ss
        filterKernel = np.ones((filtWinSizeAzimuth, filtWinSizeRange),
                               dtype=np.float64)
        for i in range(nls):
            print('filtering look {}'.format(i + 1))
            flag = (inf[:, :, i] != 0)
            #scale = ss.fftconvolve(flag, filterKernel, mode='same')
            #inf[:,:,i] = flag*ss.fftconvolve(inf[:,:,i], filterKernel, mode='same') / (scale + (scale==0))
            #this should be faster?
            scale = ss.convolve2d(flag, filterKernel, mode='same')
            inf[:, :, i] = flag * ss.convolve2d(
                inf[:, :, i], filterKernel, mode='same') / (scale +
                                                            (scale == 0))

    #width and length after multilooking
    widthm = int(width / rgl)
    lengthm = int(length / azl)
    #use the convention that ka > 0
    ka = -np.polyval(referenceSwath.azimuthFmrateVsPixel[::-1],
                     create_multi_index(width, rgl))

    #get spectral diversity interferograms
    offset_sd = []
    for i in range(1, nls):
        print('output spectral diversity interferogram %d' % i)
        #original spectral diversity interferogram
        sd = inf[:, :, 0] * np.conj(inf[:, :, i])

        #replace original amplitude with its square root
        index = np.nonzero(sd != 0)
        sd[index] /= np.sqrt(np.absolute(sd[index]))

        sdFile = outputList[i - 1]
        sd.astype(np.complex64).tofile(sdFile)
        create_xml(sdFile, width, length, 'int')

        #multi look
        sdm = multilook(sd, azl, rgl)
        cor = cal_coherence_1(sdm)

        #convert phase to offset
        offset = np.angle(sdm) / (2.0 * np.pi * ka * tc * i)[None, :] / azsi

        #compute offset using good samples
        point_index = np.nonzero(
            np.logical_and(cor >= cor_th,
                           np.angle(sdm) != 0))
        npoint = round(np.size(point_index) / 2)
        if npoint < 20:
            print(
                'WARNING: too few good samples for spectral diversity at look {}: {}'
                .format(i, npoint))
            offset_sd.append(0)
        else:
            offset_sd.append(
                np.sum(offset[point_index] * cor[point_index]) /
                np.sum(cor[point_index]))

        if keep:
            sdmFile = 'sd_%d_%drlks_%dalks.int' % (i, rgl, azl)
            sdm.astype(np.complex64).tofile(sdmFile)
            create_xml(sdmFile, widthm, lengthm, 'int')
            corFile = 'sd_%d_%drlks_%dalks.cor' % (i, rgl, azl)
            cor.astype(np.float32).tofile(corFile)
            create_xml(corFile, widthm, lengthm, 'float')
            offsetFile = 'sd_%d_%drlks_%dalks.off' % (i, rgl, azl)
            offset.astype(np.float32).tofile(offsetFile)
            create_xml(offsetFile, widthm, lengthm, 'float')

    offset_mean = np.sum(np.array(offset_sd) * np.arange(1, nls)) / np.sum(
        np.arange(1, nls))

    return offset_mean
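
A hypothetical call; the interferogram directory, prefix, and output names are placeholders. Burst interferograms are expected as <interferogramPrefix>_01.int, <interferogramPrefix>_02.int, ... inside interferogramDir, and outputList needs one name per spectral diversity interferogram (nls - 1 of them, where nls is the number of ScanSAR looks actually used):

outputList = ['sd_{}.int'.format(i) for i in range(1, 4)]
azimuthOffset = spectralDiversity(referenceSwath, 'burst_interferograms', 'diff',
                                  outputList, numberLooksScanSAR=4,
                                  numberRangeLooks=20, numberAzimuthLooks=10,
                                  coherenceThreshold=0.85, keep=True, filt=False)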