Example #1
def bandpassFilter(volume, lowestFrequency, highestFrequency, bpf=None, smooth=0,fourierOnly=False):
    """
    bandpassFilter: Performs bandpass filtering of volume.
    @param volume: The volume to be filtered
    @type volume: L{pytom_volume.vol}
    @param lowestFrequency: The lowest frequency in the filter as absolute pixel value
    @type lowestFrequency: L{float}
    @param highestFrequency: The highest frequency in the filter as absolute pixel value
    @type highestFrequency: L{float} 
    @param bpf: Bandpass filter (pytom_freqweight.weight object) if one already exists
    @param smooth: Adjusts the size of the Gaussian falloff around each band end.
    @return: The bandpass filtered volume, the filter and the filtered volume in fourier space
    @author: Thomas Hrabe
    """

    import pytom_freqweight
    import pytom_volume
    
    if volume.__class__ == pytom_volume.vol:
        from pytom.basic.fourier import fft
        fvolume = fft(volume)
    else:
        fvolume = volume
 
    if not bpf:
        bpf = pytom_freqweight.weight(lowestFrequency,highestFrequency,
                                      fvolume.sizeX(),fvolume.sizeY(),fvolume.sizeZ(),smooth)    

    return filter(fvolume,bpf,fourierOnly)      
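# Usage sketch (not part of the original source): a hedged example of calling the
# bandpassFilter defined above on a volume loaded from disk. 'particle.em' is a
# hypothetical file name and the band limits are arbitrary pixel radii; the
# unpacking follows the return order documented in the docstring.
from pytom_volume import read

volume = read('particle.em')
result = bandpassFilter(volume, lowestFrequency=5, highestFrequency=25, smooth=2)
filtered = result[0]         # bandpass-filtered volume (real space)
bpf = result[1]              # the pytom_freqweight.weight filter object
filteredFourier = result[2]  # the filtered volume in Fourier space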
Example #2
def wedgeFilter(volume,angle,radius=0,angleIsHalf=True,fourierOnly=False):
    """
    wedgeFilter: performs a single wedge filtering of volume.
    @param volume: The volume to be filtered.
    @type volume: L{pytom_volume.vol}
    @param angle: Opening angle of the wedge.
    @type angle: C{float}
    @param radius: cutoff radius (i.e. lowpass filter radius; 0 by default)
    @type radius: C{int}
    @param angleIsHalf: True if angle is half the wedge opening angle (default). If False, angle is the full opening angle and is halved internally.
    @return: The wedge filtered volume,the filter and the filtered volume in fourier space  
    @author: Thomas Hrabe
    """
    import pytom_freqweight
    
    if angle <= 0:
        return volume
    
    if not angleIsHalf:
        angle = abs(angle/2) 
    
    wf = pytom_freqweight.weight(angle,radius,volume.sizeX(),volume.sizeY(),volume.sizeZ())
    
    
    return filter(volume,wf,fourierOnly)
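# Usage sketch (not part of the original source): applying the wedgeFilter defined
# above with a 30 degree half-opening angle. 'subtomogram.em' is a hypothetical
# file name; the return order follows the docstring above.
from pytom_volume import read

subtomo = read('subtomogram.em')
result = wedgeFilter(subtomo, angle=30, radius=0, angleIsHalf=True)
wedged = result[0]           # wedge-filtered volume (real space)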
Example #3
def extractPeaksGPU(volume, reference, rotations, scoreFnc=None, mask=None, maskIsSphere=False, wedgeInfo=None, padding=True, **kwargs):
    from pytom_numpy import vol2npy
    from pytom.gpu.gpuStructures import TemplateMatchingGPU
    from pytom.tools.calcFactors import calc_fast_gpu_dimensions
    import time
    import numpy as np
    from pytom_freqweight import weight

    angles = rotations[:]
    volume, ref, mask = [vol2npy(vol) for vol in (volume, reference, mask)]

    wedgeAngle = 30
    wedgeFilter = weight(wedgeAngle, wedgeAngle, ref.shape[0] // 2, ref.shape[0], ref.shape[1], ref.shape[2], 0)
    wedgeVolume = wedgeFilter.getWeightVolume(True)
    wedge = vol2npy(wedgeVolume).copy()

    if padding:
        dimx, dimy, dimz = volume.shape

        cx = calc_fast_gpu_dimensions(dimx - 2, 4000)[0]
        cy = calc_fast_gpu_dimensions(dimy - 2, 4000)[0]
        cz = calc_fast_gpu_dimensions(dimz - 2, 4000)[0]
        voluNDAs = np.zeros([cx, cy, cz], dtype=np.float32)
        voluNDAs[:min(cx, dimx), :min(cy, dimy), :min(cz, dimz)] = volume[:min(cx, dimx), :min(cy, dimy),
                                                                   :min(cz, dimz)]
        volume = voluNDAs
        print(f'dimensions of padded volume: {volume.shape}')

    inputData = (volume, ref, mask, wedge, angles, volume.shape)

    tm_process = TemplateMatchingGPU(0, kwargs['gpuID'][0], input=inputData)
    tm_process.start()
    while tm_process.is_alive():
        time.sleep(1)

    if tm_process.completed:
        print('Template matching completed successfully')
        return [tm_process.plan.scores.get(), tm_process.plan.angles.get(), None, None]
    else:
        raise Exception('failed template matching')
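# Usage sketch (not part of the original source): calling the extractPeaksGPU
# defined above. File names, the rotation list format ((z1, z2, x) Euler angles)
# and the GPU id are assumptions; gpuID must be passed as a list, since the
# function indexes kwargs['gpuID'][0].
from pytom_volume import read

tomogram = read('tomogram.em')
template = read('template.em')
mask = read('mask.em')
rotations = [[0., 0., 0.], [90., 0., 0.]]
scores, angles, _, _ = extractPeaksGPU(tomogram, template, rotations, mask=mask,
                                       maskIsSphere=True, padding=True, gpuID=[0])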
Example #4
def highpassFilter(volume,band,smooth=0,fourierOnly=False):   
    """
    highpassFilter: Performs highpass filtering of volume.
    @param volume: The volume to be filtered
    @type volume: L{pytom_volume.vol}
    @param band: Lower end of band (in pixels)
    @param smooth: Adjusts the size of gaussian falloff around each band end.
    @return: The highpass filtered volume,the filter and the filtered volume in fourier space
    @author: Thomas Hrabe 
    """
    import pytom_freqweight
    import pytom_volume
    
    if volume.__class__ == pytom_volume.vol:
        from pytom.basic.fourier import fft
        fvolume = fft(volume)
    else:
        fvolume = volume
    
    hpf = pytom_freqweight.weight(band,fvolume.sizeX(),fvolume.sizeX(),fvolume.sizeY(),fvolume.sizeZ(),smooth)
       
    return filter(fvolume,hpf,fourierOnly)
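# Usage sketch (not part of the original source): a highpass at 10 pixels with a
# smooth edge, using the highpassFilter defined above. 'map.em' is a hypothetical
# file name; the return order follows the docstring above.
from pytom_volume import read

volume = read('map.em')
result = highpassFilter(volume, band=10, smooth=2)
highpassed = result[0]       # highpass-filtered volume (real space)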
Example #5
def compareTwoVolumes(particle,reference,referenceWeighting,wedgeInfo,rotations,shift,
         scoreObject=0,mask=None,preprocessing=[],binning=1,verbose=False):
    """
    compareTwoVolumes: Compares two volumes
    @param particle: A particle
    @param reference: A reference
    @param referenceWeighting: Fourier weighting of the reference (sum of wedges for instance)
    @param wedgeInfo: Wedge information describing the missing wedge
    @type wedgeInfo: L{pytom.basic.structures.WedgeInfo}
    @param rotations: All rotations to be scanned
    @type rotations: Must be a L{pytom.angles.angleList.OneAngleList}
    @param scoreObject: Scoring object; defaults to nxcfScore if not given
    @type scoreObject: L{pytom.score.score.Score}
    @param mask: Mask applied to the rotated reference
    @param preprocessing: Class storing preprocessing of particle and reference such as bandpass
    @type preprocessing: L{pytom.alignment.preprocessing.Preprocessing}
    @param binning: Is binning applied (Disabled when == 1)
    @return: Returns the correlation coefficient of two volumes.
    @author: Thomas Hrabe
    """

    from pytom_volume import vol,transformSpline
    from pytom.basic.filter import filter,rotateWeighting
    from pytom.angles.angleList import OneAngleList
    
    assert particle.__class__ == vol, "particle not of type vol"
    assert reference.__class__ == vol, "reference not of type vol"
    assert referenceWeighting.__class__ == vol or referenceWeighting.__class__ == str
    assert rotations.__class__ == OneAngleList or len(rotations) == 1
    
    if scoreObject == 0:
        from pytom.score.score import nxcfScore
        scoreObject = nxcfScore()
    
    particleCopy = vol(particle.sizeX(),particle.sizeY(),particle.sizeZ())
    
    #process particle   
    particle = wedgeInfo.apply(particle)
    particle = preprocessing.apply(particle,True)
    particleCopy.copyVolume(particle)
    
    #manipulate reference
    currentRotation = rotations.nextRotation()
    
    sizeX = particle.sizeX() 
    sizeY = particle.sizeY()
    sizeZ = particle.sizeZ()
    
    centerX = sizeX/2.0 
    centerY = sizeY/2.0 
    centerZ = sizeZ/2.0 
    
    
    simulatedVol= vol(sizeX,sizeY,sizeZ)

    transformSpline(reference,simulatedVol,currentRotation[0],currentRotation[1],currentRotation[2],
                    centerX,centerY,centerZ,0,0,0,shift[0]/binning,shift[1]/binning,shift[2]/binning)
    
    simulatedVol = wedgeInfo.apply(simulatedVol)
    m = mask.getVolume(currentRotation)
    simulatedVol = simulatedVol * m
    
    simulatedVol = preprocessing.apply(simulatedVol,True)

    if not referenceWeighting.__class__ == str:
        from pytom_freqweight import weight
        
        weightingRotated = rotateWeighting(weighting=referenceWeighting, z1=currentRotation[0], z2=currentRotation[1],
                                           x=currentRotation[2], isReducedComplex=None, returnReducedComplex=True,
                                           binarize=False)
        if verbose:
            particleCopy.info('pc')
            weightingRotated.info('wr')
        
        r = list(filter(particleCopy,weight(weightingRotated)))            
        particleCopy = r[0]
        
    scoringResult = scoreObject.scoringCoefficient(particleCopy,simulatedVol,m)
    
    if scoringResult.__class__ == list:
        scoringResult = scoringResult[0]
    
    
    assert scoringResult == scoringResult, "scoringResult possibly NaN"
    
    return scoringResult 
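# Usage sketch (not part of the original source): compareTwoVolumes expects fully
# constructed pytom objects. Building a wedge, a OneAngleList and a Mask is
# outside this snippet, so wedgeInfo, oneAngleList and mask below are hypothetical
# variables assumed to be prepared elsewhere; passing a string as
# referenceWeighting disables the reference-weighting branch, as the code above shows.
from pytom_volume import read
from pytom.alignment.preprocessing import Preprocessing

particle = read('particle.em')
reference = read('reference.em')
score = compareTwoVolumes(particle, reference,
                          referenceWeighting='',
                          wedgeInfo=wedgeInfo,       # hypothetical, prepared elsewhere
                          rotations=oneAngleList,    # hypothetical OneAngleList with one rotation
                          shift=[0, 0, 0],
                          mask=mask,                 # hypothetical Mask object
                          preprocessing=Preprocessing(),
                          binning=1)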
Example #6
def bestAlignmentGPU(particleList, rotations, plan, isSphere=True):
    """
    bestAlignment: Determines best alignment of particle relative to the reference
    @param particle: A particle
    @type particle: L{pytom_volume.vol}
    @param reference: A reference
    @type reference: L{pytom_volume.vol}
    @param referenceWeighting: Fourier weighting of the reference (sum of wedges for instance)
    @type referenceWeighting: L{pytom.basic.structures.vol}
    @param wedgeInfo: What does the wedge look alike?
    @type wedgeInfo: L{pytom.basic.structures.Wedge}
    @param rotations: All rotations to be scanned
    @type rotations: L{pytom.angles.AngleObject}
    @param scoreObject:
    @type scoreObject: L{pytom.score.score.Score}
    @param mask: real-space mask for correlation function
    @type mask: L{pytom.basic.structures.Particle}
    @param preprocessing: Class storing preprocessing of particle and reference such as bandpass
    @type preprocessing: L{pytom.alignment.preprocessing.Preprocessing}
    @param progressBar: Display progress bar of alignment. False by default.
    @param binning: binning factor - e.g. binning=2 reduces size by FACTOR of 2
    @type binning: int or float
    @param bestPeak: Initialise best peak with old values.
    @param verbose: Print out infos. Writes CC volume to disk!!! Default is False
    @return: Returns the best rotation for particle and the corresponding scoring result.
    @author: Thomas Hrabe
    """
    from pytom.gpu.tools import add_particle_to_sum
    from pytom.gpu.prepare_plans import prepare_glocal_plan
    from pytom.gpu.correlation import subPixelPeak, find_coords_max_ccmap
    from pytom.gpu.filter import filter, applyWedge
    from pytom.gpu.preprocessing import applyPreprocessing
    from pytom.gpu.transformations import resize, resizeFourier, rotate
    import numpy as np

    binningType = 'Fourier'  # or 'Fourier'

    bestPeakList = []

    for pid, particle in particleList:

        centerCoordinates = np.array([size//2 for size in plan.shape], dtype=np.int32)

        # create buffer volume for transformed particle
        applyWedge(particle)  # apply wedge to itself
        applyPreprocessing(particle, taper=np.float32(plan.shape[0] / 10.))  # filter particle to some resolution

        bestPeak = [-1000, [0,0,0], [0,0,0]]

        for n, currentRotation in enumerate(rotations):
            if not isSphere:
                meanUnderMask(particle, plan.mask, plan.p)
                stdUnderMask(particle, plan.mask, plan.p, plan.meanV)

            rotateWedge(plan.reference, currentRotation, plan.wedge, plan.mask, centerCoordinates)
            applyPreprocessing(plan.simulatedVol, bypassFlag=True)

            # weight particle

            rotateWeight(plan.reference, plan.referenceWeighting, currentRotation)
            filterParticle(particle, weight(weightingRotated), plan.particleCopy)

            cross_correlation(plan.particleCopy, plan.simulatedVol, plan.ccc_map)

            peakCoordinates = find_coords_max_ccmap(plan.ccc_map)

            # with subPixelPeak
            [peakValue, peakPosition] = subPixelPeak(plan.ccc_map, coordinates=peakCoordinates, cropsize=10)

            # determine shift relative to center
            shiftX = (peakPosition[0] - centerCoordinates[0])
            shiftY = (peakPosition[1] - centerCoordinates[1])
            shiftZ = (peakPosition[2] - centerCoordinates[2])

            newPeak = (peakValue, currentRotation, (shiftX, shiftY, shiftZ))

            if bestPeak[0] < newPeak[0]:
                bestPeak = newPeak

        if pid % 2 == 0:
            add_particle_to_sum(particle, bestPeak[1][::-1]*-1, plan.evenSum)
        else:
            add_particle_to_sum(particle, bestPeak[1][::-1]*-1, plan.oddSum)

        bestPeakList.append(bestPeak)

    return bestPeakList
Example #7
def bestAlignment(particle, reference, referenceWeighting, wedgeInfo, rotations,
         scoreObject=0, mask=None, preprocessing=None, progressBar=False, binning=1,
         bestPeak=None, verbose=False):
    """
    bestAlignment: Determines best alignment of particle relative to the reference
    @param particle: A particle
    @type particle: L{pytom_volume.vol}
    @param reference: A reference
    @type reference: L{pytom_volume.vol}
    @param referenceWeighting: Fourier weighting of the reference (sum of wedges for instance)
    @type referenceWeighting: L{pytom.basic.structures.vol}
    @param wedgeInfo: Wedge information describing the missing wedge
    @type wedgeInfo: L{pytom.basic.structures.Wedge}
    @param rotations: All rotations to be scanned
    @type rotations: L{pytom.angles.AngleObject}
    @param scoreObject: Scoring object; defaults to xcfScore if not given
    @type scoreObject: L{pytom.score.score.Score}
    @param mask: real-space mask for correlation function
    @type mask: L{pytom.basic.structures.Particle}
    @param preprocessing: Class storing preprocessing of particle and reference such as bandpass
    @type preprocessing: L{pytom.alignment.preprocessing.Preprocessing}
    @param progressBar: Display progress bar of alignment. False by default.
    @param binning: binning factor - e.g. binning=2 reduces size by FACTOR of 2
    @type binning: int or float
    @param bestPeak: Initialise the best peak with previous values.
    @param verbose: Print out info and write the CC volume to disk. Default is False
    @return: Returns the best rotation for particle and the corresponding scoring result.
    @author: Thomas Hrabe
    """
    from pytom.basic.correlation import subPixelPeak, subPixelPeakParabolic
    from pytom.alignment.structures import Peak
    from pytom_volume import peak, vol, vol_comp
    from pytom.basic.filter import filter,rotateWeighting
    from pytom.basic.structures import Rotation, Shift, Particle, Mask
    from pytom.angles.angle import AngleObject
    from pytom.alignment.preprocessing import Preprocessing
    from pytom.basic.transformations import resize, resizeFourier
    binningType = 'Fourier' # or 'Fourier'

    assert isinstance(rotations, AngleObject), "bestAlignment: rotations must be AngleObject!"
    currentRotation = rotations.nextRotation()
    if currentRotation == [None,None,None]:
        raise Exception('bestAlignment: No rotations are sampled! Something is wrong with input rotations')

    assert particle.__class__ == vol, "particle not of type vol"
    assert reference.__class__ == vol, "reference not of type vol"
    assert (referenceWeighting.__class__ == vol or referenceWeighting.__class__ == str), \
        "referenceWeighting not volume or str"
    if mask:
        assert mask.__class__ == Mask, "Mask not of type Mask"
        m = mask.getVolume()

    if scoreObject == 0 or not scoreObject:
        from pytom.score.score import xcfScore
        scoreObject = xcfScore()
    # fix binning
    if binning == 0:
        binning = 1
    if binning != 1:
        particleUnbinned = vol(particle.sizeX(), particle.sizeY(), particle.sizeZ())
        particleUnbinned.copyVolume(particle)
        particle = resize(volume=particle, factor=1./binning, interpolation=binningType)
        if type(particle) == tuple:
            particle = particle[0]
        referenceUnbinned = vol(reference.sizeX(), reference.sizeY(), reference.sizeZ())
        referenceUnbinned.copyVolume(reference)
        reference = resize(volume=reference, factor=1./binning, interpolation=binningType)
        if type(reference) == tuple:
            reference = reference[0]
        if mask:
            m = resize(volume=m, factor=1./binning, interpolation='Spline')
        if not referenceWeighting.__class__ == str:
            referenceWeightingUnbinned = vol_comp(referenceWeighting.sizeX(), referenceWeighting.sizeY(),
                                                  referenceWeighting.sizeZ())
            referenceWeightingUnbinned.copyVolume(referenceWeighting)
            if binning != 1:
                referenceWeighting = resizeFourier(fvol=referenceWeighting, factor=1./binning)
    centerX, centerY, centerZ = int(particle.sizeX()/2), int(particle.sizeY()/2), int(particle.sizeZ()/2)

    # create buffer volume for transformed particle 
    particleCopy = vol(particle.sizeX(),particle.sizeY(),particle.sizeZ())
    particle = wedgeInfo.apply(particle) #apply wedge to itself
    if preprocessing is None:
        preprocessing = Preprocessing()
    preprocessing.setTaper( taper=particle.sizeX()/10.)
    particle = preprocessing.apply(volume=particle, bypassFlag=True)  # filter particle to some resolution
    particleCopy.copyVolume(particle)
    # compute standard deviation volume only if really needed
    if mask and (scoreObject._type=='FLCFScore'):
        from pytom_volume import sum
        from pytom.basic.correlation import meanUnderMask, stdUnderMask
        p = sum(m)
        meanV = meanUnderMask(particle, m, p)
        stdV = stdUnderMask(particle, m, p, meanV)
    else:
        meanV = None
        stdV = None

    while currentRotation != [None,None,None]:
        if mask:
            m = mask.getVolume(currentRotation)
            if binning != 1:
                m = resize(volume=m, factor=1./binning, interpolation='Spline')
            #update stdV if mask is not a sphere
            # compute standard deviation volume really only if needed
            if not mask.isSphere() and (scoreObject._type=='FLCFScore'):
                meanV   = meanUnderMask(particle, m, p)
                stdV    = stdUnderMask(particle, m, p, meanV)
        else:
            m = None
        
        simulatedVol = _rotateWedgeReference(reference, currentRotation, wedgeInfo, m, [centerX, centerY, centerZ])
        simulatedVol = preprocessing.apply(volume=simulatedVol, bypassFlag=True)
        
        #weight particle
        if not referenceWeighting.__class__ == str:
            from pytom_freqweight import weight
            weightingRotated = rotateWeighting(weighting=referenceWeighting, z1=currentRotation[0],
                                               z2=currentRotation[1], x=currentRotation[2], isReducedComplex=True,
                                               returnReducedComplex=True, binarize=False)
            particleCopy.copyVolume(particle)
            r = list(filter(particleCopy, weight(weightingRotated)))
            particleCopy = r[0]
        
        scoringResult = scoreObject.score(particleCopy, simulatedVol, m, stdV)
        
        pk = peak(scoringResult)

        #with subPixelPeak
        [peakValue,peakPosition] = subPixelPeak(scoreVolume=scoringResult, coordinates=pk,
                                               interpolation='Quadratic', verbose=False)
        #[peakValue,peakPosition] = subPixelPeakParabolic(scoreVolume=scoringResult, coordinates=pk, verbose=False)

        # determine shift relative to center
        shiftX = (peakPosition[0] - centerX) * binning
        shiftY = (peakPosition[1] - centerY) * binning
        shiftZ = (peakPosition[2] - centerZ) * binning

        #NANs would fail this test.
        assert peakValue == peakValue, "peakValue seems to be NaN"
        
        newPeak = Peak(peakValue, Rotation(currentRotation), Shift(shiftX, shiftY, shiftZ))
        
        if verbose:
            print('Rotation: z1=%3.1f, z2=%3.1f, x=%3.1f; Dx=%2.2f, Dy=%2.2f, Dz=%2.2f, CC=%2.3f' % \
                  (currentRotation[0], currentRotation[1], currentRotation[2], shiftX, shiftY, shiftZ, peakValue))

        if bestPeak is None:
            bestPeak = newPeak
            if verbose:
                scoringResult.write('BestScore.em')
        if bestPeak < newPeak:
            bestPeak = newPeak
            if verbose:
                scoringResult.write('BestScore.em')

        currentRotation = rotations.nextRotation()
    # repeat ccf for binned sampling to get translation accurately
    if binning != 1:
        m = mask.getVolume(bestPeak.getRotation())
        centerX, centerY, centerZ = int(particleUnbinned.sizeX()/2), int(particleUnbinned.sizeY()/2), \
                                    int(particleUnbinned.sizeZ()/2)
        simulatedVol = _rotateWedgeReference(referenceUnbinned, bestPeak.getRotation(), wedgeInfo, m,
                                             [centerX, centerY, centerZ])
        simulatedVol = preprocessing.apply(volume=simulatedVol, bypassFlag=True)
        if mask and scoreObject._type=='FLCFScore':
            p = sum(m)
            meanV = meanUnderMask(volume=particleUnbinned, mask=m, p=p)
            stdV  = stdUnderMask(volume=particleUnbinned, mask=m, p=p, meanV=meanV)
        scoreObject._peakPrior.reset_weight()
        scoringResult = scoreObject.score(particle=particleUnbinned, reference=simulatedVol, mask=m, stdV=stdV)
        pk = peak(scoringResult)
        [peakValue,peakPosition] = subPixelPeak(scoreVolume=scoringResult, coordinates=pk, interpolation='Quadratic',
                                                verbose=False)
        shiftX = (peakPosition[0] - centerX)
        shiftY = (peakPosition[1] - centerY)
        shiftZ = (peakPosition[2] - centerZ)
        bestPeak = Peak(peakValue, bestPeak.getRotation(), Shift(shiftX, shiftY, shiftZ))
    if verbose:
        print('BestAlignment: z1=%3.1f, z2=%3.1f, x=%3.1f; Dx=%2.2f, Dy=%2.2f, Dz=%2.2f, CC=%2.3f' % \
              (bestPeak.getRotation()[0], bestPeak.getRotation()[1], bestPeak.getRotation()[2],
               bestPeak.getShift()[0], bestPeak.getShift()[1], bestPeak.getShift()[2], bestPeak.getScoreValue()))
    rotations.reset()
    scoreObject._peakPrior.reset_weight()
    return bestPeak
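# Usage sketch (not part of the original source): bestAlignment needs a particle,
# a reference, an AngleObject with the rotations to scan, and wedge and mask
# structures. Constructing those is tool specific, so rotations, wedgeInfo and
# mask below are hypothetical variables assumed to exist already; a string as
# referenceWeighting again disables reference weighting.
from pytom_volume import read
from pytom.alignment.preprocessing import Preprocessing

particle = read('particle.em')
reference = read('reference.em')
peak = bestAlignment(particle, reference,
                     referenceWeighting='',
                     wedgeInfo=wedgeInfo,              # hypothetical, prepared elsewhere
                     rotations=rotations,              # hypothetical AngleObject
                     mask=mask,                        # hypothetical Mask object
                     preprocessing=Preprocessing(),
                     binning=2, verbose=False)
print(peak.getRotation(), peak.getShift(), peak.getScoreValue())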
Example #8
def writeAlignedProjections(TiltSeries_,
                            weighting=None,
                            lowpassFilter=None,
                            binning=None,
                            verbose=False,
                            write_images=True):
    """write weighted and aligned projections to disk1

       @param TiltSeries_: Tilt Series
       @type TiltSeries_: reconstruction.TiltSeries
       @param weighting: weighting (<0: analytical weighting; >1: exact weighting, value corresponds to object diameter in pixels AFTER binning)
       @type weighting: float
       @param lowpassFilter: lowpass filter (in Nyquist)
       @type lowpassFilter: float
       @param binning: binning (default: 1 = no binning). binning=2: 2x2 pixels -> 1 pixel, binning=3: 3x3 pixels -> 1 pixel, etc.

       @author: FF
    """
    import numpy
    from pytom_numpy import vol2npy
    from pytom.basic.files import read_em, write_em
    from pytom.basic.functions import taper_edges
    from pytom.basic.transformations import general_transform2d
    from pytom.basic.fourier import ifft, fft
    from pytom.basic.filter import filter as filterFunction, bandpassFilter
    from pytom.basic.filter import circleFilter, rampFilter, exactFilter, fourierFilterShift, rotateFilter
    from pytom_volume import complexRealMult, vol
    import pytom_freqweight
    from pytom.basic.transformations import resize
    from pytom.gui.guiFunctions import fmtAR, headerAlignmentResults, datatypeAR
    import os
    import copy
    import mrcfile

    if binning:
        imdim = int(float(TiltSeries_._imdim) / float(binning) + .5)
    else:
        binning = 1
        imdim = TiltSeries_._imdim
    print('imdim', imdim)

    sliceWidth = imdim

    # pre-determine analytical weighting function and lowpass for speedup
    if (weighting != None) and (weighting < -0.001):
        w_func = fourierFilterShift(rampFilter(imdim, imdim))
    print('start weighting')
    # design lowpass filter
    if lowpassFilter:
        if lowpassFilter > 1.:
            lowpassFilter = 1.
            print("Warning: lowpassFilter > 1 - set to 1 (=Nyquist)")
        # pytom_freqweight.weight arguments: (angle, cutoff radius, dimx, dimy, dimz, smooth)
        lpf = pytom_freqweight.weight(0.0, lowpassFilter * imdim / 2, imdim,
                                      imdim // 2 + 1, 1,
                                      lowpassFilter / 5. * imdim)
        #lpf = bandpassFilter(volume=vol(imdim, imdim,1),lowestFrequency=0,highestFrequency=int(lowpassFilter*imdim/2),
        #                     bpf=None,smooth=lowpassFilter/5.*imdim,fourierOnly=False)[1]

    tilt_angles = []

    for projection in TiltSeries_._ProjectionList:
        tilt_angles.append(projection._tiltAngle)
    tilt_angles = sorted(tilt_angles)
    print(tilt_angles)
    #q = numpy.matrix(abs(numpy.arange(-imdim//2, imdim//2)))

    alignmentResults = numpy.zeros((len(TiltSeries_._ProjectionList)),
                                   dtype=datatypeAR)
    alignmentResults['TiltAngle'] = tilt_angles

    for (ii, projection) in enumerate(TiltSeries_._ProjectionList):

        alignmentResults['FileName'][ii] = os.path.join(
            os.getcwd(), projection._filename)
        transX = -projection._alignmentTransX / binning
        transY = -projection._alignmentTransY / binning
        rot = -(projection._alignmentRotation + 90.)
        mag = projection._alignmentMagnification

        alignmentResults['AlignmentTransX'][ii] = transX
        alignmentResults['AlignmentTransY'][ii] = transY
        alignmentResults['InPlaneRotation'][ii] = rot
        alignmentResults['Magnification'][ii] = mag

        if write_images:
            if projection._filename.split('.')[-1] == 'st':
                from pytom.basic.files import EMHeader, read
                header = EMHeader()
                header.set_dim(x=imdim, y=imdim, z=1)
                idx = projection._index
                if verbose:
                    print("reading in projection %d" % idx)
                image = read(file=projection._filename,
                             subregion=[
                                 0, 0, idx - 1, TiltSeries_._imdim,
                                 TiltSeries_._imdim, 1
                             ],
                             sampling=[0, 0, 0],
                             binning=[0, 0, 0])
                if not (binning == 1 or binning is None):
                    image = resize(volume=image, factor=1 / float(binning))[0]
            else:
                # read projection files
                from pytom.basic.files import EMHeader, read, read_em_header
                print(projection._filename)
                image = read(projection._filename)
                image = resize(volume=image, factor=1 / float(binning))[0]

                if projection._filename[-3:] == '.em':
                    header = read_em_header(projection._filename)
                else:
                    header = EMHeader()
                    header.set_dim(x=imdim, y=imdim, z=1)

            if lowpassFilter:
                filtered = filterFunction(volume=image,
                                          filterObject=lpf,
                                          fourierOnly=False)
                image = filtered[0]
            tiltAngle = projection._tiltAngle
            header.set_tiltangle(tiltAngle)
            # normalize to contrast - subtract mean and norm to mean
            immean = vol2npy(image).mean()
            image = (image - immean) / immean
            # smoothen borders to prevent high contrast oscillations
            image = taper_edges(image, imdim // 30)[0]
            # transform projection according to tilt alignment

            if projection._filename.split('.')[-1] == 'st':
                newFilename = (TiltSeries_._alignedTiltSeriesName + "_" +
                               str(projection.getIndex()) + '.em')
            else:
                TiltSeries_._tiltSeriesFormat = 'mrc'
                newFilename = (TiltSeries_._alignedTiltSeriesName + "_" +
                               str(projection.getIndex()) + '.' +
                               TiltSeries_._tiltSeriesFormat)
            if verbose:
                tline = ("%30s" % newFilename)
                tline = tline + (" (tiltAngle=%6.2f)" % tiltAngle)
                tline = tline + (": transX=%6.1f" % transX)
                tline = tline + (", transY=%6.1f" % transY)
                tline = tline + (", rot=%6.2f" % rot)
                tline = tline + (", mag=%5.4f" % mag)
                print(tline)

            image = general_transform2d(v=image,
                                        rot=rot,
                                        shift=[transX, transY],
                                        scale=mag,
                                        order=[2, 1, 0],
                                        crop=True)

            # smoothen once more to avoid edges
            image = taper_edges(image, imdim // 30)[0]

            # analytical weighting
            if (weighting != None) and (weighting < 0):
                image = (ifft(complexRealMult(fft(image), w_func)) /
                         (image.sizeX() * image.sizeY() * image.sizeZ()))

            elif (weighting != None) and (weighting > 0):
                if abs(weighting - 2) < 0.001:
                    w_func = fourierFilterShift(
                        rotateFilter(tilt_angles, tiltAngle, imdim, imdim,
                                     sliceWidth))
                else:
                    w_func = fourierFilterShift(
                        exactFilter(tilt_angles, tiltAngle, imdim, imdim,
                                    sliceWidth))

                image = (ifft(complexRealMult(fft(image), w_func)) /
                         (image.sizeX() * image.sizeY() * image.sizeZ()))
            header.set_tiltangle(tilt_angles[ii])

            if newFilename.endswith('.mrc'):
                data = copy.deepcopy(vol2npy(image))
                mrcfile.new(newFilename,
                            data.T.astype(numpy.float32),
                            overwrite=True)
            else:
                write_em(filename=newFilename, data=image, header=header)

            if verbose:
                print("%30s written ..." % newFilename)

    outname = os.path.join(os.path.dirname(TiltSeries_._alignedTiltSeriesName),
                           'alignmentResults.txt')
    numpy.savetxt(outname,
                  alignmentResults,
                  fmt=fmtAR,
                  header=headerAlignmentResults)
    print('Alignment successful. See {} for results.'.format(outname))
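# Usage sketch (not part of the original source): the TiltSeries object is
# normally created by the surrounding reconstruction workflow, so tiltSeries
# below is a hypothetical, already aligned reconstruction.TiltSeries instance.
# weighting=-1 selects the analytical ramp weighting; lowpassFilter is given as a
# fraction of Nyquist.
writeAlignedProjections(tiltSeries,
                        weighting=-1,
                        lowpassFilter=0.9,
                        binning=2,
                        verbose=True,
                        write_images=True)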
Example #9
    def start(self, job, verbose=False):
        if self.mpi_id == 0:
            from pytom.basic.structures import ParticleList, Reference
            from pytom.basic.resolution import bandToAngstrom
            from pytom.basic.filter import lowpassFilter
            from math import ceil

            # randomly split the particle list into 2 half sets
            if len(job.particleList.splitByClass()) != 2:
                import numpy as np
                n = len(job.particleList)
                labels = np.random.randint(2, size=(n, ))
                print(self.node_name + ': Number of 1st half set:',
                      n - np.sum(labels), 'Number of 2nd half set:',
                      np.sum(labels))
                for i in range(n):
                    p = job.particleList[i]
                    p.setClass(labels[i])

            self.destination = job.destination
            new_reference = job.reference
            old_freq = job.freq
            new_freq = job.freq
            # main node
            for i in range(job.max_iter):
                if verbose:
                    print(self.node_name + ': starting iteration %d ...' % i)

                # construct a new job by updating the reference and the frequency
                new_job = FRMJob(job.particleList, new_reference, job.mask,
                                 job.peak_offset, job.sampleInformation,
                                 job.bw_range, new_freq, job.destination,
                                 job.max_iter - i, job.r_score, job.weighting)

                # distribute it
                self.distribute_job(new_job, verbose)

                # get the result back
                all_even_pre = None  # the 1st set
                all_even_wedge = None
                all_odd_pre = None  # the 2nd set
                all_odd_wedge = None
                pl = ParticleList()
                for j in range(self.num_workers):
                    result = self.get_result()
                    pl += result.pl
                    pre, wedge = self.retrieve_res_vols(result.name)

                    if self.assignment[result.worker_id] == 0:
                        if all_even_pre:
                            all_even_pre += pre
                            all_even_wedge += wedge
                        else:
                            all_even_pre = pre
                            all_even_wedge = wedge
                    else:
                        if all_odd_pre:
                            all_odd_pre += pre
                            all_odd_wedge += wedge
                        else:
                            all_odd_pre = pre
                            all_odd_wedge = wedge

                # write the new particle list to the disk
                pl.toXMLFile('aligned_pl_iter' + str(i) + '.xml')

                # create the averages separately
                if verbose:
                    print(self.node_name + ': determining the resolution ...')
                even = self.create_average(all_even_pre, all_even_wedge)
                odd = self.create_average(all_odd_pre, all_odd_wedge)

                # apply symmetries if any
                even = job.symmetries.applyToParticle(even)
                odd = job.symmetries.applyToParticle(odd)

                # determine the transformation between even and odd
                # here we assume the wedge from both sets are fully sampled
                from sh_alignment.frm import frm_align
                pos, angle, score = frm_align(odd, None, even, None,
                                              job.bw_range, new_freq,
                                              job.peak_offset)
                print(self.node_name +
                      ': Transform of even set to match the odd set - shift: ' +
                      str(pos) + ' rotation: ' + str(angle))

                # transform the odd set accordingly
                from pytom_volume import vol, transformSpline
                from pytom.basic.fourier import ftshift
                from pytom_volume import reducedToFull
                from pytom_freqweight import weight
                transformed_odd_pre = vol(odd.sizeX(), odd.sizeY(),
                                          odd.sizeZ())
                full_all_odd_wedge = reducedToFull(all_odd_wedge)
                ftshift(full_all_odd_wedge)
                odd_weight = weight(
                    full_all_odd_wedge)  # wrap the wedge as a weight object so it can be rotated
                transformed_odd = vol(odd.sizeX(), odd.sizeY(), odd.sizeZ())

                transformSpline(all_odd_pre, transformed_odd_pre, -angle[1],
                                -angle[0], -angle[2],
                                odd.sizeX() / 2,
                                odd.sizeY() / 2,
                                odd.sizeZ() / 2, -(pos[0] - odd.sizeX() / 2),
                                -(pos[1] - odd.sizeY() / 2),
                                -(pos[2] - odd.sizeZ() / 2), 0, 0, 0)
                odd_weight.rotate(-angle[1], -angle[0], -angle[2])
                transformed_odd_wedge = odd_weight.getWeightVolume(True)
                transformSpline(odd, transformed_odd, -angle[1], -angle[0],
                                -angle[2],
                                odd.sizeX() / 2,
                                odd.sizeY() / 2,
                                odd.sizeZ() / 2, -(pos[0] - odd.sizeX() / 2),
                                -(pos[1] - odd.sizeY() / 2),
                                -(pos[2] - odd.sizeZ() / 2), 0, 0, 0)

                all_odd_pre = transformed_odd_pre
                all_odd_wedge = transformed_odd_wedge
                odd = transformed_odd

                # determine resolution
                resNyquist, resolutionBand, numberBands = self.determine_resolution(
                    even, odd, job.fsc_criterion, None, job.mask, verbose)

                # write the half set to the disk
                even.write(
                    os.path.join(self.destination,
                                 'fsc_' + str(i) + '_even.em'))
                odd.write(
                    os.path.join(self.destination,
                                 'fsc_' + str(i) + '_odd.em'))

                current_resolution = bandToAngstrom(
                    resolutionBand, job.sampleInformation.getPixelSize(),
                    numberBands, 1)
                if verbose:
                    print(
                        self.node_name + ': current resolution ' +
                        str(current_resolution), resNyquist)

                # create new average
                all_even_pre += all_odd_pre
                all_even_wedge += all_odd_wedge
                average = self.create_average(all_even_pre, all_even_wedge)

                # apply symmetries
                average = job.symmetries.applyToParticle(average)

                # filter average to resolution
                average_name = os.path.join(self.destination,
                                            'average_iter' + str(i) + '.em')
                average.write(average_name)

                # update the references
                new_reference = [
                    Reference(
                        os.path.join(self.destination,
                                     'fsc_' + str(i) + '_even.em')),
                    Reference(
                        os.path.join(self.destination,
                                     'fsc_' + str(i) + '_odd.em'))
                ]

                # low pass filter the reference and write it to the disk
                filtered = lowpassFilter(average, ceil(resolutionBand),
                                         ceil(resolutionBand) / 10)
                filtered_ref_name = os.path.join(
                    self.destination, 'average_iter' + str(i) + '_res' +
                    str(current_resolution) + '.em')
                filtered[0].write(filtered_ref_name)

                # if the position/orientation is not improved, break it

                # change the frequency to a higher value
                new_freq = int(ceil(resolutionBand)) + 1
                if new_freq <= old_freq:
                    if job.adaptive_res is not False:  # two different strategies
                        print(
                            self.node_name +
                            ': Determined resolution gets worse. Include additional %f percent frequency to be aligned!'
                            % job.adaptive_res)
                        new_freq = int((1 + job.adaptive_res) * old_freq)
                    else:  # always increase by 1
                        print(
                            self.node_name +
                            ': Determined resolution gets worse. Increase the frequency to be aligned by 1!'
                        )
                        new_freq = old_freq + 1
                        old_freq = new_freq
                else:
                    old_freq = new_freq
                if new_freq >= numberBands:
                    print(self.node_name +
                          ': New frequency too high. Terminate!')
                    break

                if verbose:
                    print(self.node_name + ': change the frequency to ' +
                          str(new_freq))

            # send end signal to other nodes and terminate itself
            self.end(verbose)
        else:
            # other nodes
            self.run(verbose)
Example #10
    import sys
    import pytom_volume
    from pytom.basic.files import write_em
    from pytom_numpy import vol2npy, npy2vol
    from pytom_volume import pasteCenter, vol
    from pytom_freqweight import weight
    import numpy as np

    num_angles, size = map(int, sys.argv[1:3])
    size2 = int(sys.argv[3])
    csize = int(sys.argv[4])

    try:
        start, end = map(int, sys.argv[5:7])
    except:
        start, end = 0, csize

    wedgeAngle = 30
    wedgeFilter = weight(wedgeAngle, 0, end - start, size, size)
    wedgeVolume = wedgeFilter.getWeightVolume(True)

    filterVolume = pytom_volume.reducedToFull(wedgeVolume)
    wedgeV = vol2npy(filterVolume).copy()

    from pytom.tompy.io import read
    import mrcfile

    # NDARRAYS
    voluNDA = mrcfile.open('tomo.mrc', permissive=True).data.copy()
    tempNDA = read('template.em')
    maskNDA = read("mask.em")

    sox, soy, soz = tempNDA.shape
    spx, spy, spz = voluNDA.shape
Example #11
def weightedXCF(volume, reference, numberOfBands, wedgeAngle=-1, gpu=False):
    """
    weightedXCF: Determines the weighted correlation function for volume and reference
    @param volume: A volume 
    @param reference: A reference 
    @param numberOfBands:Number of bands
    @param wedgeAngle: An optional wedge angle
    @return: The weighted correlation function 
    @rtype: L{pytom_volume.vol} 
    @author: Thomas Hrabe 
    @todo: does not work yet -> test is disabled
    """

    if gpu:
        import cupy as xp
    else:
        import numpy as xp

    from pytom.tompy.correlation import bandCF
    from pytom.tompy.transforms import fourier_reduced2full
    from math import sqrt
    import pytom_freqweight

    result = xp.zeros_like(volume)

    q = 0

    if wedgeAngle >= 0:
        # volume is a numpy/cupy array here, so pass its shape to the C++ filter
        # and convert the resulting (reduced-complex) weight volume to an array
        from pytom_numpy import vol2npy
        wedgeFilter = pytom_freqweight.weight(wedgeAngle, 0, volume.shape[0],
                                              volume.shape[1], volume.shape[2])
        wedgeVolume = xp.array(vol2npy(wedgeFilter.getWeightVolume(True)).copy())
    else:
        # reduced-complex size (z // 2 + 1), matching the band filter volumes below
        wedgeVolume = xp.ones((volume.shape[0], volume.shape[1],
                               volume.shape[2] // 2 + 1), dtype=xp.float32)

    w = xp.sqrt(1 / float(volume.size))

    numberVoxels = 0

    for i in range(numberOfBands):
        """
        notation according Steward/Grigorieff paper
        """
        band = [0, 0]
        band[0] = i * volume.shape[0] / numberOfBands
        band[1] = (i + 1) * volume.shape[0] / numberOfBands

        r = bandCF(volume, reference, band, gpu=gpu)

        cc = r[0]

        filter = r[1]
        #get bandVolume
        bandVolume = filter.getWeightVolume(True)

        filterVolumeReduced = bandVolume * wedgeVolume
        filterVolume = fourier_reduced2full(filterVolumeReduced)
        #determine number of voxels != 0
        N = float((filterVolume != 0).sum())

        #add to number of total voxels
        numberVoxels = numberVoxels + N

        cc2 = r[0].copy()

        cc2 = cc2**2

        cc = cc + w              # shiftscale(w, 1): shift by w, keep scale
        ccdiv = cc2 / cc

        ccdiv = ccdiv**3

        #abs(ccdiv); as suggested by grigorief
        ccdiv = ccdiv * N        # shiftscale(0, N): scale by N

        result = result + ccdiv

    result = result * (1 / float(numberVoxels))   # shiftscale(0, 1/numberVoxels)

    return result
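# Usage sketch (not part of the original source): the docstring above flags this
# routine as not working yet, so this only illustrates the intended call on array
# data (random test volumes, arbitrary band count).
import numpy as np

volume = np.random.rand(64, 64, 64).astype(np.float32)
reference = np.random.rand(64, 64, 64).astype(np.float32)
wxcf = weightedXCF(volume, reference, numberOfBands=8, wedgeAngle=-1, gpu=False)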
Example #12
    def apply(self, volume, bypassFlag=False, downscale=1, particle=None):
        """
        apply: Performs preprocessing of volume and reference
        @param volume: volume to be pre-processed
        @type volume: L{pytom_volume.vol}
        @param bypassFlag: Set to True if only the bandpass filter is needed; otherwise all preprocessing routines are applied.
        @param downscale: not used anymore
        @param particle: particle Volume to be subtracted from input volume
        @type particle: L{pytom_volume.vol}
        @return: Returns modified volume
        @author: Thomas Hrabe  
        """

        from pytom_volume import vol

        if self._bandpassOn:

            from pytom.basic.filter import bandpassFilter

            # if frequencies specified in Nyquist, 0.5 being highest
            # fixed wrong adjustment of frequencies upon binning - FF
            if self._highestFrequency < 1:
                highestFrequency = self._highestFrequency * volume.sizeX()
                lowestFrequency = self._lowestFrequency * volume.sizeX()
            else:
                highestFrequency = self._highestFrequency
                lowestFrequency = self._lowestFrequency

            v = bandpassFilter(volume=volume,
                               lowestFrequency=lowestFrequency,
                               highestFrequency=highestFrequency,
                               bpf=0,
                               smooth=self._bandpassSmooth)
            volume = v[0]

        if self._prerotateOn and (not bypassFlag):

            from pytom_volume import rotate

            rot = vol(volume.sizeX(), volume.sizeY(), volume.sizeZ())
            rotation = self.prerotate
            rotate(volume, rot, rotation[0], rotation[1], rotation[2])
            volume = rot

        if self._weightingOn and (not bypassFlag):

            from pytom_volume import read
            from pytom_freqweight import weight
            from pytom.basic.fourier import fft, ifft

            wedgeSum = read(self._weightingFile)

            fVolume = fft(volume)
            weighting = weight(wedgeSum)
            weighting.apply(fVolume)
            volume = ifft(fVolume)

        if self._substractParticle and particle.__class__ == vol:
            volume -= particle

        if self._taper > 0:
            from pytom.tools.macros import volumesSameSize
            if self._taperMask is None or not volumesSameSize(
                    volume, self._taperMask):
                from pytom.basic.functions import taper_edges
                volume, self._taperMask = taper_edges(volume, self._taper)
            else:
                volume = volume * self._taperMask

        return volume
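# Usage sketch (not part of the original source): the apply() method above belongs
# to pytom.alignment.preprocessing.Preprocessing. 'particle.em' is a hypothetical
# file name; setTaper is used as in bestAlignment above, and bypassFlag=True
# restricts the call to bandpass filtering and tapering.
from pytom_volume import read
from pytom.alignment.preprocessing import Preprocessing

volume = read('particle.em')
preprocessing = Preprocessing()
preprocessing.setTaper(taper=volume.sizeX() / 10.)
processed = preprocessing.apply(volume, bypassFlag=True)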
Example #13
def toProjectionStackFromAlignmentResultsFile(alignmentResultsFile,
                                              weighting=None,
                                              lowpassFilter=0.9,
                                              binning=1,
                                              circleFilter=False,
                                              num_procs=1,
                                              outdir='',
                                              prefix='sorted_aligned'):
    """read image and create aligned projection stack, based on the results described in the alignmentResultFile.

       @param alignmentResultsFile: result file generate by the alignment script.
       @type alignmentResultsFile: str (parsed with gui.guiFunction.datatypeAR)
       @param weighting: weighting (<0: analytical weighting, >1: exact weighting, 0/None: no weighting )
       @type weighting: float
       @param lowpassFilter: lowpass filter (in Nyquist)
       @type lowpassFilter: float
       @param binning: binning (default: 1 = no binning). binning=2: 2x2 pixels -> 1 pixel, binning=3: 3x3 pixels -> 1 pixel, etc.

       @author: GvdS
    """
    print('weighting: ', weighting)
    import numpy
    from pytom_numpy import vol2npy
    from pytom.basic.files import read_em, write_em
    from pytom.basic.functions import taper_edges
    from pytom.basic.transformations import general_transform2d
    from pytom.basic.fourier import ifft, fft
    from pytom.basic.filter import filter as filterFunction, bandpassFilter
    from pytom.basic.filter import circleFilter as circleFilterFunction, rampFilter, exactFilter, \
        fourierFilterShift, fourierFilterShift_ReducedComplex
    from pytom_volume import complexRealMult, vol, paste
    import pytom_freqweight
    from pytom.basic.transformations import resize, rotate
    from pytom.gui.guiFunctions import fmtAR, headerAlignmentResults, datatype, datatypeAR, loadstar
    from pytom.reconstruction.reconstructionStructures import Projection, ProjectionList
    from pytom_numpy import vol2npy
    import mrcfile
    from pytom.tompy.io import write, read_size
    import os

    print("Create aligned images from alignResults.txt")

    alignmentResults = loadstar(alignmentResultsFile, dtype=datatypeAR)
    imageList = alignmentResults['FileName']
    tilt_angles = alignmentResults['TiltAngle']

    imdim = int(read_size(imageList[0], 'x'))

    if binning > 1:
        imdim = int(float(imdim) / float(binning) + .5)
    else:
        imdim = imdim

    sliceWidth = imdim

    # pre-determine analytical weighting function and lowpass for speedup
    if (weighting != None) and (float(weighting) < -0.001):
        weightSlice = fourierFilterShift(rampFilter(imdim, imdim))

    if circleFilter:
        circleFilterRadius = imdim // 2
        circleSlice = fourierFilterShift_ReducedComplex(
            circleFilterFunction(imdim, imdim, circleFilterRadius))
    else:
        circleSlice = vol(imdim, imdim // 2 + 1, 1)
        circleSlice.setAll(1.0)

    # design lowpass filter
    if lowpassFilter:
        if lowpassFilter > 1.:
            lowpassFilter = 1.
            print("Warning: lowpassFilter > 1 - set to 1 (=Nyquist)")
        # pytom_freqweight.weight arguments: (angle, cutoff radius, dimx, dimy, dimz, smooth)
        lpf = pytom_freqweight.weight(0.0, lowpassFilter * imdim // 2, imdim,
                                      imdim // 2 + 1, 1,
                                      lowpassFilter / 5. * imdim)
        # lpf = bandpassFilter(volume=vol(imdim, imdim,1),lowestFrequency=0,highestFrequency=int(lowpassFilter*imdim/2),
        #                     bpf=None,smooth=lowpassFilter/5.*imdim,fourierOnly=False)[1]

    projectionList = ProjectionList()
    imageList = []
    tilt_angles = []
    for n, image in enumerate(alignmentResults['FileName']):
        atx = alignmentResults['AlignmentTransX'][n]
        aty = alignmentResults['AlignmentTransY'][n]
        rot = alignmentResults['InPlaneRotation'][n]
        mag = alignmentResults['Magnification'][n]
        # print(image, alignmentResults['TiltAngle'][n])
        # if abs(alignmentResults['TiltAngle'][n]) > 20:
        #     continue
        tilt_angles.append(alignmentResults['TiltAngle'][n])
        imageList.append(image)
        projection = Projection(imageList[-1],
                                tiltAngle=tilt_angles[-1],
                                alignmentTransX=atx,
                                alignmentTransY=aty,
                                alignmentRotation=rot,
                                alignmentMagnification=mag)
        projectionList.append(projection)

    stack = vol(imdim, imdim, len(imageList))
    stack.setAll(0.0)

    phiStack = vol(1, 1, len(imageList))
    phiStack.setAll(0.0)

    thetaStack = vol(1, 1, len(imageList))
    thetaStack.setAll(0.0)

    offsetStack = vol(1, 2, len(imageList))
    offsetStack.setAll(0.0)

    for (ii, projection) in enumerate(projectionList):
        if projection._filename.split('.')[-1] == 'st':
            from pytom.basic.files import EMHeader, read
            idx = projection._index
            image = read(file=projection._filename,
                         subregion=[0, 0, idx - 1, imdim, imdim, 1],
                         sampling=[0, 0, 0],
                         binning=[0, 0, 0])
            if not (binning == 1 or binning is None):
                image = resize(volume=image, factor=1 / float(binning))[0]
        else:
            # read projection files
            from pytom.basic.files import EMHeader, read, read_em_header
            image = read(str(projection._filename))
            # image = rotate(image,180.,0.,0.)
            image = resize(volume=image, factor=1 / float(binning))[0]

        if lowpassFilter:
            filtered = filterFunction(volume=image,
                                      filterObject=lpf,
                                      fourierOnly=False)
            image = filtered[0]

        tiltAngle = projection._tiltAngle

        # normalize to contrast - subtract mean and norm to mean
        immean = vol2npy(image).mean()
        image = (image - immean) / immean

        print(ii, immean, projection._filename)

        # smoothen borders to prevent high contrast oscillations
        image = taper_edges(image, imdim // 30)[0]

        # transform projection according to tilt alignment
        transX = projection._alignmentTransX / binning
        transY = projection._alignmentTransY / binning
        rot = float(projection._alignmentRotation)
        mag = float(projection._alignmentMagnification)

        image = general_transform2d(v=image,
                                    rot=rot,
                                    shift=[transX, transY],
                                    scale=mag,
                                    order=[2, 1, 0],
                                    crop=True)

        # smoothen once more to avoid edges
        image = taper_edges(image, imdim // 30)[0]

        # analytical weighting
        if (weighting != None) and (weighting < 0):
            # image = (ifft(complexRealMult(fft(image), w_func)) / (image.sizeX() * image.sizeY() * image.sizeZ()))
            image = ifft(complexRealMult(
                complexRealMult(fft(image), weightSlice), circleSlice),
                         scaling=True)

        elif (weighting != None) and (weighting > 0):
            weightSlice = fourierFilterShift(
                exactFilter(tilt_angles, tiltAngle, imdim, imdim, sliceWidth))
            # image = (ifft(complexRealMult(fft(image), w_func)) / (image.sizeX() * image.sizeY() * image.sizeZ()))
            image = ifft(complexRealMult(
                complexRealMult(fft(image), weightSlice), circleSlice),
                         scaling=True)

        thetaStack(int(round(projection.getTiltAngle())), 0, 0, ii)
        offsetStack(int(round(projection.getOffsetX())), 0, 0, ii)
        offsetStack(int(round(projection.getOffsetY())), 0, 1, ii)
        paste(image, stack, 0, 0, ii)
        fname = '{}_{:02d}.mrc'.format(
            prefix, int(imageList[ii].split('_')[-1].split('.')[0]))
        if outdir:
            import mrcfile
            # write_em(os.path.join(outdir, fname.replace('mrc', 'em')), image)
            write(os.path.join(outdir, fname),
                  vol2npy(image).copy().astype('float32'))
            print('written file: ', fname)

    return [stack, phiStack, thetaStack, offsetStack]
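# Usage sketch (not part of the original source): builds an aligned, weighted
# projection stack from an alignmentResults.txt written by the alignment step.
# weighting=-1 selects the analytical ramp weighting; output directory, prefix and
# binning factor are arbitrary choices.
stack, phiStack, thetaStack, offsetStack = toProjectionStackFromAlignmentResultsFile(
    'alignmentResults.txt', weighting=-1, lowpassFilter=0.9, binning=4,
    circleFilter=True, outdir='alignedImages', prefix='sorted_aligned')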
Example #14
def weightedXCF(volume,reference,numberOfBands,wedgeAngle=-1):
    """
    weightedXCF: Determines the weighted correlation function for volume and reference
    @param volume: A volume 
    @param reference: A reference 
    @param numberOfBands:Number of bands
    @param wedgeAngle: An optional wedge angle
    @return: The weighted correlation function 
    @rtype: L{pytom_volume.vol} 
    @author: Thomas Hrabe 
    @todo: does not work yet -> test is disabled
    """
    from pytom.basic.correlation import bandCF
    import pytom_volume
    from math import sqrt
    import pytom_freqweight
    
    result = pytom_volume.vol(volume.sizeX(),volume.sizeY(),volume.sizeZ())
    result.setAll(0)
    cc2 = pytom_volume.vol(volume.sizeX(),volume.sizeY(),volume.sizeZ())
    cc2.setAll(0)
    q = 0
    
    if wedgeAngle >=0:
        wedgeFilter = pytom_freqweight.weight(wedgeAngle,0,volume.sizeX(),volume.sizeY(),volume.sizeZ())
        wedgeVolume = wedgeFilter.getWeightVolume(True)
    else:
        wedgeVolume = pytom_volume.vol(volume.sizeX(), volume.sizeY(), int(volume.sizeZ()/2+1))
        wedgeVolume.setAll(1.0)
        
    w = sqrt(1/float(volume.sizeX()*volume.sizeY()*volume.sizeZ()))
    
    numberVoxels = 0
    
    for i in range(numberOfBands):
        """
        notation according Steward/Grigorieff paper
        """
        band = [0,0]
        band[0] = i*volume.sizeX()/numberOfBands 
        band[1] = (i+1)*volume.sizeX()/numberOfBands
        
        r = bandCF(volume,reference,band)
        
        cc = r[0]
                
        filter = r[1]
        #get bandVolume
        bandVolume = filter.getWeightVolume(True)
            
        filterVolumeReduced = bandVolume * wedgeVolume
        filterVolume = pytom_volume.reducedToFull(filterVolumeReduced)
        #determine number of voxels != 0    
        N = pytom_volume.numberSetVoxels(filterVolume)
            
        #add to number of total voxels
        numberVoxels = numberVoxels + N
                 
        cc2.copyVolume(r[0])
                
        pytom_volume.power(cc2,2)
        
        cc.shiftscale(w,1)
        ccdiv = cc2/(cc)
                
        pytom_volume.power(ccdiv,3)
        
        #abs(ccdiv); as suggested by grigorief
        ccdiv.shiftscale(0,N)
        
        result = result + ccdiv
    
    result.shiftscale(0,1/float(numberVoxels))
    
    return result
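# Usage sketch (not part of the original source): the docstring above marks this
# routine as not working yet, so this only shows the intended call on
# pytom_volume.vol objects; file names and the band count are hypothetical.
from pytom_volume import read

volume = read('particle.em')
reference = read('reference.em')
wxcf = weightedXCF(volume, reference, numberOfBands=8, wedgeAngle=30)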
Example #15
    def start(self, job, verbose=False):
        if self.mpi_id == 0:
            from pytom.basic.structures import ParticleList, Reference
            from pytom.basic.resolution import bandToAngstrom
            from pytom.basic.filter import lowpassFilter
            from math import ceil
            from pytom.basic.fourier import convolute
            from pytom_volume import vol, power, read

            # randomly split the particle list into 2 half sets
            import numpy as np
            num_pairs = len(job.particleList.pairs)
            for i in range(num_pairs):
                # randomize the class labels to indicate the two half sets
                pl = job.particleList.pairs[i].get_phase_flip_pl()
                n = len(pl)
                labels = np.random.randint(2, size=(n, ))
                print(self.node_name + ': Number of 1st half set:',
                      n - np.sum(labels), 'Number of 2nd half set:',
                      np.sum(labels))
                for j in range(n):
                    p = pl[j]
                    p.setClass(labels[j])

            new_reference = job.reference
            old_freq = job.freq
            new_freq = job.freq
            # main node
            for i in range(job.max_iter):
                if verbose:
                    print(self.node_name + ': starting iteration %d ...' % i)

                # construct a new job by updating the reference and the frequency
                # here the job.particleList is actually ParticleListSet
                new_job = MultiDefocusJob(job.particleList, new_reference,
                                          job.mask, job.peak_offset,
                                          job.sampleInformation, job.bw_range,
                                          new_freq, job.destination,
                                          job.max_iter - i, job.r_score,
                                          job.weighting, job.bfactor)

                # distribute it
                num_all_particles = self.distribute_job(new_job, verbose)

                # calculate the denominator
                sum_ctf_squared = None
                for pair in job.particleList.pairs:
                    if sum_ctf_squared is None:
                        sum_ctf_squared = pair.get_ctf_sqr_vol() * pair.snr
                    else:
                        sum_ctf_squared += pair.get_ctf_sqr_vol() * pair.snr

                # get the result back
                all_even_pre = None
                all_even_wedge = None
                all_odd_pre = None
                all_odd_wedge = None
                pls = []
                for j in range(len(job.particleList.pairs)):
                    pls.append(ParticleList())

                for j in range(self.num_workers):
                    result = self.get_result()

                    pair_id = self.assignment[result.worker_id]
                    pair = job.particleList.pairs[pair_id]

                    pl = pls[pair_id]
                    pl += result.pl
                    even_pre, even_wedge, odd_pre, odd_wedge = self.retrieve_res_vols(
                        result.name)

                    if all_even_pre:
                        all_even_pre += even_pre * pair.snr
                        all_even_wedge += even_wedge
                        all_odd_pre += odd_pre * pair.snr
                        all_odd_wedge += odd_wedge
                    else:
                        all_even_pre = even_pre * pair.snr
                        all_even_wedge = even_wedge
                        all_odd_pre = odd_pre * pair.snr
                        all_odd_wedge = odd_wedge

                # write the new particle list to the disk
                for j in range(len(job.particleList.pairs)):
                    pls[j].toXMLFile('aligned_pl' + str(j) + '_iter' + str(i) +
                                     '.xml')

                # correct for the number of particles in wiener filter
                sum_ctf_squared = sum_ctf_squared / num_all_particles
                #                all_even_pre = all_even_pre/(num_all_particles/2)
                #                all_odd_pre = all_odd_pre/(num_all_particles/2)

                # bfactor
                if job.bfactor and job.bfactor != 'None':
                    #                    bfactor_kernel = create_bfactor_vol(sum_ctf_squared.sizeX(), job.sampleInformation.getPixelSize(), job.bfactor)
                    bfactor_kernel = read(job.bfactor)
                    bfactor_kernel_sqr = vol(bfactor_kernel)
                    power(bfactor_kernel_sqr, 2)
                    all_even_pre = convolute(all_even_pre, bfactor_kernel,
                                             True)
                    all_odd_pre = convolute(all_odd_pre, bfactor_kernel, True)
                    sum_ctf_squared = sum_ctf_squared * bfactor_kernel_sqr

                # create averages of two sets
                if verbose:
                    print(self.node_name + ': determining the resolution ...')
                even = self.create_average(
                    all_even_pre, sum_ctf_squared, all_even_wedge
                )  # assume that the CTF sum is the same for the even and odd
                odd = self.create_average(all_odd_pre, sum_ctf_squared,
                                          all_odd_wedge)

                # determine the transformation between even and odd
                # here we assume the wedge from both sets are fully sampled
                from sh_alignment.frm import frm_align
                pos, angle, score = frm_align(odd, None, even, None,
                                              job.bw_range, new_freq,
                                              job.peak_offset)
                print(
                    self.node_name +
                    ': transform of even set to match the odd set - shift: ' +
                    str(pos) + ' rotation: ' + str(angle))

                # transform the odd set accordingly
                from pytom_volume import vol, transformSpline
                from pytom.basic.fourier import ftshift
                from pytom_volume import reducedToFull
                from pytom_freqweight import weight
                transformed_odd_pre = vol(odd.sizeX(), odd.sizeY(),
                                          odd.sizeZ())
                full_all_odd_wedge = reducedToFull(all_odd_wedge)
                ftshift(full_all_odd_wedge)
                # wrap the shifted wedge volume in a freqweight object so it can be rotated below
                odd_weight = weight(full_all_odd_wedge)
                transformed_odd = vol(odd.sizeX(), odd.sizeY(), odd.sizeZ())

                transformSpline(all_odd_pre, transformed_odd_pre, -angle[1],
                                -angle[0], -angle[2], int(odd.sizeX() / 2),
                                int(odd.sizeY() / 2), int(odd.sizeZ() / 2),
                                -(pos[0] - odd.sizeX() / 2),
                                -(pos[1] - odd.sizeY() / 2),
                                -(pos[2] - odd.sizeZ() / 2), 0, 0, 0)
                odd_weight.rotate(-angle[1], -angle[0], -angle[2])
                transformed_odd_wedge = odd_weight.getWeightVolume(True)
                transformSpline(odd, transformed_odd, -angle[1], -angle[0],
                                -angle[2], int(odd.sizeX() / 2),
                                int(odd.sizeY() / 2), int(odd.sizeZ() / 2),
                                -(pos[0] - odd.sizeX() / 2),
                                -(pos[1] - odd.sizeY() / 2),
                                -(pos[2] - odd.sizeZ() / 2), 0, 0, 0)

                all_odd_pre = transformed_odd_pre
                all_odd_wedge = transformed_odd_wedge
                odd = transformed_odd

                # apply symmetries before determining the resolution
                # (with a gold-standard procedure, be careful about applying symmetry!)
                even = job.symmetries.applyToParticle(even)
                odd = job.symmetries.applyToParticle(odd)
                resNyquist, resolutionBand, numberBands = self.determine_resolution(
                    even, odd, job.fsc_criterion, None, job.mask, verbose)

                # write the half set to the disk
                even.write('fsc_' + str(i) + '_even.em')
                odd.write('fsc_' + str(i) + '_odd.em')

                current_resolution = bandToAngstrom(
                    resolutionBand, job.sampleInformation.getPixelSize(),
                    numberBands, 1)
                if verbose:
                    print(
                        self.node_name + ': current resolution ' +
                        str(current_resolution), resNyquist)

                # create new average
                all_even_pre += all_odd_pre
                all_even_wedge += all_odd_wedge
                #                all_even_pre = all_even_pre/2 # correct for the number of particles in wiener filter
                average = self.create_average(all_even_pre, sum_ctf_squared,
                                              all_even_wedge)

                # apply symmetries
                average = job.symmetries.applyToParticle(average)

                # filter average to resolution and update the new reference
                average_name = 'average_iter' + str(i) + '.em'
                average.write(average_name)

                # update the references
                new_reference = [
                    Reference('fsc_' + str(i) + '_even.em'),
                    Reference('fsc_' + str(i) + '_odd.em')
                ]

                # low pass filter the reference and write it to the disk
                filtered = lowpassFilter(average, ceil(resolutionBand),
                                         ceil(resolutionBand) / 10)
                filtered_ref_name = 'average_iter' + str(i) + '_res' + str(
                    current_resolution) + '.em'
                filtered[0].write(filtered_ref_name)

                # change the frequency to a higher value
                new_freq = int(ceil(resolutionBand)) + 1
                if new_freq <= old_freq:
                    if job.adaptive_res is not False:  # two different strategies
                        print(
                            self.node_name +
                            ': Determined resolution got worse. Extending the aligned frequency range by an additional %.1f%%!'
                            % (job.adaptive_res * 100))
                        new_freq = int((1 + job.adaptive_res) * old_freq)
                    else:  # always increase by 1
                        print(
                            self.node_name +
                            ': Determined resolution got worse. Increasing the frequency to be aligned by 1!'
                        )
                        new_freq = old_freq + 1
                        old_freq = new_freq
                else:
                    old_freq = new_freq
                if new_freq >= numberBands:
                    print(self.node_name +
                          ': Determined frequency too high. Terminate!')
                    break

                if verbose:
                    print(self.node_name + ': change the frequency to ' +
                          str(new_freq))

            # send end signal to other nodes and terminate itself
            self.end(verbose)
        else:
            # other nodes
            self.run(verbose)
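The per-iteration frequency update near the end of start() can be summarised as a small standalone function; this is only a restatement of the logic above for readability (names mirror the example) and is not part of the original class:

from math import ceil

def update_alignment_frequency(resolution_band, old_freq, adaptive_res, number_bands):
    """Restates the frequency-update rule used in start() above.

    Returns (new_freq, old_freq, stop); stop is True when the frequency
    reaches the number of bands and the iteration should terminate.
    """
    new_freq = int(ceil(resolution_band)) + 1
    if new_freq <= old_freq:              # resolution got worse
        if adaptive_res is not False:     # extend the aligned range by a fraction
            new_freq = int((1 + adaptive_res) * old_freq)
        else:                             # otherwise increase by one band
            new_freq = old_freq + 1
            old_freq = new_freq
    else:
        old_freq = new_freq
    stop = new_freq >= number_bands
    return new_freq, old_freq, stop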