Code example #1
def nXcf(volume,template,mask=None, stdV=None):
    """
    nXCF: returns the normalised cross-correlation function. Cross-correlating
    two identical objects yields a maximum nXCF peak of 1.

    @param volume: The search volume
    @param template: The template searched (this one will be used for conjugate complex multiplication)
    @type template: L{pytom_volume.vol}
    @param mask: template mask. If not given, a default spherical mask of the same size as the given template will be generated.
    @type mask: L{pytom_volume.vol}
    @param stdV: Unused; kept only for compatibility with FLCF
    @return: the calculated nXcf volume
    @rtype: L{pytom_volume.vol}
    @author: Thomas Hrabe
    @change: masking of template implemented
    """
    from pytom.basic.normalise import mean0std1

    if mask is None:
        result = xcf(mean0std1(volume,True),mean0std1(template,True), mask=None, stdV=None)
    else:
        from pytom.basic.normalise import normaliseUnderMask
        result = xcf(normaliseUnderMask(volume=volume, mask=mask, p=None)[0],
                     normaliseUnderMask(volume=template, mask=mask, p=None)[0],
                     mask=mask, stdV=None)
    #n = result.numelem()
    #result.shiftscale(0,1/float(n*n))

    return result
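Taken together, these examples show two calling conventions for mean0std1: called with a single argument it normalises the volume in place, and called with True as the second argument it returns the normalised copy. A minimal usage sketch based on those observed conventions (the file path is only illustrative, mirroring the unit test further down):

from pytom_volume import read, mean, variance
from pytom.basic.normalise import mean0std1

v = read('./testData/ribo.em')                 # illustrative input volume
normalised = mean0std1(v, True)                # returns a new volume with mean ~0 and std ~1
print(mean(normalised), variance(normalised, False))

mean0std1(v)                                   # in-place variant, as used in paverage/sum_sub_pl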
Code example #2
File: WienerFilterAlignment.py  Project: xmzzaa/PyTom
    def create_average(self, sum_ctf_conv, sum_ctf_squared, wedge_weight):
        """For the master node, this function is rewritten.
        """
        from pytom_volume import vol, complexDiv, fullToReduced, initSphere, complexRealMult, limit
        from pytom.basic.fourier import fft, ifft, ftshift
        from pytom.basic.normalise import mean0std1
        
#        limit(wedge_weight, 0.1, 0, 0,0,True,False) # set all the values below the specified value to 0
        
        # mask to zero out the area outside the sphere
#        mask = vol(sum_ctf_conv)
#        mask.setAll(0)
#        initSphere(mask, sum_ctf_conv.sizeX()/2-1, 0,0, sum_ctf_conv.sizeX()/2, sum_ctf_conv.sizeX()/2, sum_ctf_conv.sizeX()/2)
#        mask = fullToReduced(ftshift(mask, inplace=False))
        
        # Wiener filter
        numerator = fft(sum_ctf_conv)
        sum_ctf_squared = fullToReduced(ftshift(sum_ctf_squared, inplace=False))
        denominator = (sum_ctf_squared+1)*wedge_weight
        r = complexDiv(numerator, denominator)
#        average = ifft(complexRealMult(r, mask))
        average = ifft(r)
        average.shiftscale(0.0,1/float(average.sizeX()*average.sizeY()*average.sizeZ()))
        
        # normalize the average
        try:
            average = mean0std1(average, True)
        except Exception:
            average *= 1000 # in case the average volume is too small to normalize
            average = mean0std1(average, True)
        
        return average
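The core of create_average is a Wiener-style division in Fourier space: the FFT of sum_ctf_conv is divided by (sum_ctf_squared + 1) * wedge_weight and transformed back. A rough numpy transcription of that step, assuming all three inputs are full-size arrays (the original keeps sum_ctf_squared and wedge_weight in PyTom's reduced-complex layout):

import numpy as np

def wiener_average_sketch(sum_ctf_conv, sum_ctf_squared, wedge_weight):
    # numerator: Fourier transform of the CTF-convolved particle sum
    numerator = np.fft.fftn(sum_ctf_conv)
    # denominator: centred sum of squared CTFs shifted to FFT layout,
    # plus the Wiener constant (here 1), times the wedge weighting
    denominator = (np.fft.ifftshift(sum_ctf_squared) + 1.0) * wedge_weight
    average = np.fft.ifftn(numerator / denominator).real
    # bring the result to mean 0 / std 1, as mean0std1 does above
    return (average - average.mean()) / average.std()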
Code example #3
    def sum_sub_pl(self, pl, name_prefix):
        """This is a sub-routine for average_sub_pl.
        """
        from pytom_volume import vol
        from pytom_volume import transformSpline as transform
        from pytom.basic.normalise import mean0std1

        result = None
        wedgeSum = None
        for p in pl:
            particle = p.getVolume()
            mean0std1(particle)
            wedgeInfo = p.getWedge()

            if result is None:
                sizeX = particle.sizeX()
                sizeY = particle.sizeY()
                sizeZ = particle.sizeZ()

                newParticle = vol(sizeX, sizeY, sizeZ)
                # make consistent for python3
                centerX = sizeX // 2
                centerY = sizeY // 2
                centerZ = sizeZ // 2

                result = vol(sizeX, sizeY, sizeZ)
                result.setAll(0)

                wedgeSum = wedgeInfo.returnWedgeVolume(sizeX, sizeY, sizeZ)
                wedgeSum.setAll(0)

            # create wedge weighting
            rotation = p.getRotation()

            wedge = wedgeInfo.returnWedgeVolume(sizeX, sizeY, sizeZ, False,
                                                rotation.invert())
            wedgeSum = wedgeSum + wedge

            # shift and rotate particle
            shift = p.getShift()
            newParticle.setAll(0)
            transform(particle, newParticle, -rotation[1], -rotation[0],
                      -rotation[2], centerX, centerY, centerZ, -shift[0],
                      -shift[1], -shift[2], 0, 0, 0)

            result = result + newParticle

        # write them back to disk
        result.write(name_prefix + '-PreWedge.em')
        wedgeSum.write(name_prefix + '-WedgeSumUnscaled.em')
Code example #4
File: imageStructures.py  Project: mvanevic/PyTom
    def normalize(self, normtype="StdMeanInMask", mask=None, p=None):
        """
        normalize image

        @param normtype: normalization type
        @type normtype: str ("StdMeanInMask", "StdMean")
        @param mask: mask volume
        @type mask: L{pytom_volume.vol}
        @param p: sum of gray values in mask (if pre-computed)
        @type p: L{int} or L{float}
        """
        if normtype == "StdMeanInMask":
            from pytom.basic.normalise import normaliseUnderMask

            if mask is None:
                raise ValueError("StdMeanInMask normalization requires mask!")
            # spherical mask
            if isinstance(mask, (int, float)):
                if mask <= 0:
                    raise ValueError("Value for mask radius must be > 0!")
                from pytom.basic.functions import initSphere

                mask = initSphere(sizeX=self.data.sizeX(),
                                  sizeY=self.data.sizeY(),
                                  sizeZ=self.data.sizeZ(),
                                  radius=mask,
                                  smooth=mask / 10.,
                                  maxradius=0,
                                  cent=None)
            # user-specified generic mask
            else:
                from pytom_volume import vol
                if not isinstance(mask, vol):
                    raise TypeError("Mask must be a pytom_volume.vol")
                if ((mask.sizeX() != self.data.sizeX())
                        or (mask.sizeY() != self.data.sizeY())
                        or (mask.sizeZ() != self.data.sizeZ())):
                    raise ValueError("Mask have same dimension as image")

            normvol, p = normaliseUnderMask(volume=self.data, mask=mask, p=p)
            self.data = normvol
            #return number of voxels in volume for speed-up
            return p

        if normtype == "StdMean":
            from pytom.basic.normalise import mean0std1
            mean0std1(self.data)
            # return p for consistency
            return None
Code example #5
def nxcc(volume, template, mask=None, volumeIsNormalized=False):
    """
    nxcc: Calculates the normalized cross correlation coefficient in real space
    @param volume: A volume
    @type volume:  L{pytom_volume.vol}
    @param template: A template that is searched in volume. Must be of same size as volume.
    @type template:  L{pytom_volume.vol}
    @param mask: mask to constrain correlation
    @type mask: L{pytom_volume.vol}
    @param volumeIsNormalized: speed up if volume is already normalized
    @type volumeIsNormalized: L{bool}
    @return: A value between -1 and 1
    @raise exception: Raises a runtime error if volume and template have a different size.
    @author: Thomas Hrabe 
    @change: flag for pre-normalized volume, FF
    """

    from pytom_volume import vol,sum,limit
    from pytom.tools.macros import volumesSameSize
    
    if not volumesSameSize(volume,template):
        raise RuntimeError('Volume and template must have same size!')
    
    if not mask:
        from pytom.basic.normalise import mean0std1
        if volumeIsNormalized:
            v = volume
        else:
            v = mean0std1(volume, True)
        t = mean0std1(template, True)
        p = volume.numelem()
        result = v*t
    else:
        from pytom_numpy import vol2npy
        from pytom.basic.normalise import normaliseUnderMask
        if not volumeIsNormalized:
            (v,p) = normaliseUnderMask(volume, mask)
            (t,p) = normaliseUnderMask(template, mask, p)
            t = t * mask # multiply with the mask
            result = v * t
        else:
            (t,p) = normaliseUnderMask(template,mask)
            t = t * mask # multiply with the mask
            result = volume * t
    
    ncc = sum(result)
    ncc = ncc / float(p)

    return ncc 
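For reference, a hedged numpy analogue of what nxcc computes: normalise both volumes (under the mask, if one is given), multiply voxel-wise, and divide the sum by the number of mask-weighted voxels. The exact weighting inside normaliseUnderMask may differ in detail:

import numpy as np

def nxcc_sketch(volume, template, mask=None):
    if mask is None:
        p = volume.size
        v = (volume - volume.mean()) / volume.std()
        t = (template - template.mean()) / template.std()
    else:
        p = mask.sum()                             # effective number of voxels under the mask
        def normalise_under_mask(x):
            m = (x * mask).sum() / p
            s = np.sqrt((((x - m) ** 2) * mask).sum() / p)
            return (x - m) / s
        v = normalise_under_mask(volume)
        t = normalise_under_mask(template) * mask  # restrict the template to the mask
    return float((v * t).sum() / p)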
Code example #6
File: pytom_NormTest.py  Project: xmzzaa/PyTom
    def mean0std1_Test(self):

        import pytom_volume
        from pytom.basic.normalise import mean0std1
        v = pytom_volume.read('./testData/ribo.em')

        m = mean0std1(v, True)

        me = pytom_volume.mean(m)
        var = pytom_volume.variance(m, False)

        assert epsilon >= me >= -epsilon  #mean value test
        assert 1 + epsilon >= var >= 1 - epsilon
Code example #7
    def set_searchVol(self, vol1):
        """
        set search volume (vol1 internally)

        @param vol1: search volume
        @type vol1: L{pytom_volume.vol}

        """
        from pytom.basic.normalise import normaliseUnderMask, mean0std1

        if self.mask:
            (self.vol1, p) = normaliseUnderMask(vol1, self.mask)
        else:
            self.vol1 = mean0std1(vol1, True)
Code example #8
File: auto_focus_classify.py  Project: mvanevic/PyTom
def calculate_difference_map(v1,
                             band1,
                             v2,
                             band2,
                             mask=None,
                             focus_mask=None,
                             align=True,
                             sigma=None,
                             threshold=0.4):
    """mask if for alignment, while focus_mask is for difference map.
    """
    from pytom_volume import vol, power, abs, limit, transformSpline, variance, mean, max, min
    from pytom.basic.normalise import mean0std1
    from pytom.basic.filter import lowpassFilter

    # do lowpass filtering first
    lv1 = lowpassFilter(v1, band1, band1 / 10.)[0]
    lv2 = lowpassFilter(v2, band2, band2 / 10.)[0]

    # do alignment of two volumes, if required. v1 is used as reference.
    if align:
        from sh_alignment.frm import frm_align
        band = int(band1 if band1 < band2 else band2)
        pos, angle, score = frm_align(lv2, None, lv1, None, [4, 64], band,
                                      lv1.sizeX() // 4, mask)
        shift = [
            pos[0] - v1.sizeX() // 2, pos[1] - v1.sizeY() // 2,
            pos[2] - v1.sizeZ() // 2
        ]

        # transform v2
        lvv2 = vol(lv2)
        transformSpline(lv2, lvv2, -angle[1], -angle[0], -angle[2],
                        lv2.sizeX() // 2,
                        lv2.sizeY() // 2,
                        lv2.sizeZ() // 2, -shift[0], -shift[1], -shift[2], 0,
                        0, 0)
    else:
        lvv2 = lv2

    # do normalization
    mean0std1(lv1)
    mean0std1(lvv2)

    # only consider the density beyond certain sigma
    if sigma is None or sigma == 0:
        pass
    elif sigma < 0:  # negative density counts
        assert min(lv1) < sigma
        assert min(lvv2) < sigma
        limit(lv1, 0, 0, sigma, 0, False, True)
        limit(lvv2, 0, 0, sigma, 0, False, True)
    else:  # positive density counts
        assert max(lv1) > sigma
        assert max(lvv2) > sigma
        limit(lv1, sigma, 0, 0, 0, True, False)
        limit(lvv2, sigma, 0, 0, 0, True, False)

    # if we want to focus on specific area only
    if focus_mask:
        lv1 *= focus_mask
        lvv2 *= focus_mask

    # calculate the STD map
    avg = (lv1 + lvv2) / 2
    var1 = avg - lv1
    power(var1, 2)
    var2 = avg - lvv2
    power(var2, 2)

    std_map = var1 + var2
    power(std_map, 0.5)

    # calculate the coefficient of variance map
    # std_map = std_map/abs(avg)

    if focus_mask:
        std_map *= focus_mask

    # threshold the STD map
    mv = mean(std_map)
    threshold = mv + (max(std_map) - mv) * threshold
    limit(std_map, threshold, 0, threshold, 1, True, True)

    # do a lowpass filtering
    std_map1 = lowpassFilter(std_map, v1.sizeX() // 4, v1.sizeX() / 40.)[0]

    if align:
        std_map2 = vol(std_map)
        transformSpline(std_map1, std_map2, angle[0], angle[1], angle[2],
                        v1.sizeX() // 2,
                        v1.sizeY() // 2,
                        v1.sizeZ() // 2, 0, 0, 0, shift[0], shift[1], shift[2])
    else:
        std_map2 = std_map1

    limit(std_map1, 0.5, 0, 1, 1, True, True)
    limit(std_map2, 0.5, 0, 1, 1, True, True)

    # return the respective difference maps
    return (std_map1, std_map2)
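Stripped of the alignment, masking, thresholding and filtering, the difference map itself is just the voxel-wise deviation of each normalised volume from their average. A minimal numpy sketch of that core step:

import numpy as np

def std_map_sketch(lv1, lvv2):
    # voxel-wise standard deviation of the two aligned, normalised volumes
    avg = (lv1 + lvv2) / 2.0
    return np.sqrt((avg - lv1) ** 2 + (avg - lvv2) ** 2)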
Code example #9
File: auto_focus_classify.py  Project: mvanevic/PyTom
def paverage(particleList, norm, binning, verbose, outdir='./'):
    from pytom_volume import read, vol
    from pytom_volume import transformSpline as transform
    from pytom.basic.structures import Particle
    from pytom.basic.normalise import mean0std1
    from pytom.tools.ProgressBar import FixedProgBar
    from pytom.basic.transformations import resize
    import os

    if len(particleList) == 0:
        raise RuntimeError('The particlelist provided is empty. Aborting!')

    if verbose:
        progressBar = FixedProgBar(0, len(particleList), 'Particles averaged ')
        progressBar.update(0)
        numberAlignedParticles = 0

    result = None
    wedgeSum = None
    newParticle = None

    for particleObject in particleList:
        particle = read(particleObject.getFilename(), 0, 0, 0, 0, 0, 0, 0, 0,
                        0, 1, 1, 1)
        if binning != 1:
            particle, particlef = resize(volume=particle,
                                         factor=1. / binning,
                                         interpolation='Fourier')

        if norm:
            mean0std1(particle)

        wedgeInfo = particleObject.getWedge()
        if result is None:  # initialization
            sizeX = particle.sizeX()
            sizeY = particle.sizeY()
            sizeZ = particle.sizeZ()

            newParticle = vol(sizeX, sizeY, sizeZ)

            centerX = sizeX // 2
            centerY = sizeY // 2
            centerZ = sizeZ // 2

            result = vol(sizeX, sizeY, sizeZ)
            result.setAll(0.0)
            wedgeSum = wedgeInfo.returnWedgeVolume(sizeX, sizeY, sizeZ)
            wedgeSum.setAll(0)

        # create spectral wedge weighting
        rotation = particleObject.getRotation()
        wedge = wedgeInfo.returnWedgeVolume(sizeX, sizeY, sizeZ, False,
                                            rotation.invert())

        wedgeSum += wedge

        # shift and rotate particle
        shiftV = particleObject.getShift()
        newParticle.setAll(0)
        transform(particle, newParticle, -rotation[1], -rotation[0],
                  -rotation[2], centerX, centerY, centerZ,
                  -shiftV[0] // binning, -shiftV[1] // binning,
                  -shiftV[2] // binning, 0, 0, 0)

        result += newParticle

        if verbose:
            numberAlignedParticles = numberAlignedParticles + 1
            progressBar.update(numberAlignedParticles)

    # write to the disk

    fname_result = os.path.join(outdir, 'avg_{}.em'.format(mpi.rank))
    fname_wedge = os.path.join(outdir, 'wedge_{}.em'.format(mpi.rank))

    result.write(fname_result)
    result = Particle(fname_result)
    wedgeSum.write(fname_wedge)
    wedgeSum = Particle(fname_wedge)

    return (result, wedgeSum)
Code example #10
    for r in xrange(1, radius + 1):
        corr = frm_fourier_constrained_corr(vol2sf(vfr, r,
                                                   b), vol2sf(vfi, r, b), mf,
                                            vol2sf(vgr, r, b),
                                            vol2sf(vgi, r, b), mg, True)
        res += corr * (r**2)

    return res


diff_old = []
diff_new = []
total_time1 = 0.0
total_time2 = 0.0

mean0std1(v)
for i in xrange(100):
    phi = np.random.randint(360)
    psi = np.random.randint(360)
    the = np.random.randint(180)
    rotateSpline(v, v2, phi, psi, the)

    # apply wedge
    v2 = wedge.apply(v2)

    t = timing()

    # old method
    angles.reset()
    t.start()
    tmp = vol(v)
Code example #11
def average2(particleList, weighting=False, norm=False, determine_resolution=False,
             mask=None, binning=1, verbose=False):
    """
    2nd version of the average function. Does not write the averages to disk. Also supports internal \
    resolution determination.
    """
    from pytom_volume import read, vol, complexDiv, complexRealMult
    from pytom_volume import transformSpline as transform
    from pytom.basic.fourier import fft, ifft, convolute
    from pytom.basic.normalise import mean0std1
    from pytom.tools.ProgressBar import FixedProgBar
    from pytom.basic.filter import lowpassFilter, rotateWeighting
    from math import exp
    
    if len(particleList) == 0:
        raise RuntimeError('The particlelist provided is empty. Aborting!')
    
    if verbose:
        progressBar = FixedProgBar(0,len(particleList),'Particles averaged ')
        progressBar.update(0)
        numberAlignedParticles = 0
    
    even = None
    odd = None
    wedgeSum_even = None
    wedgeSum_odd = None
    newParticle = None
    
    is_odd = True
    for particleObject in particleList:
        particle = read(particleObject.getFilename(), 0,0,0,0,0,0,0,0,0, binning,binning,binning)
        if norm:
            mean0std1(particle)
        wedgeInfo = particleObject.getWedge()
        
        # apply its wedge to itself
        particle = wedgeInfo.apply(particle)
        
        if odd is None: # initialization
            sizeX = particle.sizeX() 
            sizeY = particle.sizeY()
            sizeZ = particle.sizeZ()
            
            newParticle = vol(sizeX,sizeY,sizeZ)
            
            centerX = sizeX // 2
            centerY = sizeY // 2
            centerZ = sizeZ // 2
            
            odd = vol(sizeX,sizeY,sizeZ)
            odd.setAll(0.0)
            even = vol(sizeX,sizeY,sizeZ)
            even.setAll(0.0)
            
            wedgeSum_odd = wedgeInfo.returnWedgeVolume(sizeX,sizeY,sizeZ)
            wedgeSum_odd.setAll(0)
            wedgeSum_even = wedgeInfo.returnWedgeVolume(sizeX,sizeY,sizeZ)
            wedgeSum_even.setAll(0)
        

        # create spectral wedge weighting
        rotation = particleObject.getRotation()
        rotinvert =  rotation.invert()
        if analytWedge:
            # > original buggy version
            wedge = wedgeInfo.returnWedgeVolume(sizeX,sizeY,sizeZ,False, rotinvert)
            # < original buggy version
        else:
            # > FF: interpol bugfix
            wedge = rotateWeighting( weighting=wedgeInfo.returnWedgeVolume(sizeX,sizeY,sizeZ,False),
                                     z1=rotinvert[0], z2=rotinvert[1], x=rotinvert[2], mask=None,
                                     isReducedComplex=True, returnReducedComplex=True)
            # < FF
            # > TH bugfix
            #wedgeVolume = wedgeInfo.returnWedgeVolume(wedgeSizeX=sizeX, wedgeSizeY=sizeY, wedgeSizeZ=sizeZ,
            #                                          humanUnderstandable=True, rotation=rotinvert)
            #wedge = rotate(volume=wedgeVolume, rotation=rotinvert, imethod='linear')
            # < TH
        if is_odd:
            wedgeSum_odd = wedgeSum_odd + wedge
        else:
            wedgeSum_even = wedgeSum_even + wedge
        
        # shift and rotate particle
        shiftV = particleObject.getShift()
        newParticle.setAll(0)
        transform(particle,newParticle,-rotation[1],-rotation[0],-rotation[2],
                  centerX,centerY,centerZ,-shiftV[0]/binning,
                  -shiftV[1]/binning,-shiftV[2]/binning,0,0,0)

        if is_odd:
            if weighting:
                weight = 1. - particleObject.getScore().getValue()
                #weight = weight**2
                weight = exp(-1.*weight)
                odd = odd + newParticle * weight
            else:
                odd = odd + newParticle
        else:
            if weighting:
                weight = 1. - particleObject.getScore().getValue()
                #weight = weight**2
                weight = exp(-1.*weight)
                even = even + newParticle * weight
            else:
                even = even + newParticle
        
        is_odd = not is_odd
        
        if verbose:
            numberAlignedParticles = numberAlignedParticles + 1
            progressBar.update(numberAlignedParticles)

    # determine resolution if needed
    fsc = None
    if determine_resolution:
        # apply spectral weighting to sum
        f_even = fft(even)
        w_even = complexDiv(f_even, wedgeSum_even)
        w_even = ifft(w_even)        
        w_even.shiftscale(0.0,1/float(sizeX*sizeY*sizeZ))
        
        f_odd = fft(odd)
        w_odd = complexDiv(f_odd, wedgeSum_odd)
        w_odd = ifft(w_odd)        
        w_odd.shiftscale(0.0,1/float(sizeX*sizeY*sizeZ))
        
        from pytom.basic.correlation import FSC
        fsc = FSC(w_even, w_odd, sizeX//2, mask, verbose=False)
    
    # add together
    result = even+odd
    wedgeSum = wedgeSum_even+wedgeSum_odd

    invert_WedgeSum( invol=wedgeSum, r_max=sizeX/2-2., lowlimit=.05*len(particleList), lowval=.05*len(particleList))
    #wedgeSum.write(averageName[:len(averageName)-3] + '-WedgeSumInverted.em')
    result = convolute(v=result, k=wedgeSum, kernel_in_fourier=True)
    # do a low pass filter
    #result = lowpassFilter(result, sizeX/2-2, (sizeX/2-1)/10.)[0]
    
    return (result, fsc)
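In the resolution branch above, each half-set average is divided by its wedge sum in Fourier space before the FSC is computed. A hedged numpy version of that spectral weighting, assuming a full-size (not reduced-complex) wedge sum and adding a small guard against empty wedge regions, which the PyTom code handles separately via invert_WedgeSum and limit:

import numpy as np

def spectrally_weight_sketch(half_average, wedge_sum, eps=1e-6):
    # mirrors the complexDiv / ifft / shiftscale sequence in average2
    f = np.fft.fftn(half_average)
    w = np.zeros_like(f)
    populated = np.abs(wedge_sum) > eps
    w[populated] = f[populated] / wedge_sum[populated]
    return np.fft.ifftn(w).real   # ifftn already includes the 1/N scaling applied via shiftscale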
Code example #12
def average( particleList, averageName, showProgressBar=False, verbose=False,
        createInfoVolumes=False, weighting=False, norm=False):
    """
    average : Creates new average from a particleList
    @param particleList: The particles
    @param averageName: Filename of new average 
    @param verbose: Prints particle information. Disabled by default. 
    @param createInfoVolumes: Create info data (wedge sum, inverted density) too? False by default.
    @param weighting: apply weighting to each average according to its correlation score
    @param norm: apply normalization for each particle
    @return: A new Reference object
    @rtype: L{pytom.basic.structures.Reference}
    @author: Thomas Hrabe
    @change: limit for wedgeSum set to 1% of particles to avoid division by small numbers - FF
    """
    from pytom_volume import read,vol,reducedToFull,limit, complexRealMult
    from pytom.basic.filter import lowpassFilter, rotateWeighting
    from pytom_volume import transformSpline as transform
    from pytom.basic.fourier import convolute
    from pytom.basic.structures import Reference
    from pytom.basic.normalise import mean0std1
    from pytom.tools.ProgressBar import FixedProgBar
    from math import exp
    import os

    if len(particleList) == 0:
        raise RuntimeError('The particle list is empty. Aborting!')
    
    if showProgressBar:
        progressBar = FixedProgBar(0,len(particleList),'Particles averaged ')
        progressBar.update(0)
        numberAlignedParticles = 0
    
    result = []
    wedgeSum = []
    
    newParticle = None
    # pre-check that scores != 0
    if weighting:
        wsum = 0.
        for particleObject in particleList:
            wsum += particleObject.getScore().getValue()
        if wsum < 0.00001:
            weighting = False
            print("Warning: all scores have been zero - weighting not applied")

    
    for particleObject in particleList:
        
        if verbose:
            print(particleObject)

    
        if not os.path.exists(particleObject.getFilename()): continue
        particle = read(particleObject.getFilename())
        if norm: # normalize the particle
            mean0std1(particle) # happen inplace
        
        wedgeInfo = particleObject.getWedge()
        # apply its wedge to itself
        particle = wedgeInfo.apply(particle)
        
        if result == []:
            sizeX = particle.sizeX() 
            sizeY = particle.sizeY()
            sizeZ = particle.sizeZ()
            
            newParticle = vol(sizeX,sizeY,sizeZ)
            
            centerX = sizeX // 2
            centerY = sizeY // 2
            centerZ = sizeZ // 2
            
            result = vol(sizeX,sizeY,sizeZ)
            result.setAll(0.0)
            if analytWedge:
                wedgeSum = wedgeInfo.returnWedgeVolume(wedgeSizeX=sizeX, wedgeSizeY=sizeY, wedgeSizeZ=sizeZ)
            else:
                # > FF bugfix
                wedgeSum = wedgeInfo.returnWedgeVolume(sizeX,sizeY,sizeZ)
                # < FF
                # > TH bugfix
                #wedgeSum = vol(sizeX,sizeY,sizeZ)
                # < TH
                #wedgeSum.setAll(0)
            assert wedgeSum.sizeX() == sizeX and wedgeSum.sizeY() == sizeY and wedgeSum.sizeZ() == sizeZ/2+1, \
                    "wedge initialization result in wrong dims :("
            wedgeSum.setAll(0)

        ### create spectral wedge weighting
        rotation = particleObject.getRotation()
        rotinvert = rotation.invert()
        if analytWedge:
            # > analytical buggy version
            wedge = wedgeInfo.returnWedgeVolume(sizeX,sizeY,sizeZ,False, rotinvert)
        else:
            # > FF: interpol bugfix
            wedge = rotateWeighting( weighting=wedgeInfo.returnWedgeVolume(sizeX,sizeY,sizeZ,False),
                                     z1=rotinvert[0], z2=rotinvert[1], x=rotinvert[2], mask=None,
                                     isReducedComplex=True, returnReducedComplex=True)
            # < FF
            # > TH bugfix
            #wedgeVolume = wedgeInfo.returnWedgeVolume(wedgeSizeX=sizeX, wedgeSizeY=sizeY, wedgeSizeZ=sizeZ,
            #                                    humanUnderstandable=True, rotation=rotinvert)
            #wedge = rotate(volume=wedgeVolume, rotation=rotinvert, imethod='linear')
            # < TH

        ### shift and rotate particle
        shiftV = particleObject.getShift()
        newParticle.setAll(0)
            
        transform(particle,newParticle,-rotation[1],-rotation[0],-rotation[2],
                  centerX,centerY,centerZ,-shiftV[0],-shiftV[1],-shiftV[2],0,0,0)
        
        if weighting:
            weight = 1.-particleObject.getScore().getValue()
            #weight = weight**2
            weight = exp(-1.*weight)
            result = result + newParticle * weight
            wedgeSum = wedgeSum + wedge * weight
        else:
            result = result + newParticle
            wedgeSum = wedgeSum + wedge
        
        if showProgressBar:
            numberAlignedParticles = numberAlignedParticles + 1
            progressBar.update(numberAlignedParticles)

    ###apply spectral weighting to sum
    result = lowpassFilter(result, sizeX/2-1, 0.)[0]
    #if createInfoVolumes:
    result.write(averageName[:len(averageName)-3]+'-PreWedge.em')
    wedgeSum.write(averageName[:len(averageName)-3] + '-WedgeSumUnscaled.em')
        
    invert_WedgeSum( invol=wedgeSum, r_max=sizeX/2-2., lowlimit=.05*len(particleList), lowval=.05*len(particleList))
    
    if createInfoVolumes:
        wedgeSum.write(averageName[:len(averageName)-3] + '-WedgeSumInverted.em')
        
    result = convolute(v=result, k=wedgeSum, kernel_in_fourier=True)

    # do a low pass filter
    #result = lowpassFilter(result, sizeX/2-2, (sizeX/2-1)/10.)[0]
    result.write(averageName)
    
    if createInfoVolumes:
        resultINV = result * -1
        #write sign inverted result to disk (good for chimera viewing ... )
        resultINV.write(averageName[:len(averageName)-3]+'-INV.em')
    newReference = Reference(averageName,particleList)
    
    return newReference
Code example #13
File: average.py  Project: xmzzaa/PyTom
def averageGPU(particleList,
               averageName,
               showProgressBar=False,
               verbose=False,
               createInfoVolumes=False,
               weighting=False,
               norm=False,
               gpuId=None,
               profile=True):
    """
    average : Creates new average from a particleList
    @param particleList: The particles
    @param averageName: Filename of new average
    @param verbose: Prints particle information. Disabled by default.
    @param createInfoVolumes: Create info data (wedge sum, inverted density) too? False by default.
    @param weighting: apply weighting to each average according to its correlation score
    @param norm: apply normalization for each particle
    @return: A new Reference object
    @rtype: L{pytom.basic.structures.Reference}
    @author: Thomas Hrabe
    @change: limit for wedgeSum set to 1% or particles to avoid division by small numbers - FF
    """
    import time
    from pytom.tompy.io import read, write, read_size
    from pytom.tompy.filter import bandpass as lowpassFilter, rotateWeighting, applyFourierFilter, applyFourierFilterFull, create_wedge
    from pytom.voltools import transform, StaticVolume
    from pytom.basic.structures import Reference
    from pytom.tompy.normalise import mean0std1
    from pytom.tompy.tools import volumesSameSize, invert_WedgeSum, create_sphere
    from pytom.tompy.transform import fourier_full2reduced, fourier_reduced2full
    from cupyx.scipy.fftpack.fft import fftn as fftnP
    from cupyx.scipy.fftpack.fft import ifftn as ifftnP
    from cupyx.scipy.fftpack.fft import get_fft_plan
    from pytom.tools.ProgressBar import FixedProgBar
    from multiprocessing import RawArray
    import numpy as np
    import cupy as xp

    if not gpuId is None:
        device = f'gpu:{gpuId}'
        xp.cuda.Device(gpuId).use()
    else:
        print(gpuId)
        raise Exception('Running gpu code on non-gpu device')
    print(device)
    cstream = xp.cuda.Stream()
    if profile:
        stream = xp.cuda.Stream.null
        t_start = stream.record()

    # from pytom.tools.ProgressBar import FixedProgBar
    from math import exp
    import os

    if len(particleList) == 0:
        raise RuntimeError('The particle list is empty. Aborting!')

    if showProgressBar:
        progressBar = FixedProgBar(0, len(particleList), 'Particles averaged ')
        progressBar.update(0)
        numberAlignedParticles = 0

    # pre-check that scores != 0
    if weighting:
        wsum = 0.
        for particleObject in particleList:
            wsum += particleObject.getScore().getValue()
        if wsum < 0.00001:
            weighting = False
            print("Warning: all scores have been zero - weighting not applied")
    import time
    sx, sy, sz = read_size(particleList[0].getFilename())
    wedgeInfo = particleList[0].getWedge().convert2numpy()
    print('angle: ', wedgeInfo.getWedgeAngle())
    wedgeZero = xp.fft.fftshift(
        xp.array(wedgeInfo.returnWedgeVolume(sx, sy, sz, True).get(),
                 dtype=xp.float32))
    # wedgeZeroReduced = fourier_full2reduced(wedgeZero)
    wedge = xp.zeros_like(wedgeZero, dtype=xp.float32)
    wedgeSum = xp.zeros_like(wedge, dtype=xp.float32)
    print('init texture')
    wedgeText = StaticVolume(xp.fft.fftshift(wedgeZero),
                             device=device,
                             interpolation='filt_bspline')

    newParticle = xp.zeros((sx, sy, sz), dtype=xp.float32)

    centerX = sx // 2
    centerY = sy // 2
    centerZ = sz // 2

    result = xp.zeros((sx, sy, sz), dtype=xp.float32)

    fftplan = get_fft_plan(wedge.astype(xp.complex64))

    n = 0

    total = len(particleList)
    # total = int(np.floor((11*1024**3 - mempool.total_bytes())/(sx*sy*sz*4)))
    # total = 128
    #
    #
    # particlesNP = np.zeros((total, sx, sy, sz),dtype=np.float32)
    # particles = []
    # mask = create_sphere([sx,sy,sz], sx//2-6, 2)
    # raw = RawArray('f', int(particlesNP.size))
    # shared_array = np.ctypeslib.as_array(raw)
    # shared_array[:] = particlesNP.flatten()
    # procs = allocateProcess(particleList, shared_array, n, total, wedgeZero.size)
    # del particlesNP

    if profile:
        t_end = stream.record()
        t_end.synchronize()

        time_took = xp.cuda.get_elapsed_time(t_start, t_end)
        print(f'startup time {n:5d}: \t{time_took:.3f}ms')
        t_start = stream.record()

    for particleObject in particleList:

        rotation = particleObject.getRotation()
        rotinvert = rotation.invert()
        shiftV = particleObject.getShift()

        # if n % total == 0:
        #     while len(procs):
        #         procs =[proc for proc in procs if proc.is_alive()]
        #         time.sleep(0.1)
        #         print(0.1)
        #     # del particles
        #     # xp._default_memory_pool.free_all_blocks()
        #     # pinned_mempool.free_all_blocks()
        #     particles = xp.array(shared_array.reshape(total, sx, sy, sz), dtype=xp.float32)
        #     procs = allocateProcess(particleList, shared_array, n, total, size=wedgeZero.size)
        #     #pinned_mempool.free_all_blocks()
        #     #print(mempool.total_bytes()/1024**3)

        particle = read(particleObject.getFilename(), deviceID=device)

        #particle = particles[n%total]

        if norm:  # normalize the particle
            mean0std1(particle)  # happen inplace

        # apply its wedge to
        #particle = applyFourierFilter(particle, wedgeZeroReduced)
        #particle = (xp.fft.ifftn( xp.fft.fftn(particle) * wedgeZero)).real
        particle = (ifftnP(fftnP(particle, plan=fftplan) * wedgeZero,
                           plan=fftplan)).real

        ### create spectral wedge weighting

        wedge *= 0

        wedgeText.transform(
            rotation=[rotinvert[0], rotinvert[2], rotinvert[1]],
            rotation_order='rzxz',
            output=wedge)
        #wedge = xp.fft.fftshift(fourier_reduced2full(create_wedge(30, 30, 21, 42, 42, 42, rotation=[rotinvert[0],rotinvert[2], rotinvert[1]])))
        # if analytWedge:
        #     # > analytical buggy version
        # wedge = wedgeInfo.returnWedgeVolume(sx, sy, sz, True, rotinvert)
        # else:
        #     # > FF: interpol bugfix

        # wedge = rotateWeighting(weighting=wedgeInfo.returnWedgeVolume(sx, sy, sz, True), rotation=[rotinvert[0], rotinvert[2], rotinvert[1]])
        #     # < FF
        #     # > TH bugfix
        #     # wedgeVolume = wedgeInfo.returnWedgeVolume(wedgeSizeX=sizeX, wedgeSizeY=sizeY, wedgeSizeZ=sizeZ,
        #     #                                    humanUnderstandable=True, rotation=rotinvert)
        #     # wedge = rotate(volume=wedgeVolume, rotation=rotinvert, imethod='linear')
        #     # < TH

        ### shift and rotate particle

        newParticle *= 0
        transform(particle,
                  output=newParticle,
                  rotation=[-rotation[1], -rotation[2], -rotation[0]],
                  center=[centerX, centerY, centerZ],
                  translation=[-shiftV[0], -shiftV[1], -shiftV[2]],
                  device=device,
                  interpolation='filt_bspline',
                  rotation_order='rzxz')

        #write(f'trash/GPU_{n}.em', newParticle)
        # print(rotation.toVector())
        # break
        result += newParticle
        wedgeSum += xp.fft.fftshift(wedge)
        # if showProgressBar:
        #     numberAlignedParticles = numberAlignedParticles + 1
        #     progressBar.update(numberAlignedParticles)

        if n % total == 0:
            if profile:
                t_end = stream.record()
                t_end.synchronize()

                time_took = xp.cuda.get_elapsed_time(t_start, t_end)
                print(f'total time {n:5d}: \t{time_took:.3f}ms')
                t_start = stream.record()
        cstream.synchronize()
        n += 1

    print('averaged particles')
    ###apply spectral weighting to sum

    result = lowpassFilter(result, high=sx / 2 - 1, sigma=0)
    # if createInfoVolumes:
    write(averageName[:len(averageName) - 3] + '-PreWedge.em', result)
    write(averageName[:len(averageName) - 3] + '-WedgeSumUnscaled.em',
          fourier_full2reduced(wedgeSum))

    wedgeSumINV = invert_WedgeSum(wedgeSum,
                                  r_max=sx // 2 - 2.,
                                  lowlimit=.05 * len(particleList),
                                  lowval=.05 * len(particleList))

    #print(wedgeSum.mean(), wedgeSum.std())
    if createInfoVolumes:
        write(averageName[:len(averageName) - 3] + '-WedgeSumInverted.em',
              xp.fft.fftshift(wedgeSumINV))

    result = applyFourierFilterFull(result, xp.fft.fftshift(wedgeSumINV))

    # do a low pass filter
    result = lowpassFilter(result, high=sx / 2 - 2, sigma=(sx / 2 - 1) / 10.)
    write(averageName, result)

    if createInfoVolumes:
        resultINV = result * -1
        # write sign inverted result to disk (good for chimera viewing ... )
        write(averageName[:len(averageName) - 3] + '-INV.em', resultINV)

    newReference = Reference(averageName, particleList)

    return newReference
Code example #14
    def __init__(self,
                 vol1,
                 vol2,
                 score,
                 mask=None,
                 iniRot=None,
                 iniTrans=None,
                 opti='fmin_powell',
                 interpolation='linear',
                 verbose=False):
        """
        alignment of a particle against a reference

        @param vol1: (constant) volume
        @type vol1: L{pytom_volume.vol}
        @param vol2: volume that is matched to reference
        @type vol2: L{pytom_volume.vol}
        @param score: score for alignment - e.g., pytom.basic.correlation.nxcc
        @type score: L{pytom.basic.correlation}
        @param mask: mask correlation is constrained on
        @type mask: L{pytom_volume.vol}
        @param iniRot: initial rotation of vol2
        @type iniRot: L{pytom.basic.Rotation}
        @param iniTrans: initial translation of vol2
        @type iniTrans: L{pytom.basic.Shift}
        @param opti: optimizer ('fmin_powell', 'fmin', 'fmin_cg', 'fmin_slsqp', 'fmin_bfgs')
        @type opti: L{str}
        @param interpolation: interpolation type - 'linear' (default) or 'spline'
        @type interpolation: str

        @author: FF
        """
        from pytom.basic.normalise import normaliseUnderMask, mean0std1
        from pytom.tools.macros import volumesSameSize
        from pytom_volume import vol
        from pytom.basic.structures import Rotation, Shift
        assert isinstance(interpolation,
                          str), "interpolation must be of type str"

        self.verbose = verbose
        if not volumesSameSize(vol1, vol2):
            raise RuntimeError('Vol1 and vol2 must have same size!')

        # normalize constant volume
        if mask:
            (v, p) = normaliseUnderMask(vol1, mask)
        else:
            v = mean0std1(vol1, True)

        self.vol1 = v
        self.vol2 = vol2
        self.rotvol2 = vol(self.vol2.sizeX(), self.vol2.sizeY(),
                           self.vol2.sizeZ())
        self.mask = mask

        if not iniRot:
            iniRot = Rotation()
        if not iniTrans:
            iniTrans = Shift()
        self.rot_trans = self.transRot2vector(rot=iniRot, trans=iniTrans)

        self.score = score
        self.val = -100000.
        self.centX = int(self.vol1.sizeX() // 2)
        self.centY = int(self.vol1.sizeY() // 2)
        self.centZ = int(self.vol1.sizeZ() // 2)
        self.binning = 1
        self.interpolation = interpolation

        # set optimizer
        self.opti = opti
        if opti == 'fmin':
            self.optimizer = scipy.optimize.fmin
        elif opti == 'fmin_slsqp':
            self.optimizer = scipy.optimize.fmin_slsqp
        elif opti == 'fmin_cg':
            self.optimizer = scipy.optimize.fmin_cg
        elif opti == 'fmin_bfgs':
            self.optimizer = scipy.optimize.fmin_bfgs
        elif opti == 'fmin_powell':
            self.optimizer = scipy.optimize.fmin_powell
        else:
            raise ValueError("opti must be one of 'fmin', 'fmin_slsqp', 'fmin_cg', 'fmin_bfgs', 'fmin_powell'")