Example #1
def distance(p, ref, freq, mask, binning):
    from pytom.basic.correlation import nxcc
    from pytom_volume import vol, initSphere, read, pasteCenter
    from pytom.basic.filter import lowpassFilter
    from pytom.basic.transformations import resize
    v = p.getTransformedVolume(binning)
    w = p.getWedge()
    r = ref.getVolume()
    a = lowpassFilter(w.apply(v, p.getRotation().invert()), freq)[0]
    b = lowpassFilter(w.apply(r, p.getRotation().invert()), freq)[0]

    if not mask:
        mask = vol(r)
        initSphere(mask,
                   r.sizeX() // 2 - 3, 3, 0,
                   r.sizeX() // 2,
                   r.sizeY() // 2,
                   r.sizeZ() // 2)
    else:
        # The mask is binned (sampled every n points), which reduces the smoothing of its edges.
        maskBin = read(mask, 0, 0, 0, 0, 0, 0, 0, 0, 0, binning, binning,
                       binning)
        if a.sizeX() != maskBin.sizeX() or a.sizeY() != maskBin.sizeY(
        ) or a.sizeZ() != maskBin.sizeZ():
            mask = vol(a.sizeX(), a.sizeY(), a.sizeZ())
            mask.setAll(0)
            pasteCenter(maskBin, mask)
        else:
            mask = maskBin

    s = nxcc(a, b, mask)

    d2 = 2 * (1 - s)

    return d2
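Note that d2 = 2*(1 - s) maps the normalized cross-correlation s in [-1, 1] to a distance in [0, 4]; for zero-mean, unit-variance volumes this equals the mean squared difference between them. A minimal self-contained NumPy sketch of the metric (an illustration only, not the PyTom nxcc/wedge pipeline above):

import numpy as np

def nxcc_distance(a, b):
    # normalize both volumes to zero mean and unit variance
    a = (a - a.mean()) / a.std()
    b = (b - b.mean()) / b.std()
    s = float(np.mean(a * b))   # normalized cross-correlation
    return 2.0 * (1.0 - s)      # 0 = identical, 2 = uncorrelated, 4 = anti-correlated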
Example #2
def initialize(pl, settings):
    from pytom.basic.structures import Particle, ParticleList
    # from pytom.alignment.alignmentFunctions import average2
    from pytom.basic.filter import lowpassFilter
    import numpy as np
    import os

    print("Initializing the class centroids ...")
    pl = pl.copy()
    pl.sortByScore()
    if settings["noise"]:
        pl = pl[:int((1-settings["noise"])*len(pl))]

    K = settings["ncluster"]
    freq = settings["frequency"]
    kn = len(pl)//K 
    references = {}
    frequencies = {}
    # get the first class centroid
    pp = pl[:kn]
    # avg, fsc = average2(pp, norm=True, verbose=False)
    pp.setClassAllParticles('0')
    res, tmp, tmp2 = calculate_averages(pp, settings["binning"], None, outdir=settings["output_directory"])
    avg = res['0']
    avg = lowpassFilter(avg, freq, freq/10.)[0]
    avg.write(os.path.join(settings['output_directory'], 'initial_0.em') )
    p = Particle(os.path.join(settings['output_directory'], 'initial_0.em'))
    p.setClass('0')
    references['0'] = p
    frequencies['0'] = freq

    for k in range(1, K):
        distances = [4]*len(pl)
        for c, ref in references.items():
            args = list(zip(pl, [ref]*len(pl), [freq]*len(pl), [settings["fmask"]]*len(pl), [settings["binning"]]*len(pl)))
            dist = mpi.parfor(distance, args)
            for i in range(len(pl)):
                if distances[i] > dist[i]:
                    distances[i] = dist[i]
        
        distances = np.asarray(distances)
        print('sum distances: ', distances.sum())
        distances = distances/np.sum(distances)
        idx = np.random.choice(len(pl), kn, replace=False, p=distances)
        pp = ParticleList()
        for i in idx:
            pp.append(pl[int(i)])
        # avg, fsc = average2(pp, norm=True, verbose=False)
        pp.setClassAllParticles('0')
        res, tmp, tmp2 = calculate_averages(pp, settings["binning"], None, outdir=settings["output_directory"])
        avg = res['0']
        avg = lowpassFilter(avg, freq, freq/10.)[0]
        kname = os.path.join(settings['output_directory'], 'initial_{}.em'.format(k))
        avg.write(kname)
        p = Particle(kname)
        p.setClass(str(k))
        references[str(k)] = p
        frequencies[str(k)] = freq
    
    return references, frequencies
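This is k-means++-style seeding: each further centroid is drawn with probability proportional to a particle's distance to its nearest existing centroid (using the raw distance rather than the squared distance of canonical k-means++, initialized to the metric's maximum of 4). The sampling step in isolation, as a hedged NumPy sketch:

import numpy as np

def pick_seed_indices(distances, kn):
    # distances[i]: distance of particle i to its nearest existing centroid
    p = np.asarray(distances, dtype=float)
    p = p / p.sum()   # normalize into a probability distribution
    return np.random.choice(len(p), kn, replace=False, p=p)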
Example #3
def focus_score(p, ref, freq, diff_mask, binning):
    from pytom.basic.correlation import nxcc
    from pytom.basic.filter import lowpassFilter
    v = p.getTransformedVolume(binning)
    w = p.getWedge()
    r = ref.getVolume()
    a = lowpassFilter(w.apply(v, p.getRotation().invert()), freq)[0]
    b = lowpassFilter(w.apply(r, p.getRotation().invert()), freq)[0]
    s = nxcc(a, b, diff_mask.getVolume())

    return s
Example #4
def score_noalign_proxy(p, ref, freq, offset, binning, mask):
    from pytom.basic.structures import Shift, Rotation
    from pytom.basic.correlation import nxcc
    from pytom.basic.filter import lowpassFilter
    v = p.getTransformedVolume(binning)
    w = p.getWedge()
    r = ref.getVolume()
    a = lowpassFilter(w.apply(v, p.getRotation().invert()), freq)[0]
    b = lowpassFilter(w.apply(r, p.getRotation().invert()), freq)[0]
    score = nxcc(a, b)

    return (p.getShift(), p.getRotation(), score, p.getFilename())
Example #5
def calculate_difference_map(v1,
                             band1,
                             v2,
                             band2,
                             mask=None,
                             focus_mask=None,
                             align=True,
                             sigma=None,
                             threshold=0.4):
    """mask if for alignment, while focus_mask is for difference map.
    """
    from pytom_volume import vol, power, abs, limit, transformSpline, variance, mean, max, min
    from pytom.basic.normalise import mean0std1
    from pytom.basic.filter import lowpassFilter

    # do lowpass filtering first
    lv1 = lowpassFilter(v1, band1, band1 / 10.)[0]
    lv2 = lowpassFilter(v2, band2, band2 / 10.)[0]

    # do alignment of two volumes, if required. v1 is used as reference.
    if align:
        from sh_alignment.frm import frm_align
        band = int(band1 if band1 < band2 else band2)
        pos, angle, score = frm_align(lv2, None, lv1, None, [4, 64], band,
                                      lv1.sizeX() // 4, mask)
        shift = [
            pos[0] - v1.sizeX() // 2, pos[1] - v1.sizeY() // 2,
            pos[2] - v1.sizeZ() // 2
        ]

        # transform v2
        lvv2 = vol(lv2)
        transformSpline(lv2, lvv2, -angle[1], -angle[0], -angle[2],
                        lv2.sizeX() // 2,
                        lv2.sizeY() // 2,
                        lv2.sizeZ() // 2, -shift[0], -shift[1], -shift[2], 0,
                        0, 0)
    else:
        lvv2 = lv2

    # do normalization
    mean0std1(lv1)
    mean0std1(lvv2)

    # only consider the density beyond certain sigma
    if sigma is None or sigma == 0:
        pass
    elif sigma < 0:  # negative density counts
        assert min(lv1) < sigma
        assert min(lvv2) < sigma
        limit(lv1, 0, 0, sigma, 0, False, True)
        limit(lvv2, 0, 0, sigma, 0, False, True)
    else:  # positive density counts
        assert max(lv1) > sigma
        assert max(lvv2) > sigma
        limit(lv1, sigma, 0, 0, 0, True, False)
        limit(lvv2, sigma, 0, 0, 0, True, False)

    # if we want to focus on specific area only
    if focus_mask:
        lv1 *= focus_mask
        lvv2 *= focus_mask

    # calculate the STD map
    avg = (lv1 + lvv2) / 2
    var1 = avg - lv1
    power(var1, 2)
    var2 = avg - lvv2
    power(var2, 2)

    std_map = var1 + var2
    power(std_map, 0.5)

    # calculate the coefficient of variance map
    # std_map = std_map/abs(avg)

    if focus_mask:
        std_map *= focus_mask

    # threshold the STD map
    mv = mean(std_map)
    threshold = mv + (max(std_map) - mv) * threshold
    limit(std_map, threshold, 0, threshold, 1, True, True)

    # do a lowpass filtering
    std_map1 = lowpassFilter(std_map, v1.sizeX() // 4, v1.sizeX() / 40.)[0]

    if align:
        std_map2 = vol(std_map)
        transformSpline(std_map1, std_map2, angle[0], angle[1], angle[2],
                        v1.sizeX() // 2,
                        v1.sizeY() // 2,
                        v1.sizeZ() // 2, 0, 0, 0, shift[0], shift[1], shift[2])
    else:
        std_map2 = std_map1

    limit(std_map1, 0.5, 0, 1, 1, True, True)
    limit(std_map2, 0.5, 0, 1, 1, True, True)

    # return the respective difference maps
    return (std_map1, std_map2)
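With exactly two normalized inputs the STD map has a closed form: avg = (lv1 + lvv2)/2, so var1 + var2 = (lv1 - lvv2)**2 / 2 and std_map = |lv1 - lvv2| / sqrt(2), i.e. a scaled absolute-difference map. A quick NumPy check of that identity (illustration only):

import numpy as np

a, b = np.random.randn(16, 16, 16), np.random.randn(16, 16, 16)
avg = (a + b) / 2
std_map = np.sqrt((avg - a) ** 2 + (avg - b) ** 2)
assert np.allclose(std_map, np.abs(a - b) / np.sqrt(2))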
Example #6
def average( particleList, averageName, showProgressBar=False, verbose=False,
        createInfoVolumes=False, weighting=False, norm=False):
    """
    average : Creates new average from a particleList
    @param particleList: The particles
    @param averageName: Filename of new average 
    @param verbose: Prints particle information. Disabled by default. 
    @param createInfoVolumes: Create info data (wedge sum, inverted density) too? False by default.
    @param weighting: apply weighting to each average according to its correlation score
    @param norm: apply normalization for each particle
    @return: A new Reference object
    @rtype: L{pytom.basic.structures.Reference}
    @author: Thomas Hrabe
    @change: limit for wedgeSum set to 1% of particles to avoid division by small numbers - FF
    """
    from pytom_volume import read,vol,reducedToFull,limit, complexRealMult
    from pytom.basic.filter import lowpassFilter, rotateWeighting
    from pytom_volume import transformSpline as transform
    from pytom.basic.fourier import convolute
    from pytom.basic.structures import Reference
    from pytom.basic.normalise import mean0std1
    from pytom.tools.ProgressBar import FixedProgBar
    from math import exp
    import os

    if len(particleList) == 0:
        raise RuntimeError('The particle list is empty. Aborting!')
    
    if showProgressBar:
        progressBar = FixedProgBar(0,len(particleList),'Particles averaged ')
        progressBar.update(0)
        numberAlignedParticles = 0
    
    result = []
    wedgeSum = []
    
    newParticle = None
    # pre-check that scores != 0
    if weighting:
        wsum = 0.
        for particleObject in particleList:
            wsum += particleObject.getScore().getValue()
        if wsum < 0.00001:
            weighting = False
            print("Warning: all scores have been zero - weighting not applied")

    
    for particleObject in particleList:
        
        if verbose:
            print(particleObject)

    
        if not os.path.exists(particleObject.getFilename()): continue
        particle = read(particleObject.getFilename())
        if norm: # normalize the particle
            mean0std1(particle) # happen inplace
        
        wedgeInfo = particleObject.getWedge()
        # apply its wedge to itself
        particle = wedgeInfo.apply(particle)
        
        if result == []:
            sizeX = particle.sizeX() 
            sizeY = particle.sizeY()
            sizeZ = particle.sizeZ()
            
            newParticle = vol(sizeX,sizeY,sizeZ)
            
            centerX = sizeX/2 
            centerY = sizeY/2 
            centerZ = sizeZ/2 
            
            result = vol(sizeX,sizeY,sizeZ)
            result.setAll(0.0)
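            # NOTE: analytWedge is a module-level flag in the original file
            # (assumed False here); it selects the analytical wedge volume
            # instead of the interpolation-based bugfix branch below.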
            if analytWedge:
                wedgeSum = wedgeInfo.returnWedgeVolume(wedgeSizeX=sizeX, wedgeSizeY=sizeY, wedgeSizeZ=sizeZ)
            else:
                # > FF bugfix
                wedgeSum = wedgeInfo.returnWedgeVolume(sizeX,sizeY,sizeZ)
                # < FF
                # > TH bugfix
                #wedgeSum = vol(sizeX,sizeY,sizeZ)
                # < TH
                #wedgeSum.setAll(0)
            assert wedgeSum.sizeX() == sizeX and wedgeSum.sizeY() == sizeY and wedgeSum.sizeZ() == sizeZ/2+1, \
                    "wedge initialization result in wrong dims :("
            wedgeSum.setAll(0)

        ### create spectral wedge weighting
        rotation = particleObject.getRotation()
        rotinvert = rotation.invert()
        if analytWedge:
            # > analytical buggy version
            wedge = wedgeInfo.returnWedgeVolume(sizeX,sizeY,sizeZ,False, rotinvert)
        else:
            # > FF: interpol bugfix
            wedge = rotateWeighting( weighting=wedgeInfo.returnWedgeVolume(sizeX,sizeY,sizeZ,False),
                                     z1=rotinvert[0], z2=rotinvert[1], x=rotinvert[2], mask=None,
                                     isReducedComplex=True, returnReducedComplex=True)
            # < FF
            # > TH bugfix
            #wedgeVolume = wedgeInfo.returnWedgeVolume(wedgeSizeX=sizeX, wedgeSizeY=sizeY, wedgeSizeZ=sizeZ,
            #                                    humanUnderstandable=True, rotation=rotinvert)
            #wedge = rotate(volume=wedgeVolume, rotation=rotinvert, imethod='linear')
            # < TH

        ### shift and rotate particle
        shiftV = particleObject.getShift()
        newParticle.setAll(0)
            
        transform(particle,newParticle,-rotation[1],-rotation[0],-rotation[2],
                  centerX,centerY,centerZ,-shiftV[0],-shiftV[1],-shiftV[2],0,0,0)
        
        if weighting:
            weight = 1.-particleObject.getScore().getValue()
            #weight = weight**2
            weight = exp(-1.*weight)
            result = result + newParticle * weight
            wedgeSum = wedgeSum + wedge * weight
        else:
            result = result + newParticle
            wedgeSum = wedgeSum + wedge
        
        if showProgressBar:
            numberAlignedParticles = numberAlignedParticles + 1
            progressBar.update(numberAlignedParticles)

    ###apply spectral weighting to sum
    result = lowpassFilter(result, sizeX/2-1, 0.)[0]
    #if createInfoVolumes:
    result.write(averageName[:len(averageName)-3]+'-PreWedge.em')
    wedgeSum.write(averageName[:len(averageName)-3] + '-WedgeSumUnscaled.em')
        
    invert_WedgeSum( invol=wedgeSum, r_max=sizeX/2-2., lowlimit=.05*len(particleList), lowval=.05*len(particleList))
    
    if createInfoVolumes:
        wedgeSum.write(averageName[:len(averageName)-3] + '-WedgeSumInverted.em')
        
    result = convolute(v=result, k=wedgeSum, kernel_in_fourier=True)

    # do a low pass filter
    #result = lowpassFilter(result, sizeX/2-2, (sizeX/2-1)/10.)[0]
    result.write(averageName)
    
    if createInfoVolumes:
        resultINV = result * -1
        #write sign inverted result to disk (good for chimera viewing ... )
        resultINV.write(averageName[:len(averageName)-3]+'-INV.em')
    newReference = Reference(averageName,particleList)
    
    return newReference
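The heart of average() is missing-wedge compensation: sum the aligned particles, sum their rotated wedge filters, and divide the summed data by the summed wedge in Fourier space, flooring the wedge sum so sparsely covered frequencies are not amplified into noise. A schematic NumPy version, under the simplifying assumption of plain binary Fourier masks:

import numpy as np

def wedge_compensated_average(particles, wedges, lowlimit):
    # particles: aligned real-space volumes; wedges: matching binary Fourier masks
    num = sum(np.fft.fftn(p) * w for p, w in zip(particles, wedges))
    wsum = np.maximum(sum(wedges), lowlimit)   # floor, e.g. 5% of the particle count
    return np.real(np.fft.ifftn(num / wsum))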
Example #7
def _distributeAverageMPI(particleList,averageName,showProgressBar = False,verbose=False,
                          createInfoVolumes = False,setParticleNodesRatio = 3,sendEndMessage = False):
    """
    _distributeAverageMPI : Distributes averaging to multiple MPI nodes.
    @param particleList: The particles
    @param averageName: Filename of new average 
    @param verbose: Prints particle information. Disabled by default. 
    @param createInfoVolumes: Create info data (wedge sum, inverted density) too? False by default. 
    @return: A new Reference object
    @rtype: L{pytom.basic.structures.Reference}
    @author: Thomas Hrabe
    """
    
    import pytom_mpi
    from pytom.alignment.structures import ExpectationJob
    from pytom.parallel.parallelWorker import ParallelWorker
    from pytom.parallel.alignmentMessages import ExpectationJobMsg
    from pytom_volume import read,complexDiv,complexRealMult
    from pytom.basic.fourier import fft,ifft
    from pytom.basic.filter import lowpassFilter
    from pytom.basic.structures import Reference
    
    import os 
    import sys
    numberOfNodes = pytom_mpi.size()
    
    particleNodesRatio = float(len(particleList)) / float(numberOfNodes)

    splitFactor = numberOfNodes
    
    if particleNodesRatio < setParticleNodesRatio:
        # make sure each node gets at least setParticleNodesRatio particles
        splitFactor = len(particleList) // setParticleNodesRatio
    
    splitLists = particleList.splitNSublists(splitFactor)

    msgList = []
    avgNameList = []
    preList = []
    wedgeList = []
    
    for i in range(len(splitLists)):
        plist = splitLists[i]
        avgName = averageName + '_dist' +str(i) + '.em'
        avgNameList.append(avgName)
        
        preList.append(averageName + '_dist' +str(i) + '-PreWedge.em')
        wedgeList.append(averageName + '_dist' +str(i) + '-WedgeSumUnscaled.em')
        
        job = ExpectationJob(plist,avgName)
        message = ExpectationJobMsg(0,i)
        message.setJob(job)
        msgList.append(message)
        
    #distribute averaging
    worker = ParallelWorker()
    worker.fillJobList(msgList)
    
    worker.parallelWork(True,sendEndMessage)
    
    #collect results
    result = read(preList[0])
    wedgeSum = read(wedgeList[0])
    
    for i in range(1,len(preList)):
        r = read(preList[i])
        result += r
    
        w = read(wedgeList[i])
        wedgeSum += w 
        
    result.write(averageName[:len(averageName)-3]+'-PreWedge.em')
    wedgeSum.write(averageName[:len(averageName)-3] + '-WedgeSumUnscaled.em')

    invert_WedgeSum( invol=wedgeSum, r_max=result.sizeX()/2-2., lowlimit=.05*len(particleList),
                     lowval=.05*len(particleList))
    fResult = fft(result)
    r = complexRealMult(fResult,wedgeSum)

    result = ifft(r)
    result.shiftscale(0.0,1/float(result.sizeX()*result.sizeY()*result.sizeZ()))

    # do a low pass filter
    result = lowpassFilter(result, result.sizeX()/2-2, (result.sizeX()/2-1)/10.)[0]

    result.write(averageName)
    
    # clean results
    for i in range(len(preList)):
        os.remove(avgNameList[i])
        os.remove(preList[i])
        os.remove(wedgeList[i])
        
    return Reference(averageName,particleList)    
Example #8
    def run(self, verbose=False):
        from pytom_volume import read, sum
        from pytom.basic.filter import lowpassFilter
        from pytom.basic.correlation import nxcc
        from pytom.basic.structures import Rotation
        from pytom.tools.ProgressBar import FixedProgBar
        import pytom_mpi
        
        while True:
            # get the job
            job = self.get_job()
            
            try:
                pairs = job["Pairs"]
                pl_filename = job["ParticleList"]
            except Exception:
                if verbose:
                    print(self.node_name + ': end')
                break  # received a non-job message, so stop

            from pytom.basic.structures import ParticleList
            pl = ParticleList('.')
            pl.fromXMLFile(pl_filename)

            if verbose:
                prog = FixedProgBar(0, len(pairs)-1, self.node_name+':')
                i = 0

            # run the job
            result = {}
            last_filename = None
            binning = int(job["Binning"])

            mask = read(job["Mask"], 0, 0, 0, 0, 0, 0, 0, 0, 0, binning, binning, binning)
            
            
            for pair in pairs:
                if verbose:
                    prog.update(i)
                    i += 1
                g = pl[pair[0]]
                f = pl[pair[1]]
                vf = f.getTransformedVolume(binning)
                wf = f.getWedge().getWedgeObject()
                wf_rotation = f.getRotation().invert()
                # wf.setRotation(Rotation(-rotation[1],-rotation[0],-rotation[2]))
                # wf_vol = wf.returnWedgeVolume(vf.sizeX(), vf.sizeY(), vf.sizeZ(), True, -rotation[1],-rotation[0],-rotation[2])
                vf = lowpassFilter(vf, job["Frequency"], 0)[0]

                if g.getFilename() != last_filename:
                    vg = g.getTransformedVolume(binning)
                    wg = g.getWedge().getWedgeObject()
                    wg_rotation = g.getRotation().invert()
                    # wg.setRotation(Rotation(-rotation[1],-rotation[0],-rotation[2]))
                    # wg_vol = wg.returnWedgeVolume(vg.sizeX(), vg.sizeY(), vg.sizeZ(), True, -rotation[1],-rotation[0],-rotation[2])
                    vg = lowpassFilter(vg, job["Frequency"], 0)[0]

                    last_filename = g.getFilename()

                score = nxcc( wg.apply(vf, wg_rotation), wf.apply(vg, wf_rotation), mask)
                # overlapped_wedge_vol = wf_vol * wg_vol
                # scaling = float(overlapped_wedge_vol.numelem())/sum(overlapped_wedge_vol)
                # score *= scaling

                result[pair] = score
            
            # send back the result
            self.send_result(result)
        
        pytom_mpi.finalise()
Example #9
File: misc.py Project: xmzzaa/PyTom
def frm_align_vol_rscore(vf, wf, vg, wg, b, radius=None, mask=None, peak_offset=None, weights=None, position=None):
    """Obsolete.
    """
    from pytom_volume import vol, rotateSpline, peak
    from pytom.basic.transformations import shift
    from pytom.basic.correlation import xcf
    from pytom.basic.filter import lowpassFilter
    from pytom.basic.structures import Mask
    from pytom_volume import initSphere
    from pytom_numpy import vol2npy

    if vf.sizeX()!=vg.sizeX() or vf.sizeY()!=vg.sizeY() or vf.sizeZ()!=vg.sizeZ():
        raise RuntimeError('Two volumes must have the same size!')

    if mask is None:
        mask = vol(vf.sizeX(), vf.sizeY(), vf.sizeZ())
        initSphere(mask, vf.sizeX()/2, 0,0, vf.sizeX()/2,vf.sizeY()/2,vf.sizeZ()/2)
    elif mask.__class__ == vol:
        pass
    elif mask.__class__ == Mask:
        mask = mask.getVolume()
    elif isinstance(mask, int):
        mask_radius = mask
        mask = vol(vf.sizeX(), vf.sizeY(), vf.sizeZ())
        initSphere(mask, mask_radius, 0,0, vf.sizeX()/2,vf.sizeY()/2,vf.sizeZ()/2)
    else:
        raise RuntimeError('Given mask has wrong type!')

    if peak_offset is None:
        peak_offset = vol(vf.sizeX(), vf.sizeY(), vf.sizeZ())
        initSphere(peak_offset, vf.sizeX()/2, 0,0, vf.sizeX()/2,vf.sizeY()/2,vf.sizeZ()/2)
    elif isinstance(peak_offset, int):
        peak_radius = peak_offset
        peak_offset = vol(vf.sizeX(), vf.sizeY(), vf.sizeZ())
        initSphere(peak_offset, peak_radius, 0,0, vf.sizeX()/2,vf.sizeY()/2,vf.sizeZ()/2)
    elif peak_offset.__class__ == vol:
        pass
    else:
        raise RuntimeError('Peak offset is given wrong!')

    # cut out the outer part which normally contains nonsense
    vf = vf*mask

    # # normalize them first
    # from pytom.basic.normalise import mean0std1
    # mean0std1(vf)
    # mean0std1(vg)

    if position is None: # if position is not given, we have to find it ourselves
        # first roughly determine the orientation (only according to the energy info)
        # get multiple candidate orientations
        orientations = frm_determine_orientation_rscore(vf, wf, vg, wg, b, radius, weights)
    else:
        # the position is given by the user
        vf2 = shift(vf, -position[0]+vf.sizeX()/2, -position[1]+vf.sizeY()/2, -position[2]+vf.sizeZ()/2, 'spline')
        res = frm_fourier_adaptive_wedge_vol_rscore(vf2, wf, vg, wg, b, radius, weights)
        orientation, max_value = frm_find_best_angle_interp(res)

        return position, orientation, max_value

    # iteratively refine the position & orientation
    from pytom.basic.structures import WedgeInfo
    from pytom.tools.maths import euclidianDistance
    max_iter = 10 # maximal number of iterations
    wedge = WedgeInfo([90+wf[0], 90-wf[1]])
    old_pos = [-1, -1, -1]
    vg2 = vol(vg.sizeX(), vg.sizeY(), vg.sizeZ())
    lowpass_vf = lowpassFilter(vf, radius, 0)[0]
    for i in range(max_iter):
        peak_value = 0.0
        position = None
        for orientation in orientations:
            orientation = orientation[0]

            rotateSpline(vg, vg2, orientation[0], orientation[1], orientation[2]) # first rotate
            vg2 = wedge.apply(vg2) # then apply the wedge
            vg2 = lowpassFilter(vg2, radius, 0)[0]
            res = xcf(lowpass_vf, vg2) # find the position
            pos = peak(res, peak_offset)
            # val = res(pos[0], pos[1], pos[2])
            pos, val = find_subpixel_peak_position(vol2npy(res), pos)
            if val > peak_value:
                position = pos
                peak_value = val

        if euclidianDistance(position, old_pos) <= 1.0:
            break
        else:
            old_pos = position

        # here we shift the target, not the reference
        # if you don't want the shift to change the energy landscape, use a fourier shift
        vf2 = shift(vf, -position[0]+vf.sizeX()/2, -position[1]+vf.sizeY()/2, -position[2]+vf.sizeZ()/2, 'fourier')
        res = frm_fourier_adaptive_wedge_vol_rscore(vf2, wf, vg, wg, b, radius, weights)
        orientations = frm_find_topn_angles_interp(res)

    return position, orientations[0][0], orientations[0][1]
Example #10
def classify(pl, settings):
    """
    auto-focused classification
    @param pl: particle list
    @type pl: L{pytom.basic.structures.ParticleList}
    @param settings: settings for autofocus classification
    @type settings: C{dict}
    """
    from pytom.basic.structures import Particle, Shift, Rotation
    from pytom.basic.filter import lowpassFilter
    from itertools import combinations
    import numpy as np
    import os

    # make the particle list picklable
    pl.pickle()

    # define the starting status
    offset = settings["offset"]
    binning = settings["binning"]
    mask = settings["mask"]
    sfrequency = settings["frequency"]  # starting frequency
    outdir = settings["output_directory"]

    references = {}
    frequencies = {}
    ncluster = 0
    if settings["external"]:  # use external references
        for class_label, fname in enumerate(settings["external"]):
            p = Particle(fname)
            p.setClass(str(class_label))
            references[str(class_label)] = p
            frequencies[str(class_label)] = sfrequency
            ncluster += 1
    else:
        if not settings["resume"]:
            if not settings["ncluster"]:
                print("Must specify the number of clusters!")
                return

            # k-means++ way to initialize
            ncluster = settings["ncluster"]
            references, frequencies = initialize(pl, settings)
        else:
            avgs, tmp, tmp2 = calculate_averages(pl,
                                                 binning,
                                                 mask,
                                                 outdir=outdir)

            for class_label, r in avgs.items():
                fname = os.path.join(
                    outdir, 'initial_class' + str(class_label) + '.em')
                rr = lowpassFilter(r, sfrequency, sfrequency / 10.)[0]
                rr.write(fname)
                p = Particle(fname)
                p.setClass(str(class_label))
                references[str(class_label)] = p
                frequencies[str(class_label)] = sfrequency
                ncluster += 1

    # start the classification
    for i in range(settings["niteration"]):
        if ncluster < 2:
            print('Not enough clusters. Exit!')
            break

        print("Starting iteration %d ..." % i)
        old_pl = pl.copy()

        # compute the difference maps
        print("Calculate difference maps ...")
        args = []
        for pair in combinations(list(references.keys()), 2):
            args.append((references[pair[0]], frequencies[pair[0]],
                         references[pair[1]], frequencies[pair[1]], mask,
                         settings["fmask"], binning, i, settings["sigma"],
                         settings["threshold"], outdir))

        dmaps = {}
        res = mpi.parfor(calculate_difference_map_proxy, args)
        for r in res:
            dmaps[(r[0].getClass(), r[1].getClass())] = r

        # start the alignments
        print("Start alignments ...")
        scores = calculate_scores(pl, references, frequencies, offset, binning,
                                  mask, settings["noalign"])

        # determine the class labels & track the class changes
        pl = determine_class_labels(pl, references, frequencies, scores, dmaps,
                                    binning, settings["noise"])

        # kick out the small classes
        pls = pl.copy().splitByClass()
        nlabels = {}
        for pp in pls:
            nlabels[pp[0].getClass()] = len(pp)
            print("Number of class " + str(pp[0].getClass()) + ": " +
                  str(len(pp)))

        max_labels = np.max(list(nlabels.values()))
        to_delete = []
        if settings["dispersion"]:
            min_labels = float(max_labels) / settings["dispersion"]
            for key, value in nlabels.items():
                if value <= min_labels:
                    to_delete.append(key)

        for pp in pls:
            if pp[0].getClass() in to_delete:
                pp.setClassAllParticles('-1')
                print("Set class " + str(pp[0].getClass()) + " to noise")

        # split the top n classes
        pl = split_topn_classes(pls, len(to_delete))

        # update the references
        print("Calculate averages ...")
        avgs, freqs, wedgeSum = calculate_averages(pl,
                                                   binning,
                                                   mask,
                                                   outdir=outdir)
        ncluster = 0
        references = {}
        for class_label, r in avgs.items():
            if not settings["fixed_frequency"]:
                freq = freqs[str(class_label)]
            else:
                freq = sfrequency
            frequencies[str(class_label)] = int(freq)
            print('Resolution of class %s: %d' % (str(class_label), freq))

            fname = os.path.join(
                outdir, 'iter' + str(i) + '_class' + str(class_label) + '.em')
            rr = lowpassFilter(r, freq, freq / 10.)[0]
            rr.write(fname)
            p = Particle(fname)
            p.setClass(str(class_label))
            references[str(class_label)] = p
            ncluster += 1

            w = wedgeSum[str(class_label)]
            fname = os.path.join(
                outdir,
                'iter' + str(i) + '_class' + str(class_label) + '_wedge.em')
            w.write(fname)

        # write the result to the disk
        pl.toXMLFile(
            os.path.join(outdir, 'classified_pl_iter' + str(i) + '.xml'))

        # check the stopping criterion
        if compare_pl(old_pl, pl):
            break
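The small-class kick-out keeps a class only while its size exceeds max_class_size / dispersion; anything at or below that threshold is relabelled as noise ('-1'), and split_topn_classes then splits the same number of large classes to keep the cluster count constant. The rule in isolation (hypothetical sizes, mirroring the logic above):

sizes = {'0': 500, '1': 480, '2': 30}   # hypothetical class sizes
dispersion = 10
min_size = max(sizes.values()) / dispersion                   # 50.0
to_delete = [c for c, n in sizes.items() if n <= min_size]    # ['2'] -> noise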
Example #11
    def start(self, job, verbose=False):
        if self.mpi_id == 0:
            from pytom.basic.structures import ParticleList, Reference
            from pytom.basic.resolution import bandToAngstrom
            from pytom.basic.filter import lowpassFilter
            from math import ceil

            # randomly split the particle list into 2 half sets
            if len(job.particleList.splitByClass()) != 2:
                import numpy as np
                n = len(job.particleList)
                labels = np.random.randint(2, size=(n, ))
                print(self.node_name + ': Number of 1st half set:',
                      n - np.sum(labels), 'Number of 2nd half set:',
                      np.sum(labels))
                for i in range(n):
                    p = job.particleList[i]
                    p.setClass(labels[i])

            self.destination = job.destination
            new_reference = job.reference
            old_freq = job.freq
            new_freq = job.freq
            # main node
            for i in range(job.max_iter):
                if verbose:
                    print(self.node_name + ': starting iteration %d ...' % i)

                # construct a new job by updating the reference and the frequency
                new_job = FRMJob(job.particleList, new_reference, job.mask,
                                 job.peak_offset, job.sampleInformation,
                                 job.bw_range, new_freq, job.destination,
                                 job.max_iter - i, job.r_score, job.weighting)

                # distribute it
                self.distribute_job(new_job, verbose)

                # get the result back
                all_even_pre = None  # the 1st set
                all_even_wedge = None
                all_odd_pre = None  # the 2nd set
                all_odd_wedge = None
                pl = ParticleList()
                for j in range(self.num_workers):
                    result = self.get_result()
                    pl += result.pl
                    pre, wedge = self.retrieve_res_vols(result.name)

                    if self.assignment[result.worker_id] == 0:
                        if all_even_pre:
                            all_even_pre += pre
                            all_even_wedge += wedge
                        else:
                            all_even_pre = pre
                            all_even_wedge = wedge
                    else:
                        if all_odd_pre:
                            all_odd_pre += pre
                            all_odd_wedge += wedge
                        else:
                            all_odd_pre = pre
                            all_odd_wedge = wedge

                # write the new particle list to the disk
                pl.toXMLFile('aligned_pl_iter' + str(i) + '.xml')

                # create the averages separately
                if verbose:
                    print(self.node_name + ': determining the resolution ...')
                even = self.create_average(all_even_pre, all_even_wedge)
                odd = self.create_average(all_odd_pre, all_odd_wedge)

                # apply symmetries if any
                even = job.symmetries.applyToParticle(even)
                odd = job.symmetries.applyToParticle(odd)

                # determine the transformation between even and odd
                # here we assume the wedge from both sets are fully sampled
                from sh_alignment.frm import frm_align
                pos, angle, score = frm_align(odd, None, even, None,
                                              job.bw_range, new_freq,
                                              job.peak_offset)
                print(self.node_name +
                      ': Transform of even set to match the odd set - shift: ' +
                      str(pos) + ' rotation: ' + str(angle))

                # transform the odd set accordingly
                from pytom_volume import vol, transformSpline
                from pytom.basic.fourier import ftshift
                from pytom_volume import reducedToFull
                from pytom_freqweight import weight
                transformed_odd_pre = vol(odd.sizeX(), odd.sizeY(),
                                          odd.sizeZ())
                full_all_odd_wedge = reducedToFull(all_odd_wedge)
                ftshift(full_all_odd_wedge)
                odd_weight = weight(
                    full_all_odd_wedge)  # the funny part of pytom
                transformed_odd = vol(odd.sizeX(), odd.sizeY(), odd.sizeZ())

                transformSpline(all_odd_pre, transformed_odd_pre, -angle[1],
                                -angle[0], -angle[2],
                                odd.sizeX() / 2,
                                odd.sizeY() / 2,
                                odd.sizeZ() / 2, -(pos[0] - odd.sizeX() / 2),
                                -(pos[1] - odd.sizeY() / 2),
                                -(pos[2] - odd.sizeZ() / 2), 0, 0, 0)
                odd_weight.rotate(-angle[1], -angle[0], -angle[2])
                transformed_odd_wedge = odd_weight.getWeightVolume(True)
                transformSpline(odd, transformed_odd, -angle[1], -angle[0],
                                -angle[2],
                                odd.sizeX() / 2,
                                odd.sizeY() / 2,
                                odd.sizeZ() / 2, -(pos[0] - odd.sizeX() / 2),
                                -(pos[1] - odd.sizeY() / 2),
                                -(pos[2] - odd.sizeZ() / 2), 0, 0, 0)

                all_odd_pre = transformed_odd_pre
                all_odd_wedge = transformed_odd_wedge
                odd = transformed_odd

                # determine resolution
                resNyquist, resolutionBand, numberBands = self.determine_resolution(
                    even, odd, job.fsc_criterion, None, job.mask, verbose)

                # write the half set to the disk
                even.write(
                    os.path.join(self.destination,
                                 'fsc_' + str(i) + '_even.em'))
                odd.write(
                    os.path.join(self.destination,
                                 'fsc_' + str(i) + '_odd.em'))

                current_resolution = bandToAngstrom(
                    resolutionBand, job.sampleInformation.getPixelSize(),
                    numberBands, 1)
                if verbose:
                    print(
                        self.node_name + ': current resolution ' +
                        str(current_resolution), resNyquist)

                # create new average
                all_even_pre += all_odd_pre
                all_even_wedge += all_odd_wedge
                average = self.create_average(all_even_pre, all_even_wedge)

                # apply symmetries
                average = job.symmetries.applyToParticle(average)

                # filter average to resolution
                average_name = os.path.join(self.destination,
                                            'average_iter' + str(i) + '.em')
                average.write(average_name)

                # update the references
                new_reference = [
                    Reference(
                        os.path.join(self.destination,
                                     'fsc_' + str(i) + '_even.em')),
                    Reference(
                        os.path.join(self.destination,
                                     'fsc_' + str(i) + '_odd.em'))
                ]

                # low pass filter the reference and write it to the disk
                filtered = lowpassFilter(average, ceil(resolutionBand),
                                         ceil(resolutionBand) / 10)
                filtered_ref_name = os.path.join(
                    self.destination, 'average_iter' + str(i) + '_res' +
                    str(current_resolution) + '.em')
                filtered[0].write(filtered_ref_name)

                # if the position/orientation is not improved, break it

                # change the frequency to a higher value
                new_freq = int(ceil(resolutionBand)) + 1
                if new_freq <= old_freq:
                    if job.adaptive_res is not False:  # two different strategies
                        print(
                            self.node_name +
                            ': Determined resolution got worse. Including an additional %f percent of frequencies to be aligned!'
                            % job.adaptive_res)
                        new_freq = int((1 + job.adaptive_res) * old_freq)
                    else:  # always increase by 1
                        print(
                            self.node_name +
                            ': Determined resolution got worse. Increasing the frequency to be aligned by 1!'
                        )
                        new_freq = old_freq + 1
                        old_freq = new_freq
                else:
                    old_freq = new_freq
                if new_freq >= numberBands:
                    print(self.node_name +
                          ': New frequency too high. Terminate!')
                    break

                if verbose:
                    print(self.node_name + ': change the frequency to ' +
                          str(new_freq))

            # send end signal to other nodes and terminate itself
            self.end(verbose)
        else:
            # other nodes
            self.run(verbose)
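The frequency update at the end of each iteration follows one of two strategies when the measured resolution did not improve: grow the aligned band by a percentage (adaptive_res), keeping the previous reference band, or grow it by exactly one band. A compact restatement of that rule as a hypothetical standalone helper:

def next_frequency(new_freq, old_freq, adaptive_res):
    # returns the band to align next and the updated reference band
    if new_freq <= old_freq:   # resolution did not improve
        if adaptive_res is not False:
            new_freq = int((1 + adaptive_res) * old_freq)   # old_freq kept as-is
        else:
            new_freq = old_freq + 1
            old_freq = new_freq
    else:
        old_freq = new_freq
    return new_freq, old_freq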
Example #12
def frm_constrained_align(vf,
                          wf,
                          vg,
                          wg,
                          b,
                          max_freq,
                          peak_offset=None,
                          mask=None,
                          constraint=None,
                          weights=None,
                          position=None,
                          num_seeds=5,
                          pytom_volume=None):
    """Find the best alignment (translation & rotation) of volume vg (reference) to match vf.
    For details, please check the paper.

    Parameters
    ----------
    vf: Volume Nr. 1
        pytom_volume.vol

    wf: Mask of vf in Fourier space.
        pytom.basic.structures.Wedge. If none, no missing wedge.

    vg: Volume Nr. 2 / Reference
        pytom_volume.vol

    wg: Mask of vg in Fourier space.
        pytom.basic.structures.Wedge. If none, no missing wedge.

    b: Bandwidth range of spherical harmonics.
       None -> [4, 64]
       List -> [b_min, b_max]
       Integer -> [b, b]

    max_freq: Maximal frequency involved in calculation.
              Integer.

    peak_offset: The maximal offset which allows the peak of the score to be.
                 Or simply speaking, the maximal distance allowed to shift vg to match vf.
                 This parameter is needed to prevent shifting the reference volume out of the frame.
                 pytom_volume.vol / Integer. By default is half of the volume radius.

    mask: Mask volume for vg in real space.
          pytom_volume.vol

    constraint: Angular constraint
                sh_alignment.constrained_frm.AngularConstraint

    weights: Obsolete.

    position: If the translation is already known or not. If provided, no translational search will be conducted.
              List: [x,y,z], default None.

    num_seeds: Number of threads for the expectation maximization procedure. The more the better, yet slower.
               Integer, default is 5.

    Returns
    -------
    The best translation and rotation (Euler angle, ZXZ convention [Phi, Psi, Theta]) to transform vg to match vf:
    (best_translation, best_rotation, correlation_score)
    """
    from pytom_volume import vol, rotateSpline, peak
    from pytom.basic.transformations import shift
    from pytom.basic.correlation import FLCF
    from pytom.basic.filter import lowpassFilter
    from pytom.basic.structures import Mask, SingleTiltWedge
    from pytom_volume import initSphere
    from pytom_numpy import vol2npy

    if vf.sizeX() != vg.sizeX() or vf.sizeY() != vg.sizeY() or vf.sizeZ(
    ) != vg.sizeZ():
        raise RuntimeError('Two volumes must have the same size!')

    if wf is None:
        wf = SingleTiltWedge(0)
    if wg is None:
        wg = SingleTiltWedge(0)

    if peak_offset is None:
        peak_offset = vol(vf.sizeX(), vf.sizeY(), vf.sizeZ())
        initSphere(peak_offset,
                   vf.sizeX() / 4, 0, 0,
                   vf.sizeX() / 2,
                   vf.sizeY() / 2,
                   vf.sizeZ() / 2)
    elif peak_offset.__class__ == int:
        peak_radius = peak_offset
        peak_offset = vol(vf.sizeX(), vf.sizeY(), vf.sizeZ())
        initSphere(peak_offset, peak_radius, 0, 0,
                   vf.sizeX() / 2,
                   vf.sizeY() / 2,
                   vf.sizeZ() / 2)
    elif peak_offset.__class__ == vol:
        pass
    else:
        raise RuntimeError('Peak offset is given wrong!')

    # cut out the outer part which normally contains nonsense
    m = vol(vf.sizeX(), vf.sizeY(), vf.sizeZ())
    initSphere(m,
               vf.sizeX() / 2, 0, 0,
               vf.sizeX() / 2,
               vf.sizeY() / 2,
               vf.sizeZ() / 2)
    vf = vf * m
    vg = vg * m
    if mask is None:
        mask = m
    else:
        vg = vg * mask

    if position is None:  # if position is not given, we have to find it ourselves
        # first roughly determine the orientation (only according to the energy info)
        # get multiple candidate orientations
        numerator, denominator1, denominator2 = frm_correlate(
            vf, wf, vg, wg, b, max_freq, weights, True, None, None, False)
        score = numerator / (denominator1 * denominator2)**0.5
        res = frm_find_topn_constrained_angles_interp(
            score, num_seeds,
            get_adaptive_bw(max_freq, b) / 16., constraint)
    else:
        # the position is given by the user
        vf2 = shift(vf, -position[0] + vf.sizeX() / 2,
                    -position[1] + vf.sizeY() / 2,
                    -position[2] + vf.sizeZ() / 2, 'fourier')
        score = frm_correlate(vf2, wf, vg, wg, b, max_freq, weights, ps=False)
        orientation, max_value = frm_find_best_constrained_angle_interp(
            score, constraint=constraint)

        return position, orientation, max_value

    # iteratively refine the position & orientation
    from pytom.tools.maths import euclidianDistance
    max_iter = 10  # maximal number of iterations
    mask2 = vol(mask.sizeX(), mask.sizeY(),
                mask.sizeZ())  # store the rotated mask
    vg2 = vol(vg.sizeX(), vg.sizeY(), vg.sizeZ())
    lowpass_vf = lowpassFilter(vf, max_freq, max_freq / 10.)[0]

    max_position = None
    max_orientation = None
    max_value = -1.0
    for i in range(num_seeds):
        old_pos = [-1, -1, -1]
        lm_pos = [-1, -1, -1]
        lm_ang = None
        lm_value = -1.0
        orientation = res[i][0]  # initial orientation
        for j in range(max_iter):
            rotateSpline(vg, vg2, orientation[0], orientation[1],
                         orientation[2])  # first rotate
            rotateSpline(mask, mask2, orientation[0], orientation[1],
                         orientation[2])  # rotate the mask as well
            vg2 = wf.apply(vg2)  # then apply the wedge
            vg2 = lowpassFilter(vg2, max_freq, max_freq / 10.)[0]
            score = FLCF(lowpass_vf, vg2, mask2)  # find the position
            pos = peak(score, peak_offset)
            pos, val = find_subpixel_peak_position(vol2npy(score), pos)
            if val > lm_value:
                lm_pos = pos
                lm_ang = orientation
                lm_value = val

            if euclidianDistance(lm_pos, old_pos) <= 1.0:
                # terminate this thread
                if lm_value > max_value:
                    max_position = lm_pos
                    max_orientation = lm_ang
                    max_value = lm_value

                break
            else:
                old_pos = lm_pos

            # here we shift the target, not the reference
            # if you don't want the shift to change the energy landscape, use a fourier shift
            vf2 = shift(vf, -lm_pos[0] + vf.sizeX() / 2,
                        -lm_pos[1] + vf.sizeY() / 2,
                        -lm_pos[2] + vf.sizeZ() / 2, 'fourier')
            score = frm_correlate(vf2, wf, vg, wg, b, max_freq, weights, False,
                                  denominator1, denominator2, True)
            orientation, val = frm_find_best_constrained_angle_interp(
                score, constraint=constraint)

        else:  # no convergence within max_iter iterations; still keep the best result found so far
            if lm_value > max_value:
                max_position = lm_pos
                max_orientation = lm_ang
                max_value = lm_value

        # print(max_value)  # to show the convergence of the algorithm

    return max_position, max_orientation, max_value
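Each seed alternates between a translation search (the FLCF peak with the orientation fixed) and an orientation search (frm_correlate with the position fixed) until the peak moves by at most one voxel. The convergence skeleton, with find_shift and find_rotation as hypothetical stand-ins for those two steps:

from math import dist

def refine_seed(find_shift, find_rotation, orientation, max_iter=10):
    # alternate shift and rotation searches until the peak position
    # moves by <= 1 voxel (the same stopping criterion as above)
    old_pos = (-1.0, -1.0, -1.0)
    best_pos, best_ang, best_val = None, None, -1.0
    for _ in range(max_iter):
        pos, val = find_shift(orientation)    # translation, rotation fixed
        if val > best_val:
            best_pos, best_ang, best_val = pos, orientation, val
        if dist(pos, old_pos) <= 1.0:         # converged
            break
        old_pos = pos
        orientation = find_rotation(pos)      # rotation, translation fixed
    return best_pos, best_ang, best_val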
Example #13
def averageGPU(particleList,
               averageName,
               showProgressBar=False,
               verbose=False,
               createInfoVolumes=False,
               weighting=False,
               norm=False,
               gpuId=None,
               profile=True):
    """
    average : Creates new average from a particleList
    @param particleList: The particles
    @param averageName: Filename of new average
    @param verbose: Prints particle information. Disabled by default.
    @param createInfoVolumes: Create info data (wedge sum, inverted density) too? False by default.
    @param weighting: apply weighting to each average according to its correlation score
    @param norm: apply normalization for each particle
    @return: A new Reference object
    @rtype: L{pytom.basic.structures.Reference}
    @author: Thomas Hrabe
    @change: limit for wedgeSum set to 1% or particles to avoid division by small numbers - FF
    """
    import time
    from pytom.tompy.io import read, write, read_size
    from pytom.tompy.filter import bandpass as lowpassFilter, rotateWeighting, applyFourierFilter, applyFourierFilterFull, create_wedge
    from pytom.voltools import transform, StaticVolume
    from pytom.basic.structures import Reference
    from pytom.tompy.normalise import mean0std1
    from pytom.tompy.tools import volumesSameSize, invert_WedgeSum, create_sphere
    from pytom.tompy.transform import fourier_full2reduced, fourier_reduced2full
    from cupyx.scipy.fftpack.fft import fftn as fftnP
    from cupyx.scipy.fftpack.fft import ifftn as ifftnP
    from cupyx.scipy.fftpack.fft import get_fft_plan
    from pytom.tools.ProgressBar import FixedProgBar
    from multiprocessing import RawArray
    import numpy as np
    import cupy as xp

    if gpuId is not None:
        device = f'gpu:{gpuId}'
        xp.cuda.Device(gpuId).use()
    else:
        raise ValueError('averageGPU requires a gpuId; cannot run GPU code on a non-GPU device')
    cstream = xp.cuda.Stream()
    if profile:
        stream = xp.cuda.Stream.null
        t_start = stream.record()

    # from pytom.tools.ProgressBar import FixedProgBar
    from math import exp
    import os

    if len(particleList) == 0:
        raise RuntimeError('The particle list is empty. Aborting!')

    if showProgressBar:
        progressBar = FixedProgBar(0, len(particleList), 'Particles averaged ')
        progressBar.update(0)
        numberAlignedParticles = 0

    # pre-check that scores != 0
    if weighting:
        wsum = 0.
        for particleObject in particleList:
            wsum += particleObject.getScore().getValue()
        if wsum < 0.00001:
            weighting = False
            print("Warning: all scores have been zero - weighting not applied")
    sx, sy, sz = read_size(particleList[0].getFilename())
    wedgeInfo = particleList[0].getWedge().convert2numpy()
    print('angle: ', wedgeInfo.getWedgeAngle())
    wedgeZero = xp.fft.fftshift(
        xp.array(wedgeInfo.returnWedgeVolume(sx, sy, sz, True).get(),
                 dtype=xp.float32))
    # wedgeZeroReduced = fourier_full2reduced(wedgeZero)
    wedge = xp.zeros_like(wedgeZero, dtype=xp.float32)
    wedgeSum = xp.zeros_like(wedge, dtype=xp.float32)
    print('init texture')
    wedgeText = StaticVolume(xp.fft.fftshift(wedgeZero),
                             device=device,
                             interpolation='filt_bspline')

    newParticle = xp.zeros((sx, sy, sz), dtype=xp.float32)

    centerX = sx // 2
    centerY = sy // 2
    centerZ = sz // 2

    result = xp.zeros((sx, sy, sz), dtype=xp.float32)

    fftplan = get_fft_plan(wedge.astype(xp.complex64))

    n = 0

    total = len(particleList)
    # total = int(np.floor((11*1024**3 - mempool.total_bytes())/(sx*sy*sz*4)))
    # total = 128
    #
    #
    # particlesNP = np.zeros((total, sx, sy, sz),dtype=np.float32)
    # particles = []
    # mask = create_sphere([sx,sy,sz], sx//2-6, 2)
    # raw = RawArray('f', int(particlesNP.size))
    # shared_array = np.ctypeslib.as_array(raw)
    # shared_array[:] = particlesNP.flatten()
    # procs = allocateProcess(particleList, shared_array, n, total, wedgeZero.size)
    # del particlesNP

    if profile:
        t_end = stream.record()
        t_end.synchronize()

        time_took = xp.cuda.get_elapsed_time(t_start, t_end)
        print(f'startup time {n:5d}: \t{time_took:.3f}ms')
        t_start = stream.record()

    for particleObject in particleList:

        rotation = particleObject.getRotation()
        rotinvert = rotation.invert()
        shiftV = particleObject.getShift()

        # if n % total == 0:
        #     while len(procs):
        #         procs =[proc for proc in procs if proc.is_alive()]
        #         time.sleep(0.1)
        #         print(0.1)
        #     # del particles
        #     # xp._default_memory_pool.free_all_blocks()
        #     # pinned_mempool.free_all_blocks()
        #     particles = xp.array(shared_array.reshape(total, sx, sy, sz), dtype=xp.float32)
        #     procs = allocateProcess(particleList, shared_array, n, total, size=wedgeZero.size)
        #     #pinned_mempool.free_all_blocks()
        #     #print(mempool.total_bytes()/1024**3)

        particle = read(particleObject.getFilename(), deviceID=device)

        #particle = particles[n%total]

        if norm:  # normalize the particle
            mean0std1(particle)  # happen inplace

        # apply its wedge to
        #particle = applyFourierFilter(particle, wedgeZeroReduced)
        #particle = (xp.fft.ifftn( xp.fft.fftn(particle) * wedgeZero)).real
        particle = (ifftnP(fftnP(particle, plan=fftplan) * wedgeZero,
                           plan=fftplan)).real

        ### create spectral wedge weighting

        wedge *= 0

        wedgeText.transform(
            rotation=[rotinvert[0], rotinvert[2], rotinvert[1]],
            rotation_order='rzxz',
            output=wedge)
        #wedge = xp.fft.fftshift(fourier_reduced2full(create_wedge(30, 30, 21, 42, 42, 42, rotation=[rotinvert[0],rotinvert[2], rotinvert[1]])))
        # if analytWedge:
        #     # > analytical buggy version
        # wedge = wedgeInfo.returnWedgeVolume(sx, sy, sz, True, rotinvert)
        # else:
        #     # > FF: interpol bugfix

        # wedge = rotateWeighting(weighting=wedgeInfo.returnWedgeVolume(sx, sy, sz, True), rotation=[rotinvert[0], rotinvert[2], rotinvert[1]])
        #     # < FF
        #     # > TH bugfix
        #     # wedgeVolume = wedgeInfo.returnWedgeVolume(wedgeSizeX=sizeX, wedgeSizeY=sizeY, wedgeSizeZ=sizeZ,
        #     #                                    humanUnderstandable=True, rotation=rotinvert)
        #     # wedge = rotate(volume=wedgeVolume, rotation=rotinvert, imethod='linear')
        #     # < TH

        ### shift and rotate particle

        newParticle *= 0
        transform(particle,
                  output=newParticle,
                  rotation=[-rotation[1], -rotation[2], -rotation[0]],
                  center=[centerX, centerY, centerZ],
                  translation=[-shiftV[0], -shiftV[1], -shiftV[2]],
                  device=device,
                  interpolation='filt_bspline',
                  rotation_order='rzxz')

        #write(f'trash/GPU_{n}.em', newParticle)
        # print(rotation.toVector())
        # break
        result += newParticle
        wedgeSum += xp.fft.fftshift(wedge)
        # if showProgressBar:
        #     numberAlignedParticles = numberAlignedParticles + 1
        #     progressBar.update(numberAlignedParticles)

        if n % total == 0:
            if profile:
                t_end = stream.record()
                t_end.synchronize()

                time_took = xp.cuda.get_elapsed_time(t_start, t_end)
                print(f'total time {n:5d}: \t{time_took:.3f}ms')
                t_start = stream.record()
        cstream.synchronize()
        n += 1

    print('averaged particles')
    ###apply spectral weighting to sum

    result = lowpassFilter(result, high=sx / 2 - 1, sigma=0)
    # if createInfoVolumes:
    write(averageName[:len(averageName) - 3] + '-PreWedge.em', result)
    write(averageName[:len(averageName) - 3] + '-WedgeSumUnscaled.em',
          fourier_full2reduced(wedgeSum))

    wedgeSumINV = invert_WedgeSum(wedgeSum,
                                  r_max=sx // 2 - 2.,
                                  lowlimit=.05 * len(particleList),
                                  lowval=.05 * len(particleList))

    #print(wedgeSum.mean(), wedgeSum.std())
    if createInfoVolumes:
        write(averageName[:len(averageName) - 3] + '-WedgeSumInverted.em',
              xp.fft.fftshift(wedgeSumINV))

    result = applyFourierFilterFull(result, xp.fft.fftshift(wedgeSumINV))

    # do a low pass filter
    result = lowpassFilter(result, high=sx / 2 - 2, sigma=(sx / 2 - 1) / 10.)
    write(averageName, result)

    if createInfoVolumes:
        resultINV = result * -1
        # write sign inverted result to disk (good for chimera viewing ... )
        write(averageName[:len(averageName) - 3] + '-INV.em', resultINV)

    newReference = Reference(averageName, particleList)

    return newReference
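A detail worth copying from this function is FFT plan reuse: get_fft_plan is called once and the same plan is handed to every fftn/ifftn call inside the particle loop, avoiding per-particle replanning on the GPU. A minimal CuPy sketch of the pattern (assumes CuPy and a CUDA device are available):

import cupy as xp
from cupyx.scipy.fftpack import fftn, ifftn, get_fft_plan

vol = xp.random.rand(64, 64, 64).astype(xp.complex64)
plan = get_fft_plan(vol)   # plan once ...
for _ in range(10):        # ... reuse for every transform in the loop
    vol = ifftn(fftn(vol, plan=plan), plan=plan)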
Example #14
    def start(self, job, verbose=False):
        if self.mpi_id == 0:
            from pytom.basic.structures import ParticleList, Reference
            from pytom.basic.resolution import bandToAngstrom
            from pytom.basic.filter import lowpassFilter
            from math import ceil
            from pytom.basic.fourier import convolute
            from pytom_volume import vol, power, read

            # randomly split the particle list into 2 half sets
            import numpy as np
            num_pairs = len(job.particleList.pairs)
            for i in range(num_pairs):
                # randomize the class labels to indicate the two half sets
                pl = job.particleList.pairs[i].get_phase_flip_pl()
                n = len(pl)
                labels = np.random.randint(2, size=(n, ))
                print(self.node_name + ': Number of 1st half set:',
                      n - np.sum(labels), 'Number of 2nd half set:',
                      np.sum(labels))
                for j in range(n):
                    p = pl[j]
                    p.setClass(str(labels[j]))  # class labels are handled as strings elsewhere

            new_reference = job.reference
            old_freq = job.freq
            new_freq = job.freq
            # main node
            for i in range(job.max_iter):
                if verbose:
                    print(self.node_name + ': starting iteration %d ...' % i)

                # construct a new job by updating the reference and the frequency
                # here the job.particleList is actually ParticleListSet
                new_job = MultiDefocusJob(job.particleList, new_reference,
                                          job.mask, job.peak_offset,
                                          job.sampleInformation, job.bw_range,
                                          new_freq, job.destination,
                                          job.max_iter - i, job.r_score,
                                          job.weighting, job.bfactor)

                # distribute it
                num_all_particles = self.distribute_job(new_job, verbose)

                # calculate the denominator
                sum_ctf_squared = None
                for pair in job.particleList.pairs:
                    if sum_ctf_squared is None:
                        sum_ctf_squared = pair.get_ctf_sqr_vol() * pair.snr
                    else:
                        sum_ctf_squared += pair.get_ctf_sqr_vol() * pair.snr

                # get the result back
                all_even_pre = None
                all_even_wedge = None
                all_odd_pre = None
                all_odd_wedge = None
                pls = []
                for j in range(len(job.particleList.pairs)):
                    pls.append(ParticleList())

                for j in range(self.num_workers):
                    result = self.get_result()

                    pair_id = self.assignment[result.worker_id]
                    pair = job.particleList.pairs[pair_id]

                    pl = pls[pair_id]
                    pl += result.pl
                    even_pre, even_wedge, odd_pre, odd_wedge = self.retrieve_res_vols(
                        result.name)

                    if all_even_pre:
                        all_even_pre += even_pre * pair.snr
                        all_even_wedge += even_wedge
                        all_odd_pre += odd_pre * pair.snr
                        all_odd_wedge += odd_wedge
                    else:
                        all_even_pre = even_pre * pair.snr
                        all_even_wedge = even_wedge
                        all_odd_pre = odd_pre * pair.snr
                        all_odd_wedge = odd_wedge

                # write the new particle list to the disk
                for j in range(len(job.particleList.pairs)):
                    pls[j].toXMLFile('aligned_pl' + str(j) + '_iter' + str(i) +
                                     '.xml')

                # correct for the number of particles in wiener filter
                sum_ctf_squared = sum_ctf_squared / num_all_particles
                #                all_even_pre = all_even_pre/(num_all_particles/2)
                #                all_odd_pre = all_odd_pre/(num_all_particles/2)

                # bfactor
                if job.bfactor and job.bfactor != 'None':
                    #                    bfactor_kernel = create_bfactor_vol(sum_ctf_squared.sizeX(), job.sampleInformation.getPixelSize(), job.bfactor)
                    bfactor_kernel = read(job.bfactor)
                    bfactor_kernel_sqr = vol(bfactor_kernel)
                    power(bfactor_kernel_sqr, 2)
                    all_even_pre = convolute(all_even_pre, bfactor_kernel,
                                             True)
                    all_odd_pre = convolute(all_odd_pre, bfactor_kernel, True)
                    sum_ctf_squared = sum_ctf_squared * bfactor_kernel_sqr

                # create averages of two sets
                if verbose:
                    print(self.node_name + ': determining the resolution ...')
                even = self.create_average(
                    all_even_pre, sum_ctf_squared, all_even_wedge
                )  # assume that the CTF sum is the same for the even and odd
                odd = self.create_average(all_odd_pre, sum_ctf_squared,
                                          all_odd_wedge)

                # determine the transformation between even and odd
                # here we assume the wedge from both sets are fully sampled
                from sh_alignment.frm import frm_align
                pos, angle, score = frm_align(odd, None, even, None,
                                              job.bw_range, new_freq,
                                              job.peak_offset)
                print(
                    self.node_name +
                    ': transform of even set to match the odd set - shift: ' +
                    str(pos) + ' rotation: ' + str(angle))

                # transform the odd set accordingly
                from pytom_volume import vol, transformSpline
                from pytom.basic.fourier import ftshift
                from pytom_volume import reducedToFull
                from pytom_freqweight import weight
                transformed_odd_pre = vol(odd.sizeX(), odd.sizeY(),
                                          odd.sizeZ())
                full_all_odd_wedge = reducedToFull(all_odd_wedge)
                ftshift(full_all_odd_wedge)
                odd_weight = weight(
                    full_all_odd_wedge)  # wrap in pytom_freqweight.weight so the wedge can be rotated below
                transformed_odd = vol(odd.sizeX(), odd.sizeY(), odd.sizeZ())

                transformSpline(all_odd_pre, transformed_odd_pre, -angle[1],
                                -angle[0], -angle[2], int(odd.sizeX() / 2),
                                int(odd.sizeY() / 2), int(odd.sizeZ() / 2),
                                -(pos[0] - odd.sizeX() / 2),
                                -(pos[1] - odd.sizeY() / 2),
                                -(pos[2] - odd.sizeZ() / 2), 0, 0, 0)
                odd_weight.rotate(-angle[1], -angle[0], -angle[2])
                transformed_odd_wedge = odd_weight.getWeightVolume(True)
                transformSpline(odd, transformed_odd, -angle[1], -angle[0],
                                -angle[2], int(odd.sizeX() / 2),
                                int(odd.sizeY() / 2), int(odd.sizeZ() / 2),
                                -(pos[0] - odd.sizeX() / 2),
                                -(pos[1] - odd.sizeY() / 2),
                                -(pos[2] - odd.sizeZ() / 2), 0, 0, 0)

                all_odd_pre = transformed_odd_pre
                all_odd_wedge = transformed_odd_wedge
                odd = transformed_odd

                # apply symmetries before determining the resolution
                # with the gold standard you should be careful about applying symmetry!
                even = job.symmetries.applyToParticle(even)
                odd = job.symmetries.applyToParticle(odd)
                resNyquist, resolutionBand, numberBands = self.determine_resolution(
                    even, odd, job.fsc_criterion, None, job.mask, verbose)

                # write the half set to the disk
                even.write('fsc_' + str(i) + '_even.em')
                odd.write('fsc_' + str(i) + '_odd.em')

                current_resolution = bandToAngstrom(
                    resolutionBand, job.sampleInformation.getPixelSize(),
                    numberBands, 1)
                if verbose:
                    print(
                        self.node_name + ': current resolution ' +
                        str(current_resolution), resNyquist)

                # create new average
                all_even_pre += all_odd_pre
                all_even_wedge += all_odd_wedge
                #                all_even_pre = all_even_pre/2 # correct for the number of particles in wiener filter
                average = self.create_average(all_even_pre, sum_ctf_squared,
                                              all_even_wedge)

                # apply symmetries
                average = job.symmetries.applyToParticle(average)

                # filter average to resolution and update the new reference
                average_name = 'average_iter' + str(i) + '.em'
                average.write(average_name)

                # update the references
                new_reference = [
                    Reference('fsc_' + str(i) + '_even.em'),
                    Reference('fsc_' + str(i) + '_odd.em')
                ]

                # low pass filter the reference and write it to the disk
                filtered = lowpassFilter(average, ceil(resolutionBand),
                                         ceil(resolutionBand) / 10)
                filtered_ref_name = 'average_iter' + str(i) + '_res' + str(
                    current_resolution) + '.em'
                filtered[0].write(filtered_ref_name)

                # change the frequency to a higher value
                new_freq = int(ceil(resolutionBand)) + 1
                if new_freq <= old_freq:
                    if job.adaptive_res is not False:  # two different strategies
                        print(
                            self.node_name +
                            ': Determined resolution got worse. Extending the aligned frequency range by %f percent!'
                            % job.adaptive_res)
                        new_freq = int((1 + job.adaptive_res) * old_freq)
                    else:  # always increase by 1
                        print(
                            self.node_name +
                            ': Determined resolution got worse. Increasing the aligned frequency by 1!'
                        )
                        new_freq = old_freq + 1
                        old_freq = new_freq
                else:
                    old_freq = new_freq
                if new_freq >= numberBands:
                    print(self.node_name +
                          ': Determined frequency too high. Terminate!')
                    break

                if verbose:
                    print(self.node_name + ': change the frequency to ' +
                          str(new_freq))

            # send end signal to other nodes and terminate itself
            self.end(verbose)
        else:
            # other nodes
            self.run(verbose)
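
All of these driver loops end an iteration with the same frequency-update rule: move the aligned band just past the measured resolution, and widen it instead when the resolution got worse. A standalone restatement of that rule (illustrative helper, not part of PyTom):

from math import ceil

def next_alignment_frequency(old_freq, resolution_band, number_bands, adaptive_res=False):
    # push the band just past the measured resolution
    new_freq = int(ceil(resolution_band)) + 1
    if new_freq <= old_freq:  # resolution got worse
        if adaptive_res is not False:
            # widen the band by a fixed fraction
            new_freq = int((1 + adaptive_res) * old_freq)
        else:
            # fall back to widening by a single band
            new_freq = old_freq + 1
    if new_freq >= number_bands:
        return None  # caller should terminate, as in the loops above
    return new_freq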
Example #15
File: misc.py Project: xmzzaa/PyTom
def xu_align_vol(vf, wf, vg, wg, b, radius=None, mask=None, peak_offset=None):
    """Implementation of Xu's approach for alignment. For details, please check the paper.

    Parameters
    ----------
    vf: The volume you want to match.
        pytom_volume.vol

    wf: The single-tilt wedge information of volume vf.
        [missing_wedge_angle1, missing_wedge_angle2]. Note this is defined differently from frm_align in frm.py!

    vg: The reference volume.
        pytom_volume.vol

    wg: The single-tilt wedge information of volume vg.
        [missing_wedge_angle1, missing_wedge_angle2]. Note this is defined differently from frm_align in frm.py!

    b: The adaptive bandwidth of spherical harmonics.
       List [min_bandwidth, max_bandwidth], with min_bandwidth and max_bandwidth in the range [4, 64].
       Or an integer, which means a fixed bandwidth: min_bandwidth = max_bandwidth = integer.

    radius: The maximal radius in Fourier space, i.e. the maximal frequency involved in the calculation.
            Integer. Defaults to half of the volume size.

    mask: Real-space mask applied to vf before alignment.
          pytom_volume.vol, pytom.basic.structures.Mask, or an integer radius.
          Defaults to a sphere of half the volume size.

    peak_offset: The maximal allowed offset of the score peak.
                 Or simply put, the maximal distance vg is allowed to be shifted to match vf.
                 This parameter is needed to prevent shifting the reference volume out of the frame.
                 Integer. Defaults to half of the volume size.

    Returns
    -------
    The best translation and rotation (Euler angles, ZXZ convention [Phi, Psi, Theta]) to transform vg to match vf.
    (best_translation, best_rotation, correlation_score)
    """
    from pytom_volume import vol, rotateSpline, peak
    from pytom.basic.transformations import shift
    from pytom.basic.correlation import nXcf
    from pytom.basic.filter import lowpassFilter
    from pytom.basic.structures import Mask
    from pytom_volume import initSphere
    from pytom_numpy import vol2npy

    if vf.sizeX()!=vg.sizeX() or vf.sizeY()!=vg.sizeY() or vf.sizeZ()!=vg.sizeZ():
        raise RuntimeError('Two volumes must have the same size!')

    if mask is None:
        mask = vol(vf.sizeX(), vf.sizeY(), vf.sizeZ())
        initSphere(mask, vf.sizeX()/2, 0,0, vf.sizeX()/2,vf.sizeY()/2,vf.sizeZ()/2)
    elif mask.__class__ == vol:
        pass
    elif mask.__class__ == Mask:
        mask = mask.getVolume()
    elif isinstance(mask, int):
        mask_radius = mask
        mask = vol(vf.sizeX(), vf.sizeY(), vf.sizeZ())
        initSphere(mask, mask_radius, 0,0, vf.sizeX()/2,vf.sizeY()/2,vf.sizeZ()/2)
    else:
        raise RuntimeError('Given mask has wrong type!')

    if peak_offset is None:
        peak_offset = vol(vf.sizeX(), vf.sizeY(), vf.sizeZ())
        initSphere(peak_offset, vf.sizeX()/2, 0,0, vf.sizeX()/2,vf.sizeY()/2,vf.sizeZ()/2)
    elif isinstance(peak_offset, int):
        peak_radius = peak_offset
        peak_offset = vol(vf.sizeX(), vf.sizeY(), vf.sizeZ())
        initSphere(peak_offset, peak_radius, 0,0, vf.sizeX()/2,vf.sizeY()/2,vf.sizeZ()/2)
    elif peak_offset.__class__ == vol:
        pass
    else:
        raise RuntimeError('Peak offset is given wrong!')

    # cut out the outer part which normally contains nonsense
    vf = vf*mask

    position = None
    if position is None: # if the position is not given, we have to find it ourselves
        # first roughly determine the orientation (only according to the energy info)
        # get multiple candidate orientations
        orientations = frm_determine_orientation(vf, wf, vg, wg, b, radius, None, None, False)
    else:
        # the position is given by the user
        vf2 = shift(vf, -position[0]+vf.sizeX()/2, -position[1]+vf.sizeY()/2, -position[2]+vf.sizeZ()/2, 'spline')
        res = frm_fourier_adaptive_wedge_vol(vf2, wf, vg, wg, b, radius, None, None, False)
        orientation, max_value = frm_find_best_angle_interp(res)

        return position, orientation, max_value

    
    from pytom.basic.structures import WedgeInfo
    from pytom.tools.maths import euclidianDistance
    max_iter = 1 # maximal number of iterations
    wedge = WedgeInfo([90+wf[0], 90-wf[1]])
    old_pos = [-1, -1, -1]
    vg2 = vol(vg.sizeX(), vg.sizeY(), vg.sizeZ())
    lowpass_vf = lowpassFilter(vf, radius, 0)[0]
    
    peak_value = 0.0
    position = None
    ang = None
    for orientation in orientations:
        orientation = orientation[0]

        rotateSpline(vg, vg2, orientation[0], orientation[1], orientation[2]) # first rotate
        vg2 = wedge.apply(vg2) # then apply the wedge
        vg2 = lowpassFilter(vg2, radius, 0)[0]
        res = nXcf(lowpass_vf, vg2) # find the position
        pos = peak(res, peak_offset)
        # val = res(pos[0], pos[1], pos[2])
        pos, val = find_subpixel_peak_position(vol2npy(res), pos)
        if val > peak_value:
            position = pos
            ang = orientation
            peak_value = val

    return position, ang, peak_value
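
A minimal usage sketch for xu_align_vol (the file names, wedge angles and bandwidth below are assumptions for illustration; the wedge convention follows the docstring above, not frm_align):

from pytom_volume import read

vf = read('particle.em')    # experimental subtomogram (hypothetical path)
vg = read('reference.em')   # reference volume (hypothetical path)
wf = [30, 30]               # missing-wedge angles of vf (assumed values)
wg = [30, 30]               # missing-wedge angles of vg (assumed values)
# adaptive bandwidth between 4 and 32, Fourier radius limited to a quarter of the box
pos, ang, score = xu_align_vol(vf, wf, vg, wg, b=[4, 32],
                               radius=vf.sizeX() // 4, peak_offset=10)
print('shift:', pos, 'rotation (ZXZ):', ang, 'score:', score)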
Example #16
File: misc.py Project: xmzzaa/PyTom
def bart_align_vol(vf, wf, vg, wg, b, radius=None, peak_offset=None):
    """Implementation of Bartesaghi's approach for alignment. For details, please check the paper.

    Parameters
    ----------
    vf: The volume you want to match.
        pytom_volume.vol

    wf: The single-tilt wedge information of volume vf.
        [missing_wedge_angle1, missing_wedge_angle2]. Note this is defined differently from frm_align in frm.py!

    vg: The reference volume.
        pytom_volume.vol

    wg: The single-tilt wedge information of volume vg.
        [missing_wedge_angle1, missing_wedge_angle2]. Note this is defined differently from frm_align in frm.py!

    b: The bandwidth of spherical harmonics.
       Integer in the range [4, 64].

    radius: The maximal radius in Fourier space, i.e. the maximal frequency involved in the calculation.
            Integer. Defaults to half of the volume size.

    peak_offset: The maximal allowed offset of the score peak.
                 Or simply put, the maximal distance vg is allowed to be shifted to match vf.
                 This parameter is needed to prevent shifting the reference volume out of the frame.
                 Integer. Defaults to half of the volume size.

    Returns
    -------
    The best translation and rotation (Euler angles, ZXZ convention [Phi, Psi, Theta]) to transform vg to match vf.
    (best_translation, best_rotation, correlation_score)
    """
    from pytom_volume import vol, rotateSpline, max, peak
    from pytom.basic.correlation import nXcf
    from pytom.basic.filter import lowpassFilter
    from pytom.basic.structures import WedgeInfo
    from pytom_volume import initSphere

    if not radius: # set the radius
        radius = vf.sizeX() // 2  # integer, since it bounds the range() over radii below

    if peak_offset is None:
        peak_offset = vol(vf.sizeX(), vf.sizeY(), vf.sizeZ())
        initSphere(peak_offset, vf.sizeX()/2, 0,0, vf.sizeX()/2,vf.sizeY()/2,vf.sizeZ()/2)
    elif isinstance(peak_offset, int):
        peak_radius = peak_offset
        peak_offset = vol(vf.sizeX(), vf.sizeY(), vf.sizeZ())
        initSphere(peak_offset, peak_radius, 0,0, vf.sizeX()/2,vf.sizeY()/2,vf.sizeZ()/2)
    elif peak_offset.__class__ == vol:
        pass
    else:
        raise RuntimeError('Peak offset is given wrong!')
    
    from pytom.basic.fourier import fft, ftshift, iftshift
    from pytom_volume import reducedToFull, abs, real
    from .vol2sf import vol2sf
    from pytom_numpy import vol2npy

    # IMPORTANT: first apply IFFTSHIFT to the volume data (NOT FFTSHIFT, since for odd-sized data it matters!),
    # and only then apply the FFT.
    ff = abs(ftshift(reducedToFull(fft(iftshift(vf, inplace=False))), inplace=False))
    ff = real(ff)
    gg = abs(ftshift(reducedToFull(fft(iftshift(vg, inplace=False))), inplace=False))
    gg = real(gg)
    
    sf = None
    sg = None
    mf = create_wedge_sf(wf[0], wf[1], b)
    mg = create_wedge_sf(wg[0], wg[1], b)

    for r in range(3, radius+1): # Should start from 3 since the interpolation in the first 2 bands is not accurate.
        if sf is None:
            sf = vol2sf(ff, r, b)
            sg = vol2sf(gg, r, b)
        else:
            sf += vol2sf(ff, r, b)
            sg += vol2sf(gg, r, b)
    
    corr = frm_constrained_corr(sf, mf, sg, mg)
    ang, val = frm_find_best_angle_interp(corr)

    tmp = vol(vg.sizeX(),vg.sizeY(),vg.sizeZ())
    rotateSpline(vg, tmp, ang[0], ang[1], ang[2])
    wedge_f = WedgeInfo(90+wf[0], 90-wf[1])
    wedge_g = WedgeInfo(90+wg[0], 90-wg[1])
    cc = nXcf(lowpassFilter(wedge_g.apply(vf), radius, 0)[0], lowpassFilter(wedge_f.apply(tmp), radius, 0)[0])
    pos = peak(cc, peak_offset)
    pos, score = find_subpixel_peak_position(vol2npy(cc), pos)

    return (pos, ang, score)
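
Usage mirrors xu_align_vol but with a fixed spherical-harmonics bandwidth; a sketch with assumed file names and parameters:

from pytom_volume import read

vf = read('particle.em')    # hypothetical path
vg = read('reference.em')   # hypothetical path
pos, ang, score = bart_align_vol(vf, [30, 30], vg, [30, 30], b=16,
                                 radius=vf.sizeX() // 4, peak_offset=10)
print('shift:', pos, 'rotation (ZXZ):', ang, 'score:', score)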
Example #17
    def start(self, job, verbose=False):
        """
        start FRM job
        @param job: FRM job
        @type job: L{FRMJob}
        @param verbose: print stuff (default: False)
        @type verbose: C{bool}
        """
        if self.mpi_id == 0:
            from pytom.basic.structures import ParticleList, Reference
            from pytom.basic.resolution import bandToAngstrom
            from pytom.basic.filter import lowpassFilter
            from math import ceil
            self.destination = job.destination
            new_reference = job.reference
            old_freq = job.freq
            new_freq = job.freq
            #print(f"reference  = {job.reference}")
            #print(f"particlelist = {job.particleList}")
            print(f"iterations = {job.max_iter:d}")
            print(f"binning    = {job.binning:d}")
            #print(f"mask       = {job.mask}")
            #print(f"peak_offset= {job.peak_offset:f2.1}")
            print(f"destination= {job.destination:s}")
            print(f"freq cut   = {job.freq:d}")
            # main node
            for i in range(job.max_iter):
                if verbose:
                    print(self.node_name + ': starting iteration %d ...' % i)

                # construct a new job by updating the reference and the frequency
                new_job = FRMJob(job.particleList,
                                 new_reference,
                                 job.mask,
                                 job.peak_offset,
                                 job.sampleInformation,
                                 job.bw_range,
                                 new_freq,
                                 job.destination,
                                 job.max_iter - i,
                                 job.r_score,
                                 job.weighting,
                                 constraint=job.constraint,
                                 binning=job.binning)

                # distribute it
                self.distribute_job(new_job, verbose)

                # get the result back
                all_even_pre = None
                all_even_wedge = None
                all_odd_pre = None
                all_odd_wedge = None
                pl = ParticleList()
                for j in range(self.num_workers):
                    result = self.get_result()
                    pl += result.pl
                    even_pre, even_wedge, odd_pre, odd_wedge = self.retrieve_res_vols(
                        result.name)

                    if all_even_pre:
                        all_even_pre += even_pre
                        all_even_wedge += even_wedge
                        all_odd_pre += odd_pre
                        all_odd_wedge += odd_wedge
                    else:
                        all_even_pre = even_pre
                        all_even_wedge = even_wedge
                        all_odd_pre = odd_pre
                        all_odd_wedge = odd_wedge

                # write the new particle list to the disk
                pl.toXMLFile(
                    os.path.join(job.destination,
                                 'aligned_pl_iter' + str(i) + '.xml'))

                # create half sets
                even = self.create_average(all_even_pre, all_even_wedge)
                odd = self.create_average(all_odd_pre, all_odd_wedge)

                # apply symmetries before determining the resolution
                even = job.symmetries.applyToParticle(even)
                odd = job.symmetries.applyToParticle(odd)
                resNyquist, resolutionBand, numberBands = self.determine_resolution(
                    even, odd, job.fsc_criterion, None, job.mask, verbose)

                # write the half set to the disk
                even.write(
                    os.path.join(self.destination,
                                 'fsc_' + str(i) + '_even.em'))
                odd.write(
                    os.path.join(self.destination,
                                 'fsc_' + str(i) + '_odd.em'))

                # determine the resolution
                if verbose:
                    print(self.node_name + ': determining the resolution ...')
                current_resolution = bandToAngstrom(
                    resolutionBand, job.sampleInformation.getPixelSize(),
                    numberBands, 1)
                if verbose:
                    print(
                        self.node_name + ': current resolution ' +
                        str(current_resolution), resNyquist)

                # create new average
                all_even_pre += all_odd_pre
                all_even_wedge += all_odd_wedge
                average = self.create_average(all_even_pre, all_even_wedge)

                # apply symmetries
                average = job.symmetries.applyToParticle(average)

                # filter average to resolution and update the new reference
                average_name = os.path.join(self.destination,
                                            'average_iter' + str(i) + '.em')
                #                pl.average(average_name, True)
                average.write(average_name)
                new_reference = Reference(average_name)

                # low pass filter the reference and write it to the disk
                filtered = lowpassFilter(average, ceil(resolutionBand),
                                         ceil(resolutionBand) / 10)
                filtered_ref_name = os.path.join(
                    self.destination, 'average_iter' + str(i) + '_res' +
                    str(current_resolution) + '.em')
                filtered[0].write(filtered_ref_name)

                # if the position/orientation is not improved, break it

                # change the frequency to a higher value
                new_freq = int(ceil(resolutionBand)) + 1
                if new_freq <= old_freq:
                    if job.adaptive_res is not False:  # two different strategies
                        print(
                            self.node_name +
                            ': Determined resolution got worse. Extending the aligned frequency range by %f percent!'
                            % job.adaptive_res)
                        new_freq = int((1 + job.adaptive_res) * new_freq)
                        old_freq = new_freq
                    else:  # always increase by 1
                        print(
                            self.node_name +
                            ': Determined resolution got worse. Increasing the aligned frequency by 1!'
                        )
                        new_freq = old_freq + 1
                        old_freq = new_freq
                else:
                    old_freq = new_freq
                if new_freq >= numberBands:
                    print(self.node_name +
                          ': New frequency too high. Terminate!')
                    break

                if verbose:
                    print(self.node_name + ': change the frequency to ' +
                          str(new_freq))

            # send end signal to other nodes and terminate itself
            self.end(verbose)
        else:
            # other nodes
            self.run(verbose)
Example #18
def averageParallel(particleList,
                    averageName,
                    showProgressBar=False,
                    verbose=False,
                    createInfoVolumes=False,
                    weighting=None,
                    norm=False,
                    setParticleNodesRatio=3,
                    cores=6):
    """
    compute average in parallel worker processes
    @param particleList: The particles
    @param averageName: Filename of new average
    @param showProgressBar: show progress bar (default: False)
    @param verbose: Prints particle information. Disabled by default.
    @param createInfoVolumes: Create info data (wedge sum, inverted density) too? False by default.
    @param weighting: weight particles by exp CC in average
    @type weighting: bool
    @param norm: normalise particles before averaging (default: False)
    @param setParticleNodesRatio: minimum number of particles per node
    @type setParticleNodesRatio: L{int}
    @param cores: number of worker processes to spawn
    @return: A new Reference object
    @rtype: L{pytom.basic.structures.Reference}
    @author: FF

    """
    from pytom_volume import read, complexRealMult
    from pytom.basic.fourier import fft, ifft
    from pytom.basic.filter import lowpassFilter
    from pytom.basic.structures import Reference
    from pytom.alignment.alignmentFunctions import invert_WedgeSum

    import os

    splitLists = splitParticleList(particleList,
                                   setParticleNodesRatio=setParticleNodesRatio,
                                   numberOfNodes=cores)
    splitFactor = len(splitLists)

    avgNameList = []
    preList = []
    wedgeList = []
    for ii in range(splitFactor):
        avgName = averageName + '_dist' + str(ii) + '.em'
        avgNameList.append(avgName)
        preList.append(averageName + '_dist' + str(ii) + '-PreWedge.em')
        wedgeList.append(averageName + '_dist' + str(ii) +
                         '-WedgeSumUnscaled.em')

    from multiprocessing import Process

    procs = []
    for i in range(splitFactor):
        proc = Process(target=average,
                       args=(splitLists[i], avgNameList[i], showProgressBar,
                             verbose, createInfoVolumes, weighting, norm))
        procs.append(proc)
        proc.start()

    # wait for all worker processes to finish
    for proc in procs:
        proc.join()


    # collect results from files
    unweiAv = read(preList[0])
    wedgeSum = read(wedgeList[0])
    os.remove(wedgeList[0])
    os.remove(avgNameList[0])
    os.remove(preList[0])
    for ii in range(1, splitFactor):
        unweiAv += read(preList[ii])
        os.remove(preList[ii])
        wedgeSum += read(wedgeList[ii])
        os.remove(wedgeList[ii])
        os.remove(avgNameList[ii])

    if createInfoVolumes:
        unweiAv.write(averageName[:len(averageName) - 3] + '-PreWedge.em')
        wedgeSum.write(averageName[:len(averageName) - 3] +
                       '-WedgeSumUnscaled.em')

    # convolute unweighted average with inverse of wedge sum
    invert_WedgeSum(invol=wedgeSum,
                    r_max=unweiAv.sizeX() / 2 - 2.,
                    lowlimit=.05 * len(particleList),
                    lowval=.05 * len(particleList))
    fResult = fft(unweiAv)
    r = complexRealMult(fResult, wedgeSum)
    unweiAv = ifft(r)
    unweiAv.shiftscale(
        0.0, 1 / float(unweiAv.sizeX() * unweiAv.sizeY() * unweiAv.sizeZ()))
    # low pass filter to remove artifacts at fringes
    unweiAv = lowpassFilter(volume=unweiAv,
                            band=unweiAv.sizeX() / 2 - 2,
                            smooth=(unweiAv.sizeX() / 2 - 1) / 10.)[0]

    unweiAv.write(averageName)

    return Reference(averageName, particleList)
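
A usage sketch for averageParallel (the XML file name is a placeholder; fromXMLFile is the standard ParticleList loader that pairs with the toXMLFile calls in the examples above):

from pytom.basic.structures import ParticleList

pl = ParticleList()
pl.fromXMLFile('aligned_pl_iter0.xml')   # hypothetical particle list
ref = averageParallel(pl, 'average.em', createInfoVolumes=True, cores=8)
print('new reference written to average.em')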
Example #19
def sag_fine_grained_alignment(vf,
                               wf,
                               vg,
                               wg,
                               max_freq,
                               ang=[0, 0, 0],
                               loc=[0, 0, 0],
                               mask=None,
                               B=1,
                               alpha=None,
                               maxIter=None,
                               lambda1=None):
    """SAG-based fine-grained alignment between experimental data and reference data.
    Parameters
    vf: Experimental data
        pytom_volume.vol

    wf: Mask of vf in Fourier space.
        pytom.basic.structures.Wedge. If none, no missing wedge.

    vg: Reference data 
        pytom_volume.vol

    wg: Mask of vg in Fourier space.
        pytom.basic.structures.Wedge. If none, no missing wedge.

    max_freq: Maximal frequency involved in calculation.
              Integer.

    ang: Initial rotation angle

    loc: Initial translation value
    
    mask: Mask volume in real space.
          pytom_volume.vol

    B: Batch number 

    alpha: Step size

    maxIter: The max iteration number

    lambda1: Regularization parameter

    Returns
    -------
    (Optimal rotation angle and translation value.
    (best_translation, best_rotation, correlation_score)
    """
    from pytom_volume import vol, rotateSpline, peak, sum, power
    from pytom.basic.transformations import shift
    from pytom.basic.filter import lowpassFilter
    from pytom.basic.structures import Mask, SingleTiltWedge, Rotation
    from pytom_volume import initSphere
    from pytom_numpy import vol2npy

    import math
    import random
    import numpy as np

    if vf.sizeX() != vg.sizeX() or vf.sizeY() != vg.sizeY() or vf.sizeZ(
    ) != vg.sizeZ():
        raise RuntimeError('Two volumes must have the same size!')

    if wf is None:
        wf = SingleTiltWedge(0)
    else:
        vf = wf.apply(vf)
    if wg is None:
        wg = SingleTiltWedge(0)
    else:
        vg = wg.apply(vg)

    if mask is None:
        m = vol(vf.sizeX(), vf.sizeY(), vf.sizeZ())
        initSphere(m,
                   vf.sizeX() / 2, 0, 0,
                   vf.sizeX() / 2,
                   vf.sizeY() / 2,
                   vf.sizeZ() / 2)
        mask = m

    old_value = -1
    max_pos = [-1, -1, -1]
    max_ang = None
    max_value = -1.0

    ang_epsilon = np.ones(3) * (math.pi * (1.0 / 180))
    loc_epsilon = np.ones(3) * 1.0

    n = vf.sizeX()
    vf0_n = vol2npy(vf)

    if maxIter is None:
        maxIter = n // 2
    iVals = np.int32(np.ceil((n - B) * np.random.random(maxIter)))

    if lambda1 is None:
        lambda1 = 1 / n
    eps = np.finfo(np.float32).eps
    Lmax = 0.25 * np.max(np.sum(vf0_n**2)) + lambda1
    if alpha is None:
        alpha = 1 / Lmax

    d = np.zeros(6)
    g = np.zeros([n, 6])
    covered = np.int32(np.zeros(n))
    nCovered = 0
    grad = np.zeros(6)
    deri = np.zeros(6)
    # current parameter vector (3 rotation angles + 3 translations); updated in every iteration
    opt_beta = np.concatenate((np.asarray(ang, dtype=np.float64),
                               np.asarray(loc, dtype=np.float64)))
    vg2 = vol(vf.sizeX(), vf.sizeY(), vf.sizeZ())
    mask2 = vol(mask.sizeX(), mask.sizeY(), mask.sizeZ())

    for i in range(n):
        if (covered[i] != 0):
            nCovered += 1

    for k in range(maxIter):
        i = iVals[k] - 1
        if k == 0:
            rotateSpline(vg, vg2, ang[0], ang[1], ang[2])
            rotateSpline(mask, mask2, ang[0], ang[1], ang[2])

            vg2 = wf.apply(vg2)
            vg2 = lowpassFilter(vg2, max_freq, max_freq / 10.)[0]
            vg2_s = transform_single_vol(vg2, mask2)

            vf2 = shift(vf,
                        -loc[0] + vf.sizeX() / 2,
                        -loc[1] + vf.sizeY() / 2,
                        -loc[2] + vf.sizeZ() / 2,
                        imethod='spline')
            vf2 = lowpassFilter(vf2, max_freq, max_freq / 10.)[0]
            vf2 = wg.apply(vf2, Rotation(ang))
            vf2_s = transform_single_vol(vf2, mask2)

            i = 0
            ri = np.sum(
                vol2npy(vf2_s)[i:i + B, :, :] - vol2npy(vg2_s)[i:i + B, :, :])

        vg2_p = vol(n, n, n)
        vg2_m = vol(n, n, n)
        mask2_p = vol(n, n, n)
        mask2_m = vol(n, n, n)
        for dim_i in range(3):
            if abs(ang_epsilon[dim_i]) > eps:
                ang_epsilon_t = np.zeros(3)
                ang_epsilon_t[dim_i] = ang_epsilon[dim_i]

                angle = ang + ang_epsilon_t
                rotateSpline(vg, vg2_p, angle[0], angle[1], angle[2])
                rotateSpline(mask, mask2_p, angle[0], angle[1], angle[2])
                vg2_p = wf.apply(vg2_p)
                vg2_p = lowpassFilter(vg2_p, max_freq, max_freq / 10.)[0]
                vg2_pf = transform_single_vol(vg2_p, mask2_p)

                angle = ang - ang_epsilon_t
                rotateSpline(vg, vg2_m, angle[0], angle[1], angle[2])
                rotateSpline(mask, mask2_m, angle[0], angle[1], angle[2])
                vg2_m = wf.apply(vg2_m)
                vg2_m = lowpassFilter(vg2_m, max_freq, max_freq / 10.)[0]
                vg2_mf = transform_single_vol(vg2_m, mask2_m)

                vg2_ang_deri = (vg2_pf - vg2_mf) / (2 * ang_epsilon[dim_i])
                vg2_ang_deri_n = vol2npy(vg2_ang_deri)
                deri[dim_i] = np.sum(vg2_ang_deri_n[i:i + B, :, :])

                del vg2_pf, vg2_mf, vg2_ang_deri, vg2_ang_deri_n, angle
        del vg2_p, vg2_m, mask2_p, mask2_m

        vf1_ps = vol(n, n, n)
        vf1_ms = vol(n, n, n)
        ang_f = [ang[0], ang[1], ang[2]]
        for dim_i in range(3):
            if abs(loc_epsilon[dim_i]) > eps:
                loc_epsilon_t = np.zeros(3)
                loc_epsilon_t[dim_i] = loc_epsilon[dim_i]

                vf1_ps.copyVolume(vf)
                vf1_ms.copyVolume(vf)

                loc_r = loc + loc_epsilon_t
                vf1_tp = shift(vf1_ps, -loc_r[0] + vf1_ps.sizeX() / 2,
                               -loc_r[1] + vf1_ps.sizeY() / 2,
                               -loc_r[2] + vf1_ps.sizeZ() / 2, 'spline')
                vf1_tp = lowpassFilter(vf1_tp, max_freq, max_freq / 10.)[0]
                vf1_tp = wg.apply(vf1_tp, Rotation(ang_f))

                loc_r = loc - loc_epsilon_t
                vf1_tm = shift(vf1_ms, -loc_r[0] + vf1_ms.sizeX() / 2,
                               -loc_r[1] + vf1_ms.sizeY() / 2,
                               -loc_r[2] + vf1_ms.sizeZ() / 2, 'spline')
                vf1_tm = lowpassFilter(vf1_tm, max_freq, max_freq / 10.)[0]
                vf1_tm = wg.apply(vf1_tm, Rotation(ang_f))

                vf1_tpf = transform_single_vol(vf1_tp, mask2)
                vf1_tmf = transform_single_vol(vf1_tm, mask2)
                vf1_loc_deri = (vf1_tpf - vf1_tmf) / (2 * loc_epsilon[dim_i])
                vf1_loc_deri_n = vol2npy(vf1_loc_deri)
                deri[dim_i + 3] = np.sum(vf1_loc_deri_n[i:i + B, :, :])

                del vf1_tp, vf1_tm, vf1_tpf, vf1_tmf, vf1_loc_deri, vf1_loc_deri_n
        del vf1_ps, vf1_ms, ang_f

        for dim_i in range(6):
            grad[dim_i] = ri * deri[dim_i] / B

        for dim_i in range(6):
            d[dim_i] += grad[dim_i] - np.sum(g[i:i + B, dim_i])

        for dim_i in range(6):
            g[i:i + B, dim_i] = grad[dim_i]

        for j0 in range(i, i + B):
            if (covered[j0] == 0):
                covered[j0] = 1
                nCovered += 1

        for dim_i in range(6):
            opt_beta[dim_i] -= alpha * d[dim_i] / nCovered
        ang = opt_beta[:3]
        loc = opt_beta[3:]

        rotateSpline(vg, vg2, ang[0], ang[1], ang[2])
        rotateSpline(mask, mask2, ang[0], ang[1], ang[2])

        vg2 = wf.apply(vg2)
        vg2 = lowpassFilter(vg2, max_freq, max_freq / 10.)[0]
        vg2_s = transform_single_vol(vg2, mask2)

        vf2 = shift(vf,
                    -loc[0] + vf.sizeX() / 2,
                    -loc[1] + vf.sizeY() / 2,
                    -loc[2] + vf.sizeZ() / 2,
                    imethod='spline')
        vf2 = lowpassFilter(vf2, max_freq, max_freq / 10.)[0]
        vf2 = wg.apply(vf2, Rotation(ang))
        vf2_s = transform_single_vol(vf2, mask2)
        ri = np.sum(
            vol2npy(vf2_s)[i:i + B, :, :] - vol2npy(vg2_s)[i:i + B, :, :])
        from pytom.basic.correlation import nxcc
        val = nxcc(vf2_s, vg2_s, mask)

        if val > max_value:
            max_pos = loc
            max_ang = ang
            max_value = val
        if abs(max_value - old_value) <= eps:
            break
        else:
            old_value = max_value
        del vg2_s, vf2, vf2_s

    del d, g, grad, deri

    return max_pos, max_ang, max_value
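
A usage sketch for sag_fine_grained_alignment, refining a rough pose such as one returned by the FRM alignment above (paths, wedge angles and hyper-parameters are assumptions):

from pytom_volume import read
from pytom.basic.structures import SingleTiltWedge

vf = read('particle.em')                  # hypothetical path
vg = read('reference.em')                 # hypothetical path
wf = SingleTiltWedge(30)                  # assumed missing-wedge angle
wg = SingleTiltWedge(30)
loc, ang, score = sag_fine_grained_alignment(vf, wf, vg, wg, max_freq=16,
                                             ang=[10.0, 0.0, 0.0], loc=[1.0, 0.0, 0.0],
                                             B=1, alpha=None, maxIter=50, lambda1=None)
print('refined shift:', loc, 'refined rotation:', ang, 'nxcc:', score)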
Example #20
    def start(self, job, verbose=False):
        if self.mpi_id == 0:
            from pytom.basic.structures import ParticleList, Reference
            from pytom.basic.resolution import bandToAngstrom
            from pytom.basic.filter import lowpassFilter
            from math import ceil
            from pytom.basic.fourier import convolute
            from pytom_volume import vol, power, read
            
            new_reference = job.reference
            old_freq = job.freq
            new_freq = job.freq
            # main node
            for i in range(job.max_iter):
                if verbose:
                    print(self.node_name + ': starting iteration %d ...' % i)
                
                # construct a new job by updating the reference and the frequency
                # here the job.particleList is actually ParticleListSet
                new_job = MultiDefocusJob(job.particleList, new_reference, job.mask, job.peak_offset, job.sampleInformation, job.bw_range, new_freq, job.destination, job.max_iter-i, job.r_score, job.weighting, job.bfactor)
                
                # distribute it
                num_all_particles = self.distribute_job(new_job, verbose)
                
                # calculate the denominator
                sum_ctf_squared = None
                for pair in job.particleList.pairs:
                    if sum_ctf_squared is None:
                        sum_ctf_squared = pair.get_ctf_sqr_vol() * pair.snr
                    else:
                        sum_ctf_squared += pair.get_ctf_sqr_vol() * pair.snr
                
                # get the result back
                all_even_pre = None
                all_even_wedge = None
                all_odd_pre = None
                all_odd_wedge = None
                pls = []
                for j in range(len(job.particleList.pairs)):
                    pls.append(ParticleList())
                
                for j in range(self.num_workers):
                    result = self.get_result()
                    
                    pair_id = self.assignment[result.worker_id]
                    pair = job.particleList.pairs[pair_id]
                    
                    pl = pls[pair_id]
                    pl += result.pl
                    even_pre, even_wedge, odd_pre, odd_wedge = self.retrieve_res_vols(result.name)
                    
                    if all_even_pre:
                        all_even_pre += even_pre * pair.snr
                        all_even_wedge += even_wedge
                        all_odd_pre += odd_pre * pair.snr
                        all_odd_wedge += odd_wedge
                    else:
                        all_even_pre = even_pre * pair.snr
                        all_even_wedge = even_wedge
                        all_odd_pre = odd_pre * pair.snr
                        all_odd_wedge = odd_wedge
                
                # write the new particle list to the disk
                for j in range(len(job.particleList.pairs)):
                    pls[j].toXMLFile('aligned_pl'+str(j)+'_iter'+str(i)+'.xml')
                
                # correct for the number of particles in wiener filter
                sum_ctf_squared = sum_ctf_squared/num_all_particles
#                all_even_pre = all_even_pre/(num_all_particles/2)
#                all_odd_pre = all_odd_pre/(num_all_particles/2)
                
                # bfactor
                if job.bfactor and job.bfactor != 'None':
#                    bfactor_kernel = create_bfactor_vol(sum_ctf_squared.sizeX(), job.sampleInformation.getPixelSize(), job.bfactor)
                    bfactor_kernel = read(job.bfactor)
                    bfactor_kernel_sqr = vol(bfactor_kernel)
                    power(bfactor_kernel_sqr, 2)
                    all_even_pre = convolute(all_even_pre, bfactor_kernel, True)
                    all_odd_pre = convolute(all_odd_pre, bfactor_kernel, True)
                    sum_ctf_squared = sum_ctf_squared*bfactor_kernel_sqr
                
                # determine the resolution
                if verbose:
                    print(self.node_name + ': determining the resolution ...')
                even = self.create_average(all_even_pre, sum_ctf_squared, all_even_wedge) # assume that the CTF sum is the same for the even and odd
                odd = self.create_average(all_odd_pre, sum_ctf_squared, all_odd_wedge)
                
                # apply symmetries before determining the resolution
                even = job.symmetries.applyToParticle(even)
                odd = job.symmetries.applyToParticle(odd)
                resNyquist, resolutionBand, numberBands = self.determine_resolution(even, odd, job.fsc_criterion, None, job.mask, verbose)
                
                # write the half set to the disk
                even.write('fsc_'+str(i)+'_even.em')
                odd.write('fsc_'+str(i)+'_odd.em')
                
                current_resolution = bandToAngstrom(resolutionBand, job.sampleInformation.getPixelSize(), numberBands, 1)
                if verbose:
                    print(self.node_name + ': current resolution ' + str(current_resolution), resNyquist)
                
                # create new average
                all_even_pre += all_odd_pre
                all_even_wedge += all_odd_wedge
#                all_even_pre = all_even_pre/2 # correct for the number of particles in wiener filter
                average = self.create_average(all_even_pre, sum_ctf_squared, all_even_wedge)
                
                # apply symmetries
                average = job.symmetries.applyToParticle(average)
                
                # filter average to resolution and update the new reference
                average_name = 'average_iter'+str(i)+'.em'
                average.write(average_name)
                new_reference = Reference(average_name)
                
                # low pass filter the reference and write it to the disk
                filtered = lowpassFilter(average, ceil(resolutionBand), ceil(resolutionBand)/10)
                filtered_ref_name = 'average_iter'+str(i)+'_res'+str(current_resolution)+'.em'
                filtered[0].write(filtered_ref_name)
                
                # change the frequency to a higher value
                new_freq = int(ceil(resolutionBand))+1
                if new_freq <= old_freq:
                    if job.adaptive_res is not False: # two different strategies
                        print(self.node_name + ': Determined resolution got worse. Extending the aligned frequency range by %f percent!' % job.adaptive_res)
                        new_freq = int((1+job.adaptive_res)*old_freq)
                    else: # always increase by 1
                        print(self.node_name + ': Determined resolution got worse. Increasing the aligned frequency by 1!')
                        new_freq = old_freq+1
                        old_freq = new_freq
                else:
                    old_freq = new_freq
                if new_freq >= numberBands:
                    print(self.node_name + ': Determined frequency too high. Terminate!')
                    break
                
                if verbose:
                    print(self.node_name + ': change the frequency to ' + str(new_freq))
            
            # send end signal to other nodes and terminate itself
            self.end(verbose)
        else:
            # other nodes
            self.run(verbose)