def power(volume, exponent, inplace=False):
    """
    Raise every voxel of a volume to the given exponent.

    @param volume: The volume
    @type volume: L{pytom_volume.vol}
    @param exponent: The exponent
    @type exponent: L{float}
    @param inplace: Perform power inplace? Default is False
    @type inplace: L{bool}
    @return: the powered volume (None when done in place)
    @rtype: L{pytom_volume.vol}
    """
    if inplace:
        from pytom_volume import power
        power(volume, exponent)
        return

    # work on a fresh copy so the input volume stays untouched
    from pytom_volume import vol, power
    out = vol(volume.sizeX(), volume.sizeY(), volume.sizeZ())
    out.copyVolume(volume)
    power(out, exponent)
    return out
def bandCF(volume, reference, band=(0, 100)):
    """
    bandCF: band-limited correlation of two volumes.

    @param volume: The volume
    @param reference: The reference
    @param band: [a,b] - specify the lower and upper end of band. [0,1] if not set.
    @return: First parameter - The correlation of the two volumes in the specified ring.
             Second parameter - The bandpass filter used.
    @rtype: List - [L{pytom_volume.vol},L{pytom_freqweight.weight}]
    @author: Thomas Hrabe
    @todo: does not work yet -> test is disabled
    """
    # NOTE: removed a dead `if gpu: import cupy as xp else: import numpy as xp`
    # block — `xp` was never used and `gpu` is not defined in this scope, so it
    # could raise NameError. Also removed the unused `nXcf` import.
    import pytom_volume
    from math import sqrt
    from pytom.basic import fourier
    from pytom.basic.filter import bandpassFilter

    # band-limit both volumes with the same filter (vf[1])
    vf = bandpassFilter(volume, band[0], band[1], fourierOnly=True)
    rf = bandpassFilter(reference, band[0], band[1], vf[1], fourierOnly=True)

    # normalisation: power of each volume within the band
    v = pytom_volume.reducedToFull(vf[0])
    r = pytom_volume.reducedToFull(rf[0])
    absV = pytom_volume.abs(v)
    absR = pytom_volume.abs(r)
    pytom_volume.power(absV, 2)
    pytom_volume.power(absR, 2)
    sumV = abs(pytom_volume.sum(absV))
    sumR = abs(pytom_volume.sum(absR))
    # guard against division by zero for empty bands
    if sumV == 0:
        sumV = 1
    if sumR == 0:
        sumR = 1

    # cross-correlation: conj(R) * V, back-transformed to real space
    pytom_volume.conjugate(rf[0])
    fresult = vf[0] * rf[0]
    result = fourier.ifft(fresult)
    fourier.iftshift(result)
    result.shiftscale(0, 1 / float(sqrt(sumV * sumR)))

    return [result, vf[1]]
def std(volume, mask, meanVolume=0, fMask=0, numberOfMaskVoxels=-1):
    """
    Determine the standard deviation of a volume under a moving mask.
    (suggestion frido: rename to std_moving_mask)

    Each output voxel holds the std of the surrounding voxels that are
    covered by the mask centred at that position.

    @param volume: The volume of interest
    @param mask: A mask under which the std is determined
    @param meanVolume: Optional - the mean volume determined by mean
        (mean must not be recalculated)
    @param fMask: Optional - the Fourier transformed mask (fft must not be repeated)
    @param numberOfMaskVoxels: Optional - the number of voxels != 0
    """
    from pytom.basic.fourier import fft, ifft, iftshift
    from pytom_volume import power
    import pytom_volume

    # fill in whatever precomputed inputs were not supplied
    if fMask.__class__ != pytom_volume.vol_comp:
        fMask = fft(mask)
    if meanVolume.__class__ != pytom_volume.vol:
        meanVolume = mean(volume, mask)
    if numberOfMaskVoxels < 0:
        numberOfMaskVoxels = pytom_volume.numberSetVoxels(mask)

    # E[X^2] under the mask via multiplication in Fourier space
    squared = volume * volume
    fSquared = fft(squared) * fMask
    varianceVolume = iftshift(ifft(fSquared))
    varianceVolume.shiftscale(
        0, 1.0 / (numberOfMaskVoxels * varianceVolume.numelem()))

    # var = E[X^2] - E[X]^2 ; std = sqrt(var)
    # NOTE: power() squares meanVolume in place, as in the original code
    power(meanVolume, 2)
    varianceVolume = varianceVolume - meanVolume
    power(varianceVolume, 0.5)

    return varianceVolume
def stdUnderMask(volume, mask, p, meanV):
    """
    stdUnderMask: calculate the std volume under the given mask
    @param volume: input volume
    @type volume: L{pytom_volume.vol}
    @param mask: mask
    @type mask: L{pytom_volume.vol}
    @param p: non zero value numbers in the mask
    @type p: L{int}
    @param meanV: mean volume under mask, which should already have been calculated
    @type meanV: L{pytom_volume.vol}
    @return: the calculated std volume under mask
    @rtype: L{pytom_volume.vol}
    @author: Yuxiang Chen
    """
    # removed unused imports of fft/ifft/iftshift — the Fourier work happens
    # inside meanUnderMask
    from pytom_volume import vol, power, limit

    # E[X^2]: square a copy so the input volume is left untouched
    copyV = vol(volume.sizeX(), volume.sizeY(), volume.sizeZ())
    copyV.copyVolume(volume)
    power(copyV, 2)

    # E[X]^2: square a copy of the precomputed local mean
    copyMean = vol(meanV.sizeX(), meanV.sizeY(), meanV.sizeZ())
    copyMean.copyVolume(meanV)
    power(copyMean, 2)

    # var = E[X^2] - E[X]^2
    result = meanUnderMask(copyV, mask, p) - copyMean

    # set values close to 0 (or negative) to 1 so the sqrt below is safe
    limit(result, 1e-09, 1, 0, 0, True, False)
    power(result, 0.5)

    return result
def stdValueUnderMask(volume, mask, meanValue, p=None):
    """
    stdValueUnderMask: Determines the std value under a mask

    @param volume: input volume
    @type volume: L{pytom_volume.vol}
    @param mask: mask; may be None, then the whole volume is used
    @type mask: L{pytom_volume.vol}
    @param meanValue: mean value under the mask
    @param p: non zero value numbers in the mask
    @type p: L{float} or L{int}
    @return: A value
    @rtype: L{float}
    @change: support None as mask, FF 08.07.2014
    @raise ValueError: if the computed variance is negative
    """
    from pytom_volume import sum
    from pytom_volume import vol, power

    assert volume.__class__ == vol

    if mask:
        if not p:
            p = sum(mask)
    else:
        # no mask: every voxel counts
        p = volume.sizeX() * volume.sizeY() * volume.sizeZ()

    squareM = meanValue**2

    # E[X^2] under the mask
    squareV = vol(volume.sizeX(), volume.sizeY(), volume.sizeZ())
    squareV.copyVolume(volume)
    power(squareV, 2)

    res = meanValueUnderMask(squareV, mask, p) - squareM

    # BUGFIX 1: in Python 3, a negative float raised to 0.5 silently yields a
    # complex number instead of raising ValueError, so the old try/except
    # never fired; check explicitly.
    # BUGFIX 2: the error message contained '%.6f' but never interpolated res.
    if res < 0:
        print("Res = %.6f < 0 => standard deviation determination fails :(" % res)
        print(" something went terribly wrong and program has to stop")
        raise ValueError('Program stopped in stdValueUnderMask')

    return res**0.5
def calculate_difference_map(v1, band1, v2, band2, mask=None, focus_mask=None, align=True, sigma=None, threshold=0.4):
    """mask is for alignment, while focus_mask is for difference map.

    Lowpass-filters both volumes, optionally aligns v2 onto v1 (v1 is the
    reference), normalises them, and returns a thresholded, binarised STD
    (difference) map in both the aligned frame and the original v2 frame.

    @param v1: first volume
    @param band1: lowpass band for v1 (voxel frequency)
    @param v2: second volume
    @param band2: lowpass band for v2
    @param mask: optional alignment mask passed to frm_align
    @param focus_mask: optional mask restricting the difference map region
    @param align: align v2 to v1 before comparison (default True)
    @param sigma: optional density cutoff; negative keeps density below sigma,
        positive keeps density above sigma, None/0 keeps everything
    @param threshold: fraction between mean and max of the STD map used as cutoff
    @return: (std_map1, std_map2) — binary maps in aligned / original frame
    """
    from pytom_volume import vol, power, abs, limit, transformSpline, variance, mean, max, min
    from pytom.basic.normalise import mean0std1
    from pytom.basic.filter import lowpassFilter

    # do lowpass filtering first
    lv1 = lowpassFilter(v1, band1, band1 / 10.)[0]
    lv2 = lowpassFilter(v2, band2, band2 / 10.)[0]

    # do alignment of two volumes, if required. v1 is used as reference.
    if align:
        from sh_alignment.frm import frm_align
        # align only up to the lower of the two bands
        band = int(band1 if band1 < band2 else band2)
        pos, angle, score = frm_align(lv2, None, lv1, None, [4, 64], band,
                                      lv1.sizeX() // 4, mask)
        shift = [
            pos[0] - v1.sizeX() // 2, pos[1] - v1.sizeY() // 2,
            pos[2] - v1.sizeZ() // 2
        ]

        # transform v2 (inverse rotation then inverse shift)
        lvv2 = vol(lv2)
        transformSpline(lv2, lvv2, -angle[1], -angle[0], -angle[2],
                        lv2.sizeX() // 2, lv2.sizeY() // 2, lv2.sizeZ() // 2,
                        -shift[0], -shift[1], -shift[2], 0, 0, 0)
    else:
        lvv2 = lv2

    # do normalization
    mean0std1(lv1)
    mean0std1(lvv2)

    # only consider the density beyond certain sigma
    if sigma is None or sigma == 0:
        pass
    elif sigma < 0:  # negative density counts
        assert min(lv1) < sigma
        assert min(lvv2) < sigma
        limit(lv1, 0, 0, sigma, 0, False, True)
        limit(lvv2, 0, 0, sigma, 0, False, True)
    else:  # positive density counts
        assert max(lv1) > sigma
        assert max(lvv2) > sigma
        limit(lv1, sigma, 0, 0, 0, True, False)
        limit(lvv2, sigma, 0, 0, 0, True, False)

    # if we want to focus on specific area only
    if focus_mask:
        lv1 *= focus_mask
        lvv2 *= focus_mask

    # calculate the STD map: sqrt of summed squared deviations from the average
    avg = (lv1 + lvv2) / 2
    var1 = avg - lv1
    power(var1, 2)
    var2 = avg - lvv2
    power(var2, 2)

    std_map = var1 + var2
    power(std_map, 0.5)

    # calculate the coefficient of variance map
    # std_map = std_map/abs(avg)

    if focus_mask:
        std_map *= focus_mask

    # threshold the STD map at a fraction between its mean and its max
    mv = mean(std_map)
    threshold = mv + (max(std_map) - mv) * threshold
    limit(std_map, threshold, 0, threshold, 1, True, True)

    # do a lowpass filtering
    std_map1 = lowpassFilter(std_map, v1.sizeX() // 4, v1.sizeX() / 40.)[0]

    if align:
        # map the difference back into the original v2 frame
        std_map2 = vol(std_map)
        transformSpline(std_map1, std_map2, angle[0], angle[1], angle[2],
                        v1.sizeX() // 2, v1.sizeY() // 2, v1.sizeZ() // 2,
                        0, 0, 0, shift[0], shift[1], shift[2])
    else:
        std_map2 = std_map1

    # binarise both maps
    limit(std_map1, 0.5, 0, 1, 1, True, True)
    limit(std_map2, 0.5, 0, 1, 1, True, True)

    # return the respective difference maps
    return (std_map1, std_map2)
def weightedXCF(volume, reference, numberOfBands, wedgeAngle=-1):
    """
    weightedXCF: Determines the weighted correlation function for volume and reference
    @param volume: A volume
    @param reference: A reference
    @param numberOfBands: Number of bands
    @param wedgeAngle: An optional wedge angle
    @return: The weighted correlation function
    @rtype: L{pytom_volume.vol}
    @author: Thomas Hrabe
    @todo: does not work yet -> test is disabled
    """
    from pytom.basic.correlation import bandCF
    import pytom_volume
    from math import sqrt
    import pytom_freqweight

    result = pytom_volume.vol(volume.sizeX(), volume.sizeY(), volume.sizeZ())
    result.setAll(0)
    cc2 = pytom_volume.vol(volume.sizeX(), volume.sizeY(), volume.sizeZ())
    cc2.setAll(0)

    # wedge weighting: explicit wedge filter, or an all-pass volume otherwise
    if wedgeAngle >= 0:
        wedgeFilter = pytom_freqweight.weight(wedgeAngle, 0, volume.sizeX(),
                                              volume.sizeY(), volume.sizeZ())
        wedgeVolume = wedgeFilter.getWeightVolume(True)
    else:
        wedgeVolume = pytom_volume.vol(volume.sizeX(), volume.sizeY(),
                                       int(volume.sizeZ() / 2 + 1))
        wedgeVolume.setAll(1.0)

    w = sqrt(1 / float(volume.sizeX() * volume.sizeY() * volume.sizeZ()))
    numberVoxels = 0

    for i in range(numberOfBands):
        # notation according Steward/Grigorieff paper
        # NOTE(review): band limits are floats under Python 3 division —
        # confirm bandpassFilter accepts non-integer bands
        band = [i * volume.sizeX() / numberOfBands,
                (i + 1) * volume.sizeX() / numberOfBands]
        r = bandCF(volume, reference, band)
        cc = r[0]
        bandFilter = r[1]  # renamed from 'filter' to avoid shadowing the builtin

        # get bandVolume
        bandVolume = bandFilter.getWeightVolume(True)
        filterVolumeReduced = bandVolume * wedgeVolume
        filterVolume = pytom_volume.reducedToFull(filterVolumeReduced)

        # determine number of voxels != 0
        N = pytom_volume.numberSetVoxels(filterVolume)

        # add to number of total voxels
        numberVoxels = numberVoxels + N

        cc2.copyVolume(r[0])
        pytom_volume.power(cc2, 2)
        cc.shiftscale(w, 1)
        ccdiv = cc2 / cc
        pytom_volume.power(ccdiv, 3)
        # abs(ccdiv); as suggested by grigorief
        ccdiv.shiftscale(0, N)
        result = result + ccdiv

    result.shiftscale(0, 1 / float(numberVoxels))
    return result
def bandCC(volume, reference, band, verbose=False):
    """
    bandCC: Determines the normalised correlation coefficient within a band
    @param volume: The volume
    @type volume: L{pytom_volume.vol}
    @param reference: The reference
    @type reference: L{pytom_volume.vol}
    @param band: [a,b] - specify the lower and upper end of band.
    @param verbose: print the band limits (default False)
    @return: First parameter - The correlation of the two volumes in the specified band.
             Second parameter - The bandpass filter used.
    @rtype: List - [float,L{pytom_freqweight.weight}]
    @author: Thomas Hrabe
    """
    # removed unused import of xcf
    import pytom_volume
    from pytom.basic.filter import bandpassFilter
    from math import sqrt

    if verbose:
        print('lowest freq : ', band[0], ' highest freq', band[1])

    # band-limit both volumes with the same filter (vf[1])
    vf = bandpassFilter(volume, band[0], band[1], fourierOnly=True)
    rf = bandpassFilter(reference, band[0], band[1], vf[1], fourierOnly=True)

    # numerator: real part of sum of conj(R) * V over the band
    ccVolume = pytom_volume.vol_comp(rf[0].sizeX(), rf[0].sizeY(), rf[0].sizeZ())
    ccVolume.copyVolume(rf[0])
    pytom_volume.conj_mult(ccVolume, vf[0])
    cc = pytom_volume.sum(ccVolume).real

    # denominator: power of each volume within the band
    absV = pytom_volume.abs(vf[0])
    absR = pytom_volume.abs(rf[0])
    pytom_volume.power(absV, 2)
    pytom_volume.power(absR, 2)
    sumV = abs(pytom_volume.sum(absV))
    sumR = abs(pytom_volume.sum(absR))
    # guard against division by zero for empty bands
    if sumV == 0:
        sumV = 1
    if sumR == 0:
        sumR = 1

    cc = cc / sqrt(sumV * sumR)

    # numerical errors will be punished with nan
    if abs(cc) > 1.1:
        cc = float('nan')

    return [cc, vf[1]]
def volCTF(defocus, x_dim, y_dim, z_dim, pixel_size=None, voltage=None, Cs=None, sigma=None):
    """
    Create a 3D CTF volume.

    @param defocus: defocus in mu m
    @type defocus: L{float}
    @param x_dim: dimension of volume in x
    @param y_dim: dimension of volume in y
    @param z_dim: dimension of volume in z
    @param pixel_size: pixel size in nm (default 0.72)
    @param voltage: acceleration voltage in kV (default 300)
    @param Cs: spherical aberration in mm (default 2)
    @param sigma: unused; kept for interface compatibility
    @return: 3-dim volumes with x, y, z values in x,y,z dimension, respectively
    @rtype: L{pytom_volume.vol}
    """
    from pytom_volume import vol, power
    from pytom.tools.macros import frange
    import math

    # convert parameters to SI units, substituting defaults where not given
    # ('is None' instead of '== None'; original arithmetic kept bit-for-bit)
    if Cs is None:
        Cs = 2 * (10**(-3))
    else:
        Cs = Cs * (10**(-3))
    if voltage is None:
        voltage = 300000
    else:
        voltage = voltage * 1000
    if pixel_size is None:
        pixel_size = 0.72 * (10**(-9))
    else:
        pixel_size = pixel_size * (10**(-9))

    Dz = defocus * (10**(-6))
    Ny = 1 / (2 * pixel_size)  # Nyquist frequency

    # relativistically corrected electron wavelength
    voltagest = voltage * (1 + voltage / 1022000)
    lam = ((150.4 / voltagest)**0.5) * (10**(-10))

    # x_array = frange(-Ny, Ny-(Ny/x_dim), 2*Ny/x_dim)
    # y_array = frange(-Ny, Ny-(Ny/y_dim), 2*Ny/y_dim)
    # z_array = frange(-Ny, Ny-(Ny/z_dim), 2*Ny/z_dim)
    x_array = frange(-Ny, Ny, 2 * Ny / x_dim)
    y_array = frange(-Ny, Ny, 2 * Ny / y_dim)
    z_array = frange(-Ny, Ny, 2 * Ny / z_dim)

    # radial frequency r = sqrt(x^2 + y^2 + z^2)
    r, y, z = gridCTF(x_array, y_array, z_array)
    power(r, 2)
    power(y, 2)
    power(z, 2)
    r = r + y + z
    power(r, 0.5)

    r4 = vol(r)
    power(r4, 4)
    r2 = vol(r)
    power(r2, 2)

    # phase shift term; renamed from 'vol' to stop shadowing the vol class
    ctf = (r4 * Cs * (lam**3) - r2 * 2 * Dz * lam) * math.pi / 2

    # apply sin() voxel-wise (vol.__call__ with 4 args writes the value)
    for i in range(ctf.sizeX()):
        for j in range(ctf.sizeY()):
            for k in range(ctf.sizeZ()):
                ctf(math.sin(ctf(i, j, k)), i, j, k)

    return ctf
def run(self, verbose=False):
    """
    Worker loop: repeatedly fetch an alignment job, align every particle in
    it against the CTF-corrected references, average the result and send it
    back to the master node. Terminates when get_job() raises (end signal).

    @param verbose: print progress information
    """
    from sh_alignment.frm import frm_align
    from pytom.basic.structures import Shift, Rotation
    from pytom.tools.ProgressBar import FixedProgBar
    from pytom.basic.fourier import convolute
    from pytom_volume import read, power

    while True:
        # get the job
        try:
            job = self.get_job()
        except:  # deliberate: any failure here means an end/non-job message
            if verbose:
                print(self.node_name + ': end')
            break  # get some non-job message, break it

        if verbose:
            prog = FixedProgBar(0, len(job.particleList) - 1,
                                self.node_name + ':')
            i = 0

        # two references, one per half set (class 0 / class 1)
        ref = []
        ref.append(job.reference[0].getVolume())
        ref.append(job.reference[1].getVolume())

        # convolute with the approximation of the CTF
        if job.sum_ctf_sqr:
            ctf = read(job.sum_ctf_sqr)
            power(ctf, 0.5)  # the number of CTFs should not matter, should it?
            ref0 = ref[0]
            ref1 = ref[1]
            ref0 = convolute(ref0, ctf, True)
            ref1 = convolute(ref1, ctf, True)
            ref = [ref0, ref1]

        # precompute the b-factor restoration kernel once per job
        if job.bfactor and job.bfactor != 'None':
            # restore_kernel = create_bfactor_restore_vol(ref.sizeX(), job.sampleInformation.getPixelSize(), job.bfactor)
            from pytom_volume import vol, read
            bfactor_kernel = read(job.bfactor)
            unit = vol(bfactor_kernel)
            unit.setAll(1)
            restore_kernel = unit / bfactor_kernel

        # run the job
        for p in job.particleList:
            if verbose:
                prog.update(i)
                i += 1
            v = p.getVolume()

            # if weights is None: # create the weights according to the bfactor
            #     if job.bfactor == 0:
            #         weights = [1 for k in xrange(job.freq)]
            #     else:
            #         restore_fnc = create_bfactor_restore_fnc(ref.sizeX(), job.sampleInformation.getPixelSize(), job.bfactor)
            #         # cut out the corresponding part and square it to get the weights!
            #         weights = restore_fnc[1:job.freq+1]**2

            if job.bfactor and job.bfactor != 'None':
                v = convolute(v, restore_kernel, True)  # if bfactor is set, restore it

            # align against the reference of the particle's own half set
            pos, angle, score = frm_align(v, p.getWedge(),
                                          ref[int(p.getClass())], None,
                                          job.bw_range, job.freq,
                                          job.peak_offset,
                                          job.mask.getVolume())
            p.setShift(
                Shift([
                    pos[0] - v.sizeX() / 2, pos[1] - v.sizeY() / 2,
                    pos[2] - v.sizeZ() / 2
                ]))
            p.setRotation(Rotation(angle))
            p.setScore(FRMScore(score))

        # average the particle list
        name_prefix = self.node_name + '_' + str(job.max_iter)
        pair = ParticleListPair('', job.ctf_conv_pl, None, None)
        pair.set_phase_flip_pl(job.particleList)
        self.average_sub_pl(
            pair.get_ctf_conv_pl(),
            name_prefix)  # operate on the CTF convoluted projection!

        # send back the result
        self.send_result(
            FRMResult(name_prefix, job.particleList, self.mpi_id))

    pytom_mpi.finalise()
def start(self, job, verbose=False):
    """
    Master/worker entry point for gold-standard iterative FRM alignment.

    On the master (mpi_id == 0): randomly splits the particles into two half
    sets, then iterates — distribute the alignment job, collect per-pair
    averages, align the odd half onto the even half, determine the FSC
    resolution, write averages/references to disk and update the alignment
    frequency. Other nodes just enter the worker loop (self.run).

    @param job: the alignment job (particleList is a ParticleListSet)
    @param verbose: print progress information
    """
    if self.mpi_id == 0:
        from pytom.basic.structures import ParticleList, Reference
        from pytom.basic.resolution import bandToAngstrom
        from pytom.basic.filter import lowpassFilter
        from math import ceil
        from pytom.basic.fourier import convolute
        from pytom_volume import vol, power, read

        # randomly split the particle list into 2 half sets
        import numpy as np
        num_pairs = len(job.particleList.pairs)
        for i in range(num_pairs):
            # randomize the class labels to indicate the two half sets
            pl = job.particleList.pairs[i].get_phase_flip_pl()
            n = len(pl)
            labels = np.random.randint(2, size=(n, ))
            print(self.node_name + ': Number of 1st half set:',
                  n - np.sum(labels), 'Number of 2nd half set:',
                  np.sum(labels))
            for j in range(n):
                p = pl[j]
                p.setClass(labels[j])

        new_reference = job.reference
        old_freq = job.freq
        new_freq = job.freq
        # main node
        for i in range(job.max_iter):
            if verbose:
                print(self.node_name + ': starting iteration %d ...' % i)

            # construct a new job by updating the reference and the frequency
            # here the job.particleList is actually ParticleListSet
            new_job = MultiDefocusJob(job.particleList, new_reference,
                                      job.mask, job.peak_offset,
                                      job.sampleInformation, job.bw_range,
                                      new_freq, job.destination,
                                      job.max_iter - i, job.r_score,
                                      job.weighting, job.bfactor)

            # distribute it
            num_all_particles = self.distribute_job(new_job, verbose)

            # calculate the denominator (SNR-weighted sum of squared CTFs)
            sum_ctf_squared = None
            for pair in job.particleList.pairs:
                if sum_ctf_squared is None:
                    sum_ctf_squared = pair.get_ctf_sqr_vol() * pair.snr
                else:
                    sum_ctf_squared += pair.get_ctf_sqr_vol() * pair.snr

            # get the result back
            all_even_pre = None
            all_even_wedge = None
            all_odd_pre = None
            all_odd_wedge = None
            pls = []
            for j in range(len(job.particleList.pairs)):
                pls.append(ParticleList())

            for j in range(self.num_workers):
                result = self.get_result()

                pair_id = self.assignment[result.worker_id]
                pair = job.particleList.pairs[pair_id]

                pl = pls[pair_id]
                pl += result.pl
                even_pre, even_wedge, odd_pre, odd_wedge = self.retrieve_res_vols(
                    result.name)

                # accumulate SNR-weighted partial sums per half set
                if all_even_pre:
                    all_even_pre += even_pre * pair.snr
                    all_even_wedge += even_wedge
                    all_odd_pre += odd_pre * pair.snr
                    all_odd_wedge += odd_wedge
                else:
                    all_even_pre = even_pre * pair.snr
                    all_even_wedge = even_wedge
                    all_odd_pre = odd_pre * pair.snr
                    all_odd_wedge = odd_wedge

            # write the new particle list to the disk
            for j in range(len(job.particleList.pairs)):
                pls[j].toXMLFile('aligned_pl' + str(j) + '_iter' + str(i) +
                                 '.xml')

            # correct for the number of particles in wiener filter
            sum_ctf_squared = sum_ctf_squared / num_all_particles
            # all_even_pre = all_even_pre/(num_all_particles/2)
            # all_odd_pre = all_odd_pre/(num_all_particles/2)

            # bfactor
            if job.bfactor and job.bfactor != 'None':
                # bfactor_kernel = create_bfactor_vol(sum_ctf_squared.sizeX(), job.sampleInformation.getPixelSize(), job.bfactor)
                bfactor_kernel = read(job.bfactor)
                bfactor_kernel_sqr = vol(bfactor_kernel)
                power(bfactor_kernel_sqr, 2)
                all_even_pre = convolute(all_even_pre, bfactor_kernel, True)
                all_odd_pre = convolute(all_odd_pre, bfactor_kernel, True)
                sum_ctf_squared = sum_ctf_squared * bfactor_kernel_sqr

            # create averages of two sets
            if verbose:
                print(self.node_name + ': determining the resolution ...')
            even = self.create_average(
                all_even_pre, sum_ctf_squared, all_even_wedge
            )  # assume that the CTF sum is the same for the even and odd
            odd = self.create_average(all_odd_pre, sum_ctf_squared,
                                      all_odd_wedge)

            # determine the transformation between even and odd
            # here we assume the wedge from both sets are fully sampled
            from sh_alignment.frm import frm_align
            pos, angle, score = frm_align(odd, None, even, None, job.bw_range,
                                          new_freq, job.peak_offset)
            print(
                self.node_name +
                ': transform of even set to match the odd set - shift: ' +
                str(pos) + ' rotation: ' + str(angle))

            # transform the odd set accordingly
            from pytom_volume import vol, transformSpline
            from pytom.basic.fourier import ftshift
            from pytom_volume import reducedToFull
            from pytom_freqweight import weight
            transformed_odd_pre = vol(odd.sizeX(), odd.sizeY(), odd.sizeZ())
            full_all_odd_wedge = reducedToFull(all_odd_wedge)
            ftshift(full_all_odd_wedge)
            odd_weight = weight(full_all_odd_wedge)  # the funny part of pytom
            transformed_odd = vol(odd.sizeX(), odd.sizeY(), odd.sizeZ())

            transformSpline(all_odd_pre, transformed_odd_pre, -angle[1],
                            -angle[0], -angle[2], int(odd.sizeX() / 2),
                            int(odd.sizeY() / 2), int(odd.sizeZ() / 2),
                            -(pos[0] - odd.sizeX() / 2),
                            -(pos[1] - odd.sizeY() / 2),
                            -(pos[2] - odd.sizeZ() / 2), 0, 0, 0)
            odd_weight.rotate(-angle[1], -angle[0], -angle[2])
            transformed_odd_wedge = odd_weight.getWeightVolume(True)
            transformSpline(odd, transformed_odd, -angle[1], -angle[0],
                            -angle[2], int(odd.sizeX() / 2),
                            int(odd.sizeY() / 2), int(odd.sizeZ() / 2),
                            -(pos[0] - odd.sizeX() / 2),
                            -(pos[1] - odd.sizeY() / 2),
                            -(pos[2] - odd.sizeZ() / 2), 0, 0, 0)

            all_odd_pre = transformed_odd_pre
            all_odd_wedge = transformed_odd_wedge
            odd = transformed_odd

            # apply symmetries before determine resolution
            # with gold standard you should be careful about applying the symmetry!
            even = job.symmetries.applyToParticle(even)
            odd = job.symmetries.applyToParticle(odd)
            resNyquist, resolutionBand, numberBands = self.determine_resolution(
                even, odd, job.fsc_criterion, None, job.mask, verbose)

            # write the half set to the disk
            even.write('fsc_' + str(i) + '_even.em')
            odd.write('fsc_' + str(i) + '_odd.em')

            current_resolution = bandToAngstrom(
                resolutionBand, job.sampleInformation.getPixelSize(),
                numberBands, 1)
            if verbose:
                print(
                    self.node_name + ': current resolution ' +
                    str(current_resolution), resNyquist)

            # create new average
            all_even_pre += all_odd_pre
            all_even_wedge += all_odd_wedge
            # all_even_pre = all_even_pre/2 # correct for the number of particles in wiener filter
            average = self.create_average(all_even_pre, sum_ctf_squared,
                                          all_even_wedge)

            # apply symmetries
            average = job.symmetries.applyToParticle(average)

            # filter average to resolution and update the new reference
            average_name = 'average_iter' + str(i) + '.em'
            average.write(average_name)

            # update the references
            new_reference = [
                Reference('fsc_' + str(i) + '_even.em'),
                Reference('fsc_' + str(i) + '_odd.em')
            ]

            # low pass filter the reference and write it to the disk
            filtered = lowpassFilter(average, ceil(resolutionBand),
                                     ceil(resolutionBand) / 10)
            filtered_ref_name = 'average_iter' + str(i) + '_res' + str(
                current_resolution) + '.em'
            filtered[0].write(filtered_ref_name)

            # change the frequency to a higher value
            new_freq = int(ceil(resolutionBand)) + 1
            if new_freq <= old_freq:
                if job.adaptive_res is not False:  # two different strategies
                    print(
                        self.node_name +
                        ': Determined resolution gets worse. Include additional %f percent frequency to be aligned!'
                        % job.adaptive_res)
                    new_freq = int((1 + job.adaptive_res) * old_freq)
                else:  # always increase by 1
                    print(
                        self.node_name +
                        ': Determined resolution gets worse. Increase the frequency to be aligned by 1!'
                    )
                    new_freq = old_freq + 1
                old_freq = new_freq
            else:
                old_freq = new_freq

            if new_freq >= numberBands:
                print(self.node_name +
                      ': Determined frequency too high. Terminate!')
                break

            if verbose:
                print(self.node_name + ': change the frequency to ' +
                      str(new_freq))

        # send end signal to other nodes and terminate itself
        self.end(verbose)
    else:
        # other nodes
        self.run(verbose)
def start(self, job, verbose=False):
    """
    Master/worker entry point for iterative FRM alignment (single reference).

    On the master (mpi_id == 0): iterates — distribute the alignment job,
    collect per-pair averages for the even/odd halves, determine the FSC
    resolution, write the average/reference to disk and update the alignment
    frequency. Other nodes just enter the worker loop (self.run).

    @param job: the alignment job (particleList is a ParticleListSet)
    @param verbose: print progress information
    """
    if self.mpi_id == 0:
        from pytom.basic.structures import ParticleList, Reference
        from pytom.basic.resolution import bandToAngstrom
        from pytom.basic.filter import lowpassFilter
        from math import ceil
        from pytom.basic.fourier import convolute
        from pytom_volume import vol, power, read

        new_reference = job.reference
        old_freq = job.freq
        new_freq = job.freq
        # main node
        for i in range(job.max_iter):
            if verbose:
                print(self.node_name + ': starting iteration %d ...' % i)

            # construct a new job by updating the reference and the frequency
            # here the job.particleList is actually ParticleListSet
            new_job = MultiDefocusJob(job.particleList, new_reference, job.mask,
                                      job.peak_offset, job.sampleInformation,
                                      job.bw_range, new_freq, job.destination,
                                      job.max_iter-i, job.r_score,
                                      job.weighting, job.bfactor)

            # distribute it
            num_all_particles = self.distribute_job(new_job, verbose)

            # calculate the denominator (SNR-weighted sum of squared CTFs)
            sum_ctf_squared = None
            for pair in job.particleList.pairs:
                if sum_ctf_squared is None:
                    sum_ctf_squared = pair.get_ctf_sqr_vol() * pair.snr
                else:
                    sum_ctf_squared += pair.get_ctf_sqr_vol() * pair.snr

            # get the result back
            all_even_pre = None
            all_even_wedge = None
            all_odd_pre = None
            all_odd_wedge = None
            pls = []
            for j in range(len(job.particleList.pairs)):
                pls.append(ParticleList())

            for j in range(self.num_workers):
                result = self.get_result()

                pair_id = self.assignment[result.worker_id]
                pair = job.particleList.pairs[pair_id]

                pl = pls[pair_id]
                pl += result.pl
                even_pre, even_wedge, odd_pre, odd_wedge = self.retrieve_res_vols(result.name)

                # accumulate SNR-weighted partial sums per half set
                if all_even_pre:
                    all_even_pre += even_pre * pair.snr
                    all_even_wedge += even_wedge
                    all_odd_pre += odd_pre * pair.snr
                    all_odd_wedge += odd_wedge
                else:
                    all_even_pre = even_pre * pair.snr
                    all_even_wedge = even_wedge
                    all_odd_pre = odd_pre * pair.snr
                    all_odd_wedge = odd_wedge

            # write the new particle list to the disk
            for j in range(len(job.particleList.pairs)):
                pls[j].toXMLFile('aligned_pl'+str(j)+'_iter'+str(i)+'.xml')

            # correct for the number of particles in wiener filter
            sum_ctf_squared = sum_ctf_squared/num_all_particles
            # all_even_pre = all_even_pre/(num_all_particles/2)
            # all_odd_pre = all_odd_pre/(num_all_particles/2)

            # bfactor
            if job.bfactor and job.bfactor != 'None':
                # bfactor_kernel = create_bfactor_vol(sum_ctf_squared.sizeX(), job.sampleInformation.getPixelSize(), job.bfactor)
                bfactor_kernel = read(job.bfactor)
                bfactor_kernel_sqr = vol(bfactor_kernel)
                power(bfactor_kernel_sqr, 2)
                all_even_pre = convolute(all_even_pre, bfactor_kernel, True)
                all_odd_pre = convolute(all_odd_pre, bfactor_kernel, True)
                sum_ctf_squared = sum_ctf_squared*bfactor_kernel_sqr

            # determine the resolution
            if verbose:
                print(self.node_name + ': determining the resolution ...')
            even = self.create_average(all_even_pre, sum_ctf_squared, all_even_wedge) # assume that the CTF sum is the same for the even and odd
            odd = self.create_average(all_odd_pre, sum_ctf_squared, all_odd_wedge)

            # apply symmetries before determine resolution
            even = job.symmetries.applyToParticle(even)
            odd = job.symmetries.applyToParticle(odd)
            resNyquist, resolutionBand, numberBands = self.determine_resolution(even, odd, job.fsc_criterion, None, job.mask, verbose)

            # write the half set to the disk
            even.write('fsc_'+str(i)+'_even.em')
            odd.write('fsc_'+str(i)+'_odd.em')

            current_resolution = bandToAngstrom(resolutionBand, job.sampleInformation.getPixelSize(), numberBands, 1)
            if verbose:
                print(self.node_name + ': current resolution ' + str(current_resolution), resNyquist)

            # create new average
            all_even_pre += all_odd_pre
            all_even_wedge += all_odd_wedge
            # all_even_pre = all_even_pre/2 # correct for the number of particles in wiener filter
            average = self.create_average(all_even_pre, sum_ctf_squared, all_even_wedge)

            # apply symmetries
            average = job.symmetries.applyToParticle(average)

            # filter average to resolution and update the new reference
            average_name = 'average_iter'+str(i)+'.em'
            average.write(average_name)
            new_reference = Reference(average_name)

            # low pass filter the reference and write it to the disk
            filtered = lowpassFilter(average, ceil(resolutionBand), ceil(resolutionBand)/10)
            filtered_ref_name = 'average_iter'+str(i)+'_res'+str(current_resolution)+'.em'
            filtered[0].write(filtered_ref_name)

            # change the frequency to a higher value
            new_freq = int(ceil(resolutionBand))+1
            if new_freq <= old_freq:
                if job.adaptive_res is not False:  # two different strategies
                    print(self.node_name + ': Determined resolution gets worse. Include additional %f percent frequency to be aligned!' % job.adaptive_res)
                    new_freq = int((1+job.adaptive_res)*old_freq)
                else:  # always increase by 1
                    print(self.node_name + ': Determined resolution gets worse. Increase the frequency to be aligned by 1!')
                    new_freq = old_freq+1
                old_freq = new_freq
            else:
                old_freq = new_freq

            if new_freq >= numberBands:
                print(self.node_name + ': Determined frequency too high. Terminate!')
                break

            if verbose:
                print(self.node_name + ': change the frequency to ' + str(new_freq))

        # send end signal to other nodes and terminate itself
        self.end(verbose)
    else:
        # other nodes
        self.run(verbose)