def gen_mask_fsc(data, num_cycles, outname=None, num_stds=1, smooth=2, maskD=None):
    '''This function generates a structured mask around the particle.

    @param data: 3d array that constitutes the particle
    @type data: ndarray of float32/64
    @param num_cycles: number of binary dilation cycles after thresholding
    @type num_cycles: int
    @param outname: filename of output file. If not None, an output file is written.
    @type outname: str
    @param num_stds: number of standard deviations below the mean used for thresholding
    @type num_stds: float
    @param smooth: sigma of the gaussian filter used to smooth the mask
    @type smooth: float
    @return: mask
    @rtype: ndarray of float32'''
    # imports for the helpers used below (assumed; the original module may import these at the top)
    from numpy import zeros_like, float32
    from scipy.ndimage import binary_dilation, binary_fill_holes, gaussian_filter, label, median_filter
    from skimage.morphology import remove_small_objects
    from pytom.tompy.io import write

    mask = zeros_like(data, dtype=int)
    print(data.std(), num_stds)
    mask[data < data.mean() - float(num_stds) * data.std()] = 1
    mask = remove_small_objects(mask.astype(bool))
    mask = binary_fill_holes(mask)
    l, n = label(mask)

    # keep only the largest connected component
    part, total = 0, 0
    for i in range(1, n + 1):
        if total < (l == i).sum():
            part = i
            total = (l == i).sum()

    mask = (l == part)

    if maskD is not None:
        mask *= maskD > 0

    mask = binary_dilation(mask)
    mask = median_filter(mask, 6)

    for i in range(num_cycles):
        mask = binary_dilation(mask)

    mask = gaussian_filter(mask * 100., smooth)
    mask[mask > 78] = 78.
    mask /= mask.max()

    if outname is None:
        return mask
    else:
        write(outname, mask.astype(float32))
        return mask
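# Hedged usage sketch (editor's addition, not part of the original module): how
# gen_mask_fsc might be called on a reconstructed average; the file names are
# placeholders and pytom.tompy.io.read is assumed to be available.
#
#   from pytom.tompy.io import read
#   average = read('average.em')                       # placeholder input volume
#   mask = gen_mask_fsc(average, num_cycles=3, outname='fsc_mask.mrc',
#                       num_stds=1, smooth=2)          # mask is written to disk and returned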
def create_TiltSeries(data, tiltAngles, outputfolder='./'):
    from pytom.tompy.io import read, write
    from pytom.tompy.transform import rotate_axis
    import os

    if isinstance(data, str):
        data = read(data)

    if not os.path.exists(outputfolder):
        os.mkdir(outputfolder)

    for n, tiltAngle in enumerate(tiltAngles):
        outname = os.path.join(outputfolder, 'sorted_{:03d}.mrc'.format(n))
        write(outname, rotate_axis(data, tiltAngle, axis='y').sum(axis=2), tilt_angle=tiltAngle)
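# Hedged usage sketch (editor's addition): create_TiltSeries rotates a volume
# around the y axis for each tilt angle and writes one sorted_XXX.mrc projection
# per angle. Names below are placeholders.
#
#   import numpy as np
#   angles = np.arange(-60, 61, 3)                     # hypothetical tilt scheme
#   create_TiltSeries('model.mrc', angles, outputfolder='./simulated_tilts')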
def writeRes(self, resV, orientV, jobID=None):
    """
    writeRes: Write the result back to the disk, and return the PeakJobResult.
    @param resV: result volume
    @type resV: L{pytom_volume.vol}
    @param orientV: orientation volume
    @type orientV: L{pytom_volume.vol}
    @param jobID: ID of job
    @type jobID: integer

    @return: result of the peak job
    @rtype: L{pytom.localization.peak_job.PeakResult}
    """
    from pytom.tompy.io import read, write

    if jobID is not None:
        resFilename = self.dstDir + self.name + '_job' + str(jobID) + '_res.em'
        orientFilename = self.dstDir + self.name + '_job' + str(jobID) + '_orient.em'
    else:
        resFilename = self.dstDir + self.name + '_res.em'
        orientFilename = self.dstDir + self.name + '_orient.em'

    try:
        # pytom_volume.vol objects provide their own write method
        resV.write(resFilename)
        orientV.write(orientFilename)
    except Exception:
        # fall back to the numpy-based writer for ndarray input
        print(resFilename, orientFilename)
        write(resFilename, resV)
        write(orientFilename, orientV)

    from pytom.localization.structures import Volume, Orientation
    res = Volume(resFilename)
    orient = Orientation(orientFilename)

    # construct the result
    from pytom.localization.peak_job import PeakResult
    result = PeakResult(res, orient, jobID)

    return result
def recenterVolume(volume, densityNegative=False):
    from scipy.ndimage import center_of_mass
    from pytom.tompy.io import read, write
    from pytom.tompy.tools import paste_in_center
    from pytom.gpu.initialize import xp
    from pytom_numpy import vol2npy
    import os

    try:
        a = vol2npy(volume).copy()
        vol = True
    except Exception:
        a = volume
        vol = False

    if densityNegative:
        a *= -1

    x, y, z = list(map(int, center_of_mass(a)))
    cx, cy, cz = a.shape[0] // 2, a.shape[1] // 2, a.shape[2] // 2

    # largest symmetric box around the center of mass that still fits in the volume
    sx = min(x, a.shape[0] - x)
    sy = min(y, a.shape[1] - y)
    sz = min(z, a.shape[2] - z)

    ac = a[x - sx:x + sx, y - sy:y + sy, z - sz:z + sz]
    b = xp.zeros_like(a)
    b = paste_in_center(ac, b)

    if densityNegative:
        b *= -1

    if vol:
        write('recenteredDBV21.em', b)
        from pytom.basic.files import read
        vol = read('recenteredDBV21.em')
        os.system('rm recenteredDBV21.em')
        return vol
    else:
        return b
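# Hedged usage sketch (editor's addition): recenterVolume shifts the density so
# that its center of mass coincides with the box center. It accepts either a
# pytom_volume.vol or a numpy/cupy array; the file name is a placeholder.
#
#   from pytom.tompy.io import read
#   subtomo = read('particle_0.em')                    # placeholder particle
#   centered = recenterVolume(subtomo, densityNegative=False)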
else:
    print(helper)
    sys.exit()

if num_cycles is None:
    num_cycles = 0
else:
    num_cycles = int(num_cycles)

data = read(filename)

mask = zeros_like(data, dtype=int)
mask[data < data.mean() - data.std()] = 1
mask = remove_small_objects(mask.astype(bool))
mask = binary_fill_holes(mask)
l, n = label(mask)

dx, dy, dz = data.shape
# keep the connected component that contains the center of the volume
mask = (l == l[dx // 2, dy // 2, dz // 2])
mask = binary_dilation(mask)
mask = median_filter(mask, 6)

for i in range(num_cycles + 2):
    mask = binary_dilation(mask)

mask = gaussian_filter(mask * 100., 2)
mask[mask > 78] = 78.
mask /= mask.max()

write(outname, mask.astype(float32))
    iter = 10
else:
    iter = int(iter)

if metafile and os.path.exists(metafile):
    metadata = loadstar(metafile, dtype=datatype)
    tiltAngles = metadata['TiltAngle']
else:
    tiltAngles = []

metafile = '' if metafile is None or not os.path.exists(metafile) else metafile

# start reconstruction
from pytom.tompy.io import read, write
from nufft.reconstruction import fourier_2d1d_iter_reconstruct
from pytom.reconstruction.reconstructionStructures import ProjectionList

projections = ProjectionList()
projections.loadDirectory(proj_dir, metafile=metafile)
projections.sort()

projs = []
tilt_angles = []
for p in projections:
    print(p.getTiltAngle(), p.getFilename())
    projs.append(read(p.getFilename()))
    tilt_angles.append(p.getTiltAngle())

v = fourier_2d1d_iter_reconstruct(projs, tilt_angles, iter)
write(output_filename, v)
tilt_angles = metadata['TiltAngle']
size = [464, 464, 464] if size is None else list(map(int, size.split(',')))

patches = xp.zeros((size[0], size[1], len(tilt_angles)), dtype=xp.float32)

images = []
tt = time()
cur = 0
missed = 0

for i in range(patches.shape[2]):
    temp_image = alignImageUsingAlignmentResultFile(alignmentfile, i, weighting=-1, circleFilter=True,
                                                    binning=coordinateBinning)
    patches[:, :, i] = temp_image[:, :]
    del temp_image

print(time() - tt)

vol_bp = xp.zeros((size[0], size[1], size[2]), dtype=xp.float32)

tt = time()
s = 100
bp = backProjectGPU(patches[:s, :s, :], vol_bp[:s, :s, :s], 0, tilt_angles)
print(time() - tt)

write(f'{outdir}/reconstruction.mrc', bp)
# [ndim//2-vol_size//2:ndim//2+vol_size//2 + vol_size%2,
#  ndim//2-vol_size//2:ndim//2+vol_size//2 + vol_size%2,
#  ndim//2-vol_size//2:ndim//2+vol_size//2 + vol_size%2])
def averageParallelGPU(particleList, averageName, showProgressBar=False, verbose=False,
                       createInfoVolumes=False, weighting=None, norm=False,
                       setParticleNodesRatio=3, cores=6):
    """
    compute average using parfor
    @param particleList: The particles
    @param averageName: Filename of new average
    @param verbose: Prints particle information. Disabled by default.
    @param createInfoVolumes: Create info data (wedge sum, inverted density) too? False by default.
    @param weighting: weight particles by exp CC in average
    @type weighting: bool
    @param setParticleNodesRatio: minimum number of particles per node
    @type setParticleNodesRatio: L{int}
    @return: A new Reference object
    @rtype: L{pytom.basic.structures.Reference}
    @author: FF
    """
    from pytom_volume import read, complexRealMult
    from pytom.basic.fourier import fft, ifft
    from pytom.basic.filter import lowpassFilter
    from pytom.basic.structures import Reference
    from pytom.alignment.alignmentFunctions import averageGPU
    from pytom.tompy.tools import invert_WedgeSum
    from pytom_numpy import vol2npy
    from pytom.tompy.io import write, read
    # xp (numpy/cupy backend) is assumed to come from pytom.gpu.initialize, as elsewhere in this module
    from pytom.gpu.initialize import xp
    import os

    splitLists = splitParticleList(particleList, setParticleNodesRatio=setParticleNodesRatio, numberOfNodes=cores)
    splitFactor = len(splitLists)

    avgNameList = []
    preList = []
    wedgeList = []
    for ii in range(splitFactor):
        avgName = averageName + '_dist' + str(ii) + '.em'
        avgNameList.append(avgName)
        preList.append(averageName + '_dist' + str(ii) + '-PreWedge.em')
        wedgeList.append(averageName + '_dist' + str(ii) + '-WedgeSumUnscaled.em')

    #####
    averageGPU(splitLists[0], avgNameList[0], showProgressBar, verbose, createInfoVolumes, weighting, norm)
    # averageList = mpi.parfor(average, list(zip(splitLists, avgNameList, [showProgressBar]*splitFactor,
    #                                            [verbose]*splitFactor, [createInfoVolumes]*splitFactor,
    #                                            [weighting]*splitFactor, [norm]*splitFactor)), verbose=True)

    unweiAv = read(preList[0])
    wedgeSum = read(wedgeList[0])
    os.system('rm ' + wedgeList[0])
    os.system('rm ' + avgNameList[0])
    os.system('rm ' + preList[0])

    for ii in range(1, splitFactor):
        print(preList[ii], wedgeList[ii], avgNameList[ii])
        av = read(preList[ii])
        unweiAv += av
        os.system('rm ' + preList[ii])
        w = read(wedgeList[ii])
        wedgeSum += w
        os.system('rm ' + wedgeList[ii])
        os.system('rm ' + avgNameList[ii])

    if createInfoVolumes:
        write(averageName[:len(averageName) - 3] + '-PreWedge.em', unweiAv)
        write(averageName[:len(averageName) - 3] + '-WedgeSumUnscaled.em', wedgeSum)

    # convolute unweighted average with inverse of wedge sum
    wedgeINV = invert_WedgeSum(wedgeSum, r_max=unweiAv.shape[0] / 2 - 2.,
                               lowlimit=.05 * len(particleList), lowval=.05 * len(particleList))

    if createInfoVolumes:
        write(averageName[:len(averageName) - 3] + '-WedgeSumINV.em', wedgeINV)

    r = xp.fft.rfftn(unweiAv) * wedgeINV
    unweiAv = (xp.fft.irfftn(r)).real
    # unweiAv.shiftscale(0.0, 1/float(unweiAv.sizeX()*unweiAv.sizeY()*unweiAv.sizeZ()))
    # low pass filter to remove artifacts at fringes
    # unweiAv = lowpassFilter(volume=unweiAv, band=unweiAv.sizeX()/2-2, smooth=(unweiAv.sizeX()/2-1)/10.)[0]

    write(averageName, unweiAv)

    return 1
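# Hedged usage sketch (editor's addition): averageParallelGPU splits a particle
# list into chunks, averages them on the GPU and merges the partial sums.
# splitParticleList is assumed to be defined elsewhere in this module; the
# output path is a placeholder.
#
#   ref = averageParallelGPU(particleList, 'average/average.em',
#                            createInfoVolumes=True, cores=4)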
def averageGPU(particleList, averageName, showProgressBar=False, verbose=False, createInfoVolumes=False,
               weighting=False, norm=False, gpuId=None, profile=True):
    """
    average : Creates new average from a particleList
    @param particleList: The particles
    @param averageName: Filename of new average
    @param verbose: Prints particle information. Disabled by default.
    @param createInfoVolumes: Create info data (wedge sum, inverted density) too? False by default.
    @param weighting: apply weighting to each particle according to its correlation score
    @param norm: apply normalization for each particle
    @return: A new Reference object
    @rtype: L{pytom.basic.structures.Reference}
    @author: Thomas Hrabe
    @change: limit for wedgeSum set to 1% or particles to avoid division by small numbers - FF
    """
    import time
    from pytom.tompy.io import read, write, read_size
    from pytom.tompy.filter import bandpass as lowpassFilter, rotateWeighting, applyFourierFilter, \
        applyFourierFilterFull, create_wedge
    from pytom.voltools import transform, StaticVolume
    from pytom.basic.structures import Reference
    from pytom.tompy.normalise import mean0std1
    from pytom.tompy.tools import volumesSameSize, invert_WedgeSum, create_sphere
    from pytom.tompy.transform import fourier_full2reduced, fourier_reduced2full
    from cupyx.scipy.fftpack.fft import fftn as fftnP
    from cupyx.scipy.fftpack.fft import ifftn as ifftnP
    from cupyx.scipy.fftpack.fft import get_fft_plan
    from pytom.tools.ProgressBar import FixedProgBar
    from multiprocessing import RawArray
    import numpy as np
    import cupy as xp

    if gpuId is not None:
        device = f'gpu:{gpuId}'
        xp.cuda.Device(gpuId).use()
    else:
        print(gpuId)
        raise Exception('Running gpu code on non-gpu device')

    print(device)
    cstream = xp.cuda.Stream()
    if profile:
        stream = xp.cuda.Stream.null
        t_start = stream.record()

    # from pytom.tools.ProgressBar import FixedProgBar
    from math import exp
    import os

    if len(particleList) == 0:
        raise RuntimeError('The particle list is empty. Aborting!')

    if showProgressBar:
        progressBar = FixedProgBar(0, len(particleList), 'Particles averaged ')
        progressBar.update(0)
        numberAlignedParticles = 0

    # pre-check that scores != 0
    if weighting:
        wsum = 0.
        for particleObject in particleList:
            wsum += particleObject.getScore().getValue()
        if wsum < 0.00001:
            weighting = False
            print("Warning: all scores have been zero - weighting not applied")

    sx, sy, sz = read_size(particleList[0].getFilename())
    wedgeInfo = particleList[0].getWedge().convert2numpy()
    print('angle: ', wedgeInfo.getWedgeAngle())
    wedgeZero = xp.fft.fftshift(xp.array(wedgeInfo.returnWedgeVolume(sx, sy, sz, True).get(), dtype=xp.float32))
    # wedgeZeroReduced = fourier_full2reduced(wedgeZero)
    wedge = xp.zeros_like(wedgeZero, dtype=xp.float32)
    wedgeSum = xp.zeros_like(wedge, dtype=xp.float32)
    print('init texture')
    wedgeText = StaticVolume(xp.fft.fftshift(wedgeZero), device=device, interpolation='filt_bspline')

    newParticle = xp.zeros((sx, sy, sz), dtype=xp.float32)

    centerX = sx // 2
    centerY = sy // 2
    centerZ = sz // 2

    result = xp.zeros((sx, sy, sz), dtype=xp.float32)

    fftplan = get_fft_plan(wedge.astype(xp.complex64))

    n = 0

    total = len(particleList)
    # total = int(np.floor((11*1024**3 - mempool.total_bytes())/(sx*sy*sz*4)))
    # total = 128
    #
    # particlesNP = np.zeros((total, sx, sy, sz), dtype=np.float32)
    # particles = []
    # mask = create_sphere([sx, sy, sz], sx//2-6, 2)
    # raw = RawArray('f', int(particlesNP.size))
    # shared_array = np.ctypeslib.as_array(raw)
    # shared_array[:] = particlesNP.flatten()
    # procs = allocateProcess(particleList, shared_array, n, total, wedgeZero.size)
    # del particlesNP

    if profile:
        t_end = stream.record()
        t_end.synchronize()
        time_took = xp.cuda.get_elapsed_time(t_start, t_end)
        print(f'startup time {n:5d}: \t{time_took:.3f}ms')
        t_start = stream.record()

    for particleObject in particleList:
        rotation = particleObject.getRotation()
        rotinvert = rotation.invert()
        shiftV = particleObject.getShift()

        # if n % total == 0:
        #     while len(procs):
        #         procs = [proc for proc in procs if proc.is_alive()]
        #         time.sleep(0.1)
        #         print(0.1)
        #     # del particles
        #     # xp._default_memory_pool.free_all_blocks()
        #     # pinned_mempool.free_all_blocks()
        #     particles = xp.array(shared_array.reshape(total, sx, sy, sz), dtype=xp.float32)
        #     procs = allocateProcess(particleList, shared_array, n, total, size=wedgeZero.size)
        #     # pinned_mempool.free_all_blocks()
        #     # print(mempool.total_bytes()/1024**3)

        particle = read(particleObject.getFilename(), deviceID=device)
        # particle = particles[n % total]

        if norm:  # normalize the particle
            mean0std1(particle)  # happens in place

        # apply its wedge to the particle
        # particle = applyFourierFilter(particle, wedgeZeroReduced)
        # particle = (xp.fft.ifftn(xp.fft.fftn(particle) * wedgeZero)).real
        particle = (ifftnP(fftnP(particle, plan=fftplan) * wedgeZero, plan=fftplan)).real

        ### create spectral wedge weighting
        wedge *= 0
        wedgeText.transform(rotation=[rotinvert[0], rotinvert[2], rotinvert[1]],
                            rotation_order='rzxz', output=wedge)
        # wedge = xp.fft.fftshift(fourier_reduced2full(create_wedge(30, 30, 21, 42, 42, 42,
        #                                              rotation=[rotinvert[0], rotinvert[2], rotinvert[1]])))

        # if analytWedge:
        #     # > analytical buggy version
        #     wedge = wedgeInfo.returnWedgeVolume(sx, sy, sz, True, rotinvert)
        # else:
        #     # > FF: interpol bugfix
        #     wedge = rotateWeighting(weighting=wedgeInfo.returnWedgeVolume(sx, sy, sz, True),
        #                             rotation=[rotinvert[0], rotinvert[2], rotinvert[1]])
        #     # < FF
        #     # > TH bugfix
        #     # wedgeVolume = wedgeInfo.returnWedgeVolume(wedgeSizeX=sizeX, wedgeSizeY=sizeY, wedgeSizeZ=sizeZ,
        #     #                                           humanUnderstandable=True, rotation=rotinvert)
        #     # wedge = rotate(volume=wedgeVolume, rotation=rotinvert, imethod='linear')
        #     # < TH

        ### shift and rotate particle
        newParticle *= 0
        transform(particle, output=newParticle,
                  rotation=[-rotation[1], -rotation[2], -rotation[0]],
                  center=[centerX, centerY, centerZ],
                  translation=[-shiftV[0], -shiftV[1], -shiftV[2]],
                  device=device, interpolation='filt_bspline', rotation_order='rzxz')

        # write(f'trash/GPU_{n}.em', newParticle)
        # print(rotation.toVector())
        # break

        result += newParticle
        wedgeSum += xp.fft.fftshift(wedge)

        # if showProgressBar:
        #     numberAlignedParticles = numberAlignedParticles + 1
        #     progressBar.update(numberAlignedParticles)

        if n % total == 0:
            if profile:
                t_end = stream.record()
                t_end.synchronize()
                time_took = xp.cuda.get_elapsed_time(t_start, t_end)
                print(f'total time {n:5d}: \t{time_took:.3f}ms')
                t_start = stream.record()
            cstream.synchronize()

        n += 1

    print('averaged particles')

    ### apply spectral weighting to sum
    result = lowpassFilter(result, high=sx / 2 - 1, sigma=0)

    # if createInfoVolumes:
    write(averageName[:len(averageName) - 3] + '-PreWedge.em', result)
    write(averageName[:len(averageName) - 3] + '-WedgeSumUnscaled.em', fourier_full2reduced(wedgeSum))

    wedgeSumINV = invert_WedgeSum(wedgeSum, r_max=sx // 2 - 2.,
                                  lowlimit=.05 * len(particleList), lowval=.05 * len(particleList))
    # print(wedgeSum.mean(), wedgeSum.std())

    if createInfoVolumes:
        write(averageName[:len(averageName) - 3] + '-WedgeSumInverted.em', xp.fft.fftshift(wedgeSumINV))

    result = applyFourierFilterFull(result, xp.fft.fftshift(wedgeSumINV))

    # do a low pass filter
    result = lowpassFilter(result, sx / 2 - 2, (sx / 2 - 1) / 10.)[0]
    write(averageName, result)

    if createInfoVolumes:
        resultINV = result * -1
        # write sign inverted result to disk (good for chimera viewing ...)
        write(averageName[:len(averageName) - 3] + '-INV.em', resultINV)

    newReference = Reference(averageName, particleList)

    return newReference
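# Hedged usage sketch (editor's addition): averageGPU expects a
# pytom.basic.structures.ParticleList and a target file name, and requires a
# CUDA device. The file names and gpuId below are placeholders.
#
#   from pytom.basic.structures import ParticleList
#   pl = ParticleList()
#   pl.fromXMLFile('particleList.xml')                 # placeholder particle list
#   reference = averageGPU(pl, 'average.em', createInfoVolumes=True, gpuId=0)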
def toProjectionStackFromAlignmentResultsFile(alignmentResultsFile, weighting=None, lowpassFilter=0.9,
                                              binning=1, circleFilter=False, num_procs=1, outdir='',
                                              prefix='sorted_aligned'):
    """read images and create an aligned projection stack, based on the results described in the alignmentResultFile.

    @param alignmentResultsFile: result file generated by the alignment script.
    @type alignmentResultsFile: str
    @param weighting: weighting (<0: analytical weighting, >1: exact weighting, 0/None: no weighting)
    @type weighting: float
    @param lowpassFilter: lowpass filter (in Nyquist)
    @type lowpassFilter: float
    @param binning: binning (default: 1 = no binning). binning=2: 2x2 pixels -> 1 pixel,
                    binning=3: 3x3 pixels -> 1 pixel, etc.

    @author: GvdS
    """
    print('weighting: ', weighting)
    import numpy
    from pytom_numpy import vol2npy
    from pytom.basic.files import read_em, write_em
    from pytom.basic.functions import taper_edges
    from pytom.basic.transformations import general_transform2d
    from pytom.basic.fourier import ifft, fft
    from pytom.basic.filter import filter as filterFunction, bandpassFilter
    from pytom.basic.filter import circleFilter, rampFilter, exactFilter, fourierFilterShift, \
        fourierFilterShift_ReducedComplex
    from pytom_volume import complexRealMult, vol, paste
    import pytom_freqweight
    from pytom.basic.transformations import resize, rotate
    from pytom.gui.guiFunctions import fmtAR, headerAlignmentResults, datatype, datatypeAR, loadstar
    from pytom.reconstruction.reconstructionStructures import Projection, ProjectionList
    import mrcfile
    from pytom.tompy.io import write, read_size
    import os

    print("Create aligned images from alignResults.txt")

    alignmentResults = loadstar(alignmentResultsFile, dtype=datatypeAR)
    imageList = alignmentResults['FileName']
    tilt_angles = alignmentResults['TiltAngle']

    imdim = int(read_size(imageList[0], 'x'))

    if binning > 1:
        imdim = int(float(imdim) / float(binning) + .5)
    else:
        imdim = imdim

    sliceWidth = imdim

    # pre-determine analytical weighting function and lowpass for speedup
    if (weighting != None) and (float(weighting) < -0.001):
        weightSlice = fourierFilterShift(rampFilter(imdim, imdim))

    if circleFilter:
        circleFilterRadius = imdim // 2
        circleSlice = fourierFilterShift_ReducedComplex(circleFilter(imdim, imdim, circleFilterRadius))
    else:
        circleSlice = vol(imdim, imdim // 2 + 1, 1)
        circleSlice.setAll(1.0)

    # design lowpass filter
    if lowpassFilter:
        if lowpassFilter > 1.:
            lowpassFilter = 1.
            print("Warning: lowpassFilter > 1 - set to 1 (=Nyquist)")
        # weighting filter: arguments: (angle, cutoff radius, dimx, dimy, ...)
        lpf = pytom_freqweight.weight(0.0, lowpassFilter * imdim // 2, imdim, imdim // 2 + 1, 1,
                                      lowpassFilter / 5. * imdim)
        # lpf = bandpassFilter(volume=vol(imdim, imdim, 1), lowestFrequency=0,
        #                      highestFrequency=int(lowpassFilter*imdim/2),
        #                      bpf=None, smooth=lowpassFilter/5.*imdim, fourierOnly=False)[1]

    projectionList = ProjectionList()
    imageList = []
    tilt_angles = []
    for n, image in enumerate(alignmentResults['FileName']):
        atx = alignmentResults['AlignmentTransX'][n]
        aty = alignmentResults['AlignmentTransY'][n]
        rot = alignmentResults['InPlaneRotation'][n]
        mag = alignmentResults['Magnification'][n]
        # print(image, alignmentResults['TiltAngle'][n])
        # if abs(alignmentResults['TiltAngle'][n]) > 20:
        #     continue
        tilt_angles.append(alignmentResults['TiltAngle'][n])
        imageList.append(image)
        projection = Projection(imageList[-1], tiltAngle=tilt_angles[-1], alignmentTransX=atx,
                                alignmentTransY=aty, alignmentRotation=rot, alignmentMagnification=mag)
        projectionList.append(projection)

    stack = vol(imdim, imdim, len(imageList))
    stack.setAll(0.0)

    phiStack = vol(1, 1, len(imageList))
    phiStack.setAll(0.0)

    thetaStack = vol(1, 1, len(imageList))
    thetaStack.setAll(0.0)

    offsetStack = vol(1, 2, len(imageList))
    offsetStack.setAll(0.0)

    for (ii, projection) in enumerate(projectionList):
        if projection._filename.split('.')[-1] == 'st':
            from pytom.basic.files import EMHeader, read
            idx = projection._index
            image = read(file=projection._filename,
                         subregion=[0, 0, idx - 1, imdim, imdim, 1],
                         sampling=[0, 0, 0], binning=[0, 0, 0])
            if not (binning == 1 or binning is None):
                image = resize(volume=image, factor=1 / float(binning))[0]
        else:
            # read projection files
            from pytom.basic.files import EMHeader, read, read_em_header
            image = read(str(projection._filename))
            # image = rotate(image, 180., 0., 0.)
            image = resize(volume=image, factor=1 / float(binning))[0]

        if lowpassFilter:
            filtered = filterFunction(volume=image, filterObject=lpf, fourierOnly=False)
            image = filtered[0]

        tiltAngle = projection._tiltAngle

        # normalize to contrast - subtract mean and norm to mean
        immean = vol2npy(image).mean()
        image = (image - immean) / immean
        print(ii, immean, projection._filename)

        # smoothen borders to prevent high contrast oscillations
        image = taper_edges(image, imdim // 30)[0]

        # transform projection according to tilt alignment
        transX = projection._alignmentTransX / binning
        transY = projection._alignmentTransY / binning
        rot = float(projection._alignmentRotation)
        mag = float(projection._alignmentMagnification)

        image = general_transform2d(v=image, rot=rot, shift=[transX, transY], scale=mag,
                                    order=[2, 1, 0], crop=True)

        # smoothen once more to avoid edges
        image = taper_edges(image, imdim // 30)[0]

        # analytical weighting
        if (weighting != None) and (weighting < 0):
            # image = (ifft(complexRealMult(fft(image), w_func)) / (image.sizeX() * image.sizeY() * image.sizeZ()))
            image = ifft(complexRealMult(complexRealMult(fft(image), weightSlice), circleSlice), scaling=True)
        elif (weighting != None) and (weighting > 0):
            weightSlice = fourierFilterShift(exactFilter(tilt_angles, tiltAngle, imdim, imdim, sliceWidth))
            # image = (ifft(complexRealMult(fft(image), w_func)) / (image.sizeX() * image.sizeY() * image.sizeZ()))
            image = ifft(complexRealMult(complexRealMult(fft(image), weightSlice), circleSlice), scaling=True)

        thetaStack(int(round(projection.getTiltAngle())), 0, 0, ii)
        offsetStack(int(round(projection.getOffsetX())), 0, 0, ii)
        offsetStack(int(round(projection.getOffsetY())), 0, 1, ii)
        paste(image, stack, 0, 0, ii)
        fname = '{}_{:02d}.mrc'.format(prefix, int(imageList[ii].split('_')[-1].split('.')[0]))

        if outdir:
            import mrcfile
            # write_em(os.path.join(outdir, fname.replace('mrc', 'em')), image)
            write(os.path.join(outdir, fname), vol2npy(image).copy().astype('float32'))
            print('written file: ', fname)

    return [stack, phiStack, thetaStack, offsetStack]
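# Hedged usage sketch (editor's addition): building a weighted, aligned
# projection stack from an alignment-results file; the path and parameters are
# placeholders, and weighting=-1 selects the analytical ramp weighting.
#
#   stack, phiStack, thetaStack, offsetStack = toProjectionStackFromAlignmentResultsFile(
#       'alignment/alignmentResults.txt', weighting=-1, lowpassFilter=0.9,
#       binning=2, circleFilter=True, outdir='aligned')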
def alignImageUsingAlignmentResultFile(alignmentResultsFile, indexImage, weighting=None,
                                       lowpassFilter=0.9, binning=1, circleFilter=False):
    import pytom_freqweight
    from pytom_numpy import vol2npy
    from pytom.gui.guiFunctions import fmtAR, headerAlignmentResults, datatype, datatypeAR, loadstar
    from pytom.reconstruction.reconstructionStructures import Projection, ProjectionList
    from pytom.tompy.io import read, write, read_size
    from pytom.tompy.tools import taper_edges, create_circle
    from pytom.tompy.filter import circle_filter, ramp_filter, exact_filter, ellipse_filter
    import pytom.voltools as vt
    from pytom.gpu.initialize import xp, device

    # print("Create aligned images from alignResults.txt")

    alignmentResults = loadstar(alignmentResultsFile, dtype=datatypeAR)
    imageList = alignmentResults['FileName']
    tilt_angles = alignmentResults['TiltAngle']

    imdimX = read_size(imageList[0], 'x')
    imdimY = read_size(imageList[0], 'y')

    if binning > 1:
        imdimX = int(float(imdimX) / float(binning) + .5)
        imdimY = int(float(imdimY) / float(binning) + .5)

    sliceWidth = imdimX

    if (weighting != None) and (float(weighting) < -0.001):
        weightSlice = xp.fft.fftshift(ramp_filter(imdimY, imdimX))

    if circleFilter:
        circleFilterRadiusX = imdimX // 2
        circleFilterRadiusY = imdimY // 2
        circleSlice = xp.fft.fftshift(ellipse_filter(imdimX, imdimY, circleFilterRadiusX, circleFilterRadiusY))
    else:
        circleSlice = xp.ones((imdimX, imdimY))

    # design lowpass filter
    if lowpassFilter:
        if lowpassFilter > 1.:
            lowpassFilter = 1.
            print("Warning: lowpassFilter > 1 - set to 1 (=Nyquist)")
        # weighting filter: arguments: ((dimx, dimy), cutoff radius, sigma)
        # lpf = xp.fft.fftshift(create_circle((imdimX, imdimY), lowpassFilter*(imdim//2),
        #                                     sigma=0.4*lowpassFilter*(imdim//2)))

    projectionList = ProjectionList()
    for n, image in enumerate(imageList):
        atx = alignmentResults['AlignmentTransX'][n]
        aty = alignmentResults['AlignmentTransY'][n]
        rot = alignmentResults['InPlaneRotation'][n]
        mag = 1 / (alignmentResults['Magnification'][n])
        projection = Projection(imageList[n], tiltAngle=tilt_angles[n], alignmentTransX=atx,
                                alignmentTransY=aty, alignmentRotation=rot, alignmentMagnification=mag)
        projectionList.append(projection)

    imdim = min(imdimY, imdimX)

    for (ii, projection) in enumerate(projectionList):
        if not ii == indexImage:
            continue
        from pytom.tompy.transform import resize

        # print(f'read {projection._filename}')
        image = read(str(projection._filename)).squeeze()

        if binning > 1:
            image = resize(image, 1 / binning)

        # write(f'test/image_{ii}.mrc', image, tilt_angle=tilt_angles[ii])

        tiltAngle = projection._tiltAngle

        # 1 -- normalize to contrast - subtract mean and norm to mean
        immean = image.mean()
        image = (image - immean) / immean

        # 2 -- smoothen borders to prevent high contrast oscillations
        image = taper_edges(image, imdim // 30)[0]

        # 3 -- square if needed
        if 0 and imdimY != imdimX:
            newImage = xp.zeros((imdim, imdim, 1), dtype=xp.float32)
            pasteCenter(image, newImage)
            image = newImage

        # 4 -- transform projection according to tilt alignment
        transX = projection._alignmentTransX / binning
        transY = projection._alignmentTransY / binning
        rot = float(projection._alignmentRotation)
        mag = float(projection._alignmentMagnification)

        inputImage = xp.expand_dims(image, 2).copy()
        outputImage = xp.zeros_like(inputImage, dtype=xp.float32)

        vt.transform(inputImage.astype(xp.float32), rotation=[0, 0, rot], rotation_order='rxyz',
                     output=outputImage, center=[inputImage.shape[0] // 2, inputImage.shape[1] // 2, 0],
                     device=device, translation=[transX, transY, 0], scale=[mag, mag, 1],
                     interpolation='filt_bspline')

        del image
        image = outputImage.squeeze()

        # 5 -- Optional Low Pass Filter
        if lowpassFilter:
            from pytom.tompy.filter import bandpass_circle
            image = bandpass_circle(image, high=lowpassFilter * (min(imdimX, imdimY) // 2),
                                    sigma=0.4 * lowpassFilter * (min(imdimX, imdimY) // 2))
            # image = xp.abs((xp.fft.ifftn(xp.fft.fftn(image) * lpf)))

        # 6 -- smoothen once more to avoid edges
        image = taper_edges(image, imdim // 30)[0]

        # 7 -- analytical weighting
        if (weighting != None) and (weighting < 0):
            # image = (ifft(complexRealMult(fft(image), w_func)) / (image.sizeX() * image.sizeY() * image.sizeZ()))
            image = xp.fft.ifftn(xp.fft.fftn(image) * weightSlice.T * circleSlice).real
        elif (weighting != None) and (weighting > 0):
            weightSlice = xp.fft.fftshift(exact_filter(tilt_angles, tiltAngle, imdim, imdim, sliceWidth))
            image = xp.fft.ifftn(xp.fft.fftn(image) * weightSlice * circleSlice).real

        del inputImage, outputImage, circleSlice

        write(f'inputImage_{ii}.mrc', image)

        return image.astype(xp.float32)
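# Hedged usage sketch (editor's addition): aligning a single projection (here
# index 0) from the same alignment-results file; arguments mirror the stack
# function above and the path is a placeholder.
#
#   aligned = alignImageUsingAlignmentResultFile('alignment/alignmentResults.txt', 0,
#                                                weighting=-1, binning=2, circleFilter=True)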
import pytom.tompy.correlation as correlation
from pytom_numpy import vol2npy
import numpy as np
from pytom.tompy.io import write

if randomize is None:
    for (ii, fscel) in enumerate(f):
        f[ii] = 2. * fscel / (1. + fscel)
    r = determineResolution(f, fscCriterion, verbose)
else:
    randomizationFrequency = np.floor(determineResolution(np.array(f), randomize, verbose)[1])
    oddVolumeRandomizedPhase = correlation.randomizePhaseBeyondFreq(vol2npy(v1), randomizationFrequency)
    evenVolumeRandomizedPhase = correlation.randomizePhaseBeyondFreq(vol2npy(v2), randomizationFrequency)
    write(os.path.join(outdir, 'randOdd.mrc'), oddVolumeRandomizedPhase)
    write(os.path.join(outdir, 'randEven.mrc'), evenVolumeRandomizedPhase)
    oddVolumeRandomizedPhase = read(os.path.join(outdir, 'randOdd.mrc'))
    evenVolumeRandomizedPhase = read(os.path.join(outdir, 'randEven.mrc'))
    fsc_rand = FSC(oddVolumeRandomizedPhase, evenVolumeRandomizedPhase, numberBands, mask, verbose)
    if verbose:
        print('FSC_Random:\n', fsc_rand)
    fsc_corr = list(correlation.calc_FSC_true(np.array(f), np.array(fsc_rand)))
    if verbose:
        print('FSC_true:\n', fsc_corr)
# # # fill in the subregion
# subregion[cl-cs:cl+cs, cl-vol_size/2:cl+vol_size-vol_size/2] = patch
# subregions.append(subregion)
#
# # reconstruct
# v = fourier_2d1d_iter_reconstruct(subregions, tilt_angles, iter)
#
# # get the center part
# v = v[cl-vol_size/2:cl+vol_size-vol_size/2, cl-vol_size/2:cl+vol_size-vol_size/2,
#       cl-vol_size/2:cl+vol_size-vol_size/2]

subregions = []
for img, ang in zip(proj, tilt_angles):
    # project the coordinate to 2D image
    yy = y  # assume the rotation axis is around y
    xx = (cos(ang * pi / 180) * (x - dim_x / 2) - sin(ang * pi / 180) * (z - dim_z / 2)) + dim_x / 2

    # cut the small patch out
    patch = cut_from_projection(img, [xx, yy], [vol_size, vol_size])
    patch = patch - np.mean(patch)

    # fill in the subregion
    subregions.append(patch)

# reconstruct
v = fourier_2d1d_iter_reconstruct(subregions, tilt_angles, iter)

# write to the disk
write(p.getFilename(), v)