def tempMatchStep(self):
    self.box = self.boxSize.get()
    # volFile = os.path.abspath(self.ref.get().getFileName())
    volFile = pwutils.removeBaseExt(self.ref.get().getFileName()) + '.mrc'
    volFile = os.path.abspath(self._getTmpPath(volFile))
    params = ""
    for tomo in self.inputSet.get():
        tomoFile = pwutils.removeBaseExt(tomo.getFileName()) + '.mrc'
        tomoFile = os.path.abspath(self._getTmpPath(tomoFile))
        params = params + " %s" % tomoFile
    params = params + " --reference=%s --nptcl=%d --dthr=%f --vthr=%f --delta=%f --sym=%s " \
                      "--rmedge --rmgold --boxsz=%d" % (volFile, self.nptcl.get(), self.dthr.get(),
                                                        self.vthr.get(), self.delta.get(),
                                                        self.sym.get(), self.box)
    program = emantomo.Plugin.getProgram("e2spt_tempmatch.py")
    self.runJob(program, params, cwd=os.path.abspath(self._getTmpPath()),
                env=emantomo.Plugin.getEnviron(), numberOfMpi=1, numberOfThreads=1)

    # Move output files to Extra Path
    # moveFile(self._getTmpPath("ccc.hdf"), self._getExtraPath("particles" + ".hdf"))
    for tomo in self.inputSet.get():
        tomoName = os.path.basename(tomo.getFileName())
        tomoName = os.path.splitext(tomoName)[0]
        tomoCoord = tomoName + "_info.json"
        moveFile(self._getTmpPath(os.path.join("info", tomoCoord)),
                 self._getExtraPath(tomoCoord))
def createOutputStep(self):
    outputVols = self._createSetOfVolumes()
    imgSet = self.inputParticles.get()
    for i, vol in enumerate(self._iterInputVols()):
        volume = vol.clone()
        volDir = self._getVolDir(i + 1)
        volPrefix = 'vol%03d_' % (i + 1)
        validationMd = self._getExtraPath(volPrefix + 'validation.xmd')
        moveFile(join(volDir, 'validation.xmd'), validationMd)
        clusterMd = self._getExtraPath(volPrefix + 'clusteringTendency.xmd')
        moveFile(join(volDir, 'clusteringTendency.xmd'), clusterMd)

        outImgSet = self._createSetOfParticles(volPrefix)
        outImgSet.copyInfo(imgSet)
        outImgSet.copyItems(imgSet,
                            updateItemCallback=self._setWeight,
                            itemDataIterator=md.iterRows(clusterMd, sortByLabel=md.MDL_ITEM_ID))

        mdValidation = md.MetaData(validationMd)
        weight = mdValidation.getValue(md.MDL_WEIGHT, mdValidation.firstObject())
        volume.weight = Float(weight)
        volume.clusterMd = String(clusterMd)
        volume.cleanObjId()  # clean objects id to assign new ones inside the set
        outputVols.append(volume)
        self._defineOutputs(outputParticles=outImgSet)

    outputVols.setSamplingRate(volume.getSamplingRate())
    self._defineOutputs(outputVolumes=outputVols)
def symStep(self):
    inputVol = self.inputVol.get()
    fnVol = inputVol.getFileName()
    samplingRate = inputVol.getSamplingRate()
    volName = os.path.basename(fnVol)
    volName = os.path.splitext(volName)[0]
    tmpDir = self._getTmpPath(volName)
    fnVol = os.path.abspath(fnVol)
    makePath(tmpDir)

    maskRadius = self.mask.get()
    if maskRadius < 0:
        Xdim = inputVol.getDim()[0]
        maskRadius = Xdim / 2 - 1
    lpCutoff = inputVol.getSamplingRate() / self.lp.get()

    paramsSym = ' prg=symmetry_test vol1=%s smpd=%f msk=%d lp=%f nthr=%d' % \
                (fnVol, samplingRate, maskRadius, lpCutoff, self.numberOfThreads.get())

    self.runJob(simple.Plugin.sim_exec(), 'prg=new_project projname=temp',
                cwd=os.path.abspath(tmpDir), env=simple.Plugin.getEnviron())
    self.runJob(simple.Plugin.sim_exec(), paramsSym,
                cwd=os.path.abspath(tmpDir) + '/temp', env=simple.Plugin.getEnviron())

    # Move output files to ExtraPath and rename them properly
    mvRoot1 = os.path.join(tmpDir + '/temp/1_symmetry_test', "symmetry_test_output.txt")
    moveFile(mvRoot1, self._getExtraPath('point_group_symmetry_.txt'))
    cleanPath(tmpDir)
def _mergeAllParFiles(self, iterN, numberOfBlocks):
    """ Merge all parameter files that have been created in a refineStep. """
    self._enterDir(self._getExtraPath())
    outFn = self._getFileName('iter_par', iter=iterN)

    if numberOfBlocks != 1:
        f1 = open(outFn, 'w+')
        f1.write("C PSI THETA PHI SHX SHY MAG "
                 "FILM DF1 DF2 ANGAST PSHIFT OCC LogP"
                 " SIGMA SCORE CHANGE\n")
        for block in range(1, numberOfBlocks + 1):
            parFn = self._getFileName('iter_par_block', iter=iterN, block=block)
            if not os.path.exists(parFn):
                raise FileNotFoundError("Error: file %s does not exist" % parFn)
            f2 = open(parFn)
            for l in f2:
                if not l.startswith('C'):
                    f1.write(l)
            f2.close()
            cleanPattern(parFn)
        f1.close()
    else:
        parFn = self._getFileName('iter_par_block', iter=iterN, block=1)
        moveFile(parFn, outFn)
    self._leaveDir()
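# A minimal standalone sketch of the same merge idea, assuming hypothetical
# file names such as 'iter01_block*.par': the header (comment lines starting
# with 'C') is written once, and comment lines are skipped when copying each
# per-block file into the merged output.
import glob

def merge_par_blocks(block_pattern, out_path, header):
    with open(out_path, 'w') as out:
        out.write(header)
        for par in sorted(glob.glob(block_pattern)):
            with open(par) as f:
                for line in f:
                    if not line.startswith('C'):
                        out.write(line)

# merge_par_blocks('iter01_block*.par', 'iter01.par', 'C PSI THETA PHI ...\n')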
def reformatPdbOutputStep(self, numberOfModes):
    self._enterWorkingDir()
    makePath('modes')
    Natoms = self._countAtoms("atoms.pdb")
    fhIn = open('diagrtb.eigenfacs')
    fhAni = open('vec_ani.txt', 'w')
    for n in range(numberOfModes):
        # Skip two lines
        fhIn.readline()
        fhIn.readline()
        fhOut = open('modes/vec.%d' % (n + 1), 'w')
        for i in range(Natoms):
            line = fhIn.readline()
            fhOut.write(line)
            fhAni.write(line.rstrip().lstrip() + " ")
        fhOut.close()
        if n != (numberOfModes - 1):
            fhAni.write("\n")
    fhIn.close()
    fhAni.close()
    self.runJob("nma_prepare_for_animate.py", "", env=getNMAEnviron())
    cleanPath("vec_ani.txt")
    moveFile('vec_ani.pkl', 'extra/vec_ani.pkl')
    self._leaveWorkingDir()
def extractParticles(self):
    samplingRateCoord = self.inputCoordinates.get().getSamplingRate()
    samplingRateTomo = self.getInputTomograms().getFirstItem().getSamplingRate()
    for tomo in self.tomoFiles:
        args = '%s ' % os.path.abspath(tomo)
        args += "--coords %s --boxsize %i" % (pwutils.replaceBaseExt(tomo, 'coords'),
                                              self.boxSize.get())
        if self.doInvert:
            args += ' --invert'
        if self.doNormalize:
            args += ' --normproc %s' % self.getEnumText('normproc')
        args += ' --cshrink %d' % (samplingRateCoord / samplingRateTomo)

        program = emantomo.Plugin.getProgram('e2spt_boxer_old.py')
        self.runJob(program, args, cwd=self._getExtraPath(),
                    numberOfMpi=1, numberOfThreads=1)

        moveFile(self._getExtraPath(os.path.join('sptboxer_01', 'basename.hdf')),
                 self._getExtraPath(pwutils.replaceBaseExt(tomo, 'hdf')))
        cleanPath(self._getExtraPath("sptboxer_01"))
def createOutputStep(self):
    outputVols = self._createSetOfVolumes()
    for i, vol in enumerate(self._iterInputVols()):
        volDir = self._getVolDir(i + 1)
        volume = vol.clone()
        volPrefix = 'vol%03d_' % (i + 1)

        m_pruned = md.MetaData()
        m_pruned.read(volDir + '/pruned_particles_alignability.xmd')
        prunedMd = self._getExtraPath(volPrefix + 'pruned_particles_alignability.xmd')
        moveFile(join(volDir, 'pruned_particles_alignability.xmd'), prunedMd)

        m_volScore = md.MetaData()
        m_volScore.read(volDir + '/validationAlignability.xmd')
        validationMd = self._getExtraPath(volPrefix + 'validation_alignability.xmd')
        moveFile(join(volDir, 'validationAlignability.xmd'), validationMd)

        imgSet = self.inputParticles.get()
        outImgSet = self._createSetOfParticles(volPrefix)
        outImgSet.copyInfo(imgSet)
        outImgSet.copyItems(imgSet,
                            updateItemCallback=self._setWeight,
                            itemDataIterator=md.iterRows(prunedMd, sortByLabel=md.MDL_ITEM_ID))

        mdValidation = md.getFirstRow(validationMd)
        weight = mdValidation.getValue(md.MDL_WEIGHT_PRECISION_ALIGNABILITY)
        volume.weightAlignabilityPrecision = Float(weight)
        weight = mdValidation.getValue(md.MDL_WEIGHT_ACCURACY_ALIGNABILITY)
        volume.weightAlignabilityAccuracy = Float(weight)
        weight = mdValidation.getValue(md.MDL_WEIGHT_PRECISION_MIRROR)
        volume.weightMirror = Float(weight)

        volume.cleanObjId()  # clean objects id to assign new ones inside the set
        outputVols.append(volume)
        self._defineOutputs(outputParticles=outImgSet)
        self.createPlot2D(volPrefix, m_pruned)

    outputVols.setSamplingRate(volume.getSamplingRate())
    self._defineOutputs(outputVolumes=outputVols)

    cleanPattern(self._getPath("reference_particles.*"))
    cleanPattern(self._getExtraPath("scaled_particles.*"))
    cleanPattern(self._getExtraPath("reference_particles.*"))
    cleanPattern(self._getExtraPath("corrected_ctf_particles.*"))
    cleanPattern(self._getFileName("volume"))
    cleanPattern(self._getExtraPath("params.txt"))
def convertInputStep(self):
    """ Convert input micrographs into a single mrcs stack """
    inputMics = self.inputMicrographs.get()
    stackFn = self._getTmpPath('input_stack.mrcs')
    stackFnMrc = self._getTmpPath('input_stack.mrc')
    inputMics.writeStack(stackFn, applyTransform=False)
    # Grigorieff's program recognizes only mrc extension
    pwutils.moveFile(stackFn, stackFnMrc)
def _resize(self, Xdim, fnCorrected, prefix, fnTarget):
    if self.newXdim != Xdim:
        self.runJob("xmipp_image_resize",
                    "-i %s -o %s --save_metadata_stack %s --fourier %d" %
                    (fnCorrected, self._getExtraPath(prefix + '.stk'),
                     self._getExtraPath(prefix + '.xmd'), self.newXdim),
                    numberOfMpi=self.myMPI.get())
        moveFile(self._getExtraPath(prefix + '.xmd'), fnTarget)
def createReport(self):
    fnTex = "report.tex"
    fhTex = open(self._getExtraPath(fnTex), "w")
    template = """
\\documentclass[12pt]{article}
\\usepackage{amsmath,amsthm,amssymb,amsfonts}
\\usepackage{graphicx}
\\usepackage{pdfpages}
\\DeclareGraphicsExtensions{.pdf,.png,.jpg}
\\begin{document}
\\title{User Report}
\\author{Created by Scipion}
\\maketitle
"""
    fhTex.write(template)

    fnDir = self.filesPath.get()
    if not os.path.isdir(fnDir):
        fnDir = os.path.basename(fnDir)

    for fileName in sorted(glob.glob(os.path.join(fnDir, "*"))):
        fnDest = os.path.basename(fileName).lower()
        fnDest = fnDest.replace(" ", "_")
        fnDest = fnDest.replace(":", "_")
        fnDest = fnDest.replace(";", "_")
        copyFile(fileName, self._getExtraPath(fnDest))

        if fnDest.endswith(".tex") or fnDest.endswith(".txt"):
            fhTex.write("\\input{%s}\n" % fnDest)
            fhTex.write("\n")
        elif fnDest.endswith(".png") or fnDest.endswith(".jpg"):
            fhTex.write("\\begin{center}\n")
            fhTex.write("\\includegraphics[width=14cm]{%s}\n" % fnDest)
            fhTex.write("\\end{center}\n")
            fhTex.write("\n")
        elif fnDest.endswith(".pdf"):
            fhTex.write("\\includepdf[pages=-]{%s}\n" % fnDest)
            fhTex.write("\\clearpage\n")
            fhTex.write("\n")

    template = """
\\end{document}
"""
    fhTex.write(template)
    fhTex.close()

    args = "-interaction=nonstopmode " + fnTex
    self.runJob("pdflatex", args, cwd=self._getExtraPath())

    fnPDF = self._getExtraPath("report.pdf")
    if os.path.exists(fnPDF):
        moveFile(fnPDF, self._getPath("report.pdf"))
    else:
        raise Exception("PDF file was not produced.")
def reformatOutputStep(self, fnPseudoatoms):
    self._enterWorkingDir()
    n = self._countAtoms(fnPseudoatoms)
    self.runJob("nma_reformat_vector_foranimate.pl", "%d fort.11" % n, env=getNMAEnviron())
    self.runJob("cat", "vec.1* > vec_ani.txt")
    self.runJob("rm", "-f vec.1*")
    self.runJob("nma_reformat_vector.pl", "%d fort.11" % n, env=getNMAEnviron())
    fnModesDir = "modes"
    makePath(fnModesDir)
    self.runJob("mv", "-f vec.* %s" % fnModesDir)
    self.runJob("nma_prepare_for_animate.py", "", env=getNMAEnviron())
    self.runJob("rm", "-f vec_ani.txt fort.11 matrice.sdijf")
    moveFile('vec_ani.pkl', 'extra/vec_ani.pkl')
    self._leaveWorkingDir()
def convertToPseudoAtomsStep(self, inputFn, fnMask, sampling, prefix=''):
    pseudoatoms = 'pseudoatoms%s' % prefix
    outputFn = self._getPath(pseudoatoms)
    sigma = sampling * self.pseudoAtomRadius.get()
    targetErr = self.pseudoAtomTarget.get()
    nthreads = self.numberOfThreads.get()
    params = "-i %(inputFn)s -o %(outputFn)s --sigma %(sigma)f --thr %(nthreads)d "
    params += "--targetError %(targetErr)f --sampling_rate %(sampling)f -v 2 --intensityColumn Bfactor"
    if fnMask:
        params += " --mask binary_file %(fnMask)s"
    print(params % locals())
    self.runJob("xmipp_volume_to_pseudoatoms", params % locals())
    for suffix in ["_approximation.vol", "_distance.hist"]:
        moveFile(self._getPath(pseudoatoms + suffix), self._getExtraPath(pseudoatoms + suffix))
    cleanPattern(self._getPath(pseudoatoms + '_*'))
def convertEndian(stackFn, stackSize):
    """ Convert the stack file generated by Xmipp to one that Spider likes more.
    Params:
        stackFn: the filename of the images stack
        stackSize: the number of particles in the stack
    """
    fn, ext = splitext(stackFn)
    # Change to BigEndian
    runTemplate('cp_endian.spi', ext[1:],
                {'[particles]': fn + '@******',
                 '[particles_big]': fn + '_big@******',
                 '[numberOfParticles]': stackSize})
    moveFile(fn + '_big' + ext, stackFn)
def createOutputStep(self):
    outputVols = self._createSetOfVolumes()
    for i, vol in enumerate(self._iterInputVols()):
        volDir = self._getVolDir(i + 1)
        volume = vol.clone()
        volPrefix = 'vol%03d_' % (i + 1)

        m_pruned = md.MetaData()
        m_pruned.read(volDir + '/pruned_particles_alignability.xmd')
        prunedMd = self._getExtraPath(volPrefix + 'pruned_particles_alignability.xmd')
        moveFile(join(volDir, 'pruned_particles_alignability.xmd'), prunedMd)

        m_volScore = md.MetaData()
        m_volScore.read(volDir + '/validationAlignability.xmd')
        validationMd = self._getExtraPath(volPrefix + 'validation_alignability.xmd')
        moveFile(join(volDir, 'validationAlignability.xmd'), validationMd)

        imgSet = self.inputParticles.get()
        outImgSet = self._createSetOfParticles(volPrefix)
        outImgSet.copyInfo(imgSet)
        outImgSet.copyItems(imgSet,
                            updateItemCallback=self._setWeight,
                            itemDataIterator=md.iterRows(prunedMd, sortByLabel=md.MDL_ITEM_ID))

        mdValidation = md.getFirstRow(validationMd)
        weight = mdValidation.getValue(md.MDL_WEIGHT_PRECISION_ALIGNABILITY)
        volume.weightAlignabilityPrecision = Float(weight)
        weight = mdValidation.getValue(md.MDL_WEIGHT_ACCURACY_ALIGNABILITY)
        volume.weightAlignabilityAccuracy = Float(weight)
        weight = mdValidation.getValue(md.MDL_WEIGHT_PRECISION_MIRROR)
        volume.weightMirror = Float(weight)

        volume.cleanObjId()  # clean objects id to assign new ones inside the set
        outputVols.append(volume)
        self._defineOutputs(outputParticles=outImgSet)
        self.createPlot2D(volPrefix, m_pruned)

    outputVols.setSamplingRate(volume.getSamplingRate())
    self._defineOutputs(outputVolumes=outputVols)
def putMol2Title(fn, title=""):
    i = 0
    fhIn = open(fn)
    fhOut = open(fn + ".aux", 'w')
    for line in fhIn.readlines():
        if i != 1:
            fhOut.write(line)
        else:
            if title != "":
                fhOut.write(title + "\n")
            else:
                fhOut.write(os.path.splitext(os.path.split(fn)[1])[0] + "\n")
        i += 1
    fhIn.close()
    fhOut.close()
    moveFile(fn + ".aux", fn)
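# A minimal usage sketch (hypothetical file name): the function rewrites the
# second line of a MOL2 file, which carries the molecule name, either with an
# explicit title or, when none is given, with the file's base name.
# putMol2Title('ligand.mol2', 'MyLigand')
# putMol2Title('ligand.mol2')   # title becomes 'ligand'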
def prime2DStep(self):
    partFile = self._getExtraPath("particles.mrc")
    SamplingRate = self.inputParticles.get().getSamplingRate()
    kV = self.inputParticles.get().getAcquisition().getVoltage()
    partitions = 1
    partName = os.path.basename(partFile)
    partName = os.path.splitext(partName)[0]
    tmpDir = self._getTmpPath(partName)
    makePath(tmpDir)

    paramsOri = 'prg=print_project_field oritype=ptcl2D > oritab.txt'
    paramsImp = 'prg=import_particles cs=2.7 ctf=no fraca=0.1 kv=%f smpd=%f stk=%s' % \
                (kV, SamplingRate, os.path.abspath(partFile))
    paramsC2D = ' prg=cluster2D msk=%d ncls=%d nparts=%d nthr=%d' % \
                (self.mask.get(), self.clusters.get(), partitions, self.numberOfThreads.get())
    if self.maxIter.get() > 0:
        paramsC2D = paramsC2D + (' maxits=%d' % self.maxIter.get())

    self.runJob(simple.Plugin.sim_exec(), 'prg=new_project projname=temp',
                cwd=os.path.abspath(tmpDir), env=simple.Plugin.getEnviron())
    self.runJob(simple.Plugin.sim_exec(), paramsImp,
                cwd=os.path.abspath(tmpDir) + '/temp', env=simple.Plugin.getEnviron())
    self.runJob(simple.Plugin.distr_exec(), paramsC2D,
                cwd=os.path.abspath(tmpDir) + '/temp', env=simple.Plugin.getEnviron())
    self.runJob(simple.Plugin.sim_exec(), paramsOri,
                cwd=os.path.abspath(tmpDir) + '/temp', env=simple.Plugin.getEnviron())

    # Move output files to ExtraPath and rename them properly
    lastIter = self.getLastIteration(tmpDir)
    os.remove(os.path.abspath(self._getExtraPath("particles.mrc")))
    mvRoot1 = os.path.join(tmpDir + '/temp/2_cluster2D', "cavgs_iter%03d.mrc" % lastIter)
    mvRoot2 = os.path.join(tmpDir + '/temp', "oritab.txt")
    # moveFile(mvRoot1, self._getExtraPath(partName + "_cavgs_final.mrc"))
    ih = ImageHandler()
    ih.convert(mvRoot1, self._getExtraPath(partName + "_cavgs_final.mrcs"))
    moveFile(mvRoot2, self._getExtraPath(partName + "_oritab.txt"))
    cleanPath(tmpDir)
def refineAnglesStep(self):
    fnTmpDir = self._getTmpPath()
    fnDirectional = self._getDirectionalClassesFn()
    inputParticles = self.inputParticles.get()
    newTs = self.readInfoField(self._getExtraPath(), "sampling", xmippLib.MDL_SAMPLINGRATE)
    newXdim = self.readInfoField(self._getExtraPath(), "size", xmippLib.MDL_XSIZE)

    # Generate projections
    fnGallery = join(fnTmpDir, "gallery.stk")
    fnGalleryMd = join(fnTmpDir, "gallery.doc")
    fnVol = self._getInputVolFn()
    args = "-i %s -o %s --sampling_rate %f --sym %s" % (fnVol, fnGallery, 5.0, self.symmetryGroup)
    args += " --compute_neighbors --angular_distance -1 --experimental_images %s" % fnDirectional
    self.runJob("xmipp_angular_project_library", args,
                numberOfMpi=self.numberOfMpi.get() * self.numberOfThreads.get())

    # Global angular assignment
    maxShift = 0.15 * newXdim
    args = '-i %s --initgallery %s --maxShift %d --odir %s --dontReconstruct --useForValidation 0' % \
           (fnDirectional, fnGalleryMd, maxShift, fnTmpDir)
    self.runJob('xmipp_reconstruct_significant', args,
                numberOfMpi=self.numberOfMpi.get() * self.numberOfThreads.get())
    fnAngles = join(fnTmpDir, "angles_iter001_00.xmd")
    self.runJob("xmipp_metadata_utilities",
                "-i %s --operate drop_column ref" % fnAngles, numberOfMpi=1)
    self.runJob("xmipp_metadata_utilities",
                "-i %s --set join %s ref2" % (fnAngles, fnDirectional), numberOfMpi=1)

    # Local angular assignment
    fnAnglesLocalStk = self._getPath("directional_local_classes.stk")
    args = "-i %s -o %s --sampling %f --Rmax %d --padding %d --ref %s --max_resolution %f --applyTo image1 --Nsimultaneous %d" % \
           (fnAngles, fnAnglesLocalStk, newTs, newXdim / 2, 2, fnVol, self.targetResolution, 8)
    args += " --optimizeShift --max_shift %f" % maxShift
    args += " --optimizeAngles --max_angular_change %f" % self.angularDistance
    self.runJob("xmipp_angular_continuous_assign2", args,
                numberOfMpi=self.numberOfMpi.get() * self.numberOfThreads.get())
    moveFile(self._getPath("directional_local_classes.xmd"), self._getDirectionalClassesFn())

    cleanPattern(self._getExtraPath("direction_*"))
def convertEndian(stackFn, stackSize):
    """ Convert the stack file generated by Xmipp to one that Spider likes more.
    Params:
        stackFn: the filename of the images stack
        stackSize: the number of particles in the stack
    """
    fn, ext = splitext(stackFn)
    fnDir, fnBase = split(fn)
    # Change to BigEndian
    runTemplate('cp_endian.spi', ext[1:],
                {'[particles]': fnBase + '@******',
                 '[particles_big]': fnBase + '_big@******',
                 '[numberOfParticles]': stackSize},
                cwd=fnDir)
    moveFile(fn + '_big' + ext, stackFn)
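# A minimal usage sketch (hypothetical file name and particle count): run the
# Spider endian conversion on a stack; the '_big' copy produced by the Spider
# template is moved back over the original, so the conversion happens in place.
# convertEndian('particles.stk', 1000)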
def _coneStep(self, gpuId, idx, modelFn):
    fnLabels = self._getExtraPath('labels.txt')
    fileLabels = open(fnLabels, "r")
    expSet = self._getProjectionsExp(self.numCones)
    if not exists(expSet):
        for n in range(1, self.numCones):
            if exists(self._getProjectionsExp(self.numCones - n)):
                expSet = self._getProjectionsExp(self.numCones - n)
                break
    newFnLabels = self._getExtraPath('labels%d.txt' % idx)
    newFileLabels = open(newFnLabels, "w")
    lines = fileLabels.readlines()
    for line in lines:
        if line == str(idx - 1) + '\n':
            newFileLabels.write('1\n')
        else:
            newFileLabels.write('0\n')
    newFileLabels.close()
    fileLabels.close()

    newXdim = readInfoField(self._getExtraPath(), "size", emlib.MDL_XSIZE)
    fnLabels = self._getExtraPath('labels%d.txt' % idx)

    print("Training region ", idx, " in GPU ", gpuId)
    sys.stdout.flush()

    try:
        args = "%s %s %s %s %d %d %d %d " % (expSet, fnLabels, self._getExtraPath(),
                                             modelFn + '_aux', self.numEpochs,
                                             newXdim, 2, self.batchSize.get())
        # args += " %(GPU)s"
        args += " %s " % gpuId
        # args += " %s " % (int(idx % totalGpu))
        self.runJob("xmipp_cone_deepalign", args, numberOfMpi=1, env=self.getCondaEnv())
    except Exception as e:
        raise Exception("ERROR: Please, if you are suffering memory problems, "
                        "check the target resolution to work with lower dimensions.")
    moveFile(self._getExtraPath(modelFn + '_aux.h5'), self._getExtraPath(modelFn + '.h5'))
def unblurStep(self, mvF, samplingRate):
    # movieName = self._getMovieName(movie)
    mvName = os.path.basename(mvF)
    mvName = os.path.splitext(mvName)[0]
    tmpDir = self._getTmpPath(mvName)
    makePath(tmpDir)
    mvRoot = os.path.join(tmpDir, mvName)

    fnInput = os.path.abspath(mvRoot + '.txt')
    fhInput = open(fnInput, 'w')
    fhInput.write(os.path.abspath(mvF))
    fhInput.close()

    params = self.getUnblurParams(fnInput, samplingRate, mvName)
    self.runJob(simple.Plugin.distr_exec(), params,
                cwd=os.path.abspath(tmpDir), env=simple.Plugin.getEnviron())

    moveFile(mvRoot + "_intg1.mrc", self._getExtraPath(mvName + ".mrc"))
    moveFile(mvRoot + "_pspec1.mrc", self._getExtraPath(mvName + "_psd.mrc"))
    moveFile(mvRoot + "_thumb1.mrc", self._getExtraPath(mvName + "_thumb.mrc"))
    cleanPath(tmpDir)
def cleanVolume(self, fnVol):
    # Generate mask if available
    if self.nextMask.hasValue():
        fnMask = self._getExtraPath("mask.vol")
    else:
        fnMask = ""
    fnRootRestored = self._getExtraPath("volumeRestored")

    args = '--i1 %s --i2 %s --oroot %s --denoising 1' % (fnVol, fnVol, fnRootRestored)
    if fnMask != "":
        args += " --mask binary_file %s" % fnMask
    self.runJob('xmipp_volume_halves_restoration', args, numberOfMpi=1)
    moveFile("%s_restored1.vol" % fnRootRestored, fnVol)
    cleanPath("%s_restored2.vol" % fnRootRestored)

    args = '--i1 %s --i2 %s --oroot %s --filterBank 0.01' % (fnVol, fnVol, fnRootRestored)
    if fnMask != "":
        args += " --mask binary_file %s" % fnMask
    self.runJob('xmipp_volume_halves_restoration', args, numberOfMpi=1)
    moveFile("%s_restored1.vol" % fnRootRestored, fnVol)
    cleanPath("%s_restored2.vol" % fnRootRestored)
    cleanPath("%s_filterBank.vol" % fnRootRestored)
def convertStep(self):
    imgsFn = self._getFileName('imgsFn')
    fnVol = self._getFileName('fnVol')
    fnVolMask = self._getFileName('fnVolMask')
    inputParticles = self.inputParticles.get()
    writeSetOfParticles(inputParticles, imgsFn)

    Xdim = inputParticles.getXDim()
    self.Ts = inputParticles.getSamplingRate()
    newTs = self.targetResolution.get() * 1.0 / 3.0
    self.newTs = max(self.Ts, newTs)
    self.newXdim = int(Xdim * self.Ts / newTs)
    writeInfoField(self._getExtraPath(), "sampling", md.MDL_SAMPLINGRATE, newTs)
    writeInfoField(self._getExtraPath(), "size", md.MDL_XSIZE, self.newXdim)

    if self.newXdim != Xdim:
        self.runJob("xmipp_image_resize",
                    "-i %s -o %s --save_metadata_stack %s --fourier %d" %
                    (imgsFn, self._getExtraPath('scaled_particles.stk'),
                     self._getExtraPath('scaled_particles.xmd'), self.newXdim),
                    numberOfMpi=1)
        moveFile(self._getExtraPath('scaled_particles.xmd'), imgsFn)

    ih = ImageHandler()
    ih.convert(self.inputVolume.get(), fnVol)
    Xdim = self.inputVolume.get().getDim()[0]
    if Xdim != self.newXdim:
        self.runJob("xmipp_image_resize",
                    "-i %s --dim %d " % (fnVol, self.newXdim), numberOfMpi=1)

    if self.inputVolumeMask.get():
        ih.convert(self.inputVolumeMask.get(), fnVolMask)
        if Xdim != self.newXdim:
            self.runJob("xmipp_image_resize",
                        "-i %s --dim %d --interp nearest" % (fnVolMask, self.newXdim),
                        numberOfMpi=1)
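# A small worked example of the rescaling arithmetic above (illustrative
# numbers only, not from any real run): with particles at Ts = 1.0 A/px,
# Xdim = 256 and a target resolution of 9 A, newTs = 9 / 3 = 3.0 A/px and
# newXdim = int(256 * 1.0 / 3.0) = 85, i.e. the stack is downsampled so its
# Nyquist limit (2 * newTs = 6 A) stays below the requested resolution.
# newTs = max(Ts, targetResolution / 3.0)
# newXdim = int(Xdim * Ts / newTs)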
def createOutputStep(self):
    outputVols = self._createSetOfVolumes()
    for i, vol in enumerate(self._iterInputVols()):
        volume = vol.clone()
        volDir = self._getVolDir(i + 1)
        volPrefix = 'vol%03d_' % (i + 1)
        validationMd = self._getExtraPath(volPrefix + 'validation.xmd')
        moveFile(join(volDir, 'validation.xmd'), validationMd)
        clusterMd = self._getExtraPath(volPrefix + 'clusteringTendency.xmd')
        moveFile(join(volDir, 'clusteringTendency.xmd'), clusterMd)

        md = xmipp.MetaData(validationMd)
        weight = md.getValue(xmipp.MDL_WEIGHT, md.firstObject())
        volume.weight = Float(weight)
        volume.clusterMd = String(clusterMd)
        volume.cleanObjId()  # clean objects id to assign new ones inside the set
        outputVols.append(volume)

    outputVols.setSamplingRate(volume.getSamplingRate())
    self._defineOutputs(outputVolumes=outputVols)
def createOutputStep(self):
    outputVols = self._createSetOfVolumes()
    for i, vol in enumerate(self._iterInputVols()):
        volume = vol.clone()
        volDir = self._getVolDir(i + 1)
        volPrefix = "vol%03d_" % (i + 1)
        validationMd = self._getExtraPath(volPrefix + "validation.xmd")
        moveFile(join(volDir, "validation.xmd"), validationMd)
        clusterMd = self._getExtraPath(volPrefix + "clusteringTendency.xmd")
        moveFile(join(volDir, "clusteringTendency.xmd"), clusterMd)

        md = xmipp.MetaData(validationMd)
        weight = md.getValue(xmipp.MDL_WEIGHT, md.firstObject())
        volume.weight = Float(weight)
        volume.clusterMd = String(clusterMd)
        volume.cleanObjId()  # clean objects id to assign new ones inside the set
        outputVols.append(volume)

    outputVols.setSamplingRate(volume.getSamplingRate())
    self._defineOutputs(outputVolumes=outputVols)
    self._defineTransformRelation(self.inputVolumes, outputVols)
def writePosFilesStep(self):
    """ Write the pos file for each micrograph in metadata format. """
    # self.posFiles = writeSetOfCoordinates(self._getExtraPath(), self.inputCoords)
    writeSetOfCoordinates(self._getExtraPath(), self.inputCoords)

    # We need to find the mapping (either by micName or micId)
    # between the micrographs in the SetOfCoordinates and
    # the input micrographs (if we are in a different case than 'same as picking')
    if self.downsampleType != SAME_AS_PICKING:
        micDict = {}
        coordMics = self.inputCoords.getMicrographs()
        for mic in coordMics:
            micBase = removeBaseExt(mic.getFileName())
            micPos = self._getExtraPath(micBase + ".pos")
            micDict[mic.getMicName()] = micPos
            micDict[mic.getObjId()] = micPos

        if any(mic.getMicName() in micDict for mic in self.inputMics):
            micKey = lambda mic: mic.getMicName()
        elif any(mic.getObjId() in micDict for mic in self.inputMics):
            self.warning('Could not match input micrographs and coordinates '
                         'micrographs by micName, using micId.')
            micKey = lambda mic: mic.getObjId()
        else:
            raise Exception('Could not match input micrographs and coordinates '
                            'micrographs by either micName or micId.')

        for mic in self.inputMics:  # micrograph from input (other)
            mk = micKey(mic)
            if mk in micDict:
                micPosCoord = micDict[mk]
                if exists(micPosCoord):
                    micBase = removeBaseExt(mic.getFileName())
                    micPos = self._getExtraPath(micBase + ".pos")
                    if micPos != micPosCoord:
                        self.info('Moving %s -> %s' % (micPosCoord, micPos))
                        moveFile(micPosCoord, micPos)
def createOutputStep(self):
    outputVols = self._createSetOfVolumes()
    for vol in self._iterInputVols():
        volume = vol.clone()
        volDir = self._getVolDir(vol.getObjId())
        volPrefix = 'vol%03d_' % (vol.getObjId())
        validationMd = self._getExtraPath(volPrefix + 'validation.xmd')
        moveFile(join(volDir, 'validation.xmd'), validationMd)
        clusterMd = self._getExtraPath(volPrefix + 'clusteringTendency.xmd')
        moveFile(join(volDir, 'clusteringTendency.xmd'), clusterMd)

        mData = md.MetaData(validationMd)
        weight = mData.getValue(md.MDL_WEIGHT, mData.firstObject())
        volume._xmipp_weight = Float(weight)
        volume.clusterMd = String(clusterMd)
        volume.cleanObjId()  # clean objects id to assign new ones inside the set
        outputVols.append(volume)

    outputVols.setSamplingRate(self.partSet.getSamplingRate())
    self._defineOutputs(outputVolumes=outputVols)
    self._defineTransformRelation(self.inputVolumes, outputVols)
def processStep(self):
    # Enter here to generate the star file or to preprocess the images
    outputRadius = self._getOutputRadius()
    params = ' --operate_on input_particles.star'

    if self.doNormalize:
        radius = self.backRadius.get()
        if radius <= 0:
            radius = outputRadius
        params += ' --norm --bg_radius %d' % radius

    if self.doRemoveDust:
        wDust = self.whiteDust.get()
        if wDust > 0:
            params += ' --white_dust %f' % wDust
        bDust = self.blackDust.get()
        if bDust > 0:
            params += ' --black_dust %f' % bDust

    if self.doInvert:
        params += ' --invert_contrast'

    if self.doScale:
        params += ' --scale %d' % self.scaleSize.get()

    if self.doWindow:
        params += ' --window %d' % self.windowSize.get()

    self.runJob(self._getProgram('relion_preprocess'), params, cwd=self._getPath())

    # In Relion 1.3 this produces particles.mrcs.mrcs
    outputMrcs = glob(self._getPath('particles*.mrcs'))[0]
    partFn = self._getPath('particles.mrcs')
    if outputMrcs != partFn:
        # Override the initially converted mrcs particles stack.
        # It also makes it easy to use the same .star file as output.
        moveFile(outputMrcs, partFn)
def init3DStep(self):
    partFile = self._getExtraPath("particles.mrc")
    SamplingRate = self.inputClasses.get().getSamplingRate()
    partName = os.path.basename(partFile)
    partName = os.path.splitext(partName)[0]
    tmpDir = self._getTmpPath(partName)
    makePath(tmpDir)
    partitions = 1

    params3D = ' prg=initial_3Dmodel msk=%d pgrp=%s nparts=%d nthr=%d eo=no' % \
               (self.mask.get(), self.symmetry.get(), partitions, self.numberOfThreads.get())
    paramsImp = ' prg=import_cavgs stk=%s smpd=%f' % (os.path.abspath(partFile), SamplingRate)

    self.runJob(simple.Plugin.sim_exec(), 'prg=new_project projname=temp',
                cwd=os.path.abspath(tmpDir), env=simple.Plugin.getEnviron())
    self.runJob(simple.Plugin.sim_exec(), paramsImp,
                cwd=os.path.abspath(tmpDir) + '/temp', env=simple.Plugin.getEnviron())
    self.runJob(simple.Plugin.distr_exec(), params3D,
                cwd=os.path.abspath(tmpDir) + '/temp', env=simple.Plugin.getEnviron())

    # Move output files to ExtraPath and rename them properly
    os.remove(os.path.abspath(self._getExtraPath("particles.mrc")))
    mvRoot1 = os.path.join(tmpDir + '/temp/2_initial_3Dmodel', "rec_final.mrc")
    mvRoot2 = os.path.join(tmpDir + '/temp/2_initial_3Dmodel', "final_oris.txt")
    moveFile(mvRoot1, self._getExtraPath(partName + "_rec_final.mrc"))
    moveFile(mvRoot2, self._getExtraPath(partName + "_projvol_oris.txt"))
    cleanPath(tmpDir)
def processStep(self):
    # Enter here to generate the star file or to preprocess the images
    outputRadius = self._getOutputRadius()
    params = ' --operate_on input_particles.star ' + self._getOutParam()

    if self.doNormalize:
        radius = self.backRadius.get()
        if radius <= 0:
            radius = outputRadius
        params += ' --norm --bg_radius %d' % radius

    if self.doRemoveDust:
        wDust = self.whiteDust.get()
        if wDust > 0:
            params += ' --white_dust %f' % wDust
        bDust = self.blackDust.get()
        if bDust > 0:
            params += ' --black_dust %f' % bDust

    if self.doInvert:
        params += ' --invert_contrast'

    if self.doScale:
        params += ' --scale %d' % self.scaleSize.get()

    if self.doWindow:
        params += ' --window %d' % self.windowSize.get()

    self.runJob(self._getProgram('relion_preprocess'), params, cwd=self._getPath())

    outputMrcs = glob(self._getPath('*.mrcs.mrcs'))
    if len(outputMrcs) > 0:
        partFn = self._getFileName("preprocess_particles")
        moveFile(outputMrcs[0], partFn)
def fdrStep(self):
    args = Plugin.getHome('lib/py2/FDRcontrol.pyc')
    ih = ImageHandler()
    if self.useHalfMaps.get():
        halfmaps = self.inputVol.get().getHalfMaps().split(',')
        fn1 = halfmaps[0]
        fn2 = halfmaps[1]
        fn1mrc = self._getTmpPath('vol1.mrc')
        fn2mrc = self._getTmpPath('vol2.mrc')
        ih.convert(fn1, fn1mrc)
        ih.convert(fn2, fn2mrc)
        args += ' --em_map tmp/vol1.mrc --halfmap2 tmp/vol2.mrc'
    else:
        fnVol = self._getTmpPath('vol.mrc')
        ih.convert(self.inputVol.get(), fnVol)
        args += ' --em_map tmp/vol.mrc'
    args += ' --testProc rightSided'
    args += ' --window_size %d' % self.noiseBox.get()
    if self.method.get() == 0:
        args += ' -method BY'
    elif self.method.get() == 1:
        args += ' -method BH'
    elif self.method.get() == 2:
        args += ' -method Holm'
    elif self.method.get() == 3:
        args += ' -method Hochberg'
    self.runJob('ccpem-python', args, cwd=self._getPath())

    moveFile(self._getPath('diag_image.pdf'), self._getExtraPath('diag_image.pdf'))
    if self.useHalfMaps.get():
        moveFile(self._getPath('vol1_confidenceMap.mrc'),
                 self._getExtraPath('vol_confidenceMap.mrc'))
    else:
        moveFile(self._getPath('vol_confidenceMap.mrc'),
                 self._getExtraPath('vol_confidenceMap.mrc'))

    fhSummary = open(self._getPath('summary.txt'), 'w')
    fhLog = open(self._getPath('logs/run.stdout'))
    for line in fhLog.readlines():
        if line.startswith('Calculated map threshold'):
            fhSummary.write(line)
    fhLog.close()
    fhSummary.close()
def globalAssignment(self):
    iteration = 1
    fnDirCurrent = self._getExtraPath("Iter%03d" % iteration)
    makePath(fnDirCurrent)
    fnGlobal = join(fnDirCurrent, "globalAssignment")
    makePath(fnGlobal)

    targetResolution = self.significantMaxResolution.get()
    TsCurrent = max(self.TsOrig, targetResolution / 3)
    self.prepareImages(fnGlobal, TsCurrent)
    self.prepareReferences(fnGlobal, TsCurrent, targetResolution)

    # Calculate angular step at this resolution
    ResolutionAlignment = targetResolution
    newXdim = self.readInfoField(fnGlobal, "size", xmipp.MDL_XSIZE)
    angleStep = self.calculateAngStep(newXdim, TsCurrent, ResolutionAlignment)
    self.writeInfoField(fnGlobal, "angleStep", xmipp.MDL_ANGLE_DIFF, float(angleStep))

    # Significant alignment
    alpha = 1 - 0.01 * self.significantSignificance.get()
    fnDirSignificant = join(fnGlobal, "significant")
    fnImgs = join(fnGlobal, "images.xmd")
    makePath(fnDirSignificant)

    # Create defocus groups
    row = getFirstRow(fnImgs)
    if row.containsLabel(xmipp.MDL_CTF_MODEL) or row.containsLabel(xmipp.MDL_CTF_DEFOCUSU):
        self.runJob("xmipp_ctf_group",
                    "--ctfdat %s -o %s/ctf:stk --pad 2.0 --sampling_rate %f --phase_flipped --error 0.1 --resol %f" %
                    (fnImgs, fnDirSignificant, TsCurrent, targetResolution), numberOfMpi=1)
        moveFile("%s/ctf_images.sel" % fnDirSignificant, "%s/ctf_groups.xmd" % fnDirSignificant)
        cleanPath("%s/ctf_split.doc" % fnDirSignificant)
        md = xmipp.MetaData("numberGroups@%s" % join(fnDirSignificant, "ctfInfo.xmd"))
        fnCTFs = "%s/ctf_ctf.stk" % fnDirSignificant
        numberGroups = md.getValue(xmipp.MDL_COUNT, md.firstObject())
        ctfPresent = True
    else:
        numberGroups = 1
        ctfPresent = False

    # Generate projections
    fnReferenceVol = join(fnGlobal, "volumeRef.vol")
    fnGallery = join(fnDirSignificant, "gallery.stk")
    fnGalleryMd = join(fnDirSignificant, "gallery.xmd")
    args = "-i %s -o %s --sampling_rate %f --sym %s --min_tilt_angle %f --max_tilt_angle %f" % \
           (fnReferenceVol, fnGallery, angleStep, self.symmetryGroup,
            self.angularMinTilt.get(), self.angularMaxTilt.get())
    self.runJob("xmipp_angular_project_library", args)
    cleanPath(join(fnDirSignificant, "gallery_angles.doc"))
    moveFile(join(fnDirSignificant, "gallery.doc"), fnGalleryMd)

    fnAngles = join(fnGlobal, "anglesDisc.xmd")
    for j in range(1, numberGroups + 1):
        fnAnglesGroup = join(fnDirSignificant, "angles_group%02d.xmd" % j)
        if not exists(fnAnglesGroup):
            if ctfPresent:
                fnGroup = "ctfGroup%06d@%s/ctf_groups.xmd" % (j, fnDirSignificant)
                fnGalleryGroup = join(fnDirSignificant, "gallery_group%06d.stk" % j)
                fnGalleryGroupMd = join(fnDirSignificant, "gallery_group%06d.xmd" % j)
                self.runJob("xmipp_transform_filter",
                            "-i %s -o %s --fourier binary_file %d@%s --save_metadata_stack %s --keep_input_columns" %
                            (fnGalleryMd, fnGalleryGroup, j, fnCTFs, fnGalleryGroupMd))
            else:
                fnGroup = fnImgs
                fnGalleryGroupMd = fnGalleryMd
            args = '-i %s --initgallery %s --odir %s --sym %s --iter 1 --alpha0 %f --alphaF %f --angularSampling %f --maxShift %d ' \
                   '--minTilt %f --maxTilt %f --useImed --angDistance %f --dontReconstruct' % \
                   (fnGroup, fnGalleryGroupMd, fnDirSignificant, self.symmetryGroup, alpha, alpha, angleStep,
                    round(self.angularMaxShift.get() * newXdim / 100),
                    self.angularMinTilt.get(), self.angularMaxTilt.get(), 2 * angleStep)
            self.runJob('xmipp_reconstruct_significant', args, numberOfMpi=self.numberOfThreads.get())
            moveFile(join(fnDirSignificant, "angles_iter001_00.xmd"),
                     join(fnDirSignificant, "angles_group%02d.xmd" % j))
            self.runJob("rm -f", fnDirSignificant + "/images_*iter00?_*.xmd", numberOfMpi=1)
        if j == 1:
            copyFile(fnAnglesGroup, fnAngles)
        else:
            self.runJob("xmipp_metadata_utilities",
                        "-i %s --set union %s" % (fnAngles, fnAnglesGroup), numberOfMpi=1)
    if ctfPresent:
        self.runJob("rm -f", fnDirSignificant + "/gallery*", numberOfMpi=1)
def _processMovie(self, movieId, movieName, movieFolder):
    """ Process the movie actions, remember to:
    1) Generate all output files inside movieFolder (usually with cwd in runJob)
    2) Copy the important result files after processing (movieFolder will be deleted!!!)
    """
    program = self._getProgram()

    # Read the parameters
    # micName = self._getMicName(movieId)
    micName = self._getNameExt(movieName, "_aligned", "mrc")
    metadataNameInterMediate = self._getNameExt(movieName, "_alignedIntermediate", "xmd")
    metadataName = self._getNameExt(movieName, "_aligned", "xmd")
    psdCorrName = self._getNameExt(movieName, "_aligned_corrected", "psd")
    firstFrame = self.alignFrame0.get()
    lastFrame = self.alignFrameN.get()
    gpuId = self.GPUCore.get()
    alMethod = self.alignMethod.get()

    # For simple average execution
    if alMethod == AL_AVERAGE:
        command = "-i %(movieName)s -o %(micName)s" % locals()
        command += " --nst %d --ned %d --simpleAverage --psd" % (firstFrame, lastFrame)
        try:
            self.runJob(program, command, cwd=movieFolder)
        except:
            print >> sys.stderr, program, " failed for movie %(movieName)s" % locals()

    # For DosefGPU Execution (and combination with optical flow)
    elif alMethod == AL_DOSEFGPU or alMethod == AL_DOSEFGPUOPTICAL:
        logFile = self._getLogFile(movieId)
        # gainFile = self.inputMovies.get().getGain()
        args = {"-crx": self.cropOffsetX.get(),
                "-cry": self.cropOffsetY.get(),
                "-cdx": self.cropDimX.get(),
                "-cdy": self.cropDimY.get(),
                "-bin": self.binFactor.get(),
                "-nst": self.alignFrame0.get(),
                "-ned": self.alignFrameN.get(),
                "-nss": self.sumFrame0.get(),
                "-nes": self.sumFrameN.get(),
                "-gpu": gpuId,
                "-flg": logFile,
                }
        command = "%(movieName)s -fcs %(micName)s " % locals()
        command += " ".join(["%s %s" % (k, v) for k, v in args.iteritems()])
        if alMethod == AL_DOSEFGPUOPTICAL:
            program = "dosefgpu_driftcorr"
            corrMovieName = self._getCorrMovieName(movieId)
            command += " " + "-fct %(corrMovieName)s -ssc 1 " % locals()
        command += " " + self.extraParams.get()
        import pyworkflow.em.packages.dosefgpu as dosefgpu
        try:
            self.runJob(program, command, cwd=movieFolder, env=dosefgpu.getEnviron())
        except:
            print >> sys.stderr, program, " failed for movie %(movieName)s" % locals()

    elif alMethod == AL_CROSSCORRELATION or alMethod == AL_CROSSCORRELATIONOPTICAL:  # not dosefgpu
        program = "xmipp_movie_alignment_correlation"
        corrMovieName = self._getCorrMovieName(movieId)
        command = "-i %s " % movieName
        command += "-o %s " % metadataNameInterMediate
        command += "--sampling %f " % self.samplingRate
        command += "--max_freq %f " % self.filterFactor
        command += "--cropULCorner %d %d " % (self.cropOffsetX.get(), self.cropOffsetY.get())
        command += "--cropDRCorner %d %d " % (self.cropOffsetX.get() + self.cropDimX.get() - 1,
                                              self.cropOffsetY.get() + self.cropDimY.get() - 1)
        _lastFrame = -1
        if lastFrame != 0:
            _lastFrame = lastFrame
        command += "--frameRange %d %d " % (firstFrame, _lastFrame)
        command += "--max_shift %d " % 16  # TODO expert param
        command += "--oavg %s " % micName
        command += "--oaligned %s " % corrMovieName
        try:
            self.runJob(program, command, cwd=movieFolder)
        except:
            print >> sys.stderr, program, " failed for movie %(movieName)s" % locals()

    # For Optical Flow execution (and combination with DosefGPU)
    if alMethod == AL_OPTICAL or alMethod == AL_DOSEFGPUOPTICAL or alMethod == AL_CROSSCORRELATIONOPTICAL:
        winSize = self.winSize.get()
        if alMethod == AL_DOSEFGPUOPTICAL:
            program = "xmipp_movie_optical_alignment_gpu"
            corrMovieName = self._getCorrMovieName(movieId)
            command = "-i %(corrMovieName)s " % locals()
            # Set to Zero for Optical Flow (output movie of dosefgpu)
            firstFrame = 0
            lastFrame = 0
        elif alMethod == AL_CROSSCORRELATIONOPTICAL:
            program = "xmipp_movie_optical_alignment_cpu"
            command = "-i %(corrMovieName)s " % locals()
            # Set to Zero for Optical Flow (output movie of dosefgpu)
            firstFrame = 0
            lastFrame = 0
        else:
            program = "xmipp_movie_optical_alignment_cpu"
            command = "-i %(movieName)s " % locals()
        command += "-o %(micName)s --winSize %(winSize)d" % locals()
        command += " --nst %d --ned %d --psd" % (firstFrame, lastFrame)
        if self.doGPU:
            command += " --gpu %d" % gpuId
        try:
            self.runJob(program, command, cwd=movieFolder)
        except:
            print >> sys.stderr, program, " failed for movie %(movieName)s" % locals()

    if alMethod == AL_OPTICAL or alMethod == AL_DOSEFGPUOPTICAL or alMethod == AL_CROSSCORRELATIONOPTICAL:
        moveFile(join(movieFolder, metadataName), self._getExtraPath())

    # Move output micrograph and related information to 'extra' folder
    moveFile(join(movieFolder, micName), self._getExtraPath())
    if alMethod == AL_DOSEFGPU:
        # Copy the log file to have shifts information
        moveFile(join(movieFolder, logFile), self._getExtraPath())
    elif alMethod == AL_CROSSCORRELATION:
        # Copy metadata file otherwise it will be deleted
        # TODO: create a proper scipion object
        moveFile(join(movieFolder, metadataNameInterMediate), self._getExtraPath())
        moveFile(join(movieFolder, corrMovieName), self._getExtraPath())
    else:
        moveFile(join(movieFolder, psdCorrName), self._getExtraPath())
def generateOutputStackStep(self, tsObjId):
    tomo = self.inputSetOfTomograms.get()[tsObjId]
    location = tomo.getLocation()[1]
    fileName, fileExtension = os.path.splitext(location)

    extraPrefix = self._getExtraPath(os.path.basename(fileName))
    tmpPrefix = self._getTmpPath(os.path.basename(fileName))
    path.makePath(extraPrefix)
    path.makePath(tmpPrefix)

    runNewstack = False

    paramsNewstack = {
        'input': location,
        'output': os.path.join(extraPrefix, os.path.basename(location)),
        'imagebinned': 1.0,
    }
    argsNewstack = "-input %(input)s " \
                   "-output %(output)s " \
                   "-imagebinned %(imagebinned)s "

    if self.floatDensities.get() != 0:
        runNewstack = True
        argsNewstack += " -FloatDensities " + str(self.floatDensities.get())
        if self.floatDensities.get() == 2:
            if self.meanSdToggle.get() == 0:
                argsNewstack += " -MeanAndStandardDeviation " + str(self.scaleMean.get()) + "," + \
                                str(self.scaleSd.get())
        elif self.floatDensities.get() == 4:
            argsNewstack += " -ScaleMinAndMax " + str(self.scaleMax.get()) + "," + str(self.scaleMin.get())
        else:
            if self.scaleRangeToggle.get() == 0:
                argsNewstack += " -ScaleMinAndMax " + str(self.scaleRangeMax.get()) + "," + \
                                str(self.scaleRangeMin.get())

    if self.getModeToOutput() is not None:
        runNewstack = True
        argsNewstack += " -ModeToOutput " + str(self.getModeToOutput())

    if runNewstack:
        Plugin.runImod(self, 'newstack', argsNewstack % paramsNewstack)

    if self.binning.get() != 1:
        if runNewstack:
            path.moveFile(os.path.join(extraPrefix, os.path.basename(location)),
                          os.path.join(tmpPrefix, os.path.basename(location)))
            inputTomoPath = os.path.join(tmpPrefix, os.path.basename(location))
        else:
            inputTomoPath = location

        paramsBinvol = {
            'input': inputTomoPath,
            'output': os.path.join(extraPrefix, os.path.basename(location)),
            'binning': self.binning.get(),
        }
        argsBinvol = "-input %(input)s " \
                     "-output %(output)s " \
                     "-binning %(binning)d "
        Plugin.runImod(self, 'binvol', argsBinvol % paramsBinvol)

    outputNormalizedSetOfTomograms = self.getOutputNormalizedSetOfTomograms()

    newTomogram = Tomogram()
    newTomogram.copyInfo(tomo)
    newTomogram.copyAttributes(tomo, '_origin')
    if not runNewstack and self.binning.get() == 1:
        newTomogram.setLocation(location)
    else:
        newTomogram.setLocation(os.path.join(extraPrefix, os.path.basename(location)))
    if self.binning > 1:
        newTomogram.setSamplingRate(tomo.getSamplingRate() * int(self.binning.get()))

    outputNormalizedSetOfTomograms.append(newTomogram)
    outputNormalizedSetOfTomograms.update(newTomogram)
    outputNormalizedSetOfTomograms.write()
    self._store()
def _processMovie(self, movieId, movieName, movieFolder):
    """ Process the movie actions, remember to:
    1) Generate all output files inside movieFolder (usually with cwd in runJob)
    2) Copy the important result files after processing (movieFolder will be deleted!!!)
    """
    program = self._getProgram()

    # Read the parameters
    # micName = self._getMicName(movieId)
    micName = self._getNameExt(movieName, '_aligned', 'mrc')
    metadataNameInterMediate = self._getNameExt(movieName, '_alignedIntermediate', 'xmd')
    metadataName = self._getNameExt(movieName, '_aligned', 'xmd')
    psdCorrName = self._getNameExt(movieName, '_aligned_corrected', 'psd')
    firstFrame = self.alignFrame0.get()
    lastFrame = self.alignFrameN.get()
    gpuId = self.GPUCore.get()
    alMethod = self.alignMethod.get()
    doSaveMovie = False

    # Some movies have .mrc or .mrcs format but are recognized as a volume
    if movieName.endswith('.mrcs') or movieName.endswith('.mrc'):
        movieSuffix = ':mrcs'
    elif movieName.endswith('.em'):
        movieSuffix = ':ems'
    else:
        movieSuffix = ''

    # For simple average execution
    grayCorrected = False
    if alMethod == AL_AVERAGE:
        command = '-i %(movieName)s%(movieSuffix)s -o %(micName)s' % locals()
        command += ' --nst %d --ned %d --simpleAverage' % (firstFrame, lastFrame)
        if self.inputMovies.get().getDark():
            command += " --dark " + self.inputMovies.get().getDark()
            grayCorrected = True
        if self.inputMovies.get().getGain():
            command += " --gain " + self.inputMovies.get().getGain()
            grayCorrected = True
        try:
            self.runJob(program, command, cwd=movieFolder)
        except:
            print >> sys.stderr, program, " failed for movie %(movieName)s" % locals()

    # For DosefGPU Execution (and combination with optical flow)
    if alMethod == AL_DOSEFGPU or alMethod == AL_DOSEFGPUOPTICAL:
        logFile = self._getLogFile(movieId)
        # gainFile = self.inputMovies.get().getGain()
        args = {'-crx': self.cropOffsetX.get(),
                '-cry': self.cropOffsetY.get(),
                '-cdx': self.cropDimX.get(),
                '-cdy': self.cropDimY.get(),
                '-bin': self.binFactor.get(),
                '-nst': self.alignFrame0.get(),
                '-ned': self.alignFrameN.get(),
                '-nss': self.sumFrame0.get(),
                '-nes': self.sumFrameN.get(),
                '-gpu': gpuId,
                '-flg': logFile,
                }
        command = '%(movieName)s -fcs %(micName)s ' % locals()
        command += ' '.join(['%s %s' % (k, v) for k, v in args.iteritems()])
        if alMethod == AL_DOSEFGPUOPTICAL:
            program = 'dosefgpu_driftcorr'
            corrMovieName = self._getCorrMovieName(movieId)
            command += ' ' + '-fct %(corrMovieName)s -ssc 1 ' % locals()
        command += ' ' + self.extraParams.get()
        import pyworkflow.em.packages.dosefgpu as dosefgpu
        try:
            self.runJob(program, command, cwd=movieFolder, env=dosefgpu.getEnviron())
        except:
            print >> sys.stderr, program, " failed for movie %(movieName)s" % locals()

    if alMethod == AL_CROSSCORRELATION or alMethod == AL_CROSSCORRELATIONOPTICAL:  # not dosefgpu
        program = 'xmipp_movie_alignment_correlation'
        corrMovieName = self._getCorrMovieName(movieId)
        command = '-i %s%s ' % (movieName, movieSuffix)
        command += '-o %s ' % metadataNameInterMediate
        command += '--sampling %f ' % self.samplingRate
        command += '--max_freq %f ' % self.filterFactor
        command += '--cropULCorner %d %d ' % (self.cropOffsetX.get(), self.cropOffsetY.get())
        command += '--cropDRCorner %d %d ' % (self.cropOffsetX.get() + self.cropDimX.get() - 1,
                                              self.cropOffsetY.get() + self.cropDimY.get() - 1)
        _lastFrame = -1
        if lastFrame != 0:
            _lastFrame = lastFrame
        command += '--frameRange %d %d ' % (firstFrame, _lastFrame)
        command += '--max_shift %d ' % 16  # TODO expert param
        command += '--oavg %s ' % micName
        command += ' --oaligned %s ' % corrMovieName
        if self.inputMovies.get().getDark():
            command += " --dark " + self.inputMovies.get().getDark()
            grayCorrected = True
        if self.inputMovies.get().getGain():
            command += " --gain " + self.inputMovies.get().getGain()
            grayCorrected = True
        try:
            self.runJob(program, command, cwd=movieFolder)
        except:
            print >> sys.stderr, program, " failed for movie %(movieName)s" % locals()

    # For Optical Flow execution (and combination with DosefGPU)
    if alMethod == AL_OPTICAL or \
       alMethod == AL_DOSEFGPUOPTICAL or \
       alMethod == AL_CROSSCORRELATIONOPTICAL:
        winSize = self.winSize.get()
        doSaveMovie = self.doSaveMovie.get()
        groupSize = self.groupSize.get()
        if alMethod == AL_DOSEFGPUOPTICAL:
            program = 'xmipp_movie_optical_alignment_gpu'
            corrMovieName = self._getCorrMovieName(movieId)
            command = '-i %(corrMovieName)s%(movieSuffix)s ' % locals()
            # Set to Zero for Optical Flow (output movie of dosefgpu)
            firstFrame = 0
            lastFrame = 0
        elif alMethod == AL_CROSSCORRELATIONOPTICAL:
            program = 'xmipp_movie_optical_alignment_cpu'
            command = '-i %(corrMovieName)s ' % locals()
        else:
            command = '-i %(movieName)s%(movieSuffix)s ' % locals()
            if self.doGPU:
                program = 'xmipp_movie_optical_alignment_gpu'
                command += '--gpu %d ' % gpuId
            else:
                program = 'xmipp_movie_optical_alignment_cpu'
        # Set to Zero for Optical Flow (output movie of dosefgpu)
        firstFrame = 0
        lastFrame = 0
        if doSaveMovie:
            command += ' --ssc '
        command += '-o %(micName)s --winSize %(winSize)d --groupSize %(groupSize)d ' % locals()
        command += '--nst %d --ned %d ' % (firstFrame, lastFrame)
        if self.inputMovies.get().getDark() and not grayCorrected:
            command += " --dark " + self.inputMovies.get().getDark()
            grayCorrected = True
        if self.inputMovies.get().getGain() and not grayCorrected:
            command += " --gain " + self.inputMovies.get().getGain()
            grayCorrected = True
        try:
            self.runJob(program, command, cwd=movieFolder)
        except:
            print >> sys.stderr, program, " failed for movie %(movieName)s" % locals()
        moveFile(join(movieFolder, metadataName), self._getExtraPath())

    # Compute half-half PSD
    ih = em.ImageHandler()
    print join(movieFolder, '%(movieName)s' % locals())
    avg = ih.computeAverage(join(movieFolder, movieName))
    avg.write(join(movieFolder, 'uncorrectedmic.mrc'))
    command = '--micrograph uncorrectedmic.mrc --oroot uncorrectedpsd --dont_estimate_ctf --pieceDim 400 --overlap 0.7'
    program = 'xmipp_ctf_estimate_from_micrograph'
    self.runJob(program, command, cwd=movieFolder)
    command = '--micrograph %(micName)s --oroot correctedpsd --dont_estimate_ctf --pieceDim 400 --overlap 0.7' % locals()
    self.runJob(program, command, cwd=movieFolder)
    correctedPSD = em.ImageHandler().createImage()
    unCorrectedPSD = em.ImageHandler().createImage()
    correctedPSD.read(join(movieFolder, 'correctedpsd.psd'))
    unCorrectedPSD.read(join(movieFolder, 'uncorrectedpsd.psd'))
    x, y, z, n = correctedPSD.getDimensions()
    for i in range(1, y):
        for j in range(1, x // 2):
            unCorrectedPSD.setPixel(i, j, correctedPSD.getPixel(i, j))
    unCorrectedPSD.write(join(movieFolder, psdCorrName))

    # Move output micrograph and related information to 'extra' folder
    moveFile(join(movieFolder, micName), self._getExtraPath())
    if doSaveMovie:
        outMovieName = self._getNameExt(movieName, '_aligned', 'mrcs')
        moveFile(join(movieFolder, outMovieName), self._getExtraPath())

    if alMethod == AL_DOSEFGPU:
        # Copy the log file to have shifts information
        moveFile(join(movieFolder, logFile), self._getExtraPath())
    elif alMethod == AL_CROSSCORRELATION:
        # Copy metadata file otherwise it will be deleted
        # TODO: create a proper scipion object
        moveFile(join(movieFolder, metadataNameInterMediate), self._getExtraPath())
        # moveFile(join(movieFolder, corrMovieName), self._getExtraPath())
    moveFile(join(movieFolder, psdCorrName), self._getExtraPath())
def significantStep(self, iterNumber, alpha):
    iterDir = self._getTmpPath('iter%03d' % iterNumber)
    makePath(iterDir)
    prevVolFn = self.getIterVolume(iterNumber - 1)
    volFn = self.getIterVolume(iterNumber)
    anglesFn = self._getExtraPath('angles_iter%03d.xmd' % iterNumber)

    t = Timer()
    t.tic()
    if self.useGpu.get() and iterNumber > 1:
        # Generate projections
        fnGalleryRoot = join(iterDir, "gallery")
        args = "-i %s -o %s.stk --sampling_rate %f --sym %s " \
               "--compute_neighbors --angular_distance -1 " \
               "--experimental_images %s --min_tilt_angle %f " \
               "--max_tilt_angle %f -v 0 --perturb %f " % \
               (prevVolFn, fnGalleryRoot, self.angularSampling.get(),
                self.symmetryGroup, self.imgsFn, self.minTilt, self.maxTilt,
                math.sin(self.angularSampling.get()) / 4)
        self.runJob("xmipp_angular_project_library", args, numberOfMpi=1)

        if self.trueSymsNo != 0:
            alphaApply = (alpha * self.trueSymsNo) / 2
        else:
            alphaApply = alpha / 2
        from pwem.emlib.metadata import getSize
        N = int(getSize(fnGalleryRoot + '.doc') * alphaApply * 2)

        # Build the list of CUDA device indices and export CUDA_VISIBLE_DEVICES
        count = 0
        GpuListCuda = ''
        if self.useQueueForSteps() or self.useQueue():
            GpuList = os.environ["CUDA_VISIBLE_DEVICES"]
            GpuList = GpuList.split(",")
            for elem in GpuList:
                GpuListCuda = GpuListCuda + str(count) + ' '
                count += 1
        else:
            GpuList = ' '.join([str(elem) for elem in self.getGpuList()])
            GpuListAux = ''
            for elem in self.getGpuList():
                GpuListCuda = GpuListCuda + str(count) + ' '
                GpuListAux = GpuListAux + str(elem) + ','
                count += 1
            os.environ["CUDA_VISIBLE_DEVICES"] = GpuListAux

        args = '-i %s -r %s.doc -o %s --keepBestN %f --dev %s ' % \
               (self.imgsFn, fnGalleryRoot, anglesFn, N, GpuListCuda)
        self.runJob(CUDA_ALIGN_SIGNIFICANT, args, numberOfMpi=1)

        cleanPattern(fnGalleryRoot + "*")
    else:
        args = self.getSignificantArgs(self.imgsFn)
        args += ' --odir %s' % iterDir
        args += ' --alpha0 %f --alphaF %f' % (alpha, alpha)
        args += ' --dontCheckMirrors '

        if iterNumber == 1:
            if self.thereisRefVolume:
                args += " --initvolumes " + self._getExtraPath('input_volumes.xmd')
            else:
                args += " --numberOfVolumes 1"
        else:
            args += " --initvolumes %s" % prevVolFn
        self.runJob("xmipp_reconstruct_significant", args)
        moveFile(os.path.join(iterDir, 'angles_iter001_00.xmd'), anglesFn)
    t.toc('Significant took: ')

    reconsArgs = ' -i %s --fast' % anglesFn
    reconsArgs += ' -o %s' % volFn
    reconsArgs += ' --weight -v 0 --sym %s ' % self.symmetryGroup

    print("Number of images for reconstruction: ", metadata.getSize(anglesFn))
    t.tic()
    if self.useGpu.get():
        cudaReconsArgs = reconsArgs
        # AJ: to make it work with and without a queue system
        if self.numberOfMpi.get() > 1:
            N_GPUs = len((self.gpuList.get()).split(','))
            cudaReconsArgs += ' -gpusPerNode %d' % N_GPUs
            cudaReconsArgs += ' -threadsPerGPU %d' % max(self.numberOfThreads.get(), 4)
        count = 0
        GpuListCuda = ''
        if self.useQueueForSteps() or self.useQueue():
            GpuList = os.environ["CUDA_VISIBLE_DEVICES"]
            GpuList = GpuList.split(",")
            for elem in GpuList:
                GpuListCuda = GpuListCuda + str(count) + ' '
                count += 1
        else:
            GpuListAux = ''
            for elem in self.getGpuList():
                GpuListCuda = GpuListCuda + str(count) + ' '
                GpuListAux = GpuListAux + str(elem) + ','
                count += 1
            os.environ["CUDA_VISIBLE_DEVICES"] = GpuListAux
        cudaReconsArgs += ' --thr %s' % self.numberOfThreads.get()
        if self.numberOfMpi.get() == 1:
            cudaReconsArgs += ' --device %s' % GpuListCuda
        if self.numberOfMpi.get() > 1:
            self.runJob('xmipp_cuda_reconstruct_fourier', cudaReconsArgs,
                        numberOfMpi=len((self.gpuList.get()).split(',')) + 1)
        else:
            self.runJob('xmipp_cuda_reconstruct_fourier', cudaReconsArgs)
    else:
        self.runJob("xmipp_reconstruct_fourier_accel", reconsArgs)
    t.toc('Reconstruct fourier took: ')

    # Center the volume
    fnSym = self._getExtraPath('volumeSym_%03d.vol' % iterNumber)
    self.runJob("xmipp_transform_mirror",
                "-i %s -o %s --flipX" % (volFn, fnSym), numberOfMpi=1)
    self.runJob("xmipp_transform_mirror", "-i %s --flipY" % fnSym, numberOfMpi=1)
    self.runJob("xmipp_transform_mirror", "-i %s --flipZ" % fnSym, numberOfMpi=1)
    self.runJob("xmipp_image_operate", "-i %s --plus %s" % (fnSym, volFn), numberOfMpi=1)
    self.runJob("xmipp_volume_align",
                '--i1 %s --i2 %s --local --apply' % (fnSym, volFn), numberOfMpi=1)
    cleanPath(fnSym)

    # To mask the volume
    xdim = self.inputSet.get().getDimensions()[0]
    maskArgs = "-i %s --mask circular %d -v 0" % (volFn, -xdim / 2)
    self.runJob('xmipp_transform_mask', maskArgs, numberOfMpi=1)
    # TODO mask the final volume in some smart way...

    # To filter the volume
    if self.useMaxRes:
        self.runJob('xmipp_transform_filter',
                    '-i %s --fourier low_pass %f --sampling %f' %
                    (volFn, self.maxResolution.get(), self.TsCurrent),
                    numberOfMpi=1)

    if not self.keepIntermediate:
        cleanPath(prevVolFn, iterDir)
    if self.thereisRefVolume:
        cleanPath(self._getExtraPath('filteredVolume.vol'))
def _processMovie(self, movieId, movieName, movieFolder):
    """ Process the movie actions, remember to:
    1) Generate all output files inside movieFolder (usually with cwd in runJob)
    2) Copy the important result files after processing (movieFolder will be deleted!!!)
    """
    program = self._getProgram()

    # Read the parameters
    # micName = self._getMicName(movieId)
    micName = self._getNameExt(movieName, '_aligned', 'mrc')
    metadataNameInterMediate = self._getNameExt(movieName, '_alignedIntermediate', 'xmd')
    metadataName = self._getNameExt(movieName, '_aligned', 'xmd')
    psdCorrName = self._getNameExt(movieName, '_aligned_corrected', 'psd')
    firstFrame = self.alignFrame0.get()
    lastFrame = self.alignFrameN.get()
    gpuId = self.GPUCore.get()
    alMethod = self.alignMethod.get()
    doSaveMovie = False

    # Some movies have .mrc or .mrcs format but are recognized as volumes
    if movieName.endswith('.mrcs') or movieName.endswith('.mrc'):
        movieSuffix = ':mrcs'
    else:
        movieSuffix = ''

    # Simple average execution
    grayCorrected = False
    if alMethod == AL_AVERAGE:
        command = '-i %(movieName)s%(movieSuffix)s -o %(micName)s' % locals()
        command += ' --nst %d --ned %d --simpleAverage' % (firstFrame, lastFrame)
        if self.inputMovies.get().getDark():
            command += " --dark " + self.inputMovies.get().getDark()
            grayCorrected = True
        if self.inputMovies.get().getGain():
            command += " --gain " + self.inputMovies.get().getGain()
            grayCorrected = True
        try:
            self.runJob(program, command, cwd=movieFolder)
        except Exception:
            print(program, "failed for movie %(movieName)s" % locals(), file=sys.stderr)

    # DosefGPU execution (and combination with optical flow)
    if alMethod == AL_DOSEFGPU or alMethod == AL_DOSEFGPUOPTICAL:
        logFile = self._getLogFile(movieId)
        # gainFile = self.inputMovies.get().getGain()
        args = {'-crx': self.cropOffsetX.get(),
                '-cry': self.cropOffsetY.get(),
                '-cdx': self.cropDimX.get(),
                '-cdy': self.cropDimY.get(),
                '-bin': self.binFactor.get(),
                '-nst': self.alignFrame0.get(),
                '-ned': self.alignFrameN.get(),
                '-nss': self.sumFrame0.get(),
                '-nes': self.sumFrameN.get(),
                '-gpu': gpuId,
                '-flg': logFile,
                }
        command = '%(movieName)s -fcs %(micName)s ' % locals()
        command += ' '.join(['%s %s' % (k, v) for k, v in args.items()])
        if alMethod == AL_DOSEFGPUOPTICAL:
            program = 'dosefgpu_driftcorr'
            corrMovieName = self._getCorrMovieName(movieId)
            command += ' ' + '-fct %(corrMovieName)s -ssc 1 ' % locals()
        command += ' ' + self.extraParams.get()
        import pyworkflow.em.packages.dosefgpu as dosefgpu
        try:
            self.runJob(program, command, cwd=movieFolder, env=dosefgpu.getEnviron())
        except Exception:
            print(program, "failed for movie %(movieName)s" % locals(), file=sys.stderr)

    # Cross-correlation execution (and combination with optical flow)
    if alMethod == AL_CROSSCORRELATION or alMethod == AL_CROSSCORRELATIONOPTICAL:
        program = 'xmipp_movie_alignment_correlation'
        corrMovieName = self._getCorrMovieName(movieId)
        command = '-i %s%s ' % (movieName, movieSuffix)
        command += '-o %s ' % metadataNameInterMediate
        command += '--sampling %f ' % self.samplingRate
        command += '--max_freq %f ' % self.filterFactor
        command += '--cropULCorner %d %d ' % (self.cropOffsetX.get(), self.cropOffsetY.get())
        command += '--cropDRCorner %d %d ' % (self.cropOffsetX.get() + self.cropDimX.get() - 1,
                                              self.cropOffsetY.get() + self.cropDimY.get() - 1)
        _lastFrame = -1
        if lastFrame != 0:
            _lastFrame = lastFrame
        command += '--frameRange %d %d ' % (firstFrame, _lastFrame)
        command += '--max_shift %d ' % 16  # TODO expert param
        command += '--oavg %s ' % micName
        command += ' --oaligned %s ' % corrMovieName
        if self.inputMovies.get().getDark():
            command += " --dark " + self.inputMovies.get().getDark()
            grayCorrected = True
        if self.inputMovies.get().getGain():
            command += " --gain " + self.inputMovies.get().getGain()
            grayCorrected = True
        try:
            self.runJob(program, command, cwd=movieFolder)
        except Exception:
            print(program, "failed for movie %(movieName)s" % locals(), file=sys.stderr)

    # Optical flow execution (and combination with DosefGPU or cross-correlation)
    if alMethod == AL_OPTICAL or \
       alMethod == AL_DOSEFGPUOPTICAL or \
       alMethod == AL_CROSSCORRELATIONOPTICAL:
        winSize = self.winSize.get()
        doSaveMovie = self.doSaveMovie.get()
        groupSize = self.groupSize.get()
        if alMethod == AL_DOSEFGPUOPTICAL:
            program = 'xmipp_movie_optical_alignment_gpu'
            corrMovieName = self._getCorrMovieName(movieId)
            command = '-i %(corrMovieName)s%(movieSuffix)s ' % locals()
            # Set to zero for optical flow (output movie of dosefgpu)
            firstFrame = 0
            lastFrame = 0
        elif alMethod == AL_CROSSCORRELATIONOPTICAL:
            program = 'xmipp_movie_optical_alignment_cpu'
            command = '-i %(corrMovieName)s ' % locals()
        else:
            command = '-i %(movieName)s%(movieSuffix)s ' % locals()
            if self.doGPU:
                program = 'xmipp_movie_optical_alignment_gpu'
                command += '--gpu %d ' % gpuId
            else:
                program = 'xmipp_movie_optical_alignment_cpu'
        # Set to zero for optical flow (output movie of dosefgpu)
        firstFrame = 0
        lastFrame = 0
        if doSaveMovie:
            command += '--ssc '
        command += '-o %(micName)s --winSize %(winSize)d --groupSize %(groupSize)d ' % locals()
        command += '--nst %d --ned %d ' % (firstFrame, lastFrame)
        if self.inputMovies.get().getDark() and not grayCorrected:
            command += " --dark " + self.inputMovies.get().getDark()
            grayCorrected = True
        if self.inputMovies.get().getGain() and not grayCorrected:
            command += " --gain " + self.inputMovies.get().getGain()
            grayCorrected = True
        try:
            self.runJob(program, command, cwd=movieFolder)
        except Exception:
            print(program, "failed for movie %(movieName)s" % locals(), file=sys.stderr)
        moveFile(join(movieFolder, metadataName), self._getExtraPath())

    # Compute half-half PSD
    ih = em.ImageHandler()
    print(join(movieFolder, movieName))
    avg = ih.computeAverage(join(movieFolder, movieName))
    avg.write(join(movieFolder, 'uncorrectedmic.mrc'))
    program = 'xmipp_ctf_estimate_from_micrograph'
    command = '--micrograph uncorrectedmic.mrc --oroot uncorrectedpsd ' \
              '--dont_estimate_ctf --pieceDim 400 --overlap 0.7'
    self.runJob(program, command, cwd=movieFolder)
    command = '--micrograph %(micName)s --oroot correctedpsd ' \
              '--dont_estimate_ctf --pieceDim 400 --overlap 0.7' % locals()
    self.runJob(program, command, cwd=movieFolder)
    correctedPSD = em.ImageHandler().createImage()
    unCorrectedPSD = em.ImageHandler().createImage()
    correctedPSD.read(join(movieFolder, 'correctedpsd.psd'))
    unCorrectedPSD.read(join(movieFolder, 'uncorrectedpsd.psd'))
    x, y, z, n = correctedPSD.getDimensions()
    # Paste the corrected PSD over half of the uncorrected one
    for i in range(1, y):
        for j in range(1, x // 2):
            unCorrectedPSD.setPixel(i, j, correctedPSD.getPixel(i, j))
    unCorrectedPSD.write(join(movieFolder, psdCorrName))

    # Move output micrograph and related information to the 'extra' folder
    moveFile(join(movieFolder, micName), self._getExtraPath())
    if doSaveMovie:
        outMovieName = self._getNameExt(movieName, '_aligned', 'mrcs')
        moveFile(join(movieFolder, outMovieName), self._getExtraPath())
    if alMethod == AL_DOSEFGPU:
        # Move the log file to keep the shifts information
        moveFile(join(movieFolder, logFile), self._getExtraPath())
    elif alMethod == AL_CROSSCORRELATION:
        # Move the metadata file, otherwise it will be deleted
        # TODO: create a proper scipion object
        moveFile(join(movieFolder, metadataNameInterMediate), self._getExtraPath())
        # moveFile(join(movieFolder, corrMovieName), self._getExtraPath())
    moveFile(join(movieFolder, psdCorrName), self._getExtraPath())
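# The half-half PSD built at the end of _processMovie pastes the corrected PSD
# over half of the uncorrected one so both can be inspected in a single image.
# Below is a minimal NumPy sketch of the same idea, assuming the two PSDs are
# already in memory as 2D arrays (compose_half_psd is an illustrative name only).
import numpy as np

def compose_half_psd(uncorrected, corrected):
    """Return an image whose left half is the corrected PSD and whose right
    half keeps the uncorrected PSD, for a side-by-side comparison."""
    if uncorrected.shape != corrected.shape:
        raise ValueError("Both PSD images must have the same dimensions")
    combined = uncorrected.copy()
    half = combined.shape[1] // 2
    combined[:, :half] = corrected[:, :half]  # paste the corrected left half
    return combined

# Example with synthetic 400x400 data, the piece size used in the protocol.
print(compose_half_psd(np.random.rand(400, 400), np.random.rand(400, 400)).shape)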
def globalAssignment(self):
    iteration = 1
    fnDirCurrent = self._getExtraPath("Iter%03d" % iteration)
    makePath(fnDirCurrent)
    fnGlobal = join(fnDirCurrent, "globalAssignment")
    makePath(fnGlobal)

    targetResolution = self.significantMaxResolution.get()
    TsCurrent = max(self.TsOrig, targetResolution / 3)
    self.prepareImages(fnGlobal, TsCurrent)
    self.prepareReferences(fnGlobal, TsCurrent, targetResolution)

    # Calculate angular step at this resolution
    ResolutionAlignment = targetResolution
    newXdim = self.readInfoField(fnGlobal, "size", xmipp.MDL_XSIZE)
    angleStep = self.calculateAngStep(newXdim, TsCurrent, ResolutionAlignment)
    self.writeInfoField(fnGlobal, "angleStep", xmipp.MDL_ANGLE_DIFF, float(angleStep))

    # Significant alignment
    alpha = 1 - 0.01 * self.significantSignificance.get()
    fnDirSignificant = join(fnGlobal, "significant")
    fnImgs = join(fnGlobal, "images.xmd")
    makePath(fnDirSignificant)

    # Create defocus groups
    row = getFirstRow(fnImgs)
    if row.containsLabel(xmipp.MDL_CTF_MODEL) or row.containsLabel(xmipp.MDL_CTF_DEFOCUSU):
        self.runJob("xmipp_ctf_group",
                    "--ctfdat %s -o %s/ctf:stk --pad 2.0 --sampling_rate %f "
                    "--phase_flipped --error 0.1 --resol %f" %
                    (fnImgs, fnDirSignificant, TsCurrent, targetResolution),
                    numberOfMpi=1)
        moveFile("%s/ctf_images.sel" % fnDirSignificant,
                 "%s/ctf_groups.xmd" % fnDirSignificant)
        cleanPath("%s/ctf_split.doc" % fnDirSignificant)
        md = xmipp.MetaData("numberGroups@%s" % join(fnDirSignificant, "ctfInfo.xmd"))
        fnCTFs = "%s/ctf_ctf.stk" % fnDirSignificant
        numberGroups = md.getValue(xmipp.MDL_COUNT, md.firstObject())
        ctfPresent = True
    else:
        numberGroups = 1
        ctfPresent = False

    # Generate projections
    fnReferenceVol = join(fnGlobal, "volumeRef.vol")
    fnGallery = join(fnDirSignificant, "gallery.stk")
    fnGalleryMd = join(fnDirSignificant, "gallery.xmd")
    args = "-i %s -o %s --sampling_rate %f --sym %s --min_tilt_angle %f --max_tilt_angle %f" % \
           (fnReferenceVol, fnGallery, angleStep, self.symmetryGroup,
            self.angularMinTilt.get(), self.angularMaxTilt.get())
    self.runJob("xmipp_angular_project_library", args)
    cleanPath(join(fnDirSignificant, "gallery_angles.doc"))
    moveFile(join(fnDirSignificant, "gallery.doc"), fnGalleryMd)

    fnAngles = join(fnGlobal, "anglesDisc.xmd")
    for j in range(1, numberGroups + 1):
        fnAnglesGroup = join(fnDirSignificant, "angles_group%02d.xmd" % j)
        if not exists(fnAnglesGroup):
            if ctfPresent:
                fnGroup = "ctfGroup%06d@%s/ctf_groups.xmd" % (j, fnDirSignificant)
                fnGalleryGroup = join(fnDirSignificant, "gallery_group%06d.stk" % j)
                fnGalleryGroupMd = join(fnDirSignificant, "gallery_group%06d.xmd" % j)
                self.runJob("xmipp_transform_filter",
                            "-i %s -o %s --fourier binary_file %d@%s "
                            "--save_metadata_stack %s --keep_input_columns" %
                            (fnGalleryMd, fnGalleryGroup, j, fnCTFs, fnGalleryGroupMd))
            else:
                fnGroup = fnImgs
                fnGalleryGroupMd = fnGalleryMd
            args = '-i %s --initgallery %s --odir %s --sym %s --iter 1 ' \
                   '--alpha0 %f --alphaF %f --angularSampling %f --maxShift %d ' \
                   '--minTilt %f --maxTilt %f --useImed --angDistance %f --dontReconstruct' % \
                   (fnGroup, fnGalleryGroupMd, fnDirSignificant, self.symmetryGroup,
                    alpha, alpha, angleStep,
                    round(self.angularMaxShift.get() * newXdim / 100),
                    self.angularMinTilt.get(), self.angularMaxTilt.get(), 2 * angleStep)
            self.runJob('xmipp_reconstruct_significant', args,
                        numberOfMpi=self.numberOfThreads.get())
            moveFile(join(fnDirSignificant, "angles_iter001_00.xmd"), fnAnglesGroup)
            self.runJob("rm -f", fnDirSignificant + "/images_*iter00?_*.xmd", numberOfMpi=1)
        if j == 1:
            copyFile(fnAnglesGroup, fnAngles)
        else:
            self.runJob("xmipp_metadata_utilities",
                        "-i %s --set union %s" % (fnAngles, fnAnglesGroup),
                        numberOfMpi=1)
    if ctfPresent:
        self.runJob("rm -f", fnDirSignificant + "/gallery*", numberOfMpi=1)