def start(self):
	"""Run KerDenSOM classification on an existing alignment stack."""
	# Look up the source alignment stack and its geometry.
	alignstackdata = appiondata.ApAlignStackData.direct_query(self.params['alignstackid'])
	box = alignstackdata['boxsize']
	pixelsize = alignstackdata['pixelsize']
	maskpixrad = self.params['maskrad'] / pixelsize
	mask_diameter = maskpixrad * 2
	max_diameter = box - 2
	# The mask diameter must fit inside the box with a small border.
	if mask_diameter > max_diameter:
		apDisplay.printError("Mask radius is too big for boxsize: %d > %d" % (mask_diameter, max_diameter))
	apDisplay.printMsg("Mask radius and boxsize: %.1f < %d" % (mask_diameter, max_diameter))
	self.instack = os.path.join(alignstackdata['path']['path'], alignstackdata['imagicfile'])
	outdata = "stack.data"
	# Convert the IMAGIC stack into Xmipp's raw data format, then classify.
	apXmipp.convertStackToXmippData(self.instack, outdata, maskpixrad, box,
		numpart=self.params['numpart'] - 1)
	self.runKerdenSOM(outdata)
	# Stacks over 3 GB are montaged on disk with EMAN; smaller ones in memory.
	if apFile.stackSize(self.instack) > 3.0 * (1024**3):
		self.createMontageByEMAN()
		binned = None
	else:
		binned = self.createMontageInMemory(pixelsize)
	self.insertKerDenSOM(binned=binned)
	# Clean up the temporary conversion and classification files.
	apFile.removeFile(outdata)
	apFile.removeFilePattern("*.cod")
def centerParticles(stack, mask=None, maxshift=None):
	"""Iteratively center the particles of an image stack with EMAN's cenalignint.

	stack    -- path to the particle stack file (centered in place)
	mask     -- optional mask radius in pixels passed through to cenalignint
	maxshift -- optional maximum allowed shift in pixels

	The stack is split into fractions small enough to process in memory;
	per the EMAN FAQ, at least 3x the file size in free RAM is needed.
	"""
	apDisplay.printMsg("Centering stack: "+stack)
	stacksize = apFile.stackSize(stack)
	freemem = mem.free()*1024 #convert memory to bytes
	apDisplay.printMsg("file is %s, mem is %s"
		%(apDisplay.bytes(stacksize), apDisplay.bytes(freemem)))
	### from EMAN FAQ: need to have at least 3x as much ram as the size of the file
	memsize = freemem/3.0
	# Guard against a zero or undefined fraction count (empty stack file or a
	# zero free-memory report), which would otherwise skip centering entirely
	# or raise ZeroDivisionError; always run at least one pass.
	if memsize > 0:
		numfrac = max(1, int(math.ceil(stacksize/memsize)))
	else:
		numfrac = 1
	apDisplay.printMsg("file is %s, will be split into %d fractions"
		%(apDisplay.bytes(stacksize), numfrac))
	for i in range(numfrac):
		emancmd = "cenalignint "+stack
		# Only sub-select a fraction when the stack does not fit in memory.
		if numfrac > 1:
			emancmd += " frac="+str(i)+"/"+str(numfrac)
		if mask is not None:
			emancmd += " mask="+str(mask)
		if maxshift is not None:
			emancmd += " maxshift="+str(maxshift)
		apEMAN.executeEmanCmd(emancmd, verbose=False, showcmd=True)
	return
def writeClusterJobFile(self):
	"""Write a PBS job script that runs xmipp_mpi_ml_align2d on a remote
	cluster, plus a readyupload.sql marker script, then print the manual
	rsync/submit instructions and exit.

	NOTE(review): paths, MPI/Xmipp locations, and the 8-proc mpirun call are
	hard-coded for one specific cluster — confirm before reuse.
	"""
	# nproc is currently only used by the commented-out PBS/mpirun lines below.
	if self.params['nproc'] is None:
		nproc = 128
	else:
		nproc = self.params['nproc']
	rundir = os.path.join("/lustre/people/vossman/xmippdata", self.params['runname'])
	newrundir = "$PBSREMOTEDIR/"
	### base options for xmipp_mpi_ml_align2d; inputs live in the per-job
	### scratch directory ($PBSREMOTEDIR) on the cluster
	xmippopts = ( " "
		+" -i $PBSREMOTEDIR/partlist2.doc "
		+" -nref "+str(self.params['numrefs'])
		+" -iter "+str(self.params['maxiter'])
		+" -o $PBSREMOTEDIR/part"+self.timestamp
		+" -psi_step "+str(self.params['psistep'])
	)
	### fast mode
	if self.params['fast'] is True:
		xmippopts += " -fast "
		if self.params['fastmode'] == "narrow":
			xmippopts += " -C 1e-10 "
		elif self.params['fastmode'] == "wide":
			xmippopts += " -C 1e-18 "
	### convergence criteria
	if self.params['converge'] == "fast":
		xmippopts += " -eps 5e-3 "
	elif self.params['converge'] == "slow":
		xmippopts += " -eps 5e-8 "
	else:
		xmippopts += " -eps 5e-5 "
	### mirrors
	if self.params['mirror'] is True:
		xmippopts += " -mirror "
	### save mem
	if self.params['savemem'] is True:
		xmippopts += " -save_memB "
	### normalization
	if self.params['norm'] is True:
		xmippopts += " -norm "
	### use student's T distribution
	if self.params['student'] is True:
		xmippopts += " -student "
	### memory needed: whole-stack size in GB, reduced by the binning factor
	numbytes = apFile.stackSize(self.stack['file'])
	numgig = math.ceil( numbytes / (1024.0**3) / (self.params['bin']**2) )
	### write to file
	jobfile = "xmipp-"+self.timestamp+".job"
	results = rundir+"/"+self.params['runname']+"-results.tgz"
	f = open(jobfile, "w")
	# PBS resource directives
	f.write("#PBS -l nodes=1:ppn=8\n")
	#f.write("#PBS -l nodes="+str(nproc/4)+":ppn=4\n")
	f.write("#PBS -l walltime=240:00:00\n")
	f.write("#PBS -l cput=240:00:00\n")
	f.write("#PBS -l mem=%dgb\n"%(numgig))
	f.write("#PBS -r n\n")
	f.write("#PBS -k oe\n")
	f.write("\n")
	f.write("## rundir: "+self.params['rundir']+"\n")
	f.write("\n")
	# Stage inputs into node-local scratch and unpack the particle tarball.
	f.write("cd "+rundir+"\n")
	f.write("/bin/rm -fv pbstempdir "+results+"\n")
	f.write("ln -s $PBSREMOTEDIR pbstempdir\n")
	f.write("cd $PBSREMOTEDIR\n")
	f.write("tar xf "+rundir+"/particles.tar\n")
	f.write("\n")
	# Build the Xmipp .doc selection file from partlist.sel (tcsh loop,
	# rewriting each absolute path to be relative to the scratch dir).
	f.write("foreach line ( `cat partlist.sel | cut -f1 -d' '` )\n")
	f.write("  echo $PBSREMOTEDIR/`echo $line | sed 's/^.*partfiles/partfiles/'` 1 >> partlist2.doc\n")
	f.write("end\n")
	f.write("\n")
	# Cluster-specific MPI and Xmipp environment setup.
	f.write("setenv MPI_HOME /lustre/people/applications/openmpi-1.2.2/\n")
	f.write("setenv XMIPP_HOME /lustre/people/vossman/Xmipp-2.3-src/\n")
	f.write("set path = ( $MPI_HOME/bin $path )\n")
	f.write("setenv LD_LIBRARY_PATH $MPI_HOME/lib:$XMIPP_HOME/lib:/usr/lib:/lib\n")
	f.write("\n")
	#f.write("$MPI_HOME/bin/mpirun -np "+str(nproc)+" $XMIPP_HOME/bin/xmipp_mpi_ml_align2d \\\n")
	f.write("$MPI_HOME/bin/mpirun -np 8 \\\n")
	f.write("  $XMIPP_HOME/bin/xmipp_mpi_ml_align2d -save_memB \\\n")
	f.write("  "+xmippopts+"\n")
	f.write("\n")
	# Archive the results back to the shared run directory.
	f.write("tar zcf "+results+" *.???\n")
	f.write("\n")
	f.write("exit\n")
	f.close()
	# SQL marker that flags this MaxLike job as finished in the Appion database.
	query = (
		" UPDATE ApMaxLikeJobData "
		+" SET `finished` = '1' "
		+" WHERE `DEF_id` = '"+str(self.params['maxlikejobid'])+"'"
		+"\n"
	)
	f = open("readyupload.sql", "w")
	f.write(query)
	f.close()
	# Print the manual steps the user must run, then stop this process.
	apDisplay.printMsg("mysql -u usr_object -h database_host ap"+str(self.params['projectid'])+" < readyupload.sql")
	apDisplay.printMsg("tar cf particles.tar partlist.sel partfiles/")
	apDisplay.printMsg("rsync -vaP "+jobfile+" cluster:"+rundir+"/")
	apDisplay.printMsg("rsync -vaP particles.tar cluster:"+rundir+"/")
	apDisplay.printColor("ready to run job on cluster", "cyan")
	sys.exit(1)
def start(self):
	"""Build an initial 3D model with an EMAN common-lines program
	(startAny / startcsym / startoct / starticos), normalize it with
	proc3d, upload it, and render Chimera snapshots.
	"""
	if self.params['method'] == 'any':
		### startAny uses class averages
		clusterstack, numimages = self.getClusterStack()
	else:
		### starticos, startoct, startcsym uses individual particles
		clusterstack, numimages = self.getClusterParticles()
	if self.params['method'] != 'any':
		if self.params['numkeep'] is not None and numimages/10 < int(self.params['numkeep']):
			# BUGFIX: numkeep may be an int (see assignment below), so it must
			# be converted with str() before concatenation to avoid a TypeError.
			apDisplay.printWarning("particle number of "+str(self.params['numkeep'])
				+" is greater than 10% of the number of selected classes")
		elif self.params['numkeep'] is None:
			# Default: keep ~5% of the images, at least 1.
			self.params['numkeep'] = int(math.floor(numimages/20.0))+1
			apDisplay.printWarning("numkeep was not defined, using %d particles"%(self.params['numkeep']))
	nproc = apParam.getNumProcessors()
	#construct command for each of the EMAN commonline method
	if self.params['method'] == 'any':
		startcmd = "startAny "+clusterstack+" proc="+str(nproc)
		startcmd += " sym="+self.symmdata['eman_name']
		if self.params['mask'] is not None:
			startcmd += " mask="+str(self.params['mask'])
		else:
			# Default mask: half the box size of the cluster stack.
			maskrad = math.floor(self.clusterstackdata['clusterrun']['boxsize']/2.0)
			startcmd += " mask=%d"%(maskrad)
		if self.params['rounds'] is not None:
			startcmd += " rounds="+str(self.params['rounds'])
	elif self.params['method'] == 'csym':
		startcmd = "startcsym "+clusterstack+" "
		startcmd += " "+str(self.params['numkeep'])
		startcmd += " sym="+self.symmdata['eman_name']
		if self.params['imask'] is not None:
			# BUGFIX: use str() — imask may be numeric, matching the other options.
			startcmd += " imask="+str(self.params['imask'])
	elif self.params['method'] == 'oct':
		startcmd = "startoct "+clusterstack+" "
		startcmd += " "+str(self.params['numkeep'])
	elif self.params['method'] == 'icos':
		startcmd = "starticos "+clusterstack+" "
		startcmd += " "+str(self.params['numkeep'])
		if self.params['imask'] is not None:
			# BUGFIX: use str() — imask may be numeric, matching the other options.
			startcmd += " imask="+str(self.params['imask'])
	apDisplay.printMsg("Creating 3D model with EMAN function: start"+self.params['method'])
	# Remove stale outputs so we can tell whether this run produced a model.
	apFile.removeFile("threed.0a.mrc")
	apFile.removeFile("eman.log")
	apEMAN.executeEmanCmd(startcmd, verbose=False, logfile="eman.log")
	#apEMAN.executeEmanCmd(startcmd, verbose=True)
	finalmodelname = "threed-%s-eman_start%s.mrc"%(self.timestamp, self.params['method'])
	finalmodelpath = os.path.join(self.params['rundir'], finalmodelname)
	apDisplay.printMsg("Final model name: "+finalmodelname)
	finalmodel = "threed.0a.mrc"
	if os.path.isfile(finalmodel):
		# Normalize the density and reset the origin while copying to rundir.
		emancmd = "proc3d %s %s norm=0,1 origin=0,0,0"%(finalmodel, finalmodelpath)
		#shutil.move(finalmodel, finalmodelpath)
		apEMAN.executeEmanCmd(emancmd, verbose=True)
		if not apVolume.isValidVolume(finalmodelpath):
			apDisplay.printError("Created volume is not valid")
	else:
		apDisplay.printError("No 3d model was created")
	### upload it
	self.uploadDensity(finalmodelpath)
	### chimera imaging
	apChimera.renderSnapshots(finalmodelpath, contour=self.params['contour'],
		zoom=self.params['zoom'], sym=self.symmdata['eman_name'])
	apChimera.renderAnimation(finalmodelpath, contour=self.params['contour'],
		zoom=self.params['zoom'], sym=self.symmdata['eman_name'])
	### remove stack only when it is large; small stacks are cheap to keep
	if apFile.stackSize(clusterstack)/1024**2 > 10:
		### file bigger than 10MB
		apFile.removeStack(clusterstack)
	apFile.removeFile("threed.0a.mrc")