def readSetOfCoordinates(workDir, micSet, coordSet, invertY=False, newBoxer=False):
    """ Read particle coordinates from Eman .json files.
    Params:
        workDir: where the Eman boxer output files are located.
        micSet: the SetOfMicrographs to associate the .json, which
            name should be the same of the micrographs.
        coordSet: the SetOfCoordinates that will be populated.
        invertY: passed through to readCoordinates to flip the Y axis.
        newBoxer: if True the box size is read from info/project.json
            (new e2boxer); otherwise from e2boxercache/base.json.
    """
    if newBoxer:
        # read boxSize from info/project.json
        jsonFnbase = pwutils.join(workDir, 'info', 'project.json')
        boxSizeKey = "global.boxsize"
    else:
        # read boxSize from e2boxercache/base.json
        jsonFnbase = pwutils.join(workDir, 'e2boxercache', 'base.json')
        boxSizeKey = "box_size"

    jsonBoxDict = loadJson(jsonFnbase)
    size = int(jsonBoxDict[boxSizeKey])

    jsonFninfo = pwutils.join(workDir, 'info/')

    for mic in micSet:
        micBase = pwutils.removeBaseExt(mic.getFileName())
        # FIX: the original did ''.join(glob.glob(...)), which silently
        # concatenates several matching paths into one bogus file name when
        # more than one info file matches. Take the first (sorted) match
        # instead; keep '' when there is no match, as before.
        matches = sorted(glob.glob(jsonFninfo + '*' + micBase + '_info.json'))
        micPosFn = matches[0] if matches else ''
        readCoordinates(mic, micPosFn, coordSet, invertY)
    coordSet.setBoxSize(size)
def getBoxSize(self, coordFile):
    """ Infer the particle box size from the given coordinate file.
    For .box files the size is the 3rd column; for .json files we look
    for e2boxercache/base.json (or info/project.json as a fallback).
    Returns None when the size cannot be determined. """
    if coordFile.endswith('.box'):
        md = MetaData()
        md.readPlain(coordFile, "xcoor ycoor particleSize")
        return md.getValue(MDL_PICKING_PARTICLE_SIZE, md.firstObject())

    if coordFile.endswith('.json'):
        infoDir = os.path.dirname(coordFile)
        # base.json lives one level above the info dir
        candidates = [
            (pwutils.join(os.path.dirname(infoDir),
                          'e2boxercache', 'base.json'), 'box_size'),
            (pwutils.join(infoDir, 'project.json'), 'global.boxsize'),
        ]
        # Mirror the original if/elif: only the first existing file is read
        for jsonFn, sizeKey in candidates:
            if pwutils.exists(jsonFn):
                jsonDict = loadJson(jsonFn)
                if sizeKey in jsonDict:
                    return int(jsonDict[sizeKey])
                break

    return None
def writeSetOfCoordinatesXmipp(posDir, coordSet, ismanual=True, scale=1):
    """ Write one .pos metadata file per micrograph of the coordSet.
    Params:
        posDir: the directory where the .pos files will be written.
        coordSet: the SetOfCoordinates that will be read.
    Returns the collection of generated .pos file names. """
    boxSize = coordSet.getBoxSize() or 100
    state = 'Manual' if ismanual else 'Supervised'

    # Map each micrograph id to its .pos output file name
    posDict = {}
    for mic in coordSet.iterMicrographs():
        micIndex, micFileName = mic.getLocation()
        micName = os.path.basename(micFileName)
        if micIndex != NO_INDEX:
            micName = '%06d_at_%s' % (micIndex, micName)
        posDict[mic.getObjId()] = pwutils.join(
            posDir, pwutils.replaceBaseExt(micName, "pos"))

    fh = None
    currentMicId = None
    counter = 0

    # One pass over the coordinates ordered by micrograph id; an id
    # change means the previous file is complete and a new one starts.
    for coord in coordSet.iterItems(orderBy='_micId'):
        micId = coord.getMicId()
        if micId != currentMicId:
            if fh:
                fh.close()
            counter = 0
            fh = openMd(posDict[micId], state)
            currentMicId = micId
        counter += 1
        x = coord.getX()
        y = coord.getY()
        if scale != 1:
            x = x * scale
            y = y * scale
        fh.write(" %06d 1 %d %d %d %06d\n"
                 % (coord.getObjId(), x, y, 1, micId))

    if fh:
        fh.close()

    # Write config.xmd metadata
    configFn = pwutils.join(posDir, 'config.xmd')
    writeCoordsConfig(configFn, int(boxSize), state)

    return posDict.values()
def _renameFiles(self, pattern1, pattern2):
    """ Find files matching *pattern1*, then move them into the
    extra/shiny folder with *pattern2* stripped from their names. """
    for srcFn in sorted(glob(self._getPath(pattern1))):
        baseName = os.path.basename(srcFn)
        dstFn = pwutils.join(self._getExtraPath('shiny'),
                             baseName.replace(pattern2, ''))
        pwutils.moveFile(srcFn, dstFn)
def readSetOfCoordinates(outputDir, micSet, coordSet):
    """ Read from Bsoft .star files.
    Params:
        outputDir: the directory where the .star files are.
        micSet: the SetOfMicrographs to associate the .star, which
            name should be the same of the micrographs.
        coordSet: the SetOfCoordinates that will be populated.
    """
    boxSize = 100  # fallback when no origin value is read
    for mic in micSet:
        outputFile = join(outputDir, replaceBaseExt(mic.getFileName(), 'star'))
        if exists(outputFile):
            posMd = md.MetaData(outputFile)
            for objId in posMd:
                coordRow = rowFromMd(posMd, objId)
                coord = rowToCoordinate(coordRow)
                # particle.x_origin is half the box size; it can change
                # per row, we keep the last value read
                boxSize = 2 * float(coordRow.getValue("particle.x_origin", 50))
                coord.setMicrograph(mic)
                # FIX: removed the redundant no-op self-assignments
                # coord.setX(coord.getX()) / coord.setY(coord.getY())
                coordSet.append(coord)
                # Add an unique ID that will be propagated to particles
                # posMd.setValue(md.MDL_PARTICLE_ID, long(coord.getObjId()), objId)
    coordSet.setBoxSize(boxSize)
def getOutputName(self, fn, key):
    """ Given a key, append the mrc extension and prefix the protocol
    working dir. """
    baseName = '%s%s.mrc' % (pwutils.removeBaseExt(fn), key)
    return pwutils.join(self.getMicrographsDir(), baseName)
def _getProgram(self):
    """ Return the full path of the Gctf binary that will be run,
    built from the GCTF and GCTF_HOME environment variables. """
    binaryName = basename(os.environ['GCTF'])
    return pwutils.join(os.environ['GCTF_HOME'], 'bin', binaryName)
def organizeDataStep(self):
    """ Move/rename polishing output files into the protocol's extra
    folder and rewrite the image paths inside the shiny star file. """
    from convert import relionToLocation, locationToRelion

    # The column holding the original particle name depends on the
    # Relion version.
    if getVersion() == V1_3:
        mdColumn = md.RLN_PARTICLE_NAME
    else:
        mdColumn = md.RLN_PARTICLE_ORI_NAME

    shinyStar = self._getFileName('shiny')
    newDir = self._getExtraPath('polished_particles')
    pwutils.makePath(newDir)

    if not isVersion2():
        pwutils.makePath(self._getExtraPath('shiny'))
        shinyOld = "shiny.star"
        inputFit = "movie_particles_shiny.star"
        try:
            pwutils.moveFile(shinyOld, shinyStar)
            pwutils.moveFile(
                self._getPath(inputFit),
                self._getExtraPath("shiny/all_movies_input_fit.star"))
            for half in self.PREFIXES:
                pwutils.moveFile(
                    self._getPath(
                        'movie_particles_shiny_%sclass001_unfil.mrc' % half),
                    self._getExtraPath('shiny/shiny_%sclass001_unfil.mrc' % half))
            self._renameFiles('movie_particles_shiny_post*', 'movie_particles_')
            self._renameFiles('movie_particles_shiny*', 'movie_particles_shiny_')
        # FIX: was a bare 'except:' which also swallowed
        # KeyboardInterrupt/SystemExit; narrowed to Exception.
        except Exception:
            raise Exception('ERROR: some file(s) were not found!')

    # move polished particles from Tmp to Extra path
    # and restore previous mdColumn
    mdShiny = md.MetaData(shinyStar)
    oldPath = ""
    for objId in mdShiny:
        index, imgPath = relionToLocation(
            mdShiny.getValue(md.RLN_IMAGE_NAME, objId))
        newPath = pwutils.join(newDir, str(imgPath).split('/')[-1])
        newLoc = locationToRelion(index, newPath)
        mdShiny.setValue(md.RLN_IMAGE_NAME, newLoc, objId)
        # each stack file is moved only once (entries are grouped by path)
        if oldPath != imgPath and exists(imgPath):
            pwutils.moveFile(imgPath, newPath)
            oldPath = imgPath

        index2, imgPath2 = relionToLocation(
            mdShiny.getValue(mdColumn, objId))
        absPath = os.path.realpath(imgPath2)
        # rewrite the original-name column relative to the Runs folder
        newPath2 = 'Runs' + str(absPath).split('Runs')[1]
        newLoc2 = locationToRelion(index2, newPath2)
        mdShiny.setValue(mdColumn, newLoc2, objId)

    mdShiny.write(shinyStar, md.MD_OVERWRITE)
    pwutils.cleanPath(self._getExtraPath('shiny/Runs'))
def _summary(self):
    """ Report what was exported and where, if the .emx file exists. """
    summary = []
    emxFile = os.path.abspath(
        join(self._getPath('emxData'), self.outputPrefix.get() + '.emx'))
    if exists(emxFile):
        summary.append('Exported %s to %s'
                       % (self._exportTypes[self.inputType.get()], emxFile))
    return summary
def readSetOfCoordinates(workDir, micSet, coordSet):
    """ Read coordinates from cisTEM .plt files.
    :param workDir: input folder with coord files
    :param micSet: input set of mics
    :param coordSet: output set of coords
    """
    for mic in micSet:
        coordFile = join(workDir, replaceBaseExt(mic.getFileName(), 'plt'))
        readCoordinates(mic, coordFile, coordSet)
def getProgram(cls, mpi=False):
    """ Return the absolute path of the Spider binary
    (the MPI flavor when mpi=True). """
    envVar = SPIDER_MPI if mpi else SPIDER
    binaryName = os.path.basename(cls.getVar(envVar))
    return str(abspath(join(cls.getEnviron()[SPBIN_DIR], binaryName)))
def _createFilenameTemplates(self):
    """ Centralize how files are called for iterations and references. """
    self._updateFilenamesDict({
        'inputMovies': 'input_movies.star',
        'inputParts': 'input_particles.star',
        'outputDir': pwutils.join('extra', 'output'),
        'outputParts': self._getExtraPath('output/movie_particles.star'),
    })
def _moveCoordsToInfo(self, tomo):
    """ Copy the tomogram's *_info.json coordinates file into the
    'info' subfolder of the working path. """
    baseName = pwutils.removeBaseExt(tomo.getFileName().split("__")[0])
    matches = glob.glob(os.path.join(self.path, '*%s_info.json' % baseName))
    if matches:
        infoDir = pwutils.join(os.path.abspath(self.path), 'info')
        destFn = os.path.join(infoDir, os.path.basename(matches[0]))
        pwutils.makePath(infoDir)
        copyFile(matches[0], destFn)
def readSetOfParticles(lstFile, partSet, copyOrLink, direc):
    """ Populate *partSet* with the particles listed in an EMAN .lst file.
    Params:
        lstFile: the .lst file (lives in <base>/sets/<name>.lst; the
            entries inside it are relative to <base>).
        partSet: output SetOfParticles to append to.
        copyOrLink: callable used to bring each stack into *direc*.
        direc: destination directory for the particle stacks.
    """
    # FIX: the base-path computation is loop-invariant; it was recomputed
    # (abspath + replace) on every iteration and the local name 'abspath'
    # shadowed the commonly-imported os.path.abspath helper.
    lstPath = os.path.abspath(lstFile)
    basePath = lstPath.replace('sets/%s' % os.path.basename(lstFile), '')
    for index, fn in iterLstFile(lstFile):
        item = Particle()
        # set full path to particles stack file
        fullFn = basePath + fn
        newFn = pwutils.join(direc, os.path.basename(fullFn))
        if not pwutils.exists(newFn):
            copyOrLink(fullFn, newFn)
        item.setLocation(index, newFn)
        partSet.append(item)
def _convertCoords(self, micSet, tmpDir, coordsType):
    """ Link the requested coordinate sqlite into *tmpDir* and convert
    it to Xmipp .pos files. """
    from .convert import writeSetOfCoordinatesXmipp
    sqliteNames = {'autopick': 'coordinates.sqlite',
                   'rejected': 'coordinates_rejected.sqlite'}
    srcFn = self.protocol._getPath(sqliteNames[coordsType])
    dstFn = pwutils.join(tmpDir, 'coordinates.sqlite')
    pwutils.createLink(srcFn, dstFn)
    coordSet = SetOfCoordinates(filename=dstFn)
    coordSet.setMicrographs(micSet)
    writeSetOfCoordinatesXmipp(tmpDir, coordSet, ismanual=False)
def readSetOfCoordinates(workDir, micSet, coordSet):
    """ Read from Appion .txt files.
    It is expected a file named: base.txt under the workDir.
    Params:
        workDir: where the Appion dogpicker output files are located.
        micSet: the SetOfMicrographs to associate the .txt, which
            name should be the same of the micrographs.
        coordSet: the SetOfCoordinates that will be populated.
    """
    for mic in micSet:
        coordFile = join(workDir, replaceBaseExt(mic.getFileName(), 'txt'))
        readCoordinates(mic, coordFile, coordSet)
def _copyFiles(protocol, rpath):
    """ Copy all required files for protocol to run in a remote
    execution host.
    NOTE: this function should always be executed with the current
    working dir pointing to the project dir; the remote path is assumed
    to be protocol.getHostConfig().getHostPath().
    Params:
        protocol: protocol whose files will be copied.
        rpath: remote-path helper (ssh-like) used to transfer files.
    """
    remotePath = protocol.getHostConfig().getHostPath()
    for localFile in protocol.getFiles():
        rpath.putFile(localFile, join(remotePath, localFile))
def organizeDataStep(self):
    """ Move/rename polishing output files into the protocol's extra
    folder and rewrite the image paths inside the shiny star file. """
    from convert import relionToLocation, locationToRelion

    # The column holding the original particle name depends on the
    # Relion version.
    if getVersion() == V1_3:
        mdColumn = md.RLN_PARTICLE_NAME
    else:
        mdColumn = md.RLN_PARTICLE_ORI_NAME

    shinyStar = self._getFileName('shiny')
    newDir = self._getExtraPath('polished_particles')
    pwutils.makePath(newDir)

    if not isVersion2():
        pwutils.makePath(self._getExtraPath('shiny'))
        shinyOld = "shiny.star"
        inputFit = "movie_particles_shiny.star"
        try:
            pwutils.moveFile(shinyOld, shinyStar)
            pwutils.moveFile(
                self._getPath(inputFit),
                self._getExtraPath("shiny/all_movies_input_fit.star"))
            for half in self.PREFIXES:
                pwutils.moveFile(
                    self._getPath(
                        'movie_particles_shiny_%sclass001_unfil.mrc' % half),
                    self._getExtraPath('shiny/shiny_%sclass001_unfil.mrc' % half))
            self._renameFiles('movie_particles_shiny_post*', 'movie_particles_')
            self._renameFiles('movie_particles_shiny*', 'movie_particles_shiny_')
        # FIX: was a bare 'except:' which also swallowed
        # KeyboardInterrupt/SystemExit; narrowed to Exception.
        except Exception:
            raise Exception('ERROR: some file(s) were not found!')

    # move polished particles from Tmp to Extra path
    # and restore previous mdColumn
    mdShiny = md.MetaData(shinyStar)
    oldPath = ""
    for objId in mdShiny:
        index, imgPath = relionToLocation(
            mdShiny.getValue(md.RLN_IMAGE_NAME, objId))
        newPath = pwutils.join(newDir, str(imgPath).split('/')[-1])
        newLoc = locationToRelion(index, newPath)
        mdShiny.setValue(md.RLN_IMAGE_NAME, newLoc, objId)
        # each stack file is moved only once (entries are grouped by path)
        if oldPath != imgPath and exists(imgPath):
            pwutils.moveFile(imgPath, newPath)
            oldPath = imgPath

        index2, imgPath2 = relionToLocation(
            mdShiny.getValue(mdColumn, objId))
        absPath = os.path.realpath(imgPath2)
        # rewrite the original-name column relative to the Runs folder
        newPath2 = 'Runs' + str(absPath).split('Runs')[1]
        newLoc2 = locationToRelion(index2, newPath2)
        mdShiny.setValue(mdColumn, newLoc2, objId)

    mdShiny.write(shinyStar, md.MD_OVERWRITE)
    pwutils.cleanPath(self._getExtraPath('shiny/Runs'))
def runTemplate(inputScript, paramsDict, log=None, cwd=None):
    """ Create a valid Imagic script by copying the template and
    replacing the values in dictionary, then invoke the Imagic
    interpreter on it. Usually the execution should be done where the
    results will be left. """
    outputScript = replaceBaseExt(inputScript, 'b')
    if cwd is not None:
        outputScript = join(cwd, outputScript)
    # First write the script from the template with the substitutions,
    # then proceed to run it.
    writeScript(inputScript, outputScript, paramsDict)
    runScript(outputScript, log, cwd)
def convertBinaryVol(vol, outputDir):
    """ Convert binary volume to a format read by Relion.
    Params:
        vol: input volume object to be converted.
        outputDir: where to put the converted file(s)
    Return:
        new file name of the volume (converted or not).
    """
    fn = vol.getFileName()
    if fn.endswith('.mrc'):
        return fn
    newFn = pwutils.join(outputDir, pwutils.replaceBaseExt(fn, 'mrc'))
    ImageHandler().convert(fn, newFn)
    return newFn
def createOutputStep(self):
    """ Create a SetOfCoordinates3D output from the *_info.json files
    produced for each tomogram and register it as protocol output. """
    # Create a Set of 3D Coordinates per class
    coord3DSetDict = dict()   # class index -> coordinates set
    coord3DMap = dict()       # class index -> output attribute name
    suffix = self._getOutputSuffix(SetOfCoordinates3D)
    coord3DSet = self._createSetOfCoordinates3D(self.inputSet.get(), suffix)
    coord3DSet.setBoxSize(self.box)
    coord3DSet.setName("tomoCoord")
    coord3DSet.setPrecedents(self.inputSet.get())
    coord3DSet.setSamplingRate(self.inputSet.get().getSamplingRate())
    for tomo in self.inputSet.get():
        inputTomo = tomo.clone()
        tomoName = os.path.basename(tomo.getFileName())
        tomoName = os.path.splitext(tomoName)[0]
        # each tomogram has a companion <name>_info.json in extra/
        jsonFnbase = pwutils.join(self._getExtraPath(),
                                  '%s_info.json' % tomoName)
        jsonBoxDict = loadJson(jsonFnbase)
        for key, classItem in jsonBoxDict["class_list"].items():
            index = int(key)
            # NOTE(review): every class index maps to the SAME
            # coord3DSet / output name here — presumably intentional
            # (single merged output); confirm against callers.
            coord3DSetDict[index] = coord3DSet
            name = self.OUTPUT_PREFIX + suffix
            coord3DMap[index] = name
            args = dict()
            args[name] = coord3DSet
            # Populate Set of 3D Coordinates with 3D Coordinates
            readSetOfCoordinates3D(jsonBoxDict, coord3DSetDict, inputTomo,
                                   self.correctOffset,
                                   groupId=self.groupId.get())
            self._defineOutputs(**args)
            self._defineSourceRelation(self.inputSet.get(), coord3DSet)
    # Update Outputs
    for index, coord3DSet in coord3DSetDict.items():
        coord3DSet.setObjComment(self.getSummary(coord3DSet))
        self._updateOutputSet(coord3DMap[index], coord3DSet,
                              state=coord3DSet.STREAM_CLOSED)
def writeSetOfCoordinates(coordDir, coordSet, micsSet):
    """ Write a star file on metadata format for each micrograph on the
    coordSet.
    Params:
        coordDir: the directory where the .star files will be written.
        coordSet: the SetOfCoordinates that will be read.
        micsSet: the SetOfMicrographs that will be read.
    """
    header = """
data_

loop_
_rlnCoordinateX #1
_rlnCoordinateY #2
"""
    # Map each micrograph id to its coordinates star file name
    starDict = {}
    for mic in micsSet:
        base = pwutils.removeBaseExt(mic.getFileName())
        starDict[mic.getObjId()] = pwutils.join(coordDir, base,
                                                base + '_coords.star')

    fh = None
    currentMicId = None

    # Iterate only once over the whole SetOfCoordinates, ordered by
    # micrograph id; a change of id means a new star file must start.
    for coord in coordSet.iterItems(orderBy='_micId'):
        micId = coord.getMicId()
        if micId != currentMicId:
            if fh:
                fh.close()
            fh = open(starDict[micId], 'w')
            fh.write(header)
            currentMicId = micId
        fh.write("%d %d\n" % coord.getPosition())

    if fh:
        fh.close()
def createCtfModelStep(self):
    """ Build an output particle set whose CTF models are read back from
    the per-micrograph Gctf local refinement star files, matching each
    particle by its (scaled) picking coordinates. """
    inputSet = self.inputParticles.get()
    partSet = self._createSetOfParticles()
    partSet.copyInfo(inputSet)

    for particle in inputSet:
        coord = particle.getCoordinate()
        if coord is None:
            # particles without picking coordinates cannot be matched
            continue
        x, y = coord.getPosition()
        if self.applyShifts:
            # undo alignment shifts before scaling so the coordinates
            # match what was written for Gctf
            shifts = getShifts(particle.getTransform(), self.alignType)
            xCoor, yCoor = x - int(shifts[0]), y - int(shifts[1])
            xNew, yNew = (xCoor * self.scale, yCoor * self.scale)
        else:
            xNew, yNew = (x * self.scale, y * self.scale)
        micBase = pwutils.removeBaseExt(coord.getMicName())

        for key in self.matchingMics:
            micKey = pwutils.removeBaseExt(key.getFileName())
            # NOTE(review): substring containment — micBase being a
            # prefix of another mic name could match the wrong mic;
            # confirm mic naming guarantees uniqueness here.
            if micBase in micKey:
                # micName from mic and micName from coord may be different
                ctfFn = pwutils.join(self._getExtraPath(micKey),
                                     micKey + '_local.star')
                if pwutils.exists(ctfFn):
                    mdFn = md.MetaData(ctfFn)
                    for row in md.iterRows(mdFn):
                        coordX = row.getValue(md.RLN_IMAGE_COORD_X)
                        coordY = row.getValue(md.RLN_IMAGE_COORD_Y)
                        # exact integer coordinate match selects the row
                        if (int(xNew), int(yNew)) == (coordX, coordY):
                            newPart = particle.clone()
                            rowToCtfModel(row, newPart.getCTF())
                            partSet.append(newPart)

    self._defineOutputs(outputParticles=partSet)
    self._defineTransformRelation(inputSet, partSet)
def jsonFilesFromSet(setScipion, path):
    """ Return the expected *_info.json file names under *path* for each
    item of the given set, together with the companion data files.
    Supports SetOfTomograms and SetOfTiltSeries. """
    json_files = []

    if isinstance(setScipion, SetOfTomograms):
        tomo_files = []
        for tomoFn in setScipion.getFiles():
            baseName = pwutils.removeBaseExt(tomoFn)
            if "__" in baseName:
                # name already carries a session prefix before '__'
                jsonName = '%s_info.json' % baseName.split("__")[0]
            else:
                # otherwise prefix with the parent folder name
                parentFolder = pwutils.removeBaseExt(os.path.dirname(tomoFn))
                jsonName = '%s-%s_info.json' % (parentFolder, baseName)
            json_files.append(pwutils.join(path, jsonName))
            tomo_files.append(tomoFn)
        return json_files, tomo_files

    elif isinstance(setScipion, SetOfTiltSeries):
        tlt_files = []
        for tiltSerie in setScipion.iterItems(iterate=False):
            firstFn = tiltSerie.getFirstItem().getFileName()
            jsonName = (os.path.basename(os.path.dirname(firstFn))
                        + '-' + tiltSerie.getTsId() + '_info.json')
            json_files.append(os.path.join(path, jsonName))
            tlt_files.append(firstFn)
        return json_files, tlt_files
def convertMask(img, outputPath, newPix=None, newDim=None):
    """ Convert mask to mrc format read by Relion.
    Params:
        img: input image to be converted.
        outputPath: it can be either a directory or a file path.
            If it is a directory, the output name will be inferred from
            input and put into that directory. If it is not a directory,
            it is assumed to be the output filename.
        newPix: output pixel size (equals input if None)
        newDim: output box size
    Return:
        new file name of the mask.
    """
    index, filename = img.getLocation()
    imgFn = locationToRelion(index, filename)
    inPix = img.getSamplingRate()
    outPix = newPix if newPix is not None else inPix

    if os.path.isdir(outputPath):
        outFn = pwutils.join(outputPath, pwutils.replaceBaseExt(imgFn, 'mrc'))
    else:
        outFn = outputPath

    # Assemble the relion_image_handler command line
    parts = ['--i %s --o %s --angpix %0.5f --rescale_angpix %0.5f'
             % (imgFn, outFn, inPix, outPix)]
    if newDim is not None:
        parts.append('--new_box %d' % newDim)
    parts.append('--threshold_above 1 --threshold_below 0')

    pwutils.runJob(None, 'relion_image_handler', ' '.join(parts),
                   env=Plugin.getEnviron())
    return outFn
def significantStep(self, iterNumber, alpha):
    """ Run one iteration of the significant/reconstruct loop:
    angular assignment (GPU or CPU path), Fourier reconstruction
    (GPU or CPU path), then centering, masking and optional low-pass
    filtering of the new volume. """
    iterDir = self._getTmpPath('iter%03d' % iterNumber)
    makePath(iterDir)
    prevVolFn = self.getIterVolume(iterNumber - 1)
    volFn = self.getIterVolume(iterNumber)
    anglesFn = self._getExtraPath('angles_iter%03d.xmd' % iterNumber)
    t = Timer()
    t.tic()
    if self.useGpu.get() and iterNumber > 1:
        # --- GPU angular assignment path ---
        # Generate projections
        fnGalleryRoot = join(iterDir, "gallery")
        args = "-i %s -o %s.stk --sampling_rate %f --sym %s " \
               "--compute_neighbors --angular_distance -1 " \
               "--experimental_images %s --min_tilt_angle %f " \
               "--max_tilt_angle %f -v 0 --perturb %f " % \
               (prevVolFn, fnGalleryRoot, self.angularSampling.get(),
                self.symmetryGroup, self.imgsFn, self.minTilt,
                self.maxTilt, math.sin(self.angularSampling.get()) / 4)
        self.runJob("xmipp_angular_project_library ", args, numberOfMpi=1)
        # effective alpha depends on the number of true symmetries
        if self.trueSymsNo != 0:
            alphaApply = (alpha * self.trueSymsNo) / 2
        else:
            alphaApply = alpha / 2
        from pwem.emlib.metadata import getSize
        # number of best projections to keep per image
        N = int(getSize(fnGalleryRoot + '.doc') * alphaApply * 2)
        # Build the CUDA device list; under a queue system the visible
        # devices are taken from CUDA_VISIBLE_DEVICES, otherwise from
        # the protocol GPU list (and the env var is rewritten).
        count = 0
        GpuListCuda = ''
        if self.useQueueForSteps() or self.useQueue():
            GpuList = os.environ["CUDA_VISIBLE_DEVICES"]
            GpuList = GpuList.split(",")
            for elem in GpuList:
                GpuListCuda = GpuListCuda + str(count) + ' '
                count += 1
        else:
            GpuList = ' '.join([str(elem) for elem in self.getGpuList()])
            GpuListAux = ''
            for elem in self.getGpuList():
                GpuListCuda = GpuListCuda + str(count) + ' '
                GpuListAux = GpuListAux + str(elem) + ','
                count += 1
            os.environ["CUDA_VISIBLE_DEVICES"] = GpuListAux
        args = '-i %s -r %s.doc -o %s --keepBestN %f --dev %s ' % \
               (self.imgsFn, fnGalleryRoot, anglesFn, N, GpuListCuda)
        self.runJob(CUDA_ALIGN_SIGNIFICANT, args, numberOfMpi=1)
        cleanPattern(fnGalleryRoot + "*")
    else:
        # --- CPU significant path ---
        args = self.getSignificantArgs(self.imgsFn)
        args += ' --odir %s' % iterDir
        args += ' --alpha0 %f --alphaF %f' % (alpha, alpha)
        args += ' --dontCheckMirrors '
        if iterNumber == 1:
            if self.thereisRefVolume:
                args += " --initvolumes " + \
                        self._getExtraPath('input_volumes.xmd')
            else:
                args += " --numberOfVolumes 1"
        else:
            args += " --initvolumes %s" % prevVolFn
        self.runJob("xmipp_reconstruct_significant", args)
        moveFile(os.path.join(iterDir, 'angles_iter001_00.xmd'), anglesFn)
    t.toc('Significant took: ')

    # --- Reconstruction ---
    reconsArgs = ' -i %s --fast' % anglesFn
    reconsArgs += ' -o %s' % volFn
    reconsArgs += ' --weight -v 0 --sym %s ' % self.symmetryGroup

    print("Number of images for reconstruction: ",
          metadata.getSize(anglesFn))
    t.tic()
    if self.useGpu.get():
        cudaReconsArgs = reconsArgs
        # AJ to make it work with and without queue system
        if self.numberOfMpi.get() > 1:
            N_GPUs = len((self.gpuList.get()).split(','))
            cudaReconsArgs += ' -gpusPerNode %d' % N_GPUs
            cudaReconsArgs += ' -threadsPerGPU %d' % max(
                self.numberOfThreads.get(), 4)
        # Same CUDA device-list construction as above
        count = 0
        GpuListCuda = ''
        if self.useQueueForSteps() or self.useQueue():
            GpuList = os.environ["CUDA_VISIBLE_DEVICES"]
            GpuList = GpuList.split(",")
            for elem in GpuList:
                GpuListCuda = GpuListCuda + str(count) + ' '
                count += 1
        else:
            GpuListAux = ''
            for elem in self.getGpuList():
                GpuListCuda = GpuListCuda + str(count) + ' '
                GpuListAux = GpuListAux + str(elem) + ','
                count += 1
            os.environ["CUDA_VISIBLE_DEVICES"] = GpuListAux
        cudaReconsArgs += ' --thr %s' % self.numberOfThreads.get()
        if self.numberOfMpi.get() == 1:
            cudaReconsArgs += ' --device %s' % (GpuListCuda)
        if self.numberOfMpi.get() > 1:
            self.runJob('xmipp_cuda_reconstruct_fourier', cudaReconsArgs,
                        numberOfMpi=len(
                            (self.gpuList.get()).split(',')) + 1)
        else:
            self.runJob('xmipp_cuda_reconstruct_fourier', cudaReconsArgs)
    else:
        self.runJob("xmipp_reconstruct_fourier_accel", reconsArgs)
    t.toc('Reconstruct fourier took: ')

    # Center the volume: average with its mirrored copy, then align
    fnSym = self._getExtraPath('volumeSym_%03d.vol' % iterNumber)
    self.runJob("xmipp_transform_mirror",
                "-i %s -o %s --flipX" % (volFn, fnSym), numberOfMpi=1)
    self.runJob("xmipp_transform_mirror",
                "-i %s --flipY" % fnSym, numberOfMpi=1)
    self.runJob("xmipp_transform_mirror",
                "-i %s --flipZ" % fnSym, numberOfMpi=1)
    self.runJob("xmipp_image_operate",
                "-i %s --plus %s" % (fnSym, volFn), numberOfMpi=1)
    self.runJob("xmipp_volume_align",
                '--i1 %s --i2 %s --local --apply' % (fnSym, volFn),
                numberOfMpi=1)
    cleanPath(fnSym)

    # To mask the volume
    xdim = self.inputSet.get().getDimensions()[0]
    maskArgs = "-i %s --mask circular %d -v 0" % (volFn, -xdim / 2)
    self.runJob('xmipp_transform_mask', maskArgs, numberOfMpi=1)
    # TODO mask the final volume in some smart way...

    # To filter the volume
    if self.useMaxRes:
        self.runJob('xmipp_transform_filter',
                    '-i %s --fourier low_pass %f --sampling %f' %
                    (volFn, self.maxResolution.get(), self.TsCurrent),
                    numberOfMpi=1)
    if not self.keepIntermediate:
        cleanPath(prevVolFn, iterDir)
    if self.thereisRefVolume:
        cleanPath(self._getExtraPath('filteredVolume.vol'))
def convertInputStep(self):
    """ Build temporary sets of matching micrographs and (scaled)
    coordinates for the input particles, convert the micrographs to
    .mrc if needed and write out the coordinate files.
    NOTE(review): this method uses Python 2 print statements. """
    inputParticles = self.inputParticles.get()
    firstCoord = inputParticles.getFirstItem().getCoordinate()
    self.hasMicName = firstCoord.getMicName() is not None
    inputMics = self._getMicrographs()
    self.alignType = inputParticles.getAlignment()
    self.downFactor = self.ctfDownFactor.get()
    # create a tmp set for matching mics
    self.matchingMics = self._createSetOfMicrographs(suffix='_tmp')
    self.matchingMics.copyInfo(inputMics)

    if self.downFactor != 1.:
        self.matchingMics.setDownsample(self.downFactor)

    # create a tmp set for coords
    coords = self._createSetOfCoordinates(inputMics, suffix='_tmp')
    newCoord = Coordinate()
    # coordinates were picked at the particle sampling rate; rescale to
    # the micrograph sampling rate
    self.scale = inputParticles.getSamplingRate() / inputMics.getSamplingRate()
    if self.scale != 1.0:
        print "Scaling coordinates by a factor *%0.2f*" % self.scale

    # Create the micrograph dicts
    micDict = {}  # dict with micName or micId
    micBaseDict = {}  # dict with micName (just basename)
    micKey2 = None
    insertedMics = {}

    for mic in inputMics:
        if self.hasMicName:
            micKey = mic.getMicName()
            micKey2 = pwutils.removeBaseExt(micKey)
        else:
            micKey = mic.getObjId()
        if micKey in micDict:
            print ">>> ERROR: micrograph key %s is duplicated!" % micKey
            print " Used in micrographs:"
            print " - %s" % micDict[micKey].getLocation()
            print " - %s" % mic.getLocation()
            raise Exception("Micrograph key %s is duplicated!" % micKey)
        micDict[micKey] = mic.clone()
        if self.hasMicName:
            micBaseDict[micKey2] = mic.clone()

    # match the mic from coord with micDict
    for particle in inputParticles:
        coord = particle.getCoordinate() or None
        if coord is None:
            print "Skipping particle, coordinates not found"
            continue

        if self.hasMicName:
            micKey = coord.getMicName()
            micKey2 = pwutils.removeBaseExt(micKey)
        else:
            micKey = coord.getMicId()
        # find the mapping by micName (with or without ext) or micId
        mic = micDict.get(micKey, None) or micBaseDict.get(micKey2, None)

        if mic is None:
            print "Skipping particle, key %s not found" % micKey
        else:
            newCoord.copyObjId(particle)
            x, y = coord.getPosition()
            if self.applyShifts:
                # undo alignment shifts before rescaling
                shifts = getShifts(particle.getTransform(), self.alignType)
                xCoor, yCoor = x - int(shifts[0]), y - int(shifts[1])
                newCoord.setPosition(xCoor * self.scale, yCoor * self.scale)
            else:
                newCoord.setPosition(x * self.scale, y * self.scale)

            newCoord.setMicrograph(mic)
            coords.append(newCoord)
            # remember each matched micrograph only once
            if mic.getObjId() not in insertedMics:
                insertedMics[mic.getObjId()] = mic
                self.matchingMics.append(mic)

    ih = em.ImageHandler()
    # We convert matching micrographs if they are not *.mrc
    for mic in self.matchingMics:
        # Create micrograph dir
        micName = mic.getFileName()
        micDir = self._getTmpPath(pwutils.removeBaseExt(micName))
        pwutils.makePath(micDir)
        outMic = pwutils.join(micDir, pwutils.replaceBaseExt(micName, 'mrc'))

        if self.downFactor != 1.:
            ih.scaleFourier(micName, outMic, self.downFactor)
            sps = inputMics.getScannedPixelSize() * self.downFactor
            self._params['scannedPixelSize'] = sps
        else:
            if micName.endswith('.mrc'):
                pwutils.createLink(micName, outMic)
            else:
                ih.convert(micName, outMic)

    # Write out coordinate files and sets
    writeSetOfCoordinates(self._getTmpPath(), coords, self.matchingMics)
    coords.clear()
    pwutils.cleanPath(coords.getFileName())
    self.matchingMics.write()
    self.matchingMics.close()
def convertInputStep(self, classesFn):
    """ Write the input classes/particles as metadata, re-sample the
    images to the working sampling rate and, when a reference volume is
    given, resample/filter it and write input_volumes.xmd. """
    inputSet = self.inputSet.get()
    if isinstance(inputSet, SetOfClasses2D):
        writeSetOfClasses2D(inputSet, classesFn, writeParticles=False)
    else:
        writeSetOfParticles(inputSet, classesFn)

    # To re-sample input images
    fnDir = self._getExtraPath()
    fnNewParticles = join(fnDir, "input_classes.stk")
    TsOrig = self.inputSet.get().getSamplingRate()
    TsRefVol = -1
    if self.thereisRefVolume:
        TsRefVol = self.refVolume.get().getSamplingRate()
    if self.useMaxRes:
        # work at 1/3 of the coarsest relevant sampling
        self.TsCurrent = max([TsOrig, self.maxResolution.get(), TsRefVol])
        self.TsCurrent = self.TsCurrent / 3
        Xdim = self.inputSet.get().getDimensions()[0]
        self.newXdim = int(round(Xdim * TsOrig / self.TsCurrent))
        if self.newXdim < 40:
            # enforce a minimum box of 40 px and recompute the sampling
            self.newXdim = int(40)
            self.TsCurrent = float(TsOrig) * (float(Xdim) / float(self.newXdim))
        if self.newXdim != Xdim:
            self.runJob("xmipp_image_resize",
                        "-i %s -o %s --fourier %d"
                        % (self.imgsFn, fnNewParticles, self.newXdim),
                        numberOfMpi=self.numberOfMpi.get()
                                    * self.numberOfThreads.get())
        else:
            self.runJob("xmipp_image_convert",
                        "-i %s -o %s --save_metadata_stack %s"
                        % (self.imgsFn, fnNewParticles,
                           join(fnDir, "input_classes.xmd")),
                        numberOfMpi=1)

    # To resample the refVolume if exists with the newXdim calculated
    # previously
    if self.thereisRefVolume:
        fnFilVol = self._getExtraPath('filteredVolume.vol')
        self.runJob("xmipp_image_convert",
                    "-i %s -o %s -t vol"
                    % (self.refVolume.get().getFileName(), fnFilVol),
                    numberOfMpi=1)
        # TsVol = self.refVolume.get().getSamplingRate()
        if self.useMaxRes:
            if self.newXdim != Xdim:
                self.runJob('xmipp_image_resize',
                            "-i %s --fourier %d" % (fnFilVol, self.newXdim),
                            numberOfMpi=1)
                self.runJob('xmipp_transform_window',
                            "-i %s --size %d" % (fnFilVol, self.newXdim),
                            numberOfMpi=1)
            args = "-i %s --fourier low_pass %f --sampling %f " % (
                fnFilVol, self.maxResolution.get(), self.TsCurrent)
            self.runJob("xmipp_transform_filter", args, numberOfMpi=1)
        if not self.useMaxRes:
            inputVolume = self.refVolume.get()
        else:
            inputVolume = Volume(fnFilVol)
            inputVolume.setSamplingRate(self.TsCurrent)
        inputVolume.setObjId(self.refVolume.get().getObjId())
        fnVolumes = self._getExtraPath('input_volumes.xmd')
        row = metadata.Row()
        volumeToRow(inputVolume, row, alignType=ALIGN_NONE)
        md = emlib.MetaData()
        row.writeToMd(md, md.addObject())
        md.write(fnVolumes)
def testFile(filename):
    """ Return *filename* located under the 'pythoninterface' folder. """
    folder = "pythoninterface"
    return pwutils.join(folder, filename)
def refineCtfStep(self):
    """ Run Gctf local CTF refinement on every matching micrograph and
    move the results from tmp to the extra folder. """
    self._defineValues()
    self._prepareCommand()
    for mic in self.matchingMics:
        micName = mic.getFileName()
        micBase = pwutils.removeBaseExt(micName)
        micDirTmp = self._getTmpPath(pwutils.removeBaseExt(micName))
        outMic = pwutils.join(micDirTmp,
                              pwutils.replaceBaseExt(micName, 'mrc'))
        micFnCtf = pwutils.join(micDirTmp, micBase + '.ctf')
        micFnOut = self._getCtfOutPath(micDirTmp)
        micFnCtfFit = pwutils.join(micDirTmp, micBase + '_EPA.log')
        micFnLocalCtf = pwutils.join(micDirTmp, micBase + '_local.star')

        # Update _params dictionary
        self._params['micFn'] = outMic
        self._params['gctfOut'] = micFnOut

        if self.useInputCtf and self.ctfRelations.get():
            # get input CTFs from a mic
            ctfs = self.ctfRelations.get()
            micKey = mic.getMicName() if self.hasMicName else mic.getObjId()

            for ctf in ctfs:
                ctfMicName = ctf.getMicrograph().getMicName()
                ctfMicId = ctf.getMicrograph().getObjId()
                if micKey == ctfMicName or micKey == ctfMicId:
                    # add CTF refine options
                    self._params.update(
                        {'refine_input_ctf': 1,
                         'defU_init': ctf.getDefocusU(),
                         'defV_init': ctf.getDefocusV(),
                         'defA_init': ctf.getDefocusAngle(),
                         'B_init': self.bfactor.get()
                         })
                    self._args += "--refine_input_ctf %d " % self._params['refine_input_ctf']
                    self._args += "--defU_init %f " % self._params['defU_init']
                    self._args += "--defV_init %f " % self._params['defV_init']
                    self._args += "--defA_init %f " % self._params['defA_init']
                    self._args += "--B_init %f " % self._params['B_init']
                    self._args += "--defU_err %f " % self.defUerr.get()
                    self._args += "--defV_err %f " % self.defVerr.get()
                    self._args += "--defA_err %f " % self.defAerr.get()
                    self._args += "--B_err %f " % self.Berr.get()
                    break

        # final args
        # NOTE(review): self._args keeps accumulating across loop
        # iterations (the per-mic flags are appended each time without
        # resetting) — looks suspicious; confirm _prepareCommand resets
        # it or that Gctf tolerates repeated flags.
        self._args += "--do_validation %d " % (1 if self.doValidate else 0)
        self._args += "%(micFn)s "
        self._args += "> %(gctfOut)s"

        try:
            self.runJob(self._getProgram(), self._args % self._params,
                        env=self._getEnviron())
        except:
            # best-effort: report and continue with the next micrograph
            print("ERROR: Gctf has failed for micrograph %s" % outMic)

        # move results from tmp to extra folder
        micDir = self._getExtraPath(pwutils.removeBaseExt(micName))
        pwutils.makePath(micDir)
        psdFile = self._getPsdPath(micDir)
        ctfOutFile = self._getCtfOutPath(micDir)
        ctffitFile = self._getCtfFitOutPath(micDir)
        ctflocalFile = self._getCtfLocalPath(micDir, micBase)

        pwutils.moveFile(micFnCtf, psdFile)
        pwutils.moveFile(micFnOut, ctfOutFile)
        pwutils.moveFile(micFnCtfFit, ctffitFile)
        pwutils.moveFile(micFnLocalCtf, ctflocalFile)

        # Let's clean the temporary micrographs
        pwutils.cleanPath(outMic)
        pwutils.cleanPath(micDirTmp)

    pwutils.cleanPath(self.matchingMics.getFileName())
    pwutils.cleanPath(self.getProject().getPath('micrographs_all_gctf.star'))
def writeSetOfParticles(partSet, path, **kwargs):
    """ Convert the imgSet particles to .hdf files as expected by Eman.
    This function should be called from a current dir where the
    images in the set are available.
    NOTE(review): uses a Python 2 'print >>' pipe write; this function
    is Python 2 only as written. """
    ext = pwutils.getExt(partSet.getFirstItem().getFileName())[1:]
    if ext == 'hdf':
        # create links if input has hdf format
        for fn in partSet.getFiles():
            newFn = pwutils.removeBaseExt(fn).split('__ctf')[0] + '.hdf'
            newFn = pwutils.join(path, newFn)
            pwutils.createLink(fn, newFn)
            print(" %s -> %s" % (fn, newFn))
    else:
        firstCoord = partSet.getFirstItem().getCoordinate() or None
        hasMicName = False
        if firstCoord:
            hasMicName = firstCoord.getMicName() or False
        fileName = ""
        a = 0  # index offset: EMAN indices start at 0
        # external e2converter.py process reads one json object per line
        proc = eman2.Plugin.createEmanProcess(args='write')
        for i, part in iterParticlesByMic(partSet):
            micName = micId = part.getMicId()
            if hasMicName:
                micName = pwutils.removeBaseExt(
                    part.getCoordinate().getMicName())
            objDict = part.getObjDict()
            if not micId:
                micId = 0
            suffix = kwargs.get('suffix', '')
            # output hdf name: per-micName when available and distinct,
            # otherwise a mic_<id> fallback
            if hasMicName and (micName != str(micId)):
                objDict['hdfFn'] = pwutils.join(path,
                                                "%s%s.hdf" % (micName, suffix))
            else:
                objDict['hdfFn'] = pwutils.join(
                    path, "mic_%06d%s.hdf" % (micId, suffix))
            alignType = kwargs.get('alignType')
            if alignType != em.ALIGN_NONE:
                shift, angles = alignmentToRow(part.getTransform(),
                                               alignType)
                # json cannot encode arrays so I convert them to lists
                # json fail if has -0 as value
                objDict['_shifts'] = shift.tolist()
                objDict['_angles'] = angles.tolist()
            objDict['_itemId'] = part.getObjId()

            # the index in EMAN begins with 0
            if fileName != objDict['_filename']:
                fileName = objDict['_filename']
                if objDict['_index'] == 0:
                    a = 0
                else:
                    a = 1
            objDict['_index'] = int(objDict['_index'] - a)
            # Write the e2converter.py process from where to read the image
            print >> proc.stdin, json.dumps(objDict)
            proc.stdin.flush()
            proc.stdout.readline()
        proc.kill()
def _viewParam(self, param=None):
    """ Dispatch the visualization requested by *param*: either launch
    the supervised picker GUI on converted coordinates, or open one of
    the intermediate micrograph sqlite sets in ObjectView. """
    micSet = self.protocol.getInputMicrographs()
    tmpDir = self.protocol._getTmpPath()
    pwutils.cleanPath(tmpDir)
    pwutils.makePath(tmpDir)
    # FIXME: (JMRT) We are always writing the SetOfCoordinates and removing
    # the tmpDir, we need to take into account if the user has picked
    # some particles in the tmpDir and has not saved them, that now he
    # will lose all picked particles.
    # A possible solution could be to alert that changes have not been
    # written during modification of tmpDir or create a new Xmipp picking
    # protocol to continue picking later without losing the coordinates.
    if micSet is None:
        raise Exception(
            'visualize: SetOfCoordinates has no micrographs set.')

    micsFn = pwutils.join(tmpDir, micSet.getName() + '_micrographs.xmd')
    from .convert import writeSetOfMicrographs
    from pwem.viewers.showj import launchSupervisedPickerGUI
    writeSetOfMicrographs(micSet, micsFn)
    inTmpFolder = True
    view = []

    if param == 'doShowAutopick':
        self._convertCoords(micSet, tmpDir, coordsType='autopick')
        launchSupervisedPickerGUI(micsFn, tmpDir, self.protocol,
                                  mode='review', inTmpFolder=inTmpFolder)
    elif param == 'doShowRejected':
        self._convertCoords(micSet, tmpDir, coordsType='rejected')
        launchSupervisedPickerGUI(micsFn, tmpDir, self.protocol,
                                  mode='review', inTmpFolder=inTmpFolder)
    elif param == 'doShowCC':
        fn = self.protocol._getPath('micrographs_ccmax.sqlite')
        view.append(ObjectView(self._project, self.protocol.strId(), fn))
        return view
    elif param == 'doShowFilt':
        fn = self.protocol._getPath('micrographs_pref.sqlite')
        view.append(ObjectView(self._project, self.protocol.strId(), fn))
        return view
    elif param == 'doShowBgEst':
        fn = self.protocol._getPath('micrographs_bg.sqlite')
        view.append(ObjectView(self._project, self.protocol.strId(), fn))
        return view
    elif param == 'doShowBgSub':
        fn = self.protocol._getPath('micrographs_bgfree.sqlite')
        view.append(ObjectView(self._project, self.protocol.strId(), fn))
        return view
    elif param == 'doShowSigma':
        fn = self.protocol._getPath('micrographs_lsigma.sqlite')
        view.append(ObjectView(self._project, self.protocol.strId(), fn))
        return view
    elif param == 'doShowMask':
        fn = self.protocol._getPath('micrographs_mask.sqlite')
        view.append(ObjectView(self._project, self.protocol.strId(), fn))
        return view
def writeFile():
    """ Dump the collected mesh coordinates of the current volume into
    a comma-separated file named after it. """
    outFn = pwutils.join(path, getMeshVolFileName(currentVolId))
    np.savetxt(outFn, np.asarray(coords), fmt='%d', delimiter=",")