def importCTF(self, mic, fileName):
    """Create a CTFModel for *mic* parsed from a ctffind output file.

    :param mic: micrograph object the CTF belongs to
    :param fileName: ctffind log/output file to parse
    :return: populated CTFModel (PSD file attached when one is found)
    """
    ctf = CTFModel()
    ctf.setMicrograph(mic)
    # ctffind4 output has a different format than ctffind3.
    readCtfModel(ctf, fileName, ctf4=ctffindOutputVersion(fileName) == 4)

    # Attach the PSD file if present. Suffixes are ordered by preference;
    # stop at the first hit so a generic ".mrc" does not overwrite the
    # more specific "_psd.mrc" when both files exist.
    fnBase = pwutils.removeExt(fileName)
    for suffix in ["_psd.mrc", ".mrc"]:
        psdFile = fnBase + suffix
        if os.path.exists(psdFile):
            ctf.setPsdFile(psdFile)
            break
    return ctf
def createChimeraScript(self):
    """Write a Chimera command script that colors the input volume by
    local resolution and adds a matching color key.

    Side effect: creates 'Chimera_resolution.cmd' under the protocol path.
    """
    fnRoot = "extra/"
    scriptFile = self.protocol._getPath('Chimera_resolution.cmd')
    imageFile = self.protocol._getExtraPath(OUTPUT_RESOLUTION_FILE_CHIMERA)
    img = ImageHandler().read(imageFile)
    imgData = img.getData()
    # Round the resolution range to 2 decimals for the color labels.
    min_Res = round(np.amin(imgData) * 100) / 100
    max_Res = round(np.amax(imgData) * 100) / 100

    numberOfColors = 21
    colors_labels = self.numberOfColors(min_Res, max_Res, numberOfColors)
    colorList = self.colorMapToColorList(colors_labels, self.getColorMap())

    # With half volumes the reference volume comes from 'inputVolume',
    # otherwise from 'inputVolumes'; the rest of the logic is identical.
    if self.protocol.halfVolumes.get() is True:
        inputVol = self.protocol.inputVolume.get()
    else:
        inputVol = self.protocol.inputVolumes.get()
    fnbase = removeExt(inputVol.getFileName())
    ext = getExt(inputVol.getFileName())
    # NOTE(review): only the first 4 chars of the extension are kept
    # (e.g. '.mrcs' -> '.mrc') — behavior preserved from the original.
    fninput = abspath(fnbase + ext[0:4])
    smprt = inputVol.getSamplingRate()

    # 'with' guarantees the script file is closed even if a write fails
    # (the original leaked the handle on exceptions).
    with open(scriptFile, 'w') as fhCmd:
        fhCmd.write("open %s\n" % fninput)
        fhCmd.write("open %s\n" % (fnRoot + OUTPUT_RESOLUTION_FILE_CHIMERA))
        fhCmd.write("volume #0 voxelSize %s\n" % (str(smprt)))
        fhCmd.write("volume #1 voxelSize %s\n" % (str(smprt)))
        fhCmd.write("vol #1 hide\n")

        # Color volume #0 per-voxel using volume #1 (resolution map) values.
        scolorStr = '%s,%s:' * numberOfColors
        scolorStr = scolorStr[:-1]
        line = ("scolor #0 volume #1 perPixel false cmap "
                + scolorStr + "\n") % colorList
        fhCmd.write(line)

        # Color key: show a numeric label only every 4th entry, blank the rest.
        scolorStr = '%s %s ' * numberOfColors
        str_colors = ()
        for idx, elem in enumerate(colorList):
            if (idx % 2 == 0):
                if ((idx % 8) == 0):
                    str_colors += str(elem),
                else:
                    str_colors += '" "',
            else:
                str_colors += elem,
        line = ("colorkey 0.01,0.05 0.02,0.95 " + scolorStr + "\n") % str_colors
        fhCmd.write(line)
def classifyStep(self): """ Run MSA-CL and MSA-SUM from IMAGIC. """ inputFile = self.inputMSA.get().getParticlesStack() inputFileBase = pwutils.removeExt(inputFile) inputFileImg = inputFileBase + '.img' inputFileHed = inputFileBase + '.hed' pwutils.createLink(inputFileImg, self._getTmpPath("particles.img")) pwutils.createLink(inputFileHed, self._getTmpPath("particles.hed")) inputFn = "tmp/particles" if self.doDownweight.get(): downweight = 'YES' else: downweight = 'NO' self._params.update({ 'particles': inputFn, 'eigs_num': self.numberOfFactors.get(), 'cls_num': self.numberOfClasses.get(), 'perc_ign': self.percentIgnore.get(), 'downweight': downweight, 'perc_ign_bad': self.percentIgnoreBad.get() }) classDir = self._getPath(self.CLASS_DIR) pwutils.cleanPath(classDir) pwutils.makePath(classDir) self.runTemplate('msa/msa-cls.b', self._params)
def splittingStep(self):
    """Split every input movie into odd/even frame metadata files.

    Runs 'xmipp_image_odd_even' per movie, writing <name>_odd.xmd and
    <name>_even.xmd into the extra path; optionally also sums frames.
    """
    for inputMovie in self.inputMovies.get():
        movieFn = inputMovie.getFileName()
        rootName = pwutils.removeExt(basename(movieFn))
        cmdArgs = '--img "%s" ' % movieFn
        cmdArgs += '-o "%s" ' % self._getExtraPath(rootName + "_odd.xmd")
        cmdArgs += '-e %s ' % self._getExtraPath(rootName + "_even.xmd")
        cmdArgs += '--type frames '
        if self.sumFrames.get() is True:
            cmdArgs += '--sum_frames'
        self.runJob('xmipp_image_odd_even', cmdArgs)
def readCTFModel(ctfModel, filename):
    """ Set values for the ctfModel.

    :param ctfModel: output CTF model
    :param filename: input file to parse (EMAN2-style JSON)
    """
    jsonDict = loadJson(filename)
    keyPos = None
    ctfPhaseShift = 0.0
    # The defocus entry lives under different keys depending on how the
    # CTF was estimated (per-frame vs whole micrograph).
    if 'ctf_frame' in jsonDict:
        keyPos = jsonDict['ctf_frame'][1]
    elif 'ctf' in jsonDict:
        keyPos = jsonDict['ctf'][0]
    else:
        # No usable entry found: mark the model with sentinel defocus values.
        setWrongDefocus(ctfModel)
    if keyPos:
        defocus = float(keyPos['defocus'])
        defocusAngle = float(keyPos['dfang'])
        dfdiff = float(keyPos['dfdiff'])
        ampcont = float(keyPos['ampcont'])
        # Convert microns (+astigmatism difference) to Angstrom defocus U/V.
        defocusU = 10000.0 * defocus + 5000.0 * dfdiff
        defocusV = 20000.0 * defocus - defocusU
        ctfPhaseShift = calculatePhaseShift(ampcont)
        ctfModel.setStandardDefocus(defocusU, defocusV, defocusAngle)
    if 'ctf_im2d' in jsonDict:
        # psdFile = jsonDict['ctf_im2d']['__image__'][0]
        fnBase = pwutils.removeExt(filename) + '_jsonimg'
        # "1@" selects the first slice of the HDF stack.
        psdFile = "1@%s.hdf" % fnBase
        # NOTE(review): the exists() check includes the "1@" prefix, which is
        # not a filesystem path — confirm this ever evaluates True.
        if os.path.exists(psdFile):
            ctfModel.setPsdFile(psdFile)
    ctfModel.setPhaseShift(float(ctfPhaseShift))
def runProjectionMatching(self, iterN, refN, args, **kwargs):
    """ Loop over all CTF groups and launch a projection matching for each one.
    Note: Iterate ctf groups in reverse order to have same order as
    in add_to docfiles from angular_class_average.
    #FIXME: check why reverse order is needed
    """
    projMatchRootName = self._getFileName('projMatchRootNames', iter=iterN, ref=refN)
    refname = self._getFileName('projectLibraryStk', iter=iterN, ref=refN)
    numberOfCtfGroups = self.numberOfCtfGroups.get()
    # ctfGroupName = self._getPath(self.ctfGroupDirectory, '%(ctfGroupRootName)s')
    #remove output metadata
    cleanPath(projMatchRootName)
    for ctfN in reversed(list(self.allCtfGroups())):
        self._log.info('CTF group: %d/%d' % (ctfN, numberOfCtfGroups))
        ctfArgs = ' -i %(inputdocfile)s -o %(outputname)s --ref %(refname)s'
        # WARNING: ctfArgs is later formatted with locals(), so the local
        # variable names below (inputdocfile, outputname, refname) must match
        # the %(...)s placeholders exactly — do not rename them.
        inputdocfile = self._getBlockFileName(ctfBlockName, ctfN, self.docFileInputAngles[iterN-1])
        outputname = self._getBlockFileName(ctfBlockName, ctfN, projMatchRootName)
        # Replace the global sampling file by this CTF group's sampling file.
        baseTxtFile = removeExt(refname)
        neighbFile = baseTxtFile + '_sampling.xmd'
        cleanPath(neighbFile)
        neighbFileb = baseTxtFile + '_group' + str(ctfN).zfill(self.FILENAMENUMBERLENGTH) + '_sampling.xmd'
        copyFile(neighbFileb, neighbFile)
        print "copied file ", neighbFileb, "to", neighbFile
        if self.doCTFCorrection and self._referenceIsCtfCorrected[iterN]:
            ctfArgs += ' --ctf %s' % self._getBlockFileName('', ctfN, self._getFileName('stackCTFs'))
        progArgs = ctfArgs % locals() + args
        self.runJob('xmipp_angular_projection_matching', progArgs, **kwargs)
def writePosFilesStep(self):
    """ Write the pos file for each micrograph in metadata format
    (both untilted and tilted).
    """
    writeSetOfCoordinates(self._getExtraPath(), self.inputCoords.getUntilted(),
                          scale=self.getBoxScale())
    writeSetOfCoordinates(self._getExtraPath(), self.inputCoords.getTilted(),
                          scale=self.getBoxScale())
    # We need to find the mapping by micName (without ext) between the
    # micrographs in the SetOfCoordinates and the Other micrographs
    if self._micsOther():
        micDict = {}
        # create tmp set with all mics from coords set
        coordMics = SetOfMicrographs(filename=':memory:')
        coordMics.copyInfo(self.inputCoords.getUntilted().getMicrographs())
        # Interleave untilted/tilted pairs into one in-memory set.
        for micU, micT in izip(self.inputCoords.getUntilted().getMicrographs(),
                               self.inputCoords.getTilted().getMicrographs()):
            # Ids are cleaned so append() assigns fresh ones in coordMics.
            micU.cleanObjId()
            micT.cleanObjId()
            coordMics.append(micU)
            coordMics.append(micT)
        # Map micName (no extension) -> expected .pos file path.
        for mic in coordMics:
            micBase = pwutils.removeBaseExt(mic.getFileName())
            micPos = self._getExtraPath(micBase + ".pos")
            micDict[pwutils.removeExt(mic.getMicName())] = micPos
        # now match micDict and inputMics
        if any(pwutils.removeExt(mic.getMicName()) in micDict
               for mic in self.inputMics):
            micKey = lambda mic: pwutils.removeExt(mic.getMicName())
        else:
            raise Exception('Could not match input micrographs and coordinates '
                            'by micName.')
        for mic in self.inputMics:  # micrograph from input (other)
            mk = micKey(mic)
            if mk in micDict:
                micPosCoord = micDict[mk]
                if exists(micPosCoord):
                    micBase = pwutils.removeBaseExt(mic.getFileName())
                    micPos = self._getExtraPath(micBase + ".pos")
                    # Rename the .pos file so it matches the "other" mic name.
                    if micPos != micPosCoord:
                        self.info('Moving %s -> %s' % (micPosCoord, micPos))
                        pwutils.moveFile(micPosCoord, micPos)
def sliceAndNameFromMicName(micname):
    """ Extracts z and tomo name from a micname composed with
    tomoSliceToMicName (expected format: S<slice>_<tomoName>).

    :param micname: micrograph name/path to parse
    :return: (sliceIndex, tomoName) tuple
    """
    parts = removeExt(basename(micname)).split("_")
    # Renamed from 'slice' to avoid shadowing the builtin.
    sliceIdx = int(parts[0].replace("S", ""))
    # Everything after the first token is the tomogram name (may itself
    # contain underscores).
    fileName = "_".join(parts[1:])
    return sliceIdx, fileName
def writePosFilesStep(self):
    """ Write the pos file for each micrograph in metadata format
    (both untilted and tilted).
    """
    writeSetOfCoordinates(self._getExtraPath(),
                          self.inputCoords.getUntilted(),
                          scale=self.getBoxScale())
    writeSetOfCoordinates(self._getExtraPath(),
                          self.inputCoords.getTilted(),
                          scale=self.getBoxScale())
    # We need to find the mapping by micName (without ext) between the
    # micrographs in the SetOfCoordinates and the Other micrographs
    if self._micsOther():
        micDict = {}
        # Map micName (no extension) -> expected .pos path, for both the
        # untilted and tilted micrograph of each pair.
        for micU, micT in izip(
                self.inputCoords.getUntilted().getMicrographs(),
                self.inputCoords.getTilted().getMicrographs()):
            micBaseU = pwutils.removeBaseExt(micU.getFileName())
            micPosU = self._getExtraPath(micBaseU + ".pos")
            micDict[pwutils.removeExt(micU.getMicName())] = micPosU
            micBaseT = pwutils.removeBaseExt(micT.getFileName())
            micPosT = self._getExtraPath(micBaseT + ".pos")
            micDict[pwutils.removeExt(micT.getMicName())] = micPosT
        # now match micDict and other mics (in self.ctfDict)
        if any(
                pwutils.removeExt(mic.getMicName()) in micDict
                for mic in self.ctfDict):
            micKey = lambda mic: pwutils.removeExt(mic.getMicName())
        else:
            raise Exception(
                'Could not match input micrographs and coordinates '
                'by micName.')
        for mic in self.ctfDict:
            mk = micKey(mic)
            if mk in micDict:
                micPosCoord = micDict[mk]
                if exists(micPosCoord):
                    micBase = pwutils.removeBaseExt(mic.getFileName())
                    micPos = self._getExtraPath(micBase + ".pos")
                    # Rename the .pos file so it matches the "other" mic name.
                    if micPos != micPosCoord:
                        self.info('Moving %s -> %s' % (micPosCoord, micPos))
                        pwutils.moveFile(micPosCoord, micPos)
def _getMaterialsList(vesicle):
    """Read the annotated material indices stored alongside *vesicle*.

    The companion .txt file contains one line of comma-separated indices
    (terminated by a newline); return them as a list of strings.
    """
    materialsFile = removeExt(vesicle) + '.txt'
    with open(materialsFile) as fh:
        rawContent = fh.read()
    # Drop the trailing newline(s), then split on commas.
    return rawContent.replace('\n', '').split(',')
def createChimeraScript(self):
    """Write a Chimera command script coloring the resized volume by the
    local-resolution map and adding a matching color key."""
    fnRoot = "extra/"
    scriptFile = self.protocol._getPath('Chimera_resolution.cmd')
    fhCmd = open(scriptFile, 'w')
    imageFile = self.protocol._getFileName(OUTPUT_RESOLUTION_FILE_CHIMERA)
    #imageFile = self.protocol._getFileName(OUTPUT_RESOLUTION_FILE)
    img = ImageHandler().read(imageFile)
    imgData = img.getData()
    # Ignore zero voxels (background) when computing the resolution range.
    imgData = imgData[imgData != 0]
    min_Res = round(np.amin(imgData) * 100) / 100
    max_Res = round(np.amax(imgData) * 100) / 100
    numberOfColors = 21
    colors_labels = self.numberOfColors(min_Res, max_Res, numberOfColors)
    colorList = self.colorMapToColorList(colors_labels, self.getColorMap())
    # NOTE(review): 'fnbase' is computed but never used below (the resized
    # volume is opened instead) — confirm it can be removed.
    fnbase = removeExt(self.protocol.inputVolume.get().getFileName())
    # ext = getExt(self.protocol.inputVolume.get().getFileName())
    # fninput = abspath(fnbase + ext[0:4])
    # fhCmd.write("open %s\n" % fninput)
    fhCmd.write("open %s\n" % (fnRoot + RESIZE_VOL))
    fhCmd.write("open %s\n" % (fnRoot + OUTPUT_RESOLUTION_FILE_CHIMERA))
    # fhCmd.write("open %s\n" % (fnRoot + OUTPUT_RESOLUTION_FILE))
    # smprt = self.protocol.inputVolume.get().getSamplingRate()
    # Sampling rate is fixed to 1.0 because the resized volume is used.
    smprt = 1.0
    fhCmd.write("volume #0 voxelSize %s step 1\n" % (str(smprt)))
    fhCmd.write("volume #1 voxelSize %s\n" % (str(smprt)))
    fhCmd.write("vol #1 hide\n")
    # Color volume #0 per-voxel using volume #1 (resolution map) values.
    scolorStr = '%s,%s:' * numberOfColors
    scolorStr = scolorStr[:-1]
    line = ("scolor #0 volume #1 perPixel false cmap "
            + scolorStr + "\n") % colorList
    fhCmd.write(line)
    # Color key: show a numeric label only every 4th entry, blank the rest.
    scolorStr = '%s %s ' * numberOfColors
    str_colors = ()
    for idx, elem in enumerate(colorList):
        if (idx % 2 == 0):
            if ((idx % 8) == 0):
                str_colors += str(elem),
            else:
                str_colors += '" "',
        else:
            str_colors += elem,
    line = ("colorkey 0.01,0.05 0.02,0.95 " + scolorStr + "\n") % str_colors
    fhCmd.write(line)
    fhCmd.close()
def convertXmdToStackStep(self):
    """Convert the per-movie odd/even .xmd metadata files (produced by the
    splitting step) into .mrc stacks via xmipp_image_convert."""
    for movie in self.inputMovies.get():
        movieRoot = pwutils.removeExt(basename(movie.getFileName()))
        # Convert the odd half first, then the even one.
        for half in ("_odd", "_even"):
            xmdFn = self._getExtraPath(movieRoot + half + ".xmd")
            mrcFn = self._getExtraPath(movieRoot + half + ".mrc")
            convertArgs = '-i "%s" ' % xmdFn
            convertArgs += '-o "%s" ' % mrcFn
            self.runJob('xmipp_image_convert', convertArgs)
def convertXmdToStackStep(self, tsObjId):
    """Convert the odd/even .xmd files of one tilt series into .mrc stacks.

    :param tsObjId: id of the tilt series within the input set
    """
    ts = self.inputSetOfTiltSeries.get()[tsObjId]
    tsId = ts.getTsId()
    tsRoot = pwutils.removeExt(
        os.path.basename(ts.getFirstItem().getFileName()))
    # Convert the odd stack first, then the even one; files live in a
    # per-tilt-series subfolder of the extra path.
    for half in ("_odd", "_even"):
        inputXmd = self._getExtraPath(
            os.path.join(tsId, tsRoot + half + ".xmd"))
        outputMrc = self._getExtraPath(
            os.path.join(tsId, tsRoot + half + ".mrc"))
        self.runJob('xmipp_image_convert',
                    "-i %s -o %s " % (inputXmd, outputMrc))
def createChimeraScriptDoA(self, infile, outfile, ellipfile):
    """Write a Chimera command script coloring the input volume by the
    degree-of-anisotropy (DoA) map and overlaying the ellipsoid mesh.

    :param infile: DoA map file name (under extra/)
    :param outfile: name of the .cmd script to create
    :param ellipfile: ellipsoid volume file name (under extra/)
    """
    fnRoot = "extra/"
    scriptFile = self.protocol._getPath(outfile)
    fhCmd = open(scriptFile, 'w')
    imageFile = self.protocol._getExtraPath(infile)
    img = ImageHandler().read(imageFile)
    imgData = img.getData()
    # DoA is normalized, so the color range is fixed to [0, 1].
    min_Res = 0.0  #round(np.amin(imgData)*100)/100
    max_Res = 1.0  #round(np.amax(imgData)*100)/100
    numberOfColors = 21
    colors_labels = self.numberOfColors(min_Res, max_Res, numberOfColors)
    colorList = self.colorMapToColorList(colors_labels, self.getColorMap())
    fnbase = removeExt(self.protocol.inputVolumes.get().getFileName())
    ext = getExt(self.protocol.inputVolumes.get().getFileName())
    # NOTE(review): only the first 4 chars of the extension are kept
    # (e.g. '.mrcs' -> '.mrc') — confirm this is intentional.
    fninput = abspath(fnbase + ext[0:4])
    fhCmd.write("open %s\n" % fninput)
    fhCmd.write("open %s\n" % (fnRoot + infile))
    fhCmd.write("open %s\n" % (fnRoot + ellipfile))
    smprt = self.protocol.inputVolumes.get().getSamplingRate()
    fhCmd.write("volume #0 voxelSize %s\n" % (str(smprt)))
    fhCmd.write("volume #1 voxelSize %s\n" % (str(smprt)))
    fhCmd.write("volume #2 voxelSize %s\n" % (str(smprt)))
    # Show the ellipsoid volume as a mesh over the colored map.
    fhCmd.write("volume #2 style mesh\n")
    fhCmd.write("vol #1 hide\n")
    # Color volume #0 per-voxel using volume #1 (DoA map) values.
    scolorStr = '%s,%s:' * numberOfColors
    scolorStr = scolorStr[:-1]
    line = ("scolor #0 volume #1 perPixel false cmap "
            + scolorStr + "\n") % colorList
    fhCmd.write(line)
    # Color key: show a numeric label only every 4th entry, blank the rest.
    scolorStr = '%s %s ' * numberOfColors
    str_colors = ()
    for idx, elem in enumerate(colorList):
        if (idx % 2 == 0):
            if ((idx % 8) == 0):
                str_colors += str(elem),
            else:
                str_colors += '" "',
        else:
            str_colors += elem,
    line = ("colorkey 0.01,0.05 0.02,0.95 " + scolorStr + "\n") % str_colors
    fhCmd.write(line)
    fhCmd.close()
def _getMovieRoot(self, movie):
    """Return the movie file name root with up to two extensions stripped
    (handles compressed names such as movie.mrc.bz2)."""
    # Try to use the 'original' fileName in case it is present;
    # the original could be different from the current filename if
    # we are dealing with compressed movies (e.g., movie.mrc.bz2)
    fn = movie.getAttributeValue('_originalFileName', movie.getFileName())
    # Remove the first extension
    fnRoot = pwutils.removeBaseExt(fn)
    # Check if there is a second extension
    # (assuming it is only a dot and 3 or 4 characters after it).
    # Do not perform this check if the file name is short.
    if len(fnRoot) > 5:
        if fnRoot[-4] == '.' or fnRoot[-5] == '.':
            fnRoot = pwutils.removeExt(fnRoot)
    return fnRoot
def createCtfPlot(ctfSet, ctfId):
    """Show the CTFFind avrot curves (amplitude spectrum, fit and quality
    of fit) for the CTF with id *ctfId* in an EmPlotter window."""
    model = ctfSet[ctfId]
    avrotFn = removeExt(model.getPsdFile()) + "_avrot.txt"
    plotter = EmPlotter(windowTitle='CTFFind results')
    axis = plotter.createSubPlot(getPlotSubtitle(model),
                                 'Spacial frequency (1/A)',
                                 'Amplitude (or cross-correlation)')
    _plotCurves(axis, avrotFn)
    plotter.showLegend(['Amplitude spectrum', 'CTF Fit', 'Quality of fit'],
                       loc='upper right')
    # Curves live in [0, 1]; pad the y-range slightly for readability.
    axis.set_ylim([-0.1, 1.1])
    axis.grid(True)
    plotter.show()
def convertInputStep(self, inputId, avgId, volId):
    """ Create the input file in STAR format as expected by Relion.
    If the input particles comes from Relion, just link the file.
    Params:
        particlesId: use this parameters just to force redo of convert if
        the input particles are changed.
    """
    inputSet = self.inputSet.get()
    imgStar = self._getFileName('input_particles')
    refStar = self._getFileName('input_refs')
    # Pass stack file as None to avoid write the images files
    self.info("Converting set from '%s' into '%s'" %
              (inputSet.getFileName(), imgStar))
    refSet = None
    # case refine3D
    if self.isInputClasses():
        refSet = self.inputSet.get()  # 2D or 3D classes
    else:
        if self.isInputAutoRefine():
            # Auto-refine: convert the reference volume instead of averages.
            em.ImageHandler().convert(self.referenceVolume.get(),
                                      self._getFileName('input_refvol'))
        else:  # Autopicking case
            refSet = self.referenceAverages.get()
    self.classDict = {}
    if refSet:
        self.info("Converting reference from '%s' into %s" %
                  (refSet.getFileName(), refStar))
        # Compute class mapping: sorted class ids -> consecutive 1-based ids.
        classList = [cls.getObjId() for cls in refSet]
        classList.sort()
        for i, c in enumerate(classList):
            self.classDict[c] = i + 1
        writeReferences(refSet, removeExt(refStar),
                        postprocessImageRow=self._updateClasses)
    # Write particles star file
    allParticles = self._allParticles(iterate=False)
    writeSetOfParticles(allParticles, imgStar, self._getPath(),
                        postprocessImageRow=self._postProcessImageRow)
def getUniqueFileName(fn, extension):
    """ Get an unique file for either link or convert files.
    It is possible that the base name overlap if they come
    from different runs. (like particles.mrcs after relion preprocess)

    NOTE: relies on 'outputRoot' and 'filesDict' from the enclosing scope
    (closure) — filesDict maps already-assigned output names.
    """
    newFn = os.path.join(outputRoot, pwutils.replaceBaseExt(fn, extension))
    newRoot = pwutils.removeExt(newFn)
    values = filesDict.values()
    counter = 1
    # Append a numeric suffix until the name is not already taken.
    while newFn in values:
        counter += 1
        newFn = '%s_%05d.%s' % (newRoot, counter, extension)
    return newFn
def classifyStep(self, imcFile, numberOfFactors, numberOfClasses): """ Apply the selected filter to particles. Create the set of particles. """ # Copy file to working directory, it could be also a link imcLocalFile = basename(imcFile) copyFile(imcFile, self._getPath(imcLocalFile)) self.info("Copied file '%s' to '%s' " % (imcFile, imcLocalFile)) # Spider automatically add _IMC to the ca-pca result file imcBase = removeExt(imcLocalFile).replace('_IMC', '') self._params.update({'x27': numberOfFactors, '[cas_prefix]': imcBase, }) self._updateParams() self.runTemplate(self.getScript(), self.getExt(), self._params)
def importCTF(self, mic, fileName):
    """Create a CTFModel for *mic* parsed from a ctffind3/4 output file,
    attaching the PSD file when one can be located."""
    ctf = CTFModel()
    ctf.setMicrograph(mic)
    readCtfModel(ctf, fileName, ctf4=ctffindOutputVersion(fileName) == 4)
    # Try to find the given PSD file associated with the cttfind log file
    # we handle special cases of .ctf extension and _ctffindX prefix for Relion runs
    fnBase = pwutils.removeExt(fileName)
    for suffix in ["_psd.mrc", ".mrc", ".ctf"]:
        psdPrefixes = [fnBase,
                       fnBase.replace("_ctffind3", ""),
                       fnBase.replace("_ctffind4", "")]
        for prefix in psdPrefixes:
            psdFile = prefix + suffix
            if os.path.exists(psdFile):
                # ':mrc' forces the .ctf file to be read as MRC format.
                if psdFile.endswith(".ctf"):
                    psdFile += ":mrc"
                # NOTE(review): no break here — a later (less specific)
                # match overwrites an earlier one; confirm this is intended.
                ctf.setPsdFile(psdFile)
    return ctf
def runProjectionMatching(self, iterN, refN, args, **kwargs):
    """ Loop over all CTF groups and launch a projection matching for each one.
    Note: Iterate ctf groups in reverse order to have same order as
    in add_to docfiles from angular_class_average.
    #FIXME: check why reverse order is needed
    """
    projMatchRootName = self._getFileName('projMatchRootNames', iter=iterN, ref=refN)
    refname = self._getFileName('projectLibraryStk', iter=iterN, ref=refN)
    numberOfCtfGroups = self.numberOfCtfGroups.get()
    # ctfGroupName = self._getPath(self.ctfGroupDirectory, '%(ctfGroupRootName)s')
    #remove output metadata
    cleanPath(projMatchRootName)
    for ctfN in reversed(list(self.allCtfGroups())):
        self._log.info('CTF group: %d/%d' % (ctfN, numberOfCtfGroups))
        ctfArgs = ' -i %(inputdocfile)s -o %(outputname)s --ref %(refname)s'
        # WARNING: ctfArgs is later formatted with locals(), so the local
        # variable names below (inputdocfile, outputname, refname) must match
        # the %(...)s placeholders exactly — do not rename them.
        inputdocfile = self._getBlockFileName(
            ctfBlockName, ctfN, self.docFileInputAngles[iterN - 1])
        outputname = self._getBlockFileName(ctfBlockName, ctfN,
                                            projMatchRootName)
        # Replace the global sampling file by this CTF group's sampling file.
        baseTxtFile = removeExt(refname)
        neighbFile = baseTxtFile + '_sampling.xmd'
        cleanPath(neighbFile)
        neighbFileb = baseTxtFile + '_group' + str(ctfN).zfill(
            self.FILENAMENUMBERLENGTH) + '_sampling.xmd'
        copyFile(neighbFileb, neighbFile)
        print "copied file ", neighbFileb, "to", neighbFile
        # Memory is scaled by the number of threads used by the program.
        threads = self.numberOfThreads.get()
        trhArgs = ' --mem %(mem)s --thr %(thr)s'
        thrParams = {
            'mem': self.availableMemory.get() * threads,
            'thr': threads,
        }
        if self.doCTFCorrection and self._referenceIsCtfCorrected[iterN]:
            ctfArgs += ' --ctf %s' % self._getBlockFileName(
                '', ctfN, self._getFileName('stackCTFs'))
        progArgs = ctfArgs % locals() + args + trhArgs % thrParams
        self.runJob('xmipp_angular_projection_matching', progArgs, **kwargs)
def loadAcquisitionInfo(self):
    """ Return a proper acquistionInfo (dict)
    or an error message (str).
    """
    # Delegate to the base implementation for non-file imports.
    if self.importFrom != self.IMPORT_FROM_FILES:
        return ProtImportImages.loadAcquisitionInfo(self)
    result = "Could not find acquistion information"
    # NOTE(review): the loop keeps overwriting 'result', so the info from
    # the LAST iterated file wins — confirm this is intended.
    for fileName, fileId in self.iterFiles():
        baseName = pwutils.removeExt(fileName)
        # First try the sibling XML named like the movie without '_frames'.
        xml1 = baseName.replace('_frames', '.xml')
        if os.path.exists(xml1):
            result = self._parseXML(xml1)
        else:
            # NOTE(review): xml2 is parsed without an existence check —
            # _parseXML presumably handles a missing file; verify.
            xml2 = baseName + '.xml'
            result = self._parseXML(xml2)
    return result
def importCTF(self, mic, fileName):
    """Create a CTFModel for *mic* parsed from a CTF estimation log file,
    attaching the PSD file when one can be located."""
    ctf = CTFModel()
    ctf.setMicrograph(mic)
    readCtfModel(ctf, fileName)
    # Try to find the given PSD file associated with the cttfind log file
    # we handle special cases of .ctf extension and _ctffindX prefix for Relion runs
    fnBase = pwutils.removeExt(fileName)
    for suffix in ['_psd.mrc', '.mrc', '.ctf']:
        psdPrefixes = [fnBase,
                       fnBase.replace('_ctffind3', ''),
                       fnBase.replace('_gctf', '')]
        for prefix in psdPrefixes:
            psdFile = prefix + suffix
            if os.path.exists(psdFile):
                # ':mrc' forces the .ctf file to be read as MRC format.
                if psdFile.endswith('.ctf'):
                    psdFile += ':mrc'
                # NOTE(review): no break here — a later (less specific)
                # match overwrites an earlier one; confirm this is intended.
                ctf.setPsdFile(psdFile)
    return ctf
def importCTF(self, mic, fileName):
    """Create a CTFModel for *mic* parsed from a ctffind3-format log file
    (ctf4=False), attaching the PSD file when one can be located."""
    ctf = CTFModel()
    ctf.setMicrograph(mic)
    readCtfModel(ctf, fileName, ctf4=False)
    # Try to find the given PSD file associated with the cttfind log file
    # we handle special cases of .ctf extension and _ctffindX prefix for Relion runs
    fnBase = pwutils.removeExt(fileName)
    for suffix in ['_psd.mrc', '.mrc', '.ctf']:
        psdPrefixes = [fnBase,
                       fnBase.replace('_ctffind3', ''),
                       fnBase.replace('_gctf', '')]
        for prefix in psdPrefixes:
            psdFile = prefix + suffix
            if os.path.exists(psdFile):
                # ':mrc' forces the .ctf file to be read as MRC format.
                if psdFile.endswith('.ctf'):
                    psdFile += ':mrc'
                # NOTE(review): no break here — a later (less specific)
                # match overwrites an earlier one; confirm this is intended.
                ctf.setPsdFile(psdFile)
    return ctf
def createCtfPlot(ctfSet, ctfId):
    """Show the Gctf EPA fitting curves for the CTF with id *ctfId*."""
    epaFn = pwutils.removeExt(ctfSet[ctfId].getPsdFile()) + "_EPA.txt"
    gridsize = [1, 1]
    plotter = EmPlotter(x=gridsize[0], y=gridsize[1],
                        windowTitle='CTF Fitting')
    axis = plotter.createSubPlot("CTF Fitting", 'Resolution (Angstroms)',
                                 'CTF', yformat=False)
    # Resolution improves to the right, so flip the x-axis.
    axis.invert_xaxis()
    # Columns 1..4 of the EPA file hold the four curves below.
    for column in (1, 2, 3, 4):
        _plotCurve(axis, column, epaFn)
    plotter.showLegend(['simulated CTF',
                        'equiphase avg.',
                        'equiphase avg. - bg',
                        'cross correlation'])
    axis.grid(True)
    plotter.show()
def createCtfPlot(ctfSet, ctfId):
    """Show the ctffind avrot curves (5 columns) for the CTF with id
    *ctfId* in an EmPlotter window."""
    ctfModel = ctfSet[ctfId]
    psdFn = ctfModel.getPsdFile()
    # The avrot text file sits next to the PSD file.
    fn = removeExt(psdFn) + "_avrot.txt"
    gridsize = [1, 1]
    xplotter = EmPlotter(x=gridsize[0], y=gridsize[1],
                         windowTitle='CTF Fitting')
    plot_title = "CTF Fitting"
    a = xplotter.createSubPlot(plot_title, 'pixels^-1', 'CTF', yformat=False)
    legendName = ['rotational avg. No Astg',
                  'rotational avg.',
                  'CTF Fit',
                  'Cross Correlation',
                  '2sigma cross correlation of noise']
    # Columns 1..5 of the avrot file map onto the legend entries above.
    for i in range(1, 6):
        _plotCurve(a, i, fn)
    xplotter.showLegend(legendName)
    a.grid(True)
    xplotter.show()
def convertPdbStep(self):
    """ Although is not mandatory, usually is used by the protocol to
    register the resulting outputs in the database.

    Runs xmipp_volume_from_pdb to voxelize the input PDB/CIF model.
    """
    pdbFn = self._getPdbFileName()
    outFile = removeExt(self._getVolName())
    # xmipp_volume_from_pdb expects PDB; convert CIF input first.
    if getExt(pdbFn) == ".cif":
        pdbFn2 = replaceBaseExt(pdbFn, 'pdb')
        cifToPdb(pdbFn, pdbFn2)
        pdbFn = pdbFn2
    samplingR = self.sampling.get()
    args = '-i %s --sampling %f -o %s' % (pdbFn, samplingR, outFile)
    if self.centerPdb:
        args += ' --centerPDB'
    if self.vol:
        # Match the size and origin of the associated reference volume;
        # origin (Angstrom) is converted to voxels via the sampling rate.
        vol = self.volObj.get()
        size = vol.getDim()
        ccp4header = headers.Ccp4Header(vol.getFileName(), readHeader=True)
        self.shifts = ccp4header.getOrigin()
        args += ' --size %d %d %d --orig %d %d %d' % (
            size[2], size[1], size[0],
            self.shifts[0] / samplingR,
            self.shifts[1] / samplingR,
            self.shifts[2] / samplingR)
    if self.setSize:
        args += ' --size'
        if self.size_x.hasValue():
            args += ' %d' % self.size_x.get()
            # y and z are only meaningful together, after x.
            if self.size_y.hasValue() and self.size_z.hasValue():
                args += ' %d %d' % (self.size_y.get(), self.size_z.get())
    self.info("Input file: " + pdbFn)
    self.info("Output file: " + outFile)
    program = "xmipp_volume_from_pdb"
    self.runJob(program, args)
def convertPdbStep(self):
    """ Although is not mandatory, usually is used by the protocol to
    register the resulting outputs in the database.

    Runs xmipp_volume_from_pdb to voxelize the input PDB model.
    """
    pdbFn = self._getPdbFileName()
    outFile = removeExt(self._getVolName())
    args = '-i %s --sampling %f -o %s' % (pdbFn, self.sampling.get(), outFile)
    if self.centerPdb:
        args += ' --centerPDB'
    if self.setSize:
        args += ' --size'
        # The explicit dimension is optional; bare '--size' lets the
        # program choose.
        if self.size.hasValue():
            args += ' %d' % self.size.get()
    self.info("Input file: " + pdbFn)
    self.info("Output file: " + outFile)
    program = "xmipp_volume_from_pdb"
    self.runJob(program, args)
def classifyStep(self, imcFile, numberOfFactors, numberOfClasses): """ Apply the selected filter to particles. Create the set of particles. """ # Copy file to working directory, it could be also a link imcLocalFile = basename(imcFile) copyFile(imcFile, self._getPath(imcLocalFile)) self.info("Copied file '%s' to '%s' " % (imcFile, imcLocalFile)) # Spider automatically add _IMC to the ca-pca result file # JMRT: I have modify the kmeans.msa script to not add _IMC # automatically, it can be also used with _SEQ suffix, # so we will pass the whole cas_file imcBase = removeExt(imcLocalFile)#.replace('_IMC', '') imcPrefix = imcBase.replace('_IMC', '').replace('_SEQ', '') self._params.update({'x27': numberOfFactors, '[cas_prefix]': imcPrefix, '[cas_file]': imcBase, }) self._updateParams() self.runTemplate(self.getScript(), self.getExt(), self._params)
def createCtfPlot(ctfSet, ctfId):
    """Show the Gctf EPA fitting curves for the CTF with id *ctfId* in an
    EmPlotter window."""
    ctfModel = ctfSet[ctfId]
    psdFn = ctfModel.getPsdFile()
    # The EPA text file sits next to the PSD file.
    fn = pwutils.removeExt(psdFn) + "_EPA.txt"
    gridsize = [1, 1]
    xplotter = EmPlotter(x=gridsize[0], y=gridsize[1],
                         windowTitle='CTF Fitting')
    plot_title = "CTF Fitting"
    a = xplotter.createSubPlot(plot_title, 'Resolution (Angstroms)', 'CTF',
                               yformat=False)
    # Resolution improves to the right, so flip the x-axis.
    a.invert_xaxis()
    # Columns 1..4 of the EPA file map onto the legend entries below.
    for i in range(1, 5):
        _plotCurve(a, i, fn)
    xplotter.showLegend([
        'simulated CTF',
        'equiphase avg.',
        'equiphase avg. - bg',
        'cross correlation'
    ])
    a.grid(True)
    xplotter.show()
def createCtfPlot(ctfSet, ctfId):
    """Show the Gctf EPA fitting curves for the CTF with id *ctfId*,
    selecting the log columns appropriate for the active Gctf version."""
    ctfModel = ctfSet[ctfId]
    psdFn = ctfModel.getPsdFile()
    # The EPA log file sits next to the PSD file.
    fn = pwutils.removeExt(psdFn) + "_EPA.log"
    xplotter = EmPlotter(windowTitle='CTF Fitting')
    plot_title = getPlotSubtitle(ctfModel)
    a = xplotter.createSubPlot(plot_title, 'Resolution (Angstroms)', 'CTF')
    # Resolution improves to the right, so flip the x-axis.
    a.invert_xaxis()
    # Gctf 1.18 writes extra columns, shifting the curves of interest.
    version = Plugin.getActiveVersion()
    curves = [1, 4, 5] if version == '1.18' else [1, 3, 4]
    for i in curves:
        _plotCurve(a, i, fn)
    xplotter.showLegend([
        'simulated CTF',
        # 'equiphase avg.',
        # 'bg', #  only for v1.18
        'equiphase avg. - bg',
        'cross correlation'
    ])
    a.grid(True)
    xplotter.show()
def classifyStep(self, imcFile, numberOfFactors, numberOfClasses): """ Apply the selected filter to particles. Create the set of particles. """ # Copy file to working directory, it could be also a link imcLocalFile = basename(imcFile) copyFile(imcFile, self._getPath(imcLocalFile)) self.info("Copied file '%s' to '%s' " % (imcFile, imcLocalFile)) # Spider automatically add _IMC to the ca-pca result file # JMRT: I have modify the kmeans.msa script to not add _IMC # automatically, it can be also used with _SEQ suffix, # so we will pass the whole cas_file imcBase = removeExt(imcLocalFile)#.replace('_IMC', '') imcPrefix = imcBase.replace('_IMC', '').replace('_SEQ', '') self._params.update({'x27': numberOfFactors, 'x30': self.numberOfThreads.get(), '[cas_prefix]': imcPrefix, '[cas_file]': imcBase, }) self._updateParams() self.runTemplate(self.getScript(), self.getExt(), self._params)
def convertPdbStep(self):
    """ Although is not mandatory, usually is used by the protocol to
    register the resulting outputs in the database.

    Runs xmipp_volume_from_pdb to voxelize the input PDB model.
    """
    inputPdb = self._getPdbFileName()
    volRoot = removeExt(self._getVolName())
    cmdParts = ['-i %s --sampling %f -o %s'
                % (inputPdb, self.sampling.get(), volRoot)]
    if self.centerPdb:
        cmdParts.append(' --centerPDB')
    if self.setSize:
        cmdParts.append(' --size')
        # The explicit dimension is optional; bare '--size' lets the
        # program choose.
        if self.size.hasValue():
            cmdParts.append(' %d' % self.size.get())
    self.info("Input file: " + inputPdb)
    self.info("Output file: " + volRoot)
    self.runJob("xmipp_volume_from_pdb", ''.join(cmdParts))
def importCTF(self, mic, fileName):
    """ Create a CTF model and populate its values.
    :param mic: input micrograph object
    :param fileName: input file to be parsed
    :return: CTFModel object
    """
    ctf = CTFModel()
    ctf.setMicrograph(mic)
    readCtfModel(ctf, fileName)
    # Try to find the given PSD file associated with the cttfind log file
    # we handle special cases of .ctf extension and _ctffind4 prefix for Relion runs
    fnBase = pwutils.removeExt(fileName)
    for suffix in ['_psd.mrc', '.mrc', '.ctf']:
        psdPrefixes = [fnBase,
                       fnBase.replace('_ctffind4', '')]
        for prefix in psdPrefixes:
            psdFile = prefix + suffix
            if pwutils.exists(psdFile):
                # ':mrc' forces the .ctf file to be read as MRC format.
                if psdFile.endswith('.ctf'):
                    psdFile += ':mrc'
                ctf.setPsdFile(psdFile)
                # Early return: keep the first (most specific) match.
                return ctf
    # No PSD file found; return the model without one.
    return ctf
def _getMicKey(particle):
    """Return a grouping key for *particle*'s micrograph: the micName
    without extension when available, otherwise the zero-padded micId."""
    coordinate = particle.getCoordinate()
    if coordinate is not None and coordinate.getMicName() is not None:
        return pwutils.removeExt(coordinate.getMicName())
    return '%05d' % particle.getMicId()
def insertAngularProjectLibraryStep(self, iterN, refN, **kwargs):
    """Insert the step that builds the angular projection library for
    iteration *iterN* and reference *refN*.

    Builds the xmipp_angular_project_library argument string (formatted
    with 'params') plus a 'stepParams' dict consumed by
    angularProjectLibraryStep, then registers the step.
    """
    args = ' -i %(maskedFileNamesIter)s --experimental_images %(experimentalImages)s'
    args += ' -o %(projectLibraryRootName)s --sampling_rate %(samplingRate)s --sym %(symmetry)s'
    # NOTE(review): the leading 'h' is appended directly after %(symmetry)s
    # — presumably the symmetry group helix/suffix convention; confirm.
    args += 'h --compute_neighbors'
    ###need one block per reference
    # Project all references
    xDim, yDim, zDim = self.input3DReferences.get().getDim()
    # Rough memory estimate in MiB (8 bytes per voxel).
    memoryUsed = (xDim * yDim * zDim * 8) / pow(2,20)
    if memoryUsed == 0:
        # If this value is 0, produce an division error in
        # runAngularProjectLibraryStep
        memoryUsed = 1
    stepParams = {'method': self.getEnumText('projectionMethod')}
    expImages = self._getExpImagesFileName(self.docFileInputAngles[iterN-1])
    projectLibraryRootName = self._getFileName('projectLibraryStk',
                                               iter=iterN, ref=refN)
    params = {'maskedFileNamesIter': self._getFileName('maskedFileNamesIters',
                                                       iter=iterN, ref=refN),
              'experimentalImages': expImages,
              'projectLibraryRootName': projectLibraryRootName,
              'samplingRate': self._angSamplingRateDeg[iterN],
              'symmetry': self._symmetry[iterN],
              }
    if self.maxChangeInAngles < 181:
        args += ' --near_exp_data --angular_distance %(maxChangeInAngles)s'
    else:
        args += ' --angular_distance -1'
    if self._perturbProjectionDirections[iterN]:
        # Perturbation amplitude scales with the angular sampling rate.
        args += ' --perturb %(perturb)s'
        params['perturb'] = math.sin(
            math.radians(self._angSamplingRateDeg[iterN])) / 4.
    if self.doRestricSearchbyTiltAngle:
        args += ' --min_tilt_angle %(tilt0)s --max_tilt_angle %(tiltF)s'
        params['tilt0'] = self.tilt0.get()
        params['tiltF'] = self.tiltF.get()
    if self.doCTFCorrection:
        params['ctfGroupSubsetFileName'] = self._getFileName('imageCTFpairs')
        args += ' --groups %(ctfGroupSubsetFileName)s'
    if len(self.symmetryGroupNeighbourhood.get()) > 1:
        params['symmetryGroupNeighbourhood'] = self.symmetryGroupNeighbourhood.get()
        args += ' --sym_neigh %(symmetryGroupNeighbourhood)s'
    if self._onlyWinner[iterN]:
        args += ' --only_winner'
    if stepParams['method'] == 'fourier':
        # Fourier projection needs roughly 6x the base memory plus extra
        # padding/kernel settings.
        memoryUsed = memoryUsed * 6
        stepParams['paddingAngularProjection'] = self.paddingAngularProjection.get()
        stepParams['kernelAngularProjection'] = self.getEnumText('kernelAngularProjection')
        stepParams['constantToAdd'] = self._constantToAddToFiltration[iterN]
    stepParams['memoryUsed'] = memoryUsed
    self._insertFunctionStep('angularProjectLibraryStep', iterN, refN,
                             args % params, stepParams, **kwargs)
    if not self.doCTFCorrection:
        # Without CTF groups, duplicate the sampling file as group 1 so the
        # downstream per-group lookup still works.
        src = removeExt(projectLibraryRootName) + '_sampling.xmd'
        dst = removeExt(projectLibraryRootName) + ('_group%06d_sampling.xmd' % 1)
        self._insertCopyFileStep(src, dst)
def _processMovie(self, movie):
    """Align one movie with unblur and produce the output micrograph.

    If only a sub-range of frames is requested, the movie stack is first
    converted in place to contain just that range. After running unblur,
    optionally computes PSDs, saves alignment plots and a thumbnail.
    Errors are logged (with traceback) without aborting the stream.
    """
    # Kept for the commented-out shift-writing call below.
    numberOfFrames = self._getNumberOfFrames(movie)
    #FIXME: Figure out how to properly write shifts for unblur
    #self._writeMovieAlignment(movie, numberOfFrames)
    a0, aN = self._getRange(movie, 'align')
    _, lstFrame, _ = movie.getFramesRange()
    movieBaseName = pwutils.removeExt(movie.getFileName())
    aveMicFn = movieBaseName + '_uncorrected_avg.mrc'

    if a0 > 1 or aN < lstFrame:
        # Unblur consumes the whole stack, so extract the requested
        # frame range into a temporary stack and swap it in.
        from pyworkflow.em import ImageHandler
        ih = ImageHandler()
        movieInputFn = movie.getFileName()
        if movieInputFn.endswith("mrc"):
            movieInputFn += ":mrcs"  # force stack interpretation
        movieConverted = pwutils.removeExt(movieInputFn) + "_tmp.mrcs"
        ih.convertStack(movieInputFn, movieConverted, a0, aN)
        # Here, only temporal movie file (or link) stored in
        # tmp/movie_?????? is removed before move the converted file. It
        # is necessary 'cause if it is overwritten you may lost your
        # original data.
        os.remove(movie.getFileName())
        pwutils.moveFile(movieConverted, movie.getFileName())

    movieSet = self.inputMovies.get()
    self._createLink(movie)
    # Renamed from 'range' to avoid shadowing the builtin.
    numberOfFramesRange = aN - a0 + 1
    self._argsUnblur(movie, numberOfFramesRange)

    try:
        self.runJob(self._program, self._args)
        outMicFn = self._getExtraPath(self._getOutputMicName(movie))
        if not os.path.exists(outMicFn):
            # if only DW mic is saved
            outMicFn = self._getExtraPath(self._getOutputMicWtName(movie))
        if self.doComputePSD:
            # Compute uncorrected avg mic
            roi = [0, 0, 0, 0]
            fakeShiftsFn = self.writeZeroShifts(movie)
            self.averageMovie(movie, fakeShiftsFn, aveMicFn, binFactor=1,
                              roi=roi, dark=None, gain=movieSet.getGain())
            self.computePSDs(movie, aveMicFn, outMicFn,
                             outputFnCorrected=self._getPsdJpeg(movie))
        self._saveAlignmentPlots(movie)
        if self._doComputeMicThumbnail():
            self.computeThumbnail(outMicFn,
                                  outputFn=self._getOutputMicThumbnail(movie))
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # still propagate; keep the best-effort behavior for real errors.
        print("ERROR: Movie %s failed\n" % movie.getFileName())
        import traceback
        traceback.print_exc()
def writeCtfStarStep(self):
    """Export the input CTF set (plus micrographs and PSD files) to a
    Relion-style STAR file under the export path.

    Micrographs that are missing from the input set or whose file does
    not exist are skipped with a console message. PSD files, when
    present, are copied into <export>/PSD and referenced with paths
    relative to the export dir.
    """
    # Start from a clean export directory.
    pwutils.cleanPath(self._getExportPath())
    pwutils.makePath(self._getExportPath())
    inputCTF = self.inputCTF.get()

    if self.micrographSource == 0:  # same as CTF estimation
        ctfMicSet = inputCTF.getMicrographs()
    else:
        ctfMicSet = self.inputMicrographs.get()

    # In-memory set: we only need it transiently for the STAR writer.
    micSet = SetOfMicrographs(filename=':memory:')
    psd = inputCTF.getFirstItem().getPsdFile()
    # Assume all items have a PSD iff the first one does.
    hasPsd = psd and os.path.exists(psd)
    if hasPsd:
        psdPath = self._getExportPath('PSD')
        pwutils.makePath(psdPath)
        print("Writing PSD files to %s" % psdPath)

    for ctf in inputCTF:
        # Get the corresponding micrograph
        mic = ctfMicSet[ctf.getObjId()]
        if mic is None:
            print("Skipping CTF id: %s, it is missing from input "
                  "micrographs. " % ctf.getObjId())
            continue
        micFn = mic.getFileName()
        if not os.path.exists(micFn):
            print("Skipping micrograph %s, it does not exists. " % micFn)
            continue
        mic2 = mic.clone()
        mic2.setCTF(ctf)
        if hasPsd:
            psdFile = ctf.getPsdFile()
            newPsdFile = os.path.join(
                psdPath, '%s_psd.mrc' % pwutils.removeExt(mic.getMicName()))
            if not os.path.exists(psdFile):
                print("PSD file %s does not exits" % psdFile)
                print("Skipping micrograph %s" % micFn)
                continue
            pwutils.copyFile(psdFile, newPsdFile)
            # PSD path is relative to Export dir
            newPsdFile = os.path.relpath(newPsdFile, self._getExportPath())
            ctf.setPsdFile(newPsdFile)
        else:
            # remove pointer to non-existing psd file
            ctf.setPsdFile(None)
        micSet.append(mic2)

    print("Writing set: %s to: %s" % (inputCTF, self._getStarFile()))
    micDir = self._getExportPath('Micrographs')
    pwutils.makePath(micDir)
    starWriter = convert.createWriter(rootDir=self._getExportPath(),
                                      outputDir=micDir, useBaseName=True)
    starWriter.writeSetOfMicrographs(micSet, self._getStarFile())
def _getNoisyOutputPath(self, fnvol):
    """Return the extra-path location for the noisy copy of *fnvol*,
    built as <basename-without-ext>_Noisy.vol."""
    stem = removeExt(basename(fnvol))
    return self._getExtraPath(stem + '_Noisy.vol')
def _processMovie(self, movie):
    """Align one movie with unblur and produce the output micrograph.

    If only a sub-range of frames is requested, the movie stack is first
    converted in place to contain just that range. After running unblur,
    optionally computes PSDs, saves alignment plots and a thumbnail.
    Errors are logged (with traceback) without aborting the stream.
    """
    # Kept for the commented-out shift-writing call below.
    numberOfFrames = self._getNumberOfFrames(movie)
    #FIXME: Figure out how to properly write shifts for unblur
    #self._writeMovieAlignment(movie, numberOfFrames)
    a0, aN = self._getRange(movie, 'align')
    _, lstFrame, _ = movie.getFramesRange()
    movieBaseName = pwutils.removeExt(movie.getFileName())
    aveMicFn = movieBaseName + '_uncorrected_avg.mrc'

    if a0 > 1 or aN < lstFrame:
        # Unblur consumes the whole stack, so extract the requested
        # frame range into a temporary stack and swap it in.
        from pyworkflow.em import ImageHandler
        ih = ImageHandler()
        movieInputFn = movie.getFileName()
        if movieInputFn.endswith("mrc"):
            movieInputFn += ":mrcs"  # force stack interpretation
        movieConverted = pwutils.removeExt(movieInputFn) + "_tmp.mrcs"
        ih.convertStack(movieInputFn, movieConverted, a0, aN)
        # Here, only temporal movie file (or link) stored in
        # tmp/movie_?????? is removed before move the converted file. It
        # is necessary 'cause if it is overwritten you may lost your
        # original data.
        os.remove(movie.getFileName())
        pwutils.moveFile(movieConverted, movie.getFileName())

    movieSet = self.inputMovies.get()
    self._createLink(movie)
    # Renamed from 'range' to avoid shadowing the builtin.
    numberOfFramesRange = aN - a0 + 1
    self._argsUnblur(movie, numberOfFramesRange)

    try:
        self.runJob(self._program, self._args)
        outMicFn = self._getExtraPath(self._getOutputMicName(movie))
        if not os.path.exists(outMicFn):
            # if only DW mic is saved
            outMicFn = self._getExtraPath(self._getOutputMicWtName(movie))
        if self.doComputePSD:
            # Compute uncorrected avg mic
            roi = [0, 0, 0, 0]
            fakeShiftsFn = self.writeZeroShifts(movie)
            self.averageMovie(movie, fakeShiftsFn, aveMicFn, binFactor=1,
                              roi=roi, dark=None, gain=movieSet.getGain())
            self.computePSDs(movie, aveMicFn, outMicFn,
                             outputFnCorrected=self._getPsdJpeg(movie))
        self._saveAlignmentPlots(movie)
        if self._doComputeMicThumbnail():
            self.computeThumbnail(outMicFn,
                                  outputFn=self._getOutputMicThumbnail(movie))
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # still propagate; traceback added for parity with the sibling
        # implementation, otherwise failures were silent beyond one line.
        print("ERROR: Movie %s failed\n" % movie.getFileName())
        import traceback
        traceback.print_exc()
def _getNoisyOutputPath(self, fnvol):
    """Return the extra-path location for the noisy copy of *fnvol*,
    built as <basename-without-ext>_Noisy.mrc."""
    stem = removeExt(basename(fnvol))
    return self._getExtraPath(stem + '_Noisy.mrc')
def __init__(self): parser = argparse.ArgumentParser( description="Create movie stacks from the individual " "frame files.") add = parser.add_argument # shortcut add('--files', default='', help='Pattern to match input frame files.') add('-n', help='Number of frames per movie.') add('--suffix', help='Provide suffix added to create movie file. ' 'e.g. _frames.mrcs') add('--delete_frames', action='store_true', help='Provide this option if you want to delete individual frame ' 'files after the movie stack is created. ') args = parser.parse_args() n = int(args.n) frameRegex = re.compile("(?P<prefix>.+[^\d]+)(?P<frameid>\d+)") # Group all frames for each movie # Key of the dictionary will be the common prefix and the value # will be a list with all frames in that movie frameDict = {} filePaths = glob.glob(args.files) filePaths.sort() for fileName in filePaths: fnNoExt = pwutils.removeExt(fileName) match = frameRegex.match(fnNoExt) if match is None: raise Exception("Incorrect match of frame files pattern!") d = match.groupdict() prefix = d['prefix'] frameid = int(d['frameid']) if prefix not in frameDict: frameDict[prefix] = [] frameDict[prefix].append((frameid, fileName)) suffix = args.suffix ih = ImageHandler() for k, v in frameDict.iteritems(): if len(v) != n: raise Exception("Incorrect number of frames!") movieFn = k + suffix movieOut = movieFn if movieOut.endswith("mrc"): movieOut += ":mrcs" print "Writing movie stack: ", movieFn for i, frame in enumerate(sorted(v, key=lambda x: x[0])): frameFn = frame[1] # Frame name stored previously ih.convert(frameFn, (i+1, movieFn)) if args.delete_frames: pwutils.cleanPath(frameFn)
def _processMovie(self, movie):
    """Align one movie with motioncorr or motioncor2 (self.useMotioncor2).

    Builds the program-specific command line, runs it inside the movie
    folder, then computes optional PSDs, alignment plots and a thumbnail
    (possibly in a worker thread). Failures are reported on stdout and
    do not abort the protocol.
    """
    inputMovies = self.inputMovies.get()
    movieFolder = self._getOutputMovieFolder(movie)
    outputMicFn = self._getRelPath(self._getOutputMicName(movie),
                                   movieFolder)
    outputMovieFn = self._getRelPath(self._getOutputMovieName(movie),
                                     movieFolder)
    movieBaseName = pwutils.removeExt(movie.getFileName())
    aveMicFn = movieBaseName + '_uncorrected_avg.mrc'
    logFile = self._getRelPath(self._getMovieLogFile(movie), movieFolder)
    a0, aN = self._getRange(movie, 'align')

    if not self.useMotioncor2:
        # --- motioncorr (v1) command line ---
        # Get the number of frames and the range to be used
        # for alignment and sum
        s0, sN = self._getRange(movie, 'sum')
        argsDict = {'-crx': self.cropOffsetX.get(),
                    '-cry': self.cropOffsetY.get(),
                    '-cdx': self.cropDimX.get(),
                    '-cdy': self.cropDimY.get(),
                    '-bin': self.binFactor.get(),
                    '-nst': '%d' % a0,
                    '-ned': '%d' % aN,
                    '-nss': '%d' % s0,
                    '-nes': '%d' % sN,
                    '-flg': logFile,
                    }
        args = '"%s" ' % movie.getBaseName()
        args += ' '.join(['%s %s' % (k, v)
                          for k, v in argsDict.iteritems()])
        if inputMovies.getGain():
            args += ' -fgr "%s"' % inputMovies.getGain()
        if inputMovies.getDark():
            args += ' -fdr "%s"' % inputMovies.getDark()
        if self.doSaveAveMic:
            args += ' -fcs "%s" ' % outputMicFn
        if self.doSaveMovie:
            args += ' -fct "%s" -ssc 1' % outputMovieFn
        # %(GPU)s is a placeholder substituted later by the step runner.
        args += ' -gpu %(GPU)s'
        args += ' ' + self.extraParams.get()
        program = MOTIONCORR_PATH
    else:
        # --- motioncor2 command line ---
        logFileBase = (logFile.replace('0-Full.log', '').replace(
            '0-Patch-Full.log', ''))
        # default values for motioncor2 are (1, 1)
        cropDimX = self.cropDimX.get() or 1
        cropDimY = self.cropDimY.get() or 1
        numbOfFrames = self._getNumberOfFrames(movie)
        if self.doApplyDoseFilter:
            preExp, dose = self._getCorrectedDose(inputMovies)
        else:
            preExp, dose = 0.0, 0.0
        # reset values = 1 to 0 (motioncor2 does it automatically,
        # but we need to keep this for consistency)
        if self.patchX.get() == 1:
            self.patchX.set(0)
        if self.patchY.get() == 1:
            self.patchY.set(0)
        argsDict = {'-OutMrc': '"%s"' % outputMicFn,
                    '-Patch': '%d %d' % (self.patchX, self.patchY),
                    '-MaskCent': '%d %d' % (self.cropOffsetX,
                                            self.cropOffsetY),
                    '-MaskSize': '%d %d' % (cropDimX, cropDimY),
                    '-FtBin': self.binFactor.get(),
                    '-Tol': self.tol.get(),
                    '-Group': self.group.get(),
                    '-FmDose': dose,
                    '-Throw': '%d' % a0,
                    '-Trunc': '%d' % (abs(aN - numbOfFrames + 1)),
                    '-PixSize': inputMovies.getSamplingRate(),
                    '-kV': inputMovies.getAcquisition().getVoltage(),
                    '-LogFile': logFileBase,
                    }
        # Options not supported by the oldest (03162016) release.
        if getVersion('MOTIONCOR2') != '03162016':
            argsDict['-InitDose'] = preExp
            argsDict['-OutStack'] = 1 if self.doSaveMovie else 0
        if self.isSemVersion():
            if self.defectFile.get():
                argsDict['-DefectFile'] = self.defectFile.get()
            if self.versionGE('1.0.1'):
                # Patch overlap was introduced in 1.0.1
                patchOverlap = self.getAttributeValue('patchOverlap', None)
                if patchOverlap:  # 0 or None is False
                    argsDict['-Patch'] += " %d" % patchOverlap
        if self._supportsMagCorrection() and self.doMagCor:
            if self.useEst:
                inputEst = self.inputEst.get().getOutputLog()
                if getVersion('MOTIONCOR2') == '01302017':
                    input_params = parseMagCorrInput(inputEst)
                    # this version uses stretch parameters as following:
                    # 1/maj, 1/min, -angle
                    argsDict['-Mag'] = '%0.3f %0.3f %0.3f' % (
                        1.0 / input_params[1],
                        1.0 / input_params[2],
                        -1 * input_params[0])
                else:
                    # While motioncor2 >=1.0.0 uses estimation params AS IS
                    input_params = parseMagEstOutput(inputEst)
                    argsDict['-Mag'] = '%0.3f %0.3f %0.3f' % (
                        input_params[1],
                        input_params[2],
                        input_params[0])
            else:
                argsDict['-Mag'] = '%0.3f %0.3f %0.3f' % (self.scaleMaj,
                                                          self.scaleMin,
                                                          self.angDist)
        ext = pwutils.getExt(movie.getFileName()).lower()
        if ext in ['.mrc', '.mrcs']:
            args = ' -InMrc "%s" ' % movie.getBaseName()
        elif ext in ['.tif', '.tiff']:
            args = ' -InTiff "%s" ' % movie.getBaseName()
        else:
            raise Exception("Unsupported format: %s" % ext)
        args += ' '.join(['%s %s' % (k, v)
                          for k, v in argsDict.iteritems()])
        if inputMovies.getGain():
            args += ' -Gain "%s" ' % inputMovies.getGain()
        if inputMovies.getDark():
            args += ' -Dark "%s"' % inputMovies.getDark()
        # %(GPU)s is a placeholder substituted later by the step runner.
        args += ' -Gpu %(GPU)s'
        args += ' ' + self.extraParams2.get()
        program = MOTIONCOR2_PATH

    try:
        self.runJob(program, args, cwd=movieFolder,
                    env=getEnviron(self.useMotioncor2))
        self._fixMovie(movie)
        # Compute PSDs
        outMicFn = self._getExtraPath(self._getOutputMicName(movie))
        if not os.path.exists(outMicFn):
            # if only DW mic is saved
            outMicFn = self._getExtraPath(self._getOutputMicWtName(movie))

        def _extraWork():
            # Post-processing that may run in a worker thread.
            if self.doComputePSD:
                # Compute uncorrected avg mic
                roi = [self.cropOffsetX.get(), self.cropOffsetY.get(),
                       self.cropDimX.get(), self.cropDimY.get()]
                fakeShiftsFn = self.writeZeroShifts(movie)
                self.averageMovie(movie, fakeShiftsFn, aveMicFn,
                                  binFactor=self.binFactor.get(),
                                  roi=roi, dark=inputMovies.getDark(),
                                  gain=inputMovies.getGain())
                self.computePSDs(movie, aveMicFn, outMicFn,
                                 outputFnCorrected=self._getPsdJpeg(movie))
            self._saveAlignmentPlots(movie)
            if self._doComputeMicThumbnail():
                self.computeThumbnail(outMicFn,
                                      outputFn=self._getOutputMicThumbnail(
                                          movie))
            # This protocols takes control of clean up the temporary movie folder
            # which is required mainly when using a thread for this extra work
            self._cleanMovieFolder(movieFolder)

        if self._useWorkerThread():
            thread = Thread(target=_extraWork)
            thread.start()
        else:
            _extraWork()
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt and
        # loses the traceback — consider 'except Exception' + print_exc.
        print("ERROR: Movie %s failed\n" % movie.getName())
def iterNewInputFiles(self):
    """ In the case of importing movies, we want to override this method
    for the case when input are individual frames and we want to create
    movie stacks before importing.
    The frames pattern should contains a part delimited by $.
    The id expression with # is not supported for simplicity.

    Yields (fileName, uniqueFn, fileId) tuples for files to import.
    """
    if not (self.inputIndividualFrames and self.stackFrames):
        # In this case behave just as
        # the base importer: delegate and re-yield its results.
        if self.streamingSocket:
            iterInputFiles = self.iterFilenamesFromSocket()
        else:
            iterInputFiles = ProtImportMicBase.iterNewInputFiles(self)
        for fileName, uniqueFn, fileId in iterInputFiles:
            yield fileName, uniqueFn, fileId
        return

    # From here on: input is individual frames to be stacked into movies.
    if self.dataStreaming:
        if self.streamingSocket:
            filePaths = [f[0] for f in self.iterFilenamesFromSocket()]
        else:
            # Consider only the files that are not changed in the fileTime
            # delta if processing data in streaming
            fileTimeout = timedelta(seconds=self.fileTimeout.get())
            filePaths = [f for f in self.getMatchFiles()
                         if not self.fileModified(f, fileTimeout)]
    else:
        filePaths = self.getMatchFiles()

    # prefix = everything up to the last non-digit char; frameid = digits
    frameRegex = re.compile("(?P<prefix>.+[^\d]+)(?P<frameid>\d+)")
    # Group all frames for each movie
    # Key of the dictionary will be the common prefix and the value
    # will be a list with all frames in that movie
    frameDict = {}
    for fileName in filePaths:
        fnNoExt = pwutils.removeExt(fileName)
        match = frameRegex.match(fnNoExt)
        if match is None:
            raise Exception("Incorrect match of frame files pattern!")
        d = match.groupdict()
        prefix = d['prefix']
        frameid = int(d['frameid'])
        if prefix not in frameDict:
            frameDict[prefix] = []
        frameDict[prefix].append((frameid, fileName))

    suffix = self.movieSuffix.get()
    ih = ImageHandler()

    # Yield stacks created on previous calls that are not yet imported.
    for movieFn in self.createdStacks:
        uniqueFn = basename(movieFn)
        if uniqueFn not in self.importedFiles:
            yield movieFn, uniqueFn, None

    def checkMovie():
        # Write any movie whose full set of frames has arrived; the new
        # stack is registered in self.createdStacks and yielded on the
        # NEXT call of this generator (see loop above).
        for k, v in frameDict.iteritems():
            moviePath = os.path.dirname(k)
            movieFn = join(moviePath + "/",
                           self._getUniqueFileName(k) + suffix)
            if self.writeMoviesInProject:
                movieFn = self._getExtraPath(os.path.basename(movieFn))
            if (movieFn not in self.importedFiles and
                    movieFn not in self.createdStacks and
                    len(v) == self.numberOfIndividualFrames):
                movieOut = movieFn
                if movieOut.endswith("mrc"):
                    movieOut += ":mrcs"  # force stack interpretation
                # By default we will write the movie stacks
                # unless we are in continue mode and the file exists
                writeMovie = True
                if (self.isContinued() and os.path.exists(movieFn)):
                    self.info("Skipping movie stack: %s, seems to be done"
                              % movieFn)
                    writeMovie = False
                if writeMovie:
                    self.info("Writing movie stack: %s" % movieFn)
                    # Remove the output file if exists
                    pwutils.cleanPath(movieFn)
                    for i, frame in enumerate(sorted(v,
                                                     key=lambda x: x[0])):
                        # Frame name stored previously
                        frameFn = frame[1]
                        ih.convert(frameFn, (i+1, movieOut))
                        if self.deleteFrames:
                            pwutils.cleanPath(frameFn)
                # Now return the newly created movie file as imported file
                self.createdStacks.add(movieFn)

    # NOTE(review): 'return <value>' inside a generator is a SyntaxError
    # on Python 2 and checkMovie() returns None anyway — confirm this
    # module runs on Python 3; a plain 'checkMovie()' + 'return' would be
    # equivalent and portable.
    return checkMovie()
def _processMovie(self, movie):
    """Align one movie with motioncorr or motioncor2 (self.useMotioncor2).

    Builds the program-specific command line (GPU ids taken from
    self.GPUIDs), runs it inside the movie folder, then computes
    optional PSDs, alignment plots and a thumbnail. Failures are
    reported on stdout and do not abort the protocol.
    """
    inputMovies = self.inputMovies.get()
    movieFolder = self._getOutputMovieFolder(movie)
    outputMicFn = self._getRelPath(self._getOutputMicName(movie),
                                   movieFolder)
    outputMovieFn = self._getRelPath(self._getOutputMovieName(movie),
                                     movieFolder)
    movieBaseName = pwutils.removeExt(movie.getFileName())
    aveMicFn = movieBaseName + '_uncorrected_avg.mrc'
    logFile = self._getRelPath(self._getMovieLogFile(movie), movieFolder)
    a0, aN = self._getRange(movie, 'align')

    if not self.useMotioncor2:
        # --- motioncorr (v1) command line ---
        # Get the number of frames and the range to be used
        # for alignment and sum
        s0, sN = self._getRange(movie, 'sum')
        argsDict = {
            '-crx': self.cropOffsetX.get(),
            '-cry': self.cropOffsetY.get(),
            '-cdx': self.cropDimX.get(),
            '-cdy': self.cropDimY.get(),
            '-bin': self.binFactor.get(),
            '-nst': '%d' % a0,
            '-ned': '%d' % aN,
            '-nss': '%d' % s0,
            '-nes': '%d' % sN,
            '-gpu': self.GPUIDs.get(),
            '-flg': logFile,
        }
        args = '"%s" ' % movie.getBaseName()
        args += ' '.join(['%s %s' % (k, v)
                          for k, v in argsDict.iteritems()])
        if inputMovies.getGain():
            args += ' -fgr "%s"' % inputMovies.getGain()
        if inputMovies.getDark():
            args += ' -fdr "%s"' % inputMovies.getDark()
        if self.doSaveAveMic:
            args += ' -fcs "%s" ' % outputMicFn
        if self.doSaveMovie:
            args += ' -fct "%s" -ssc 1' % outputMovieFn
        args += ' ' + self.extraParams.get()
        program = MOTIONCORR_PATH
    else:
        # --- motioncor2 command line ---
        logFileBase = (logFile.replace('0-Full.log', '').replace(
            '0-Patch-Full.log', ''))
        # default values for motioncor2 are (1, 1)
        cropDimX = self.cropDimX.get() or 1
        cropDimY = self.cropDimY.get() or 1
        numbOfFrames = self._getNumberOfFrames(movie)
        if self.doApplyDoseFilter:
            preExp, dose = self._getCorrectedDose(inputMovies)
        else:
            preExp, dose = 0.0, 0.0
        # reset values = 1 to 0 (motioncor2 does it automatically,
        # but we need to keep this for consistency)
        if self.patchX.get() == 1:
            self.patchX.set(0)
        if self.patchY.get() == 1:
            self.patchY.set(0)
        argsDict = {
            '-OutMrc': '"%s"' % outputMicFn,
            '-Patch': '%d %d' % (self.patchX, self.patchY),
            '-MaskCent': '%d %d' % (self.cropOffsetX, self.cropOffsetY),
            '-MaskSize': '%d %d' % (cropDimX, cropDimY),
            '-FtBin': self.binFactor.get(),
            '-Tol': self.tol.get(),
            '-Group': self.group.get(),
            '-FmDose': dose,
            '-Throw': '%d' % a0,
            '-Trunc': '%d' % (abs(aN - numbOfFrames + 1)),
            '-PixSize': inputMovies.getSamplingRate(),
            '-kV': inputMovies.getAcquisition().getVoltage(),
            '-Gpu': self.GPUIDs.get(),
            '-LogFile': logFileBase,
        }
        # Options not supported by the oldest (03162016) release.
        if getVersion('MOTIONCOR2') != '03162016':
            argsDict['-InitDose'] = preExp
            argsDict['-OutStack'] = 1 if self.doSaveMovie else 0
        if self.isSemVersion():
            if self.defectFile.get():
                argsDict['-DefectFile'] = self.defectFile.get()
            if self.versionGE(
                    '1.0.1'):  # Patch overlap was introduced in 1.0.1
                patchOverlap = self.getAttributeValue('patchOverlap', None)
                if patchOverlap:  # 0 or None is False
                    argsDict['-Patch'] += " %d" % patchOverlap
        if self._supportsMagCorrection() and self.doMagCor:
            if self.useEst:
                inputEst = self.inputEst.get().getOutputLog()
                if getVersion('MOTIONCOR2') == '01302017':
                    input_params = parseMagCorrInput(inputEst)
                    # this version uses stretch parameters as following:
                    # 1/maj, 1/min, -angle
                    argsDict['-Mag'] = '%0.3f %0.3f %0.3f' % (
                        1.0 / input_params[1],
                        1.0 / input_params[2],
                        -1 * input_params[0])
                else:
                    # While motioncor2 >=1.0.0 uses estimation params AS IS
                    input_params = parseMagEstOutput(inputEst)
                    argsDict['-Mag'] = '%0.3f %0.3f %0.3f' % (
                        input_params[1],
                        input_params[2],
                        input_params[0])
            else:
                argsDict['-Mag'] = '%0.3f %0.3f %0.3f' % (
                    self.scaleMaj,
                    self.scaleMin,
                    self.angDist)
        ext = pwutils.getExt(movie.getFileName()).lower()
        if ext in ['.mrc', '.mrcs']:
            args = ' -InMrc "%s" ' % movie.getBaseName()
        elif ext in ['.tif', '.tiff']:
            args = ' -InTiff "%s" ' % movie.getBaseName()
        else:
            raise Exception("Unsupported format: %s" % ext)
        args += ' '.join(['%s %s' % (k, v)
                          for k, v in argsDict.iteritems()])
        if inputMovies.getGain():
            args += ' -Gain "%s" ' % inputMovies.getGain()
        if inputMovies.getDark():
            args += ' -Dark "%s"' % inputMovies.getDark()
        args += ' ' + self.extraParams2.get()
        program = MOTIONCOR2_PATH

    try:
        self.runJob(program, args, cwd=movieFolder,
                    env=getEnviron(self.useMotioncor2))
        self._fixMovie(movie)
        # Compute PSDs
        outMicFn = self._getExtraPath(self._getOutputMicName(movie))
        if not os.path.exists(outMicFn):
            # if only DW mic is saved
            outMicFn = self._getExtraPath(self._getOutputMicWtName(movie))
        if self.doComputePSD:
            # Compute uncorrected avg mic
            roi = [
                self.cropOffsetX.get(), self.cropOffsetY.get(),
                self.cropDimX.get(), self.cropDimY.get()
            ]
            fakeShiftsFn = self.writeZeroShifts(movie)
            self.averageMovie(movie, fakeShiftsFn, aveMicFn,
                              binFactor=self.binFactor.get(),
                              roi=roi, dark=inputMovies.getDark(),
                              gain=inputMovies.getGain())
            self.computePSDs(movie, aveMicFn, outMicFn,
                             outputFnCorrected=self._getPsdJpeg(movie))
        self._saveAlignmentPlots(movie)
        if self._doComputeMicThumbnail():
            self.computeThumbnail(
                outMicFn,
                outputFn=self._getOutputMicThumbnail(movie))
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt and
        # loses the traceback — consider 'except Exception' + print_exc.
        print("ERROR: Movie %s failed\n" % movie.getName())