def test_readDM4(self): """ Check we can read dm4 files (using EMAN) """ micFn = self.dsFormat.getFile('SuperRef_c3-adp-se-xyz-0228_001.dm4') ih = ImageHandler() # Check that we can read the dimensions of the dm4 file: EXPECTED_SIZE = (7676, 7420, 1, 1) self.assertEqual(ih.getDimensions(micFn), EXPECTED_SIZE) # We could even convert to an mrc file: outSuffix = pwutils.replaceBaseExt(micFn, 'mrc') outFn = join('/tmp', outSuffix) print "Converting: \n%s -> %s" % (micFn, outFn) ih.convert(micFn, outFn) self.assertTrue(os.path.exists(outFn)) self.assertTrue(pwutils.getFileSize(outFn) > 0) # Check dimensions are still the same: self.assertEqual(ih.getDimensions(outFn), EXPECTED_SIZE) # Clean up tmp files pwutils.cleanPath(outFn)
def test_convertMovie(self): """Check movie conversion""" movFn = self.dsFormat.getFile('qbeta/qbeta.mrc') + ":mrcs" ih = ImageHandler() # Check that we can read the dimensions of the dm4 file: EXPECTED_SIZE = (4096, 4096, 1, 7) EXPECTED_DT = ImageHandler.DT_USHORT self.assertEqual(ih.getDimensions(movFn), EXPECTED_SIZE) self.assertEqual(ih.getDataType(movFn), EXPECTED_DT) outFn = join('/tmp/qbeta_converted.mrcs') ih.convertStack(movFn, outFn, 2, 6) self.assertTrue(os.path.exists(outFn)) self.assertTrue(pwutils.getFileSize(outFn) > 0) self.assertEqual(ih.getDimensions(outFn), (4096, 4096, 1, 5)) self.assertEqual(ih.getDataType(outFn), EXPECTED_DT) if pwutils.envVarOn('SCIPION_DEBUG_NOCLEAN'): print "Not cleaning output movie: ", outFn else: pwutils.cleanPath(outFn)
def retrieveTrainSets(self):
    """ Retrieve, link and return a setOfParticles
        corresponding to the NegativeTrain DeepConsensus trainning set
        with certain extraction conditions (phaseFlip/invContrast)
    """
    prefixYES = ''
    prefixNO = 'no'
    # NOTE(review): the conditions look crossed — doInvert selects the
    # PhaseFlip prefix while ignoreCTF selects the Invert prefix.
    # Confirm this is intentional and not a copy/paste swap.
    modelType = "negativeTrain_%sPhaseFlip_%sInvert.mrcs" % (
        prefixYES if self.doInvert.get() else prefixNO,
        prefixYES if self.ignoreCTF.get() else prefixNO)
    modelPath = xmipp3.Plugin.getModel("deepConsensus", modelType)
    print("Precompiled negative particles found at %s" % (modelPath))

    # Link the precompiled stack into the protocol's tmp folder
    modelFn = self._getTmpPath(modelType)
    pwutils.createLink(modelPath, modelFn)

    tmpSqliteSuff = "AddTrain"
    partSet = self._createSetOfParticles(tmpSqliteSuff)
    img = SetOfParticles.ITEM_TYPE()
    imgh = ImageHandler()
    _, _, _, n = imgh.getDimensions(modelFn)
    # NOTE(review): a single-image file (n == 1) yields an empty set
    if n > 1:
        for index in range(1, n + 1):
            img.cleanObjId()
            # Fixed sentinel mic id for all precompiled particles
            img.setMicId(9999)
            img.setFileName(modelFn)
            img.setIndex(index)
            partSet.append(img)
    partSet.setAlignment(ALIGN_NONE)

    # Remove the temporary sqlite backing file; the set stays in memory
    cleanPath(self._getPath("particles%s.sqlite" % tmpSqliteSuff))
    return partSet
def correctGain(self, movieFn, outputFn, gainFn=None, darkFn=None):
    """correct a movie with both gain and dark images"""
    handler = ImageHandler()

    def _loadFloatImage(path):
        # Read an image promoted to float, or None when no path is given
        if not path:
            return None
        image = handler.read(path)
        image.convert2DataType(handler.DT_FLOAT)
        return image

    _, _, z, n = handler.getDimensions(movieFn)
    # Some mrc stacks are wrongly stored as volumes, so take the max
    numberOfFrames = max(z, n)

    gainImg = _loadFloatImage(gainFn)
    darkImg = _loadFloatImage(darkFn)

    frame = handler.createImage()
    for i in range(1, numberOfFrames + 1):
        frame.read((i, movieFn))
        frame.convert2DataType(handler.DT_FLOAT)
        if darkImg:
            frame.inplaceSubtract(darkImg)
        if gainImg:
            frame.inplaceMultiply(gainImg)
        frame.write((i, outputFn))
def importVolumesStep(self, pattern, samplingRate):
    """ Copy images matching the filename pattern
    Register other parameters.
    """
    # NOTE(review): the samplingRate parameter is unused; the value is
    # always taken from self.samplingRate — confirm this is intended.
    self.info("Using pattern: '%s'" % pattern)

    # Create a Volume template object
    vol = Volume()
    vol.setSamplingRate(self.samplingRate.get())
    copyOrLink = self.getCopyOrLink()
    imgh = ImageHandler()

    volSet = self._createSetOfVolumes()
    volSet.setSamplingRate(self.samplingRate.get())

    for fileName, fileId in self.iterFiles():
        dst = self._getExtraPath(basename(fileName))
        copyOrLink(fileName, dst)
        x, y, z, n = imgh.getDimensions(dst)
        # First case considers when reading mrc without volume flag
        # Second one considers single volumes (not in stack)
        if (z == 1 and n != 1) or (z != 1 and n == 1):
            vol.setObjId(fileId)
            vol.setLocation(dst)
            volSet.append(vol)
        else:
            # Stack of volumes: add one item per index within the file
            for index in range(1, n + 1):
                vol.cleanObjId()
                vol.setLocation(index, dst)
                volSet.append(vol)

    # Register a set when several volumes were imported,
    # a single Volume object otherwise
    if volSet.getSize() > 1:
        self._defineOutputs(outputVolumes=volSet)
    else:
        self._defineOutputs(outputVolume=vol)
def importImagesStep(self, pattern, voltage, sphericalAberration,
                     amplitudeContrast, magnification):
    """ Copy images matching the filename pattern
    Register other parameters.
    """
    self.info("Using pattern: '%s'" % pattern)

    createSetFunc = getattr(self, '_create' + self._outputClassName)
    imgSet = createSetFunc()
    imgSet.setIsPhaseFlipped(self.haveDataBeenPhaseFlipped.get())
    acquisition = imgSet.getAcquisition()
    self.fillAcquisition(acquisition)
    # Call a function that should be implemented by each subclass
    self.setSamplingRate(imgSet)
    outFiles = [imgSet.getFileName()]
    imgh = ImageHandler()
    # Template item reused for every appended image
    img = imgSet.ITEM_TYPE()
    img.setAcquisition(acquisition)
    # n keeps the stack size of the last inspected file (1 if unchecked)
    n = 1
    copyOrLink = self.getCopyOrLink()

    for i, (fileName, fileId) in enumerate(self.iterFiles()):
        dst = self._getExtraPath(basename(fileName))
        copyOrLink(fileName, dst)
        # Handle special case of Imagic images, copying also .img or .hed
        self.handleImgHed(copyOrLink, fileName, dst)

        if self._checkStacks:
            _, _, _, n = imgh.getDimensions(dst)

        if n > 1:
            # Stack file: append one item per slice index
            for index in range(1, n + 1):
                img.cleanObjId()
                img.setMicId(fileId)
                img.setFileName(dst)
                img.setIndex(index)
                self._addImageToSet(img, imgSet)
        else:
            img.setObjId(fileId)
            img.setFileName(dst)
            # Fill the micName if img is a Micrograph.
            self._fillMicName(img, fileName)
            self._addImageToSet(img, imgSet)

        outFiles.append(dst)
        # Progress feedback on a single console line
        sys.stdout.write("\rImported %d/%d" % (i + 1, self.numberOfFiles))
        sys.stdout.flush()

    print "\n"

    args = {}
    outputSet = self._getOutputName()
    args[outputSet] = imgSet
    self._defineOutputs(**args)

    return outFiles
def prepareMask(self, maskObject, fnMask, TsMaskOut, XdimOut):
    # Convert the input mask to fnMask, rescale it to the target
    # sampling rate, window it to the target size and binarize it.
    handler = ImageHandler()
    handler.convert(maskObject, fnMask)
    scaleFactor = maskObject.getSamplingRate() / TsMaskOut
    self.runJob('xmipp_image_resize',
                "-i %s --factor %f" % (fnMask, scaleFactor),
                numberOfMpi=1)
    maskXdim, _, _, _ = handler.getDimensions((1, fnMask))
    if XdimOut != maskXdim:
        self.runJob('xmipp_transform_window',
                    "-i %s --size %d" % (fnMask, XdimOut),
                    numberOfMpi=1)
    self.runJob('xmipp_transform_threshold',
                "-i %s --select below 0.5 --substitute binarize" % fnMask,
                numberOfMpi=1)
def importImagesStep(self, pattern, voltage, sphericalAberration,
                     amplitudeContrast, magnification):
    """ Copy images matching the filename pattern
    Register other parameters.
    """
    self.info("Using pattern: '%s'" % pattern)

    createSetFunc = getattr(self, '_create' + self._outputClassName)
    imgSet = createSetFunc()
    imgSet.setIsPhaseFlipped(self.haveDataBeenPhaseFlipped.get())
    acquisition = imgSet.getAcquisition()
    self.fillAcquisition(acquisition)
    # Call a function that should be implemented by each subclass
    self.setSamplingRate(imgSet)
    outFiles = [imgSet.getFileName()]
    imgh = ImageHandler()
    # Template item reused for every appended image
    img = imgSet.ITEM_TYPE()
    img.setAcquisition(acquisition)
    # n keeps the stack size of the last inspected file (1 if unchecked)
    n = 1
    copyOrLink = self.getCopyOrLink()

    for i, (fileName, fileId) in enumerate(self.iterFiles()):
        dst = self._getExtraPath(basename(fileName))
        copyOrLink(fileName, dst)
        # Handle special case of Imagic images, copying also .img or .hed
        self.handleImgHed(copyOrLink, fileName, dst)

        if self._checkStacks:
            _, _, _, n = imgh.getDimensions(dst)

        if n > 1:
            # Stack file: append one item per slice index
            for index in range(1, n + 1):
                img.cleanObjId()
                img.setMicId(fileId)
                img.setFileName(dst)
                img.setIndex(index)
                imgSet.append(img)
        else:
            img.setObjId(fileId)
            img.setFileName(dst)
            self._fillMicName(img, fileName)  # fill the micName if img is a Micrograph.
            imgSet.append(img)

        outFiles.append(dst)
        # Progress feedback on a single console line
        sys.stdout.write("\rImported %d/%d" % (i + 1, self.numberOfFiles))
        sys.stdout.flush()

    print "\n"

    args = {}
    outputSet = self._getOutputName()
    args[outputSet] = imgSet
    self._defineOutputs(**args)

    return outFiles
def test_truncateMask(self):
    # Truncate the mask volume to a 128-cube and verify the result
    handler = ImageHandler()
    maskFn = self.dataset.getFile('masks/mask.vol')
    outFn = join('/tmp', 'mask.vol')

    handler.truncateMask(maskFn, outFn, newDim=128)
    EXPECTED_SIZE = (128, 128, 128, 1)
    self.assertTrue(os.path.exists(outFn))
    self.assertTrue(pwutils.getFileSize(outFn) > 0)
    self.assertEqual(handler.getDimensions(outFn), EXPECTED_SIZE)

    pwutils.cleanPath(outFn)
def test_readCompressedTIF(self): """ Check we can read tif files """ micFn = self.dsFormat.getFile('c3-adp-se-xyz-0228_200.tif') ih = ImageHandler() # Check that we can read the dimensions of the dm4 file: EXPECTED_SIZE = (7676, 7420, 1, 38) self.assertEqual(ih.getDimensions(micFn), EXPECTED_SIZE) # We could even convert to an mrc file: outSuffix = pwutils.replaceBaseExt(micFn, 'mrc') outFn = join('/tmp', outSuffix) print "Converting: \n%s -> %s" % ((1, micFn), outFn) ih.convert((1, micFn), outFn) self.assertTrue(os.path.exists(outFn)) self.assertTrue(pwutils.getFileSize(outFn) > 0) self.assertEqual(ih.getDimensions(outFn), (7676, 7420, 1, 1)) # Clean up tmp files pwutils.cleanPath(outFn)
def prepareMask(self, maskObject, fnMask, TsMaskOut, XdimOut):
    """Write maskObject to fnMask resampled at TsMaskOut, windowed to
    XdimOut pixels and binarized with a 0.5 threshold."""
    ih = ImageHandler()
    ih.convert(maskObject, fnMask)
    resizeArgs = "-i %s --factor %f" % (
        fnMask, maskObject.getSamplingRate() / TsMaskOut)
    self.runJob('xmipp_image_resize', resizeArgs, numberOfMpi=1)
    dims = ih.getDimensions((1, fnMask))
    if XdimOut != dims[0]:
        windowArgs = "-i %s --size %d" % (fnMask, XdimOut)
        self.runJob('xmipp_transform_window', windowArgs, numberOfMpi=1)
    thresholdArgs = "-i %s --select below 0.5 --substitute binarize" % fnMask
    self.runJob('xmipp_transform_threshold', thresholdArgs, numberOfMpi=1)
def _validate(self):
    from pyworkflow.em.convert import ImageHandler
    ci = self.getImportClass()
    if ci is None:
        # No specific import class: run the base validation plus a
        # check that micrographs are not stored as stacks
        errors = ProtImportMicBase._validate(self)
        for micFn, _ in self.iterFiles():
            imgh = ImageHandler()
            if imgh.isImageFile(micFn):
                _, _, z, n = imgh.getDimensions(micFn)
                if n > 1 or z > 1:
                    errors.append("The protocol not support micrographs stored in stacks. "
                                  "If you want to obtain your micrographs individually, "
                                  "you can run the following command:\n"
                                  "scipion run scipion_directory/scripts/split_stacks.py --files *your files* --ext *extension*")
            # JMRT: only check the first image, for large dataset
            # even reading the header can take a while
            break
        return errors
    else:
        # Delegate validation to the specific import class
        return ci.validateMicrographs()
def _insertMovieStep(self, movie):
    # Redefine this function to add the shifts and factor
    # to the processMovieStep function and run properly in parallel with threads

    # Fetch the shifts here so there is no conflict if the movie object
    # is accessed at the same time by multiple threads
    if self.applyAlignment and movie.hasAlignment():
        shifts = movie.getAlignment().getShifts()
    else:
        # TODO: I do not think this option is ever used
        # No alignment available: build a zero (x, y) shift per frame
        imgh = ImageHandler()
        _, _, _, nFrames = imgh.getDimensions(movie.getFileName())
        shifts = [0] * (2 * nFrames)

    return self._insertFunctionStep('processMovieStep',
                                    movie.getObjId(), movie.getFileName(),
                                    shifts, prerequisites=[])
def _validate(self):
    from pyworkflow.em.convert import ImageHandler
    importClass = self.getImportClass()
    if importClass is not None:
        # Delegate validation to the specific import class
        return importClass.validateMicrographs()

    # Base validation plus a check that micrographs are not stacks
    errors = ProtImportMicBase._validate(self)
    for micFn, _ in self.iterFiles():
        handler = ImageHandler()
        if handler.isImageFile(micFn):
            _, _, z, n = handler.getDimensions(micFn)
            if n > 1 or z > 1:
                errors.append(
                    "The protocol not support micrographs stored in stacks. "
                    "If you want to obtain your micrographs individually, "
                    "you can run the following command:\n"
                    "scipion run scipion_directory/scripts/split_stacks.py --files *your files* --ext *extension*"
                )
        # JMRT: only check the first image, for large dataset
        # even reading the header can take a while
        break
    return errors
def importMaskStep(self, path, samplingRate):
    """ Copy mask from maskPath. """
    self.info("Using mask path: '%s'" % path)

    # Bring the image file into the project's extra folder
    dst = self._getExtraPath(basename(path))
    pwutils.copyFile(path, dst)

    # Inspect the header to decide between a 2D and a 3D mask;
    # n > 1 covers some volume maps stored as stacks in mrc format
    handler = ImageHandler()
    _, _, z, n = handler.getDimensions(dst)
    mask = VolumeMask() if (z > 1 or n > 1) else Mask()

    mask.setFileName(dst)
    mask.setSamplingRate(samplingRate)
    self._defineOutputs(outputMask=mask)
def importImagesStreamStep(self, pattern, voltage, sphericalAberration,
                           amplitudeContrast, magnification):
    """ Copy images matching the filename pattern
    Register other parameters.
    """
    self.info("Using pattern: '%s'" % pattern)

    # Reuse the existing output set when continuing a previous run
    imgSet = self._getOutputSet() if self.isContinued() else None
    self.importedFiles = set()
    if imgSet is None:
        createSetFunc = getattr(self, '_create' + self._outputClassName)
        imgSet = createSetFunc()
    elif imgSet.getSize() > 0:  # in case of continue
        imgSet.loadAllProperties()
        self._fillImportedFiles(imgSet)
        imgSet.enableAppend()

    # Files excluded by a linked "movies to exclude" pointer are treated
    # as already imported so they are skipped below
    pointerExcludedMovs = getattr(self, 'moviesToExclude', None)
    if pointerExcludedMovs is not None:
        excludedMovs = pointerExcludedMovs.get()
        self._fillImportedFiles(excludedMovs)

    imgSet.setIsPhaseFlipped(self.haveDataBeenPhaseFlipped.get())
    acquisition = imgSet.getAcquisition()
    self.fillAcquisition(acquisition)
    # Call a function that should be implemented by each subclass
    self.setSamplingRate(imgSet)
    outFiles = [imgSet.getFileName()]
    imgh = ImageHandler()
    # Template item reused for every appended image
    img = imgSet.ITEM_TYPE()
    img.setAcquisition(acquisition)
    # n keeps the stack size of the last inspected file (1 if unchecked)
    n = 1
    copyOrLink = self.getCopyOrLink()
    outputName = self._getOutputName()
    alreadyWarned = False  # Use this flag to warn only once

    finished = False
    # this is only used when creating stacks from frame files
    self.createdStacks = set()

    i = 0
    lastDetectedChange = datetime.now()

    # Ignore the timeout variables if we are not really in streaming mode
    if self.dataStreaming:
        timeout = timedelta(seconds=self.timeout.get())
        fileTimeout = timedelta(seconds=self.fileTimeout.get())
    else:
        timeout = timedelta(seconds=5)
        fileTimeout = timedelta(seconds=5)

    # Streaming loop: poll for new files until the inactivity timeout
    # expires or the protocol signals that streaming has finished
    while not finished:
        time.sleep(3)  # wait 3 seconds before check for new files
        someNew = False
        someAdded = False

        for fileName, uniqueFn, fileId in self.iterNewInputFiles():
            someNew = True
            # Skip files still being written (modified too recently)
            if self.fileModified(fileName, fileTimeout):
                continue

            dst = self._getExtraPath(uniqueFn)
            self.importedFiles.add(uniqueFn)
            if ' ' in dst:
                if not alreadyWarned:
                    self.warning('Warning: your file names have white spaces!')
                    self.warning('Removing white spaces from copies/symlinks.')
                    alreadyWarned = True
                dst = dst.replace(' ', '')
            copyOrLink(fileName, dst)
            self.debug('Importing file: %s' % fileName)
            self.debug("uniqueFn: %s" % uniqueFn)
            self.debug("dst Fn: %s" % dst)

            if self._checkStacks:
                _, _, _, n = imgh.getDimensions(dst)

            someAdded = True
            self.debug('Appending file to DB...')
            if self.importedFiles:  # enable append after first append
                imgSet.enableAppend()

            if n > 1:
                # Stack file: append one item per slice index
                for index in range(1, n+1):
                    img.cleanObjId()
                    img.setMicId(fileId)
                    img.setFileName(dst)
                    img.setIndex(index)
                    self._addImageToSet(img, imgSet)
            else:
                img.setObjId(fileId)
                img.setFileName(dst)
                # Fill the micName if img is either a Micrograph or a Movie
                uniqueFn = uniqueFn.replace(' ', '')
                self.debug("FILENAME TO fillMicName: %s" % uniqueFn)
                self._fillMicName(img, uniqueFn)
                self._addImageToSet(img, imgSet)

            outFiles.append(dst)
            self.debug('After append. Files: %d' % len(outFiles))

        if someAdded:
            self.debug('Updating output...')
            self._updateOutputSet(outputName, imgSet,
                                  state=imgSet.STREAM_OPEN)
            self.debug('Update Done.')

        self.debug('Checking if finished...someNew: %s' % someNew)
        now = datetime.now()
        if not someNew:
            # If there are no new detected files, we should check the
            # inactivity time elapsed (from last event to now) and
            # if it is greater than the defined timeout, we conclude
            # the import and close the output set
            # Another option is to check if the protocol have some
            # special stop condition, this can be used to manually stop
            # some protocols such as import movies
            finished = (now - lastDetectedChange > timeout
                        or self.streamingHasFinished())
            self.debug("Checking if finished:")
            self.debug(" Now - Last Change: %s"
                       % pwutils.prettyDelta(now - lastDetectedChange))
            self.debug("Finished: %s" % finished)
        else:
            # If we have detected some files, we should update
            # the timestamp of the last event
            lastDetectedChange = now

    # Close the output set now that no more files are expected
    self._updateOutputSet(outputName, imgSet,
                          state=imgSet.STREAM_CLOSED)

    self._cleanUp()

    return outFiles
def importImagesStep(self, pattern, voltage, sphericalAberration, amplitudeContrast, magnification): """ Copy images matching the filename pattern Register other parameters. """ self.info("Using pattern: '%s'" % pattern) createSetFunc = getattr(self, '_create' + self._outputClassName) imgSet = createSetFunc() imgSet.setIsPhaseFlipped(self.haveDataBeenPhaseFlipped.get()) acquisition = imgSet.getAcquisition() self.fillAcquisition(acquisition) # Call a function that should be implemented by each subclass self.setSamplingRate(imgSet) outFiles = [imgSet.getFileName()] imgh = ImageHandler() img = imgSet.ITEM_TYPE() img.setAcquisition(acquisition) n = 1 copyOrLink = self.getCopyOrLink() alreadyWarned = False # Use this flag to warn only once for i, (fileName, fileId) in enumerate(self.iterFiles()): uniqueFn = self._getUniqueFileName(fileName) dst = self._getExtraPath(uniqueFn) if ' ' in dst: if not alreadyWarned: self.warning('Warning: your file names have white spaces!') self.warning('Removing white spaces from copies/symlinks.') alreadyWarned = True dst = dst.replace(' ', '') copyOrLink(fileName, dst) # Handle special case of Imagic images, copying also .img or .hed self.handleImgHed(copyOrLink, fileName, dst) if self._checkStacks: _, _, _, n = imgh.getDimensions(dst) if n > 1: for index in range(1, n + 1): img.cleanObjId() img.setMicId(fileId) img.setFileName(dst) img.setIndex(index) self._addImageToSet(img, imgSet) else: img.setObjId(fileId) img.setFileName(dst) # Fill the micName if img is either Micrograph or Movie self._fillMicName(img, uniqueFn) self._addImageToSet(img, imgSet) outFiles.append(dst) sys.stdout.write("\rImported %d/%d" % (i + 1, self.numberOfFiles)) sys.stdout.flush() print "\n" args = {} outputSet = self._getOutputName() args[outputSet] = imgSet self._defineOutputs(**args) return outFiles
def importImagesStep(self, pattern, voltage, sphericalAberration,
                     amplitudeContrast, magnification):
    """ Copy images matching the filename pattern
    Register other parameters.
    """
    self.info("Using pattern: '%s'" % pattern)

    createSetFunc = getattr(self, '_create' + self._outputClassName)
    imgSet = createSetFunc()
    imgSet.setIsPhaseFlipped(self.haveDataBeenPhaseFlipped.get())
    acquisition = imgSet.getAcquisition()
    self.fillAcquisition(acquisition)
    # Call a function that should be implemented by each subclass
    self.setSamplingRate(imgSet)
    outFiles = [imgSet.getFileName()]
    imgh = ImageHandler()
    # Template item reused for every appended image
    img = imgSet.ITEM_TYPE()
    img.setAcquisition(acquisition)
    # n keeps the stack size of the last inspected file (1 if unchecked)
    n = 1
    copyOrLink = self.getCopyOrLink()
    alreadyWarned = False  # Use this flag to warn only once

    for i, (fileName, fileId) in enumerate(self.iterFiles()):
        uniqueFn = self._getUniqueFileName(fileName)
        dst = self._getExtraPath(uniqueFn)
        if ' ' in dst:
            if not alreadyWarned:
                self.warning('Warning: your file names have white spaces!')
                self.warning('Removing white spaces from copies/symlinks.')
                alreadyWarned = True
            dst = dst.replace(' ', '')
        copyOrLink(fileName, dst)
        # Handle special case of Imagic images, copying also .img or .hed
        self.handleImgHed(copyOrLink, fileName, dst)

        if self._checkStacks:
            _, _, _, n = imgh.getDimensions(dst)

        if n > 1:
            # Stack file: append one item per slice index
            for index in range(1, n+1):
                img.cleanObjId()
                img.setMicId(fileId)
                img.setFileName(dst)
                img.setIndex(index)
                self._addImageToSet(img, imgSet)
        else:
            img.setObjId(fileId)
            img.setFileName(dst)
            # Fill the micName if img is either Micrograph or Movie
            # (strip spaces so the micName matches the copied file)
            uniqueFn = uniqueFn.replace(' ', '')
            self._fillMicName(img, uniqueFn)
            self._addImageToSet(img, imgSet)

        outFiles.append(dst)
        # Progress feedback on a single console line
        sys.stdout.write("\rImported %d/%d" % (i+1, self.numberOfFiles))
        sys.stdout.flush()

    print "\n"

    args = {}
    outputSet = self._getOutputName()
    args[outputSet] = imgSet
    self._defineOutputs(**args)

    return outFiles
def extractunitCell(self, sym, offset=0, cropZ=False): """ extract unit cell from icosahedral phantom using xmipp_i2 symmetry """ # create phantom (3D map) _samplingRate = 1.34 _, outputFile1 = mkstemp(suffix=".mrc") command = "xmipp_phantom_create " args = " -i %s" % self.filename[sym] args += " -o %s" % outputFile1 runJob(None, command, args, env=Plugin.getEnviron()) ccp4header = Ccp4Header(outputFile1, readHeader=True) x, y, z = ccp4header.getDims() t = Transform() if cropZ: _, outputFile2 = mkstemp(suffix=".mrc") args = "-i %s -o %s" % (outputFile1, outputFile2) args += " --corners " args += " %d " % (-x / 2.) args += " %d " % (-y / 2.) args += " %d " % (0.) args += " %d " % (+x / 2.) args += " %d " % (+y / 2.) args += " %d " % (+z / 2.) runJob(None, "xmipp_transform_window", args, env=Plugin.getEnviron()) t.setShifts(0, 0, 0) outputFile = outputFile2 ccp4header = Ccp4Header(outputFile2, readHeader=True) else: t.setShifts(0, 0, 0) outputFile = outputFile1 ccp4header.setSampling(_samplingRate) ccp4header.setOrigin(t.getShifts()) ccp4header.writeHeader() # import volume if cropZ: args = { 'filesPath': outputFile, 'filesPattern': '', 'samplingRate': _samplingRate, 'copyFiles': True, 'setOrigCoord': True, 'x': 90. * _samplingRate, 'y': 90. * _samplingRate, 'z': 0. 
# x, y, z in Angstroms } else: args = { 'filesPath': outputFile, 'filesPattern': '', 'samplingRate': _samplingRate, 'copyFiles': True, 'setDefaultOrigin': False, } prot = self.newProtocol(ProtImportVolumes, **args) prot.setObjLabel('import volume(%s)' % XMIPP_SYM_NAME[sym]) self.launchProtocol(prot) # execute protocol extract unitCell args = { 'inputVolumes': prot.outputVolume, 'symmetryGroup': sym, 'symmetryOrder': self.symOrder, 'innerRadius': self.innerRadius, 'outerRadius': self.outerRadius, 'expandFactor': .2, 'offset': offset } prot = self.newProtocol(XmippProtExtractUnit, **args) prot.setObjLabel('extract unit cell') self.launchProtocol(prot) # check results ih = ImageHandler() xdim, ydim, zdim, ndim = \ ih.getDimensions(prot.outputVolume.getFileName()) self.assertTrue(abs(xdim - self.box[sym][0]) < 2) self.assertTrue(abs(ydim - self.box[sym][1]) < 2) self.assertTrue(abs(zdim - self.box[sym][2]) < 2) # create pdb fileoutput args = { 'inputStructure': prot.outputVolume, 'maskMode': NMA_MASK_THRE, 'maskThreshold': 0.5, 'pseudoAtomRadius': 1.5 } prot = self.newProtocol(XmippProtConvertToPseudoAtoms, **args) prot.setObjLabel('get pdb') self.launchProtocol(prot) # check results filenamePdb = prot._getPath('pseudoatoms.pdb') self.assertTrue(os.path.isfile(filenamePdb)) # delete temporary files os.remove(self.filename[sym]) os.remove(outputFile)
def _loadTable(self, tableName):
    """ Load information from tables PREFIX_Classes, PREFIX_Objects. """
    tableName = self.tablePrefixes[tableName]

    BASIC_COLUMNS = [
        Column('id', int, renderType=COL_RENDER_ID),
        Column('enabled', bool, renderType=COL_RENDER_CHECKBOX),
        Column('label', str),
        Column('comment', str),
        Column('creation', str)
    ]
    # Load columns from PREFIX_Classes table
    columns = list(BASIC_COLUMNS)
    db = SqliteDb()
    db._createConnection(self._dbName, 1000)
    db.executeCommand("SELECT * FROM %sClasses;" % tableName)
    # This will store the images columsn to join
    # the _index and the _filename
    imgCols = {}

    for row in db._iterResults():
        renderType = COL_RENDER_NONE
        colName = row['column_name']
        colLabel = row['label_property']

        if colLabel != 'self':
            # Keep track of _index and _filename pairs to mark as renderable images
            if colLabel.endswith('_index'):
                imgCols[colLabel.replace('_index', '')] = colName

            elif colLabel.endswith('_filename'):
                # TODO: Maybe not all the labels endswith "_filename"
                # have to be rendered.
                # for example in the RotSpectra with '_representative._filename'
                prefix = colLabel.replace('_filename', '')
                if prefix in imgCols:
                    renderType = COL_RENDER_IMAGE
                    imgCols[colName] = imgCols[prefix]

            #CTF FIX
            elif (colLabel.endswith('_psdFile') or
                  colLabel.endswith('_enhanced_psd') or
                  colLabel.endswith('_ctfmodel_quadrant') or
                  colLabel.endswith('_ctfmodel_halfplane')):
                renderType = COL_RENDER_IMAGE

            if row['class_name'] == 'Boolean':
                renderType = COL_RENDER_CHECKBOX

            columns.append(
                Column(colName, str, label=colLabel, renderType=renderType))

    table = Table(*columns)
    checkedImgCols = {}  # Check if the image columns are volumes
    ih = ImageHandler()

    # Populate the table in the DataSet
    db.executeCommand("SELECT * FROM %sObjects;" % tableName)
    for row in db._iterResults():
        rowDict = dict(row)
        for k, v in rowDict.iteritems():
            if v is None:
                rowDict[k] = ''
            # Set the index@filename for images columns values
            if k in imgCols:
                colName = imgCols[k]
                index = rowDict[colName]
                filename = os.path.join(self.projectPath, rowDict[k])
                filepath = filename.replace(":mrc", "")
                # Inspect the first existing image of each column once
                # to decide whether it must be rendered as a volume
                if not checkedImgCols.get(colName, False):
                    if os.path.exists(filepath):
                        #print "Fn to get dims: %s@%s" % (index,filename)
                        x, y, z, n = ih.getDimensions((index, filename))
                        if z > 1:
                            table.getColumn(k).setRenderType(
                                COL_RENDER_VOLUME)
                    checkedImgCols[colName] = True
                if index:
                    rowDict[k] = '%06d@%s' % (index, filename)
        table.addRow(row['id'], **rowDict)

    return table
def _processMovie(self, movieId, movieName, movieFolder, shifts):
    ###pasar shifts
    movieName = os.path.join(movieFolder, movieName)
    boxSize = self.boxSize.get()
    # Read movie dimensions to iterate through each frame
    imgh = ImageHandler()
    x, y, z, n = imgh.getDimensions(movieName)

    # Clamp the requested frame range to the available frames
    first = self.firstFrame.get()
    if first <= 1:
        first = 1
    last = self.lastFrame.get()
    if last <= 0 or last >= n:
        last = n
    numberOfFrames = last - first + 1

    if shifts is None:
        # No alignment shifts supplied: use zero (x, y) per frame
        frames = max(z, n)
        shifts = [0] * (2 * frames)

    stkIndex = 0
    movieStk = self._getMovieName(movieId, '.stk')
    movieMdFile = self._getMovieName(movieId, '.xmd')
    movieMd = md.MetaData()
    frameMd = md.MetaData()
    frameMdImages = md.MetaData()
    frameRow = md.Row()

    for frame in range(first, last + 1):
        # Get the frame shifts
        index = frame - first
        shiftX = shifts[2 * index]
        shiftY = shifts[2 * index + 1]

        frameRoot = os.path.join(movieFolder, 'frame_%02d' % frame)
        frameName = frameRoot + '.mrc'
        frameMdFile = frameRoot + '.xmd'
        framePosFile = frameRoot + '_coordinates.xmd'
        coordinatesName = frameRoot + '_coordinates.xmd'

        # Write shifted coordinates for this frame; returns False when
        # there are no particles to extract
        hasCoordinates = self._writeXmippPosFile(movieId, movieName,
                                                 coordinatesName,
                                                 shiftX, shiftY)

        if hasCoordinates:
            self.info("Writing frame: %s" % frameName)
            #TODO: there is no need to write the frame and then operate
            #the input of the first operation should be the movie
            imgh.convert(tuple([frame, movieName]), frameName)

            if self.doRemoveDust:
                self.info("Removing Dust")
                self._runNoDust(frameName)

            self.info("Extracting particles")
            frameImages = frameRoot + '_images'
            args = '-i %(frameName)s --pos %(coordinatesName)s ' \
                   '-o %(frameRoot)s --Xdim %(boxSize)d' % locals()

            if self.doInvert:
                args += " --invert"

            args += " --downsampling %f " % self.factor
            self.runJob('xmipp_micrograph_scissor', args)
            cleanPath(frameName)

            frameStk = frameRoot + '.stk'

            self.info("Combining particles into one stack.")

            frameMdImages.read(frameMdFile)
            frameMd.read('particles@%s' % framePosFile)
            frameMd.merge(frameMdImages)

            # Move every extracted particle into the per-movie stack
            # and record its metadata row
            for objId in frameMd:
                stkIndex += 1
                frameRow.readFromMd(frameMd, objId)
                location = xmippToLocation(frameRow.getValue(md.MDL_IMAGE))
                newLocation = (stkIndex, movieStk)
                imgh.convert(location, newLocation)

                # Fix the name to be accesible from the Project directory
                # so we know that the movie stack file will be moved
                # to final particles folder
                newImageName = '%d@%s' % newLocation
                frameRow.setValue(md.MDL_IMAGE, newImageName)
                frameRow.setValue(md.MDL_MICROGRAPH_ID, long(movieId))
                frameRow.setValue(md.MDL_MICROGRAPH, str(movieId))
                frameRow.setValue(md.MDL_FRAME_ID, long(frame))
                frameRow.setValue(md.MDL_PARTICLE_ID,
                                  frameRow.getValue(md.MDL_ITEM_ID))
                frameRow.writeToMd(movieMd, movieMd.addObject())
            movieMd.addItemId()
            movieMd.write(movieMdFile)
            cleanPath(frameStk)

    if self.doNormalize:
        self._runNormalize(movieStk, numberOfFrames)
def _loadTable(self, tableName):
    """ Load information from tables PREFIX_Classes, PREFIX_Objects. """
    tableName = self.tablePrefixes[tableName]

    BASIC_COLUMNS = [Column('id', int, renderType=COL_RENDER_ID),
                     Column('enabled', bool, renderType=COL_RENDER_CHECKBOX),
                     Column('label', str),
                     Column('comment', str),
                     Column('creation', str)]
    # Load columns from PREFIX_Classes table
    columns = list(BASIC_COLUMNS)
    db = SqliteDb()
    db._createConnection(self._dbName, 1000)
    db.executeCommand("SELECT * FROM %sClasses;" % tableName)
    # This will store the images columsn to join
    # the _index and the _filename
    imgCols = {}

    for row in db._iterResults():
        renderType = COL_RENDER_NONE
        colName = row['column_name']
        colLabel = row['label_property']

        if colLabel != 'self':
            # Keep track of _index and _filename pairs to mark as renderable images
            if colLabel.endswith('_index'):
                imgCols[colLabel.replace('_index', '')] = colName

            elif colLabel.endswith('_filename'):
                # TODO: Maybe not all the labels endswith "_filename"
                # have to be rendered.
                # for example in the RotSpectra with '_representative._filename'
                prefix = colLabel.replace('_filename', '')
                if prefix in imgCols:
                    renderType = COL_RENDER_IMAGE
                    imgCols[colName] = imgCols[prefix]

            #CTF FIX
            elif (colLabel.endswith('_psdFile') or
                  colLabel.endswith('_enhanced_psd') or
                  colLabel.endswith('_ctfmodel_quadrant') or
                  colLabel.endswith('_ctfmodel_halfplane')):
                renderType = COL_RENDER_IMAGE

            if row['class_name'] == 'Boolean':
                renderType = COL_RENDER_CHECKBOX

            columns.append(Column(colName, str, label=colLabel,
                                  renderType=renderType))

    table = Table(*columns)
    checkedImgCols = {}  # Check if the image columns are volumes
    ih = ImageHandler()

    # Populate the table in the DataSet
    db.executeCommand("SELECT * FROM %sObjects;" % tableName)
    for row in db._iterResults():
        rowDict = dict(row)
        for k, v in rowDict.iteritems():
            if v is None:
                rowDict[k] = ''
            # Set the index@filename for images columns values
            if k in imgCols:
                colName = imgCols[k]
                index = rowDict[colName]
                filename = os.path.join(self.projectPath, rowDict[k])
                filepath = filename.replace(":mrc", "")
                # Inspect the first existing image of each column once
                # to decide whether it must be rendered as a volume
                if not checkedImgCols.get(colName, False):
                    if os.path.exists(filepath):
                        #print "Fn to get dims: %s@%s" % (index,filename)
                        x, y, z, n = ih.getDimensions((index, filename))
                        if z > 1:
                            table.getColumn(k).setRenderType(COL_RENDER_VOLUME)
                    checkedImgCols[colName] = True
                if index:
                    rowDict[k] = '%06d@%s' % (index, filename)
        table.addRow(row['id'], **rowDict)

    return table
def __applyTransform(suffix, pdbFileName, shift, angles, sampling):
    """ auxiliary function, transform PDB and 3dmap files

    Applies the given shift (Angstroms) and Euler angles (degrees,
    'szyz' convention) both to the atomic structure in pdbFileName and
    to the 3D map "emd_<EMDBID>.map" (EMDBID/PDBID are module globals).
    Writes "<suffix>_<pdbid>_transformed.ent" and
    "<suffix>_emd_<EMDBID>_transform.map", then opens both plus the
    originals in Chimera for visual inspection.

    NOTE(review): when a rotation is requested, the structure is first
    shifted to the map center, rotated, then shifted back — presumably
    because the rotation is defined about the map center; confirm.
    """
    # create a Scipion transformation matrix
    from numpy import deg2rad
    rotation_matrix = euler_matrix(deg2rad(angles[0]),
                                   deg2rad(angles[1]),
                                   deg2rad(angles[2]), 'szyz')
    translation = translation_matrix(shift)
    M = concatenate_matrices(rotation_matrix, translation)

    # apply it to the pdb file
    # if rotation move to center
    aSH = AtomicStructHandler(pdbFileName)
    if (angles[0] != 0. or angles[1] != 0. or angles[2] != 0.):
        from pyworkflow.em.convert import ImageHandler
        ih = ImageHandler()
        x, y, z, n = ih.getDimensions("emd_%s.map" % EMDBID)
        # Half-dimensions give the geometric center of the map.
        x /= 2.
        y /= 2.
        z /= 2.
        localShift = [-x, -y, -z]
        rotation_matrix = euler_matrix(0., 0., 0., 'szyz')
        translation = translation_matrix(localShift)
        localM = concatenate_matrices(rotation_matrix, translation)
        # Pre-shift so the subsequent rotation happens about the center.
        aSH.transform(localM, sampling=sampling)

    aSH.transform(M, sampling=sampling)

    if (angles[0] != 0. or angles[1] != 0. or angles[2] != 0.):
        # Undo the centering shift applied above (x, y, z still hold
        # the half-dimensions computed in the first branch).
        localShift = [x, y, z]
        rotation_matrix = euler_matrix(0., 0., 0., 'szyz')
        translation = translation_matrix(localShift)
        localM = concatenate_matrices(rotation_matrix, translation)
        aSH.transform(localM, sampling=sampling)

    aSH.write("%s_%s_transformed.ent" % (suffix, PDBID.lower()))

    # get equivalent xmipp transformation
    # (rebinds the shift/angles parameters with Xmipp-convention values)
    shift, angles = __getXmippEulerAngles(M)
    # shift 3map and set sampling
    __runXmippProgram("xmipp_transform_geometry",
                      '-i emd_%s.map '
                      '-o %s_emd_%s_transform.map '
                      '--interp linear '
                      '--shift %f %f %f '
                      '--rotate_volume euler %f %f %f ' % (
                          EMDBID,
                          suffix, EMDBID,
                          shift[0], shift[1], shift[2],
                          angles[0], angles[1], angles[2]
                      )
                      )
    header = Ccp4Header("%s_emd_%s_transform.map" % (suffix, EMDBID),
                        readHeader=True)
    header.setSampling(sampling)
    # put the sampling back, xmipp_transform_geometry erased it
    header.writeHeader()

    # view the results with chimera
    from pyworkflow.em.viewers import Chimera
    args = "%s %s %s %s" % (
        pdbFileName,
        "emd_%s.map" % EMDBID,
        "%s_%s_transformed.ent" % (suffix, PDBID.lower()),
        "%s_emd_%s_transform.map" % (suffix, EMDBID)
    )
    Chimera.runProgram(args)
def importImagesStreamStep(self, pattern, voltage, sphericalAberration,
                           amplitudeContrast, magnification):
    """ Copy images matching the filename pattern
    Register other parameters.

    Streaming import loop: polls for new files matching *pattern*,
    copies/links each into the extra path, appends the resulting
    image(s) to the output set, and keeps going until no new file has
    appeared for the configured timeout (or the protocol signals it
    has finished). Supports "continue" mode by reloading an existing
    output set and skipping already-imported files.

    NOTE(review): voltage/sphericalAberration/amplitudeContrast/
    magnification are not read directly here — presumably
    fillAcquisition() picks them up from protocol attributes; confirm.

    :return: list of output file names (the set file plus every
        imported file path).
    """
    self.info("Using pattern: '%s'" % pattern)

    # Reuse the previous output set when continuing an interrupted run.
    imgSet = self._getOutputSet() if self.isContinued() else None

    self.importedFiles = set()
    if imgSet is None:
        # Create the output set dynamically from the class name.
        createSetFunc = getattr(self, '_create' + self._outputClassName)
        imgSet = createSetFunc()
    elif imgSet.getSize() > 0:  # in case of continue
        imgSet.loadAllProperties()
        self._fillImportedFiles(imgSet)
        imgSet.enableAppend()

    # Files explicitly excluded are treated as already imported.
    pointerExcludedMovs = getattr(self, 'moviesToExclude', None)
    if pointerExcludedMovs is not None:
        excludedMovs = pointerExcludedMovs.get()
        self._fillImportedFiles(excludedMovs)

    imgSet.setIsPhaseFlipped(self.haveDataBeenPhaseFlipped.get())
    acquisition = imgSet.getAcquisition()
    self.fillAcquisition(acquisition)
    # Call a function that should be implemented by each subclass
    self.setSamplingRate(imgSet)
    outFiles = [imgSet.getFileName()]
    imgh = ImageHandler()
    # Single reusable item; its object id is reset for each append.
    img = imgSet.ITEM_TYPE()
    img.setAcquisition(acquisition)
    n = 1  # images per file; updated per-file when _checkStacks is set
    copyOrLink = self.getCopyOrLink()
    outputName = self._getOutputName()

    finished = False

    # this is only used when creating stacks from frame files
    self.createdStacks = set()

    i = 0
    lastDetectedChange = datetime.now()

    # Ignore the timeout variables if we are not really in streaming mode
    if self.dataStreaming:
        timeout = timedelta(seconds=self.timeout.get())
        fileTimeout = timedelta(seconds=self.fileTimeout.get())
    else:
        timeout = timedelta(seconds=5)
        fileTimeout = timedelta(seconds=5)

    while not finished:
        time.sleep(3)  # wait 3 seconds before check for new files
        someNew = False
        someAdded = False

        for fileName, uniqueFn, fileId in self.iterNewInputFiles():
            someNew = True
            # Skip files still being written (modified too recently);
            # they will be picked up on a later poll.
            if self.fileModified(fileName, fileTimeout):
                continue

            dst = self._getExtraPath(uniqueFn)
            self.importedFiles.add(uniqueFn)
            copyOrLink(fileName, dst)
            self.debug('Importing file: %s' % fileName)
            self.debug("uniqueFn: %s" % uniqueFn)
            self.debug("dst Fn: %s" % dst)

            if self._checkStacks:
                _, _, _, n = imgh.getDimensions(dst)

            someAdded = True
            self.debug('Appending file to DB...')
            if self.importedFiles:  # enable append after first append
                imgSet.enableAppend()

            if n > 1:
                # A stack: append one item per slice (1-based index).
                for index in range(1, n + 1):
                    img.cleanObjId()
                    img.setMicId(fileId)
                    img.setFileName(dst)
                    img.setIndex(index)
                    self._addImageToSet(img, imgSet)
            else:
                img.setObjId(fileId)
                img.setFileName(dst)
                # Fill the micName if img is either a Micrograph or a Movie
                self.debug("FILENAME TO fillMicName: %s" % uniqueFn)
                self._fillMicName(img, uniqueFn)
                self._addImageToSet(img, imgSet)

            outFiles.append(dst)
            self.debug('After append. Files: %d' % len(outFiles))

        if someAdded:
            # Flush the set so downstream protocols see new items.
            self.debug('Updating output...')
            self._updateOutputSet(outputName, imgSet,
                                  state=imgSet.STREAM_OPEN)
            self.debug('Update Done.')

        self.debug('Checking if finished...someNew: %s' % someNew)
        now = datetime.now()

        if not someNew:
            # If there are no new detected files, we should check the
            # inactivity time elapsed (from last event to now) and
            # if it is greater than the defined timeout, we conclude
            # the import and close the output set
            # Another option is to check if the protocol have some
            # special stop condition, this can be used to manually stop
            # some protocols such as import movies
            finished = (now - lastDetectedChange > timeout
                        or self.streamingHasFinished())
            self.debug("Checking if finished:")
            self.debug(" Now - Last Change: %s"
                       % pwutils.prettyDelta(now - lastDetectedChange))
            self.debug("Finished: %s" % finished)
        else:
            # If we have detected some files, we should update
            # the timestamp of the last event
            lastDetectedChange = now

    # Close the stream: no more items will be appended.
    self._updateOutputSet(outputName, imgSet,
                          state=imgSet.STREAM_CLOSED)

    self._cleanUp()
    return outFiles
def _processMovie(self, movieId, movieName, movieFolder, shifts):###pasar shifts
    """ Extract particles from every selected frame of one movie.

    For each frame in [firstFrame, lastFrame]: write the frame to disk,
    optionally remove dust, extract particles at the (shift-corrected)
    coordinates with xmipp_micrograph_scissor, and accumulate them into
    a single per-movie stack (.stk) with its metadata (.xmd).
    Temporary frame files and per-frame stacks are cleaned as it goes.

    :param movieId: numeric id of the movie (used to name outputs).
    :param movieName: movie file name, relative to movieFolder.
    :param movieFolder: directory containing the movie and temp files.
    :param shifts: flat sequence of per-frame alignment shifts laid out
        as [x0, y0, x1, y1, ...], indexed from firstFrame.
    """
    movieName = os.path.join(movieFolder, movieName)
    boxSize = self.boxSize.get()
    # Read movie dimensions to iterate through each frame
    imgh = ImageHandler()
    x, y, z, n = imgh.getDimensions(movieName)

    # Clamp the requested frame range to [1, n].
    first = self.firstFrame.get()
    if first <= 1:
        first = 1
    last = self.lastFrame.get()
    if last <= 0 or last >= n:
        last = n
    numberOfFrames = last - first + 1

    stkIndex = 0  # running index of particles in the output stack
    movieStk = self._getMovieName(movieId, '.stk')
    movieMdFile = self._getMovieName(movieId, '.xmd')
    movieMd = md.MetaData()
    frameMd = md.MetaData()
    frameMdImages = md.MetaData()
    frameRow = md.Row()

    for frame in range(first, last+1):
        # Get the frame shifts
        index = frame - first
        shiftX = shifts[2*index]
        shiftY = shifts[2*index+1]

        frameRoot = os.path.join(movieFolder, 'frame_%02d' % frame)
        frameName = frameRoot + '.mrc'
        frameMdFile = frameRoot + '.xmd'
        framePosFile = frameRoot + '_coordinates.xmd'
        coordinatesName = frameRoot + '_coordinates.xmd'

        # Write shift-corrected picking coordinates for this frame;
        # returns False when the frame has no particles to extract.
        hasCoordinates = self._writeXmippPosFile(movieId, movieName,
                                                 coordinatesName,
                                                 shiftX, shiftY)
        if hasCoordinates:
            self.info("Writing frame: %s" % frameName)
            #TODO: there is no need to write the frame and then operate
            #the input of the first operation should be the movie
            imgh.convert(tuple([frame, movieName]), frameName)

            if self.doRemoveDust:
                self.info("Removing Dust")
                self._runNoDust(frameName)

            self.info("Extracting particles")
            frameImages = frameRoot + '_images'
            # %(name)s placeholders are filled from local variables.
            args = '-i %(frameName)s --pos %(coordinatesName)s ' \
                   '-o %(frameRoot)s --Xdim %(boxSize)d' % locals()

            if self.doInvert:
                args += " --invert"

            args += " --downsampling %f " % self.factor
            self.runJob('xmipp_micrograph_scissor', args)
            # The written frame is no longer needed once extracted.
            cleanPath(frameName)

            frameStk = frameRoot + '.stk'

            self.info("Combining particles into one stack.")

            # Merge the extracted-image metadata with the coordinates.
            frameMdImages.read(frameMdFile)
            frameMd.read('particles@%s' % framePosFile)
            frameMd.merge(frameMdImages)

            for objId in frameMd:
                stkIndex += 1
                frameRow.readFromMd(frameMd, objId)
                location = xmippToLocation(frameRow.getValue(md.MDL_IMAGE))
                newLocation = (stkIndex, movieStk)
                # Copy the particle into the per-movie stack.
                imgh.convert(location, newLocation)

                # Fix the name to be accesible from the Project directory
                # so we know that the movie stack file will be moved
                # to final particles folder
                newImageName = '%d@%s' % newLocation
                frameRow.setValue(md.MDL_IMAGE, newImageName)
                frameRow.setValue(md.MDL_MICROGRAPH_ID, long(movieId))
                frameRow.setValue(md.MDL_MICROGRAPH, str(movieId))
                frameRow.setValue(md.MDL_FRAME_ID, long(frame))
                frameRow.setValue(md.MDL_PARTICLE_ID,
                                  frameRow.getValue(md.MDL_ITEM_ID))
                frameRow.writeToMd(movieMd, movieMd.addObject())

            movieMd.addItemId()
            movieMd.write(movieMdFile)
            # Per-frame stack already merged into movieStk; drop it.
            cleanPath(frameStk)

    if self.doNormalize:
        self._runNormalize(movieStk, numberOfFrames)