Example #1
 def __init__(self, **kwargs):
     OrderedObject.__init__(self, **kwargs)
     self.name = String('default')
     self.maxCores = Integer()
     self.allowMPI = Boolean()
     self.allowThreads = Boolean()
     self.maxHours = Integer()
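The attributes above are pyworkflow.object wrapper types that hold a plain Python value behind get()/set(). A minimal standalone sketch of that pattern (values are made up):

from pyworkflow.object import String, Integer, Boolean

name = String('default')     # wraps the str 'default'
maxCores = Integer()         # empty wrapper, no value stored yet

maxCores.set(8)              # store a plain Python int
print(name.get())            # -> 'default'
print(maxCores.get())        # -> 8
print(Boolean().hasValue())  # -> False, nothing stored yet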
Example #2
def rowToParticle(partRow, **kwargs):
    """ Create a Particle from a row of a meta """
    img = em.Particle()
    
    # Provide a hook to be used if something needs to be
    # done for special cases before converting the row to an image
    preprocessImageRow = kwargs.get('preprocessImageRow', None)
    if preprocessImageRow:
        preprocessImageRow(img, partRow)
    
    # Decompose Relion filename
    index, filename = relionToLocation(partRow.getValue(md.RLN_IMAGE_NAME))
    img.setLocation(index, filename)
    
    if partRow.containsLabel(md.RLN_PARTICLE_CLASS):
        img.setClassId(partRow.getValue(md.RLN_PARTICLE_CLASS))
    
    if kwargs.get('readCtf', True):
        img.setCTF(rowToCtfModel(partRow))
        
    # alignment is mandatory at this point; it should be checked
    # and defaults detected if not passed at the readSetOf... level
    alignType = kwargs.get('alignType') 
    
    if alignType != em.ALIGN_NONE:
        img.setTransform(rowToAlignment(partRow, alignType))
        
    if kwargs.get('readAcquisition', True):
        img.setAcquisition(rowToAcquisition(partRow))
        
    if kwargs.get('magnification', None):
        img.getAcquisition().setMagnification(kwargs.get("magnification"))
    
    setObjId(img, partRow)
    # Read some extra labels
    rowToObject(partRow, img, {},
                extraLabels=IMAGE_EXTRA_LABELS + kwargs.get('extraLabels', []))

    img.setCoordinate(rowToCoordinate(partRow))
    
    # copy micId if available from row to particle
    if partRow.hasLabel(md.RLN_MICROGRAPH_ID):
        img.setMicId(partRow.getValue(md.RLN_MICROGRAPH_ID))
    
    # copy particleId if available from row to particle
    if partRow.hasLabel(md.RLN_PARTICLE_ID):
        img._rlnParticleId = Integer(partRow.getValue(md.RLN_PARTICLE_ID))
    
    # copy random subset (half id) if available from row to particle
    if partRow.hasLabel(md.RLN_PARTICLE_RANDOM_SUBSET):
        img._rln_halfId = Integer(partRow.getValue(md.RLN_PARTICLE_RANDOM_SUBSET))
    
    # Provide a hook to be used if something needs to be
    # done for special cases after converting the row to an image
    postprocessImageRow = kwargs.get('postprocessImageRow', None)
    if postprocessImageRow:
        postprocessImageRow(img, partRow)
    return img
Example #3
    def _getMetrics(self):
        """ Internal method to compute some metrics. """
        # mean values of FSC-Q

        mtd = md.MetaData()
        mtd.read(self._getFileName(MD_MEANS))

        mean = mtd.getValue(MDL_VOLUME_SCORE1, 1)
        meanA = mtd.getValue(MDL_VOLUME_SCORE2, 1)

        # mean values for the map divided by resolution (FSC-Qr)
        mtd2 = md.MetaData()
        mtd2.read(self._getFileName(MD2_MEANS))

        mean2 = mtd2.getValue(MDL_VOLUME_SCORE1, 1)
        meanA2 = mtd2.getValue(MDL_VOLUME_SCORE2, 1)

        # statistics from the final pdb with fsc-q:
        # number of atoms with fsc-q greater than 0.5 or less than -0.5
        total_atom = 0
        fscq_greater = 0
        fscq_less = 0
        with open(self._getFileName(PDB_VALUE_FILE)) as f:
            lines_data = f.readlines()
            for j, lin in enumerate(lines_data):

                if (lin.startswith('ATOM') or lin.startswith('HETATM')):

                    total_atom = total_atom + 1
                    fscq_atom = float(lin[54:60])

                    if (fscq_atom > 0.5):
                        fscq_greater = fscq_greater + 1

                    if (fscq_atom < -0.5):
                        fscq_less = fscq_less + 1

        porc_greater = (fscq_greater * 100) / total_atom
        porc_less = (fscq_less * 100) / total_atom

        return {
            'mean': Float(mean),
            'meanA': Float(meanA),
            'mean2': Float(mean2),
            'meanA2': Float(meanA2),
            'total_atom': Integer(total_atom),
            'fscq_greater': Integer(fscq_greater),
            'fscq_less': Integer(fscq_less),
            'porc_greater': Float(porc_greater),
            'porc_less': Float(porc_less)
        }
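The slice lin[54:60] above reads the fixed-width occupancy columns (55-60) of each ATOM/HETATM record, which the final pdb produced by this protocol appears to reuse to carry the per-atom FSC-Q value. A standalone illustration with a fabricated record:

# Fabricated PDB ATOM record carrying 0.72 in the occupancy columns (lin[54:60])
lin = "ATOM      1  N   MET A   1      11.104  13.207   2.100  0.72 20.00           N"
if lin.startswith('ATOM') or lin.startswith('HETATM'):
    fscq_atom = float(lin[54:60])
    print(fscq_atom)  # -> 0.72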
Example #4
    def compareClassesStep(self, i1, i2):
        set1 = self.inputClasses1.get()
        set2 = self.inputClasses2.get()

        # Compare each pair of classes from set1 and set2 and
        # compute the Jaccard index for each pair (J = len(intersection) / len(union)).
        # Create a list with all pair indexes and then sort it.
        jaccardList = []
        f = open(self._getPath('jaccard.txt'), 'w')
        f.write(
            '; class1 class2 intersection(i) union(i) jaccard index = len(i)/len(u)\n'
        )
        for cls1 in set1:
            ids1 = cls1.getIdSet()
            for cls2 in set2:
                ids2 = cls2.getIdSet()
                inter = len(ids1.intersection(ids2))
                union = len(ids1.union(ids2))
                jaccardIndex = float(inter) / union
                jaccardTuple = (cls1.getObjId(), cls2.getObjId(), inter, union,
                                jaccardIndex)
                f.write('%d %d %d %d %0.3f\n' % jaccardTuple)
                jaccardList.append(jaccardTuple)
        f.close()

        jaccardList.sort(key=lambda e: e[4], reverse=True)
        visitedClasses = set()
        outputFn = self._getPath('consensus.sqlite')
        cleanPath(outputFn)
        outputSet = EMSet(filename=outputFn)

        for clsId1, clsId2, inter, union, jaccardIndex in jaccardList:
            if clsId1 not in visitedClasses:
                visitedClasses.add(clsId1)  # mark as visited
                cls1 = set1[clsId1]
                cls2 = set2[clsId2]
                o = Object()
                o.setObjLabel('classes %d - %d' % (clsId1, clsId2))
                o.class1 = cls1.clone()
                o.class1.id = Integer(clsId1)
                o.class2 = cls2.clone()
                o.class2.id = Integer(clsId2)
                o.jaccard = Float(jaccardIndex)
                o.intersection = Integer(inter)
                o.union = Integer(union)
                outputSet.append(o)

        self._defineOutputs(outputConsensus=outputSet)
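For reference, the Jaccard index computed above reduces to plain set arithmetic; a minimal standalone sketch with made-up particle id sets:

# Jaccard index between two classes, given the particle ids they contain:
# J = len(intersection) / len(union)
ids1 = {1, 2, 3, 4, 5}   # ids of a class from set1 (made up)
ids2 = {3, 4, 5, 6}      # ids of a class from set2 (made up)

inter = len(ids1.intersection(ids2))  # 3
union = len(ids1.union(ids2))         # 6
jaccardIndex = float(inter) / union   # 0.5
print('%d %d %0.3f' % (inter, union, jaccardIndex))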
Example #5
    def createOutputStep(self):
        from sklearn.manifold import TSNE
        Xdim = self.inputParticles.get().getXDim()
        self.Ts = self.inputParticles.get().getSamplingRate()
        newTs = self.targetResolution.get() * 1.0 / 3.0
        self.newTs = max(self.Ts, newTs)
        self.newXdim = int(Xdim * self.Ts / newTs)
        fnOut = self._getFileName('fnOut')
        mdOut = md.MetaData(fnOut)
        coeffMatrix = np.vstack(mdOut.getColumnValues(md.MDL_SPH_COEFFICIENTS))
        X_tsne_1d = TSNE(n_components=1).fit_transform(coeffMatrix)
        X_tsne_2d = TSNE(n_components=2).fit_transform(coeffMatrix)

        newMdOut = md.MetaData()
        i = 0
        for row in md.iterRows(mdOut):
            newRow = row
            newRow.setValue(md.MDL_SPH_TSNE_COEFF1D, float(X_tsne_1d[i, 0]))
            newRow.setValue(md.MDL_SPH_TSNE_COEFF2D,
                            [float(X_tsne_2d[i, 0]),
                             float(X_tsne_2d[i, 1])])
            if self.newTs != self.Ts:
                coeffs = mdOut.getValue(md.MDL_SPH_COEFFICIENTS,
                                        row.getObjId())
                correctionFactor = self.inputVolume.get().getDim(
                )[0] / self.newXdim
                coeffs = [correctionFactor * coeff for coeff in coeffs]
                newRow.setValue(md.MDL_SPH_COEFFICIENTS, coeffs)
            newRow.addToMd(newMdOut)
            i += 1
        newMdOut.write(fnOut)

        inputSet = self.inputParticles.get()
        partSet = self._createSetOfParticles()

        partSet.copyInfo(inputSet)
        partSet.setAlignmentProj()
        partSet.copyItems(inputSet,
                          updateItemCallback=self._updateParticle,
                          itemDataIterator=md.iterRows(
                              fnOut, sortByLabel=md.MDL_ITEM_ID))
        partSet.L1 = Integer(self.l1.get())
        partSet.L2 = Integer(self.l2.get())
        partSet.Rmax = Integer(self.inputVolume.get().getDim()[0] / 2)

        self._defineOutputs(outputParticles=partSet)
        self._defineTransformRelation(self.inputParticles, partSet)
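Each TSNE call above returns one row per particle (shape (n, 1) for n_components=1 and (n, 2) for n_components=2), which is why the loop indexes X_tsne_1d[i, 0] and X_tsne_2d[i, 0] / X_tsne_2d[i, 1]. A minimal standalone sketch of the 2-D case on a made-up coefficient matrix, assuming scikit-learn is installed:

import numpy as np
from sklearn.manifold import TSNE

coeffMatrix = np.random.rand(100, 15)   # 100 particles x 15 coefficients (made up)
X_tsne_2d = TSNE(n_components=2).fit_transform(coeffMatrix)
print(X_tsne_2d.shape)   # -> (100, 2); the n_components=1 call yields (100, 1)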
Example #6
 def createOutputStep(self):
     """ The output is just an Integer. Other protocols can use it in those
         IntParam if it has set allowsPointer=True
     """
     micSet = self.inputMicrographs.get()
     boxSize = Integer(self.particleBoxsize)
     self._defineOutputs(boxsize=boxSize)
     self._defineSourceRelation(micSet, boxSize)
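The docstring mentions IntParam and allowsPointer=True; the sketch below is a hypothetical downstream protocol (class and parameter names made up) showing how such a parameter could point at the boxsize output defined above:

from pyworkflow.protocol import Protocol, params

class ProtUsesBoxSize(Protocol):  # hypothetical consumer, for illustration only
    def _defineParams(self, form):
        form.addSection('Input')
        # With allowsPointer=True the user can either type a number or point
        # this parameter to an Integer output such as 'boxsize' above.
        form.addParam('boxSize', params.IntParam, allowsPointer=True,
                      default=64, label='Particle box size (px)',
                      help='Type a value or select a boxsize output.')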
Example #7
    def _createOutputStep(self):
        # The new output is an Integer
        boxSize = Integer(10)

        if self.iBoxSize.hasValue():
            boxSize.set(2 * int(self.iBoxSize.get()))

        self._defineOutputs(oBoxSize=boxSize)
Example #8
    def _updateItem(self, particle, row):
        self.reader.setParticleTransform(particle, row)
        # FIXME: check if other attrs need saving
        particle._rlnImageOriginalName = String(row.rlnImageOriginalName)
        particle._rlnRandomSubset = Integer(row.rlnRandomSubset)

        newLoc = convert.relionToLocation(row.rlnImageName)
        particle.setLocation(newLoc)
Example #9
    def readPartsFromMics(self, micList, outputParts):
        """ Read the particles extracted for the given list of micrographs
        and update the outputParts set with new items.
        """
        relionToLocation = relion.convert.relionToLocation
        p = Particle()
        p._rlnOpticsGroup = Integer()
        acq = self.getInputMicrographs().getAcquisition()
        # JMRT: Ideally I would like to disable the whole Acquisition for each
        #       particle row, but the SetOfImages will set it again.
        #       Another option could be to disable in the set, but then in
        #       streaming, other protocols might get the wrong optics info
        pAcq = Acquisition(magnification=acq.getMagnification(),
                           voltage=acq.getVoltage(),
                           amplitudeContrast=acq.getAmplitudeContrast(),
                           sphericalAberration=acq.getSphericalAberration())
        p.setAcquisition(pAcq)

        tmp = self._getTmpPath()
        extra = self._getExtraPath()

        for mic in micList:
            posSet = set()
            coordDict = {self._getPos(c): c
                         for c in self.coordDict[mic.getObjId()]}
            del self.coordDict[mic.getObjId()]

            ogNumber = mic.getAttributeValue('_rlnOpticsGroup', 1)

            partsStar = self.__getMicFile(mic, '_extract.star', folder=tmp)
            partsTable = relion.convert.Table(fileName=partsStar)
            stackFile = self.__getMicFile(mic, '.mrcs', folder=tmp)
            endStackFile = self.__getMicFile(mic, '.mrcs', folder=extra)
            pwutils.moveFile(stackFile, endStackFile)

            for part in partsTable:
                pos = (int(float(part.rlnCoordinateX)),
                       int(float(part.rlnCoordinateY)))

                if pos in posSet:
                    print("Duplicate coordinate at: %s, IGNORED. " % str(pos))
                    coord = None
                else:
                    coord = coordDict.get(pos, None)

                if coord is not None:
                    # scale the coordinates according to the particle dimensions
                    coord.scale(self.getBoxScale())
                    p.copyObjId(coord)
                    idx, fn = relionToLocation(part.rlnImageName)
                    p.setLocation(idx, endStackFile)
                    p.setCoordinate(coord)
                    p.setMicId(mic.getObjId())
                    p.setCTF(mic.getCTF())
                    p._rlnOpticsGroup.set(ogNumber)
                    outputParts.append(p)
                    posSet.add(pos)
Example #10
    def createOutputStep(self):
        posDir = self._getExtraPath()
        coordSet = self._createSetOfCoordinates(self.inputMics)
        readSetOfCoordinates(posDir, self.inputMics, coordSet)
        self._defineOutputs(outputCoordinates=coordSet)
        self._defineSourceRelation(self.inputMicrographs, coordSet)

        boxSize = Integer(coordSet.getBoxSize())
        self._defineOutputs(boxsize=boxSize)
        self._defineSourceRelation(self.inputMicrographs.get(), boxSize)
Example #11
    def show(self, form, *params):
        patchValues = [Integer(i) for i in range(32, 130, 8)]

        # Get a data provider from the patchValues to be used in the tree (dialog)
        provider = ListTreeProviderString(patchValues)

        dlg = dialog.ListDialog(form.root, "Patch shape values", provider,
                                "Select one of the size values")

        # Set the chosen value back to the form
        form.setVar(PATCH_SHAPE, dlg.values[0].get())
Example #12
    def launchParticlePickGUIStep(self, micFn):
        # Launch the particle picking GUI
        extraDir = self._getExtraPath()
        process = launchSupervisedPickerGUI(micFn, extraDir, self)
        process.wait()
        # generate the discarded output only if there is a good output
        if self.saveDiscarded and exists(self._getPath('coordinates.sqlite')):
            self.createDiscardedStep()

        coordSet = self.getCoords()
        if coordSet:
            boxSize = Integer(coordSet.getBoxSize())
            self._defineOutputs(boxsize=boxSize)
            self._defineSourceRelation(self.inputMicrographs.get(), boxSize)
Example #13
File: gui.py Project: liz18/scipion
def saveConfig(filename):
    from pyworkflow.mapper import SqliteMapper
    from pyworkflow.object import String, Integer

    mapper = SqliteMapper(filename)
    o = Config()
    for k, v in globals().items():
        if k.startswith('cfg'):
            if type(v) is str:
                value = String(v)
            else:
                value = Integer(v)
            setattr(o, k, value)
    mapper.insert(o)
    mapper.commit()
Example #14
 def _defineParams(self, form, fullForm=True):
     self._defineParams1(form,"t","Cp")
     if fullForm:
         form.addParam('fitType', params.EnumParam, choices=["Linear","Logarithmic","Relative"], label="Fit mode", default=1,
                       help='Linear: sum (Cobserved-Cpredicted)^2\nLogarithmic: sum(log10(Cobserved)-log10(Cpredicted))^2\n'\
                            "Relative: sum ((Cobserved-Cpredicted)/Cobserved)^2")
         form.addParam('Nexp', params.IntParam, label="Number of exponentials", default=1,
                       help='Number of exponentials to fit')
     else:
         self.fitType=Integer()
         self.fitType.set(1)
         self.Nexp=Integer()
         self.Nexp.set(1)
     form.addParam('bounds', params.StringParam, label="Amplitude and time constant bounds", default="", expertLevel=LEVEL_ADVANCED,
                   help='Bounds for the c_i amplitudes and lambdas.\nExample 1: (0,10);(0,1e-2) -> c1 in (0,10), lambda1 in (0,1e-2)\n'\
                        'Example 2: (0,10);(0,1e-2);(0,1);(0,1e-1) -> c1 in (0,10), lambda1 in (0,1e-2), c2 in (0,1), lambda2 in (0,1e-1)')
     form.addParam('confidenceInterval', params.FloatParam, label="Confidence interval=", default=95, expertLevel=LEVEL_ADVANCED,
                   help='Confidence interval for the fitted parameters')
     if fullForm:
         form.addParam('reportX', params.StringParam, label="Evaluate at X=", default="", expertLevel=LEVEL_ADVANCED,
                       help='Evaluate the model at these X values\nExample 1: [0,5,10,20,40,100]\nExample 2: 0:0.55:10, from 0 to 10 in steps of 0.5')
     else:
         self.reportX=String()
         self.reportX.set("")
Example #15
            def updateItem(item, row):
                micName = getMicName(item)

                if micName not in micDict:
                    raise Exception("Micrograph name (aka micName) '%s' was "
                                    "not found in the 'data_micrographs' table of "
                                    "the input star file: %s"
                                    % (micName, inputStar))

                ogNumber = micDict[micName]

                if not hasattr(item, '_rlnOpticsGroup'):
                    item._rlnOpticsGroup = Integer()

                item._rlnOpticsGroup.set(ogNumber)
Example #16
 def initializeRejDict(self):
     self.discDict = {'defocus': 0,
                           'astigmatism': 0,
                           'singleResolution': 0,
                           '_xmipp_ctfCritFirstZero': 0,
                           '_xmipp_ctfCritfirstZeroRatio': 0,
                           '_xmipp_ctfCritCorr13': 0,
                           '_xmipp_ctfIceness': 0,
                           '_xmipp_ctfCritCtfMargin': 0,
                           '_xmipp_ctfCritNonAstigmaticValidty': 0,
                           'consensusResolution': 0
                           }
     for k in self.discDict:
         setattr(self, "rejBy"+k, Integer(0))
     self._store()
Example #17
    def _readValidationPklFile(self, fileName):
        self.SUMMARYFILENAME = self._getTmpPath(self.SUMMARYFILENAME)
        command = """import pickle
import collections
import json

def pickleData(file):
    with open(file,"r") as f:
        return pickle.load(f)

# process file {VALIDATIONCRYOEMPKLFILENAME}
data = pickleData('{VALIDATIONCRYOEMPKLFILENAME}')
dictSummary = collections.OrderedDict()

dictSummary['Rhama_Outliers'] = data.model.geometry.ramachandran.outliers
dictSummary['Rhama_Favored'] = data.model.geometry.ramachandran.favored
dictSummary['Rota_Outliers'] = data.model.geometry.rotamer.outliers
dictSummary['Cbeta_Outliers_n'] = data.model.geometry.c_beta.cbetadev.n_outliers
dictSummary['Clash_score'] = data.model.geometry.clash.score
dictSummary['MolProbity_score'] = data.model.geometry.molprobity_score
""".format(VALIDATIONCRYOEMPKLFILENAME=fileName)

        command += """with open('%s',"w") as f:
    f.write(json.dumps(dictSummary))
""" % (self.SUMMARYFILENAME)

        pythonFileName = self.SUMMARYFILENAME.replace('.txt', '.py')
        # write script file
        with open(pythonFileName, "w") as f:
            f.write(command)

        # execute file with phenix.python
        Plugin.runPhenixProgram("", pythonFileName)

        # read file in scipion python
        with open(self.SUMMARYFILENAME, "r") as f:
            dictSummary = f.read()

        dictSummary = json.loads(dictSummary,
                                 object_pairs_hook=collections.OrderedDict)

        self.ramachandranOutliers = Float(dictSummary['Rhama_Outliers'])
        self.ramachandranFavored = Float(dictSummary['Rhama_Favored'])
        self.rotamerOutliers = Float(dictSummary['Rota_Outliers'])
        self.cbetaOutliers = Integer(dictSummary['Cbeta_Outliers_n'])
        self.clashscore = Float(dictSummary['Clash_score'])
        self.overallScore = Float(dictSummary['MolProbity_score'])
Example #18
 def __init__(self, **kwargs):
     OrderedObject.__init__(self, **kwargs)
     self.name = String()
     # Number of cores from which using the queue is mandatory:
     # 0 means the queue is never mandatory,
     # 1 forces all jobs to be launched through the queue
     self.mandatory = Integer()
     self.queues = None  # List for queue configurations
     self.submitCommand = String()
     # Allow changing the prefix of submission scripts;
     # by default we use ID.job, but in some clusters
     # the job script name must start with a letter
     self.submitPrefix = String()
     self.checkCommand = String()
     self.cancelCommand = String()
     self.submitTemplate = String()
     self.jobDoneRegex = String()
Example #19
    def _defineParams(self, form):
        form.addSection('Input')
        form.addParam('inputExperiment',
                      params.PointerParam,
                      label="Input experiment",
                      pointerClass='PKPDExperiment',
                      help='Select an experiment with samples')
        form.addParam(
            'protElimination',
            params.PointerParam,
            label="Elimination rate",
            pointerClass='ProtPKPDEliminationRate',
            help=
            'Select an execution of a protocol estimating the elimination rate'
        )
        form.addParam(
            "absorptionF",
            params.FloatParam,
            label="Absorption fraction",
            default=1,
            help="Between 0 (=no absorption) and 1 (=full absorption)")

        form.addParam(
            'bounds',
            params.StringParam,
            label="Ka, V, [tlag] bounds",
            default="",
            expertLevel=LEVEL_ADVANCED,
            help=
            'Bounds for Ka (absorption constant), V (distribution volume) and optionally tlag.\nExample 1: (0,1e-3);(30,50);(0.1,0.5) -> Ka in (0,1e-3), V in (30,50) and tlag in (0.1,0.5)\n'
        )
        form.addParam('confidenceInterval',
                      params.FloatParam,
                      label="Confidence interval",
                      default=95,
                      expertLevel=LEVEL_ADVANCED,
                      help='Confidence interval for the fitted parameters')
        form.addParam(
            'includeTlag',
            params.BooleanParam,
            label="Include tlag",
            default=True,
            expertLevel=LEVEL_ADVANCED,
            help='Calculate the delay between administration and absorption')
        self.fitType = Integer()  # Logarithmic fit
        self.fitType.set(1)
Example #20
 def _parseFile(self, fileName):
     with open(fileName, encoding="ISO-8859-1") as f:
         line = f.readline()
         while line:
             words = line.strip().split()
             if len(words) > 1:
                 if (words[0] == 'Ramachandran' and words[1] == 'outliers'):
                     self.ramachandranOutliers = Float(words[3])
                 elif (words[0] == 'favored' and words[1] == '='):
                     self.ramachandranFavored = Float(words[2])
                 elif (words[0] == 'Rotamer' and words[1] == 'outliers'):
                     self.rotamerOutliers = Float(words[3])
                 elif (words[0] == 'C-beta' and words[1] == 'deviations'):
                     self.cbetaOutliers = Integer(words[3])
                 elif (words[0] == 'Clashscore' and words[1] == '='):
                     self.clashscore = Float(words[2])
                 elif (words[0] == 'MolProbity' and words[1] == 'score'):
                     self.overallScore = Float(words[3])
             line = f.readline()
Example #21
    def _updateItem(self, item, row):
        """ Implement this function to do some
        update actions over each single item
        that will be stored in the output Set.
        """
        # Add alignment info from corresponding item on inputAlignment
        inputAlignment = self.inputAlignment.get()
        scale = inputAlignment.getSamplingRate()/self.inputParticles.get().getSamplingRate()

        alignedParticle = inputAlignment[item.getObjId()]
        # If alignment is found for this particle, set the alignment info
        # on the output particle; if not, do not write that item
        if alignedParticle is not None:
            alignment = alignedParticle.getTransform()
            alignment.scaleShifts(scale, shiftsAppliedBefore=self.shiftsAppliedBefore.get())
            item.setTransform(alignment)

            if self.assignRandomSubsets:
                subset = alignedParticle.getAttributeValue('_rlnRandomSubset', None)
                if subset is not None:
                    item._rlnRandomSubset = Integer(subset)
        else:
            item._appendItem = False
Example #22
    def _updateOutput(self, tsIdList):
        """ Update the output set with the finished Tilt-series.
        Params:
            :param tsIdList: list of ids of finished tasks.
        """
        ts = self._getTiltSeries(tsIdList[0])
        tsId = ts.getTsId()
        objId = ts.getObjId()
        # Flag to check the first time we save output
        self._createOutput = getattr(self, '_createOutput', True)

        outputSet = self._getOutputSet()

        if outputSet is None:
            # Special case just to update the outputSet status
            # but it only makes sense when there is outputSet
            if not tsIdList:
                return
            outputSet = self._createOutputSet()
        else:
            outputSet.enableAppend()
            self._createOutput = False

        newCTFTomoSeries = CTFTomoSeries()
        newCTFTomoSeries.copyInfo(ts)
        newCTFTomoSeries.setTiltSeries(ts)
        newCTFTomoSeries.setTsId(tsId)
        newCTFTomoSeries.setObjId(objId)

        outputSet.append(newCTFTomoSeries)

        index = 1
        for ti in self._tsDict.getTiList(tsId):
            newCTFTomo = ti._ctfModel
            newCTFTomo.setIndex(Integer(index))
            index += 1
            newCTFTomoSeries.append(newCTFTomo)

        newCTFTomoSeries.calculateDefocusUDeviation()
        newCTFTomoSeries.calculateDefocusVDeviation()

        if not (newCTFTomoSeries.getIsDefocusUDeviationInRange() and
                newCTFTomoSeries.getIsDefocusVDeviationInRange()):
            newCTFTomoSeries.setEnabled(False)

        newCTFTomoSeries.write(properties=False)
        outputSet.update(newCTFTomoSeries)

        if self._createOutput:
            self._defineOutputs(**{self._getOutputName(): outputSet})
            self._defineSourceRelation(self._getInputTs(pointer=True),
                                       outputSet)
            self._createOutput = False
        else:
            outputSet.write()
            self._store(outputSet)

        outputSet.close()
        self._store()

        if self._tsDict.allDone():
            self._coStep.setStatus(STATUS_NEW)
Example #23
    def _selectCTF(self, ctfId):
        # Depending on the flags selected by the user, we set the values of
        # the params to compare with

        def compareValue(ctf, label, comp, crit):
            """ Returns True if the ctf.label NOT complain the crit by comp
            """
            if hasattr(ctf, label):
                if comp == 'lt':
                    discard = getattr(ctf, label).get() < crit
                elif comp == 'bt':
                    discard = getattr(ctf, label).get() > crit
                else:
                    raise Exception("'comp' must be either 'lt' or 'bt'.")
            else:
                print("%s not found. Skipping evaluation on that." % label)
                return False
            if discard:
                self.discDict[label] += 1
            return discard

        minDef, maxDef = self._getDefociValues()
        maxAstig = self._getMaxAstisgmatism()
        minResol = self._getMinResol()

        # TODO: Change this way to get the ctf.
        ctf = self.inputCTF.get()[ctfId]

        defocusU = ctf.getDefocusU()
        defocusV = ctf.getDefocusV()
        astigm = abs(defocusU - defocusV)
        resol = self._getCtfResol(ctf)

        defRangeCrit = (defocusU < minDef or defocusU > maxDef or
                        defocusV < minDef or defocusV > maxDef)
        if defRangeCrit:
            self.discDict['defocus'] += 1

        astigCrit = astigm > maxAstig
        if astigCrit:
            self.discDict['astigmatism'] += 1

        singleResolCrit = resol > minResol
        if singleResolCrit:
            self.discDict['singleResolution'] += 1

        firstCondition = defRangeCrit or astigCrit or singleResolCrit

        consResolCrit = False
        if self.calculateConsensus:
            consResolCrit = self.minConsResol < self._freqResol[ctfId]
            if consResolCrit:
                self.discDict['consensusResolution'] += 1

        secondCondition = False
        if self.useCritXmipp:
            firstZero = self._getCritFirstZero()
            minFirstZero, maxFirstZero = self._getCritFirstZeroRatio()
            corr = self._getCritCorr()
            iceness = self._getIceness()
            ctfMargin = self._getCritCtfMargin()
            minNonAstigmatic, maxNonAstigmatic = \
                self._getCritNonAstigmaticValidity()

            ctfX = self.xmippCTF[ctfId]
            secondCondition = (
                compareValue(ctfX, '_xmipp_ctfCritFirstZero', 'lt', firstZero) or
                compareValue(ctfX, '_xmipp_ctfCritfirstZeroRatio', 'lt', minFirstZero) or
                compareValue(ctfX, '_xmipp_ctfCritfirstZeroRatio', 'bt', maxFirstZero) or
                compareValue(ctfX, '_xmipp_ctfCritCorr13', 'lt', corr) or
                compareValue(ctfX, '_xmipp_ctfIceness', 'bt', iceness) or
                compareValue(ctfX, '_xmipp_ctfCritCtfMargin', 'lt', ctfMargin) or
                compareValue(ctfX, '_xmipp_ctfCritNonAstigmaticValidty', 'lt', minNonAstigmatic) or
                compareValue(ctfX, '_xmipp_ctfCritNonAstigmaticValidty', 'bt', maxNonAstigmatic))

        """ Write to a text file the items that have been done. """
        if firstCondition or consResolCrit or secondCondition:
            fn = self._getCtfSelecFileDiscarded()
            with open(fn, 'a') as f:
                f.write('%d F\n' % ctf.getObjId())
        else:
            if (ctf.isEnabled()):
                fn = self._getCtfSelecFileAccepted()
                with open(fn, 'a') as f:
                    f.write('%d T\n' % ctf.getObjId())
            else:
                fn = self._getCtfSelecFileAccepted()
                with open(fn, 'a') as f:
                    f.write('%d F\n' % ctf.getObjId())

        for k, v in self.discDict.items():
            setattr(self, "rejBy"+k, Integer(v))
        self._store()
Example #24
    def createOutputStep(self):
        inputMovies = self.inputMovies.get()
        micSet = self._createSetOfMicrographs()
        micSet.copyInfo(inputMovies)
        # Also create a Set of Movies with the alignment parameters
        if self.doSaveMovie:
            movieSet = self._createSetOfMovies()
            movieSet.copyInfo(inputMovies)
            movieSet.cropOffsetX = Integer(self.cropOffsetX)
            movieSet.cropOffsetY = Integer(self.cropOffsetY)
            movieSet.cropDimX = Integer(self.cropDimX)
            movieSet.cropDimY = Integer(self.cropDimY)
            movieSet.sumFrame0 = Integer(self.sumFrame0)
            movieSet.sumFrameN = Integer(self.sumFrameN)

        alMethod = self.alignMethod.get()
        for movie in self.inputMovies.get():
            micName = self._getNameExt(movie.getFileName(), '_aligned', 'mrc')
            metadataName = self._getNameExt(movie.getFileName(), '_aligned',
                                            'xmd')
            plotCartName = self._getNameExt(movie.getFileName(), '_plot_cart',
                                            'png')
            psdCorrName = self._getNameExt(movie.getFileName(),
                                           '_aligned_corrected', 'psd')
            # Parse the alignment parameters and store the log files
            alignedMovie = movie.clone()

            if self.run:
                alignedMovie.setFileName(
                    self._getExtraPath(
                        self._getNameExt(movie.getFileName(), '_aligned',
                                         'mrcs')))
            ####>>>This is wrong. Save an xmipp metadata
            alignedMovie.alignMetaData = String(
                self._getExtraPath(metadataName))
            alignedMovie.plotCart = self._getExtraPath(plotCartName)
            alignedMovie.psdCorr = self._getExtraPath(psdCorrName)
            '''if (alMethod == AL_OPTICAL or
                alMethod == AL_DOSEFGPUOPTICAL or 
                alMethod == AL_CROSSCORRELATIONOPTICAL):
                movieCreatePlot(alignedMovie, True)'''

            if self.doSaveMovie:
                movieSet.append(alignedMovie)

            mic = em.Micrograph()
            # All micrographs are copied to the 'extra' folder after each step
            mic.setFileName(self._getExtraPath(micName))

            # The micName of a micrograph MUST be the same as the original movie
            #             mic.setMicName(micName)
            mic.setMicName(movie.getMicName())

            if (alMethod == AL_OPTICAL or alMethod == AL_DOSEFGPUOPTICAL
                    or alMethod == AL_CROSSCORRELATIONOPTICAL):

                mic.alignMetaData = String(self._getExtraPath(metadataName))
                mic.plotCart = self._getExtraPath(plotCartName)
                movieCreatePlot(mic, True)
                mic.plotCart = em.Image()
                mic.plotCart.setFileName(self._getExtraPath(plotCartName))
            #if alMethod != AL_DOSEFGPU and alMethod != AL_CROSSCORRELATION:
            mic.psdCorr = em.Image()
            mic.psdCorr.setFileName(self._getExtraPath(psdCorrName))
            micSet.append(mic)

            # TODO: Methods for dosefgpu should be transferred to here
            """
            if alMethod == AL_DOSEFGPU:
                # Parse the alignment parameters and store the log files
                alignedMovie = movie.clone()
                logFile = self._getExtraPath(self._getLogFile(movie.getObjId()))
                import pyworkflow.em.packages.dosefgpu as dosefgpu
                alignment = dosefgpu.parseMovieAlignment(logFile)
                alignedMovie.setAlignment(alignment)
                movieSet.append(alignedMovie)
            """
        self._defineOutputs(outputMicrographs=micSet)
        self._defineSourceRelation(self.inputMovies, micSet)
        if self.doSaveMovie:
            self._defineOutputs(outputMovies=movieSet)
        """
Example #25
 def addDiscardedStr(label):
     obj = getattr(self, "rejBy%s" % label, Integer(0))
     number = obj.get()
     return "" if number == 0 else "  (%d discarded)" % number
Example #26
    def operateStep(self):

        outputSet = self.inputSet.get().create(self._getPath())

        if self.operation.get() == 0:
            # Filter columns
            referenceValue = self.filterValue.get()
            value = self.inputSet.get().getFirstItem().getAttributeValue(
                self.filterColumn.get())

            if isinstance(value, float):
                referenceValue = float(referenceValue)
            elif isinstance(value, int):
                referenceValue = int(referenceValue)

            filterOp = self.filterOp.get()
            if filterOp == 6:
                referenceValue2 = self.filterValue2.get()

                if isinstance(value, float):
                    referenceValue2 = float(referenceValue2)
                elif isinstance(value, int):
                    referenceValue2 = int(referenceValue2)

            for oldEntry in self.inputSet.get():
                value = oldEntry.getAttributeValue(self.filterColumn.get())
                if isinstance(value, Float):
                    value = float(value)
                elif isinstance(value, Integer):
                    value = int(value)

                add = False
                if filterOp == 0:  # ==
                    add = value == referenceValue
                elif filterOp == 1:  # >
                    add = value > referenceValue
                elif filterOp == 2:  # >=
                    add = value >= referenceValue
                elif filterOp == 3:  # <
                    add = value < referenceValue
                elif filterOp == 4:  # <=
                    add = value <= referenceValue
                elif filterOp == 5:  # !=
                    add = value != referenceValue
                elif filterOp == 6:  # between
                    add = (value <= referenceValue
                           and value >= referenceValue2)
                elif filterOp == 7:  #startswith
                    add = value.startswith(referenceValue)
                elif filterOp == 8:  # endswith
                    add = value.endswith(referenceValue)
                elif filterOp == 9:  # contains
                    add = referenceValue in value
                elif filterOp == 10:  # does not startswith
                    add = not (value.startswith(referenceValue))
                elif filterOp == 11:  # does not endswith
                    add = not (value.endswith(referenceValue))
                elif filterOp == 12:  # does not contains
                    add = not (referenceValue in value)
                if add:
                    newEntry = self.inputSet.get().ITEM_TYPE()
                    newEntry.copy(oldEntry)
                    outputSet.append(newEntry)

        elif self.operation.get() == 1:
            # Keep columns
            keepList = [x.strip() for x in self.keepColumns.get().split()]

            ignoreList = []
            for name, _ in self.inputSet.get().getFirstItem().getAttributes():
                if not name in keepList:
                    ignoreList.append(name)
            for oldEntry in self.inputSet.get():
                newEntry = self.inputSet.get().ITEM_TYPE()
                newEntry.copy(oldEntry, ignoreAttrs=ignoreList)
                outputSet.append(newEntry)

        elif self.operation.get() == 2:
            # Unique
            found = {}
            for oldEntry in self.inputSet.get():
                value = oldEntry.getAttributeValue(self.filterColumn.get())
                if not value in found:
                    found[value] = True
                    newEntry = self.inputSet.get().ITEM_TYPE()
                    newEntry.copy(oldEntry)
                    outputSet.append(newEntry)

        elif self.operation.get() >= 3 and self.operation.get() <= 6:
            # Top N, Bottom N, Top %, Bottom %
            V = []
            for entry in self.inputSet.get():
                V.append(entry.getAttributeValue(self.filterColumn.get()))
            V.sort()
            op = self.operation.get()
            if op == 3:
                threshold = V[-self.N.get()]
            elif op == 4:
                threshold = V[self.N.get() - 1]
            elif op == 5:
                threshold = V[-ceil(self.percentile.get() / 100 * len(V))]
            elif op == 6:
                threshold = V[ceil(self.percentile.get() / 100 * len(V)) - 1]

            for oldEntry in self.inputSet.get():
                value = oldEntry.getAttributeValue(self.filterColumn.get())
                if (op == 3 or op == 5) and value >= threshold:
                    newEntry = self.inputSet.get().ITEM_TYPE()
                    newEntry.copy(oldEntry)
                    outputSet.append(newEntry)
                elif (op == 4 or op == 6) and value <= threshold:
                    newEntry = self.inputSet.get().ITEM_TYPE()
                    newEntry.copy(oldEntry)
                    outputSet.append(newEntry)

        elif self.operation.get() == 7:
            # Count the number of entries that are the same
            count = {}
            for oldEntry in self.inputSet.get():
                value = oldEntry.getAttributeValue(self.filterColumn.get())
                if not value in count:
                    count[value] = 0
                count[value] += 1

            for oldEntry in self.inputSet.get():
                value = oldEntry.getAttributeValue(self.filterColumn.get())

                newEntry = self.inputSet.get().ITEM_TYPE()
                newEntry.copy(oldEntry)
                newEntry.count = Integer(count[value])
                outputSet.append(newEntry)

        elif self.operation.get() == 8:
            # Intersection between 2 Sets
            secondSet = {}
            for entry in self.secondSet.get():
                value = entry.getAttributeValue(self.filterColumn.get())
                if not value in secondSet:
                    secondSet[value] = True

            for oldEntry in self.inputSet.get():
                value = oldEntry.getAttributeValue(self.filterColumn.get())

                if value in secondSet:
                    newEntry = self.inputSet.get().ITEM_TYPE()
                    newEntry.copy(oldEntry)
                    outputSet.append(newEntry)

        elif self.operation.get() == 9:
            # Sort
            V = []
            newEntries = []
            for entry in self.inputSet.get():
                V.append(entry.getAttributeValue(self.filterColumn.get()))
                newEntry = self.inputSet.get().ITEM_TYPE()
                newEntry.copy(entry)
                newEntry.cleanObjId()
                newEntries.append(newEntry)
            if self.direction.get() == 0:
                idxSort = np.argsort(V)
            else:
                idxSort = np.argsort(-np.asarray(V))

            for idx in idxSort:
                outputSet.append(newEntries[idx])

        if len(outputSet) > 0:
            self._defineOutputs(output=outputSet)
            self._defineSourceRelation(self.inputSet, outputSet)
Example #27
 def __init__(self, **args):
     EMObject.__init__(self, **args)
     self.type = Integer()
     self.type.set(self.SAT)
     self.substanceParams = None
Example #28
    def __init__(self, **args):
        EMObject.__init__(self, **args)
        self.type = Integer()
        self.type.set(self.INTERP)

        self.lungParams = None
Example #29
 def getEven(self, boxSize):
     return Integer(int(int(boxSize) / 2 + 0.75) * 2)
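The arithmetic in getEven maps any box size to an even number of pixels, rounding odd sizes up; a quick standalone check with plain Python ints:

# Same arithmetic as getEven, without the Integer wrapper
def get_even(box_size):
    return int(int(box_size) / 2 + 0.75) * 2

assert get_even(6) == 6
assert get_even(7) == 8    # odd sizes round up to the next even value
assert get_even(8) == 8
assert get_even(9) == 10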
Example #30
 def __init__(self, **args):
     ProtRefine3D.__init__(self, **args)
     ProtClassify3D.__init__(self, **args)
     self.numberOfCtfGroups = Integer(1)
     self._lastIter = Integer(0)