예제 #1
0
    def _copyFile(self, inputFile):
        """ Copy a file into the destination folder (at most once) and
        return the path it will have there.

        Files already recorded in self._processed are not copied again.
        """
        alreadyCopied = inputFile in self._processed
        if not alreadyCopied:
            srcPath = os.path.join(self._src, inputFile)
            pwutils.copyFile(srcPath, self._dst)
            self._processed.add(inputFile)

        return os.path.join(self._dst, os.path.basename(inputFile))
예제 #2
0
    def importMaskStep(self, path, samplingRate):
        """ Import the mask file at *path* into the project and register
        the resulting Mask/VolumeMask output with the given sampling rate.
        """
        self.info("Using mask path: '%s'" % path)

        # Bring the image file into the project's extra folder
        dst, cleaned = cleanFileName(self._getExtraPath(basename(path)))
        pwutils.copyFile(path, dst)

        # Read dimensions from the copied image header
        _, _, z, n = ImageHandler().getDimensions(dst)

        # Build a 3D mask when either z or n exceeds 1; n > 1 covers
        # volume maps stored as stacks in mrc format.
        mask = VolumeMask() if (z > 1 or n > 1) else Mask()

        mask.setFileName(dst)
        mask.setSamplingRate(samplingRate)

        self._defineOutputs(**{self._possibleOutputs.outputMask.name: mask})
    def importCoordinatesStep(self, samplingRate):
        """ Build a SetOfCoordinates3D from the matched coordinate files
        (or load it directly from an imported sqlite set) and register it
        as output.

        :param samplingRate: sampling rate assigned to the coordinate set.
        """
        importTomograms = self.importTomograms.get()
        suffix = self._getOutputSuffix(SetOfCoordinates3D)
        coordsSet = self._createSetOfCoordinates3D(importTomograms, suffix)
        coordsSet.setSamplingRate(samplingRate)
        coordsSet.setPrecedents(importTomograms)
        coordsSet.setBoxSize(self.boxSize.get())

        if self.getImportFrom() == IMPORT_FROM_SQLITE:
            # Reuse an existing sqlite coordinate set: copy it into the
            # project and point the set's mapper at the copy.
            sqliteFile = self.getMatchFiles()[0]
            newFileName = self._getPath(basename(sqliteFile))
            copyFile(sqliteFile, newFileName)
            coordsSet._mapperPath.set('%s, %s' % (newFileName, ''))
            coordsSet.load()
        else:
            ci = self.getImportClass()
            # Pair each tomogram with its coordinate file by comparing
            # base names with the extension stripped on both sides.
            for tomo in importTomograms.iterItems():
                tomoName = basename(os.path.splitext(tomo.getFileName())[0])
                for coordFile, fileId in self.iterFiles():
                    fileName = basename(os.path.splitext(coordFile)[0])
                    if tomo is not None and tomoName == fileName:
                        # Parse the coordinates in the given format for this micrograph
                        if self.getImportFrom() == IMPORT_FROM_EMAN or self.getImportFrom() == IMPORT_FROM_TXT:
                            # Callback handed to the importer; presumably
                            # invoked synchronously during
                            # importCoordinates3D so the loop's `tomo` is
                            # still current — TODO confirm.
                            def addCoordinate(coord, x, y, z):
                                coord.setVolume(tomo.clone())
                                coord.setPosition(x, y, z, const.BOTTOM_LEFT_CORNER)
                                coordsSet.append(coord)
                            ci.importCoordinates3D(coordFile, addCoordinate)
                        elif self.getImportFrom() == IMPORT_FROM_DYNAMO:
                            ci(coordFile, coordsSet, tomo.clone())

        args = {}
        args[self.OUTPUT_PREFIX] = coordsSet
        self._defineOutputs(**args)
        self._defineSourceRelation(self.importTomograms, coordsSet)
    def testFlipAssessment(self):
        """ Check convert.needToFlipOnY on a bad mrc, a non-mrc file and
        a corrected mrc file.
        """
        mrcFile = self.ds.getFile('micrographs/006.mrc')

        # An mrc file with a wrong ispg value (0) must require flipping
        self.assertTrue(convert.needToFlipOnY(mrcFile),
                        "needToFlipOnY wrong for bad mrc.")

        # A non-mrc file never requires flipping
        self.assertFalse(convert.needToFlipOnY('dummy.foo'),
                         "needToFlipOnY wrong for non mrc.")

        # Copy 006 and correct the ISPG value in its header
        goodMrc = self.getOutputPath('good_ispg.mrc')
        pwutils.copyFile(mrcFile, goodMrc)

        header = Ccp4Header(goodMrc, readHeader=True)
        header.setISPG(0)
        header.writeHeader()

        # The corrected mrc file must not require flipping
        self.assertFalse(convert.needToFlipOnY(goodMrc),
                         "needToFlipOnY wrong for good mrc.")
예제 #5
0
def main():
    """ Entry point: inspect or export the items of an EM set file,
    depending on the parsed command-line flags.
    """
    args = get_parser().parse_args()

    setObj = EMSet(filename=args.setFile)
    firstItem = setObj.getFirstItem()
    root = pwutils.findRootFrom(args.setFile, firstItem.getFileName())
    print("ROOT: ", root)

    if args.output_text:
        # Dump every item filename, one per line, into the output file
        with open(args.output, 'w') as f:
            f.writelines('%s\n' % item.getFileName() for item in setObj)
    elif args.print:
        for item in setObj:
            print(item.getFileName())
    elif args.copy:
        for item in setObj:
            fn = os.path.join(root, item.getFileName())
            print('Copying %s to %s' % (fn, args.output))
            pwutils.copyFile(fn, args.output)
    elif args.check_dims:
        from pwem.emlib.image import ImageHandler
        ih = ImageHandler()

        # Histogram of image dimensions across the whole set
        counter = {}
        for item in setObj:
            fn = os.path.join(root, item.getFileName())
            dims = ih.getDimensions(fn)
            counter[dims] = counter.get(dims, 0) + 1
            print('%s: %s' % (os.path.basename(fn), dims))

        pwutils.prettyDict(counter)
    def _mergeAllParFiles(self, iterN, ref, numberOfBlocks):
        """ Merge the per-block parameter files created in a refineIterStep
        into a single class parameter file.

        :param iterN: iteration number used to resolve file names.
        :param ref: reference (class) number used to resolve file names.
        :param numberOfBlocks: number of block files; when 1 the single
            block file is simply copied to the destination.
        """
        file2 = self._getFileName('output_par_class', iter=iterN, ref=ref)
        if numberOfBlocks != 1:
            # 'with' guarantees the handles are closed even if a read or
            # write fails (the original leaked them on error).
            with open(file2, 'w+') as f2:
                for block in range(1, numberOfBlocks + 1):
                    file1 = self._getFileName('output_par_block_class',
                                              block=block,
                                              iter=iterN,
                                              ref=ref)
                    with open(file1) as f1:
                        # Concatenate data lines; 'C'-prefixed lines are
                        # comments and are dropped from the merged file.
                        for line in f1:
                            if not line.startswith('C'):
                                f2.write(line)
        else:
            file1 = self._getFileName('output_par_block_class',
                                      block=1,
                                      iter=iterN,
                                      ref=ref)
            copyFile(file1, file2)
예제 #7
0
    def createOutputStep(self, atomStructPath):
        """ Copy the PDB structure into the protocol folder and register
        the output AtomStruct, linked to the input volume when present.
        """
        if not exists(atomStructPath):
            raise Exception("Atomic structure not found at *%s*" %
                            atomStructPath)

        localPath = abspath(self._getExtraPath(basename(atomStructPath)))

        # Copy only when the source is not already the local file
        if str(atomStructPath) != str(localPath):
            pwutils.copyFile(atomStructPath, localPath)

        localPath = relpath(localPath)

        pdb = emobj.AtomStruct()
        volume = self.inputVolume.get()

        # If a volume exists, assign it to the pdb object.
        # IMPORTANT: the "if volume is not None" checks are needed
        # because the pdb object must be persisted before the last
        # source relation can be made.
        if volume is not None:
            pdb.setVolume(volume)

        pdb.setFileName(localPath)
        self._defineOutputs(outputPdb=pdb)

        if volume is not None:
            self._defineSourceRelation(volume, pdb)
예제 #8
0
def runProjectionMatching(self, iterN, refN, args, **kwargs):
    """ Loop over all CTF groups and launch a projection matching for each one.
    Note: Iterate ctf groups in reverse order to have same order as 
          in add_to docfiles from angular_class_average. #FIXME: check why reverse order is needed

    :param iterN: iteration number used to resolve file names.
    :param refN: reference number used to resolve file names.
    :param args: extra arguments appended to the program command line.
    :param kwargs: forwarded to self.runJob.
    """
    # NOTE: this is legacy Python 2 code (print statements below).
    projMatchRootName = self._getFileName('projMatchRootNames', iter=iterN, ref=refN)
    refname = self._getFileName('projectLibraryStk', iter=iterN, ref=refN)
    
    numberOfCtfGroups = self.numberOfCtfGroups.get()
#     ctfGroupName = self._getPath(self.ctfGroupDirectory, '%(ctfGroupRootName)s')
    #remove output metadata
    cleanPath(projMatchRootName)
    
    for ctfN in reversed(list(self.allCtfGroups())):
        self._log.info('CTF group: %d/%d' % (ctfN, numberOfCtfGroups))
        ctfArgs = ' -i %(inputdocfile)s -o %(outputname)s --ref %(refname)s'        
        
        inputdocfile = self._getBlockFileName(ctfBlockName, ctfN, self.docFileInputAngles[iterN-1])
        outputname = self._getBlockFileName(ctfBlockName, ctfN, projMatchRootName)
        # Replace the generic sampling file with this ctf group's one
        baseTxtFile = removeExt(refname)
        neighbFile = baseTxtFile + '_sampling.xmd'
        cleanPath(neighbFile)
        neighbFileb = baseTxtFile + '_group' + str(ctfN).zfill(self.FILENAMENUMBERLENGTH) + '_sampling.xmd'
        copyFile(neighbFileb, neighbFile)
        print "copied file ", neighbFileb, "to", neighbFile
        if self.doCTFCorrection and self._referenceIsCtfCorrected[iterN]:
            ctfArgs += ' --ctf %s' % self._getBlockFileName('', ctfN, self._getFileName('stackCTFs'))
    
        # The %(name)s placeholders in ctfArgs are resolved from the
        # local variables assigned above via locals().
        progArgs = ctfArgs % locals() + args
        self.runJob('xmipp_angular_projection_matching', progArgs, **kwargs)
예제 #9
0
def runFilterVolumeStep(self, iterN, refN, constantToAddToFiltration):
    """ Low-pass filter (or plainly copy) the reconstructed volume of the
    given iteration and reference.

    NOTE(review): when self.useFscForFilter is False, `filterInPxAt` is
    never assigned and the `if (filterInPxAt > 0.5)` test below raises
    NameError — confirm this step is only scheduled with useFscForFilter
    enabled. Legacy Python 2 code (print statement below).
    """
    reconstructedVolume = self._getFileName('reconstructedFileNamesIters',
                                            iter=iterN,
                                            ref=refN)
    reconstructedFilteredVolume = self.reconstructedFilteredFileNamesIters[
        iterN][refN]
    if self.useFscForFilter:
        # -1 marks "derive the cutoff from this iteration's resolution"
        if self._fourierMaxFrequencyOfInterest[iterN + 1] == -1:
            fourierMaxFrequencyOfInterest = self.resolSam / self._getFourierMaxFrequencyOfInterest(
                iterN, refN)
            print "el valor de la resolucion es :", self._getFourierMaxFrequencyOfInterest(
                iterN, refN)
            filterInPxAt = fourierMaxFrequencyOfInterest + constantToAddToFiltration
        else:
            filterInPxAt = constantToAddToFiltration

    # A cutoff above 0.5 skips the Fourier filter and just copies the map
    if (filterInPxAt > 0.5):
        copyFile(reconstructedVolume, reconstructedFilteredVolume)
    else:
        args = ' -i %(volume)s -o %(filteredVol)s --fourier low_pass %(filter)s'
        params = {
            'volume': reconstructedVolume,
            'filteredVol': reconstructedFilteredVolume,
            'filter': filterInPxAt
        }
        self.runJob("xmipp_transform_filter", args % params)
예제 #10
0
 def _splitParFile(self, iterN, ref, numberOfBlocks):
     """ Split the merged parameter file of the previous iteration back
     into per-block input files for the current iteration.

     :param iterN: current iteration number.
     :param ref: reference (class) number used to resolve file names.
     :param numberOfBlocks: number of blocks to split into; when 1 the
         merged file is simply copied.
     """
     prevIter = iterN - 1
     file1 = self._getFileName('output_par_class', iter=prevIter, ref=ref)
     if numberOfBlocks != 1:
         for block in range(1, numberOfBlocks + 1):
             file2 = self._getFileName('input_par_block_class', prevIter=prevIter, iter=iterN, ref=ref, block=block)
             _, finalPart = self._particlesInBlock(block, numberOfBlocks)

             # The merged file is re-read from the start for every block;
             # 'with' guarantees both handles are closed even on error
             # (the original leaked them if parsing raised).
             with open(file1) as f1, open(file2, 'w+') as f2:
                 for l in f1:
                     if l.startswith('C'):
                         # Comment lines are kept in every block file
                         f2.write(l)
                     else:
                         split = l.split()
                         numPart = int(''.join(split[:1]))

                         # Copy data lines up to this block's last
                         # particle; the remainder belongs to later blocks
                         if numPart <= finalPart:
                             f2.write(l)
                         else:
                             break
     else:
         file2 = self._getFileName('input_par_block_class', prevIter=prevIter, iter=iterN, ref=ref, block=1)
         copyFile(file1, file2)
예제 #11
0
    def writeCtfStarStep(self):
        """ Export the input CTF set (with its micrographs and, when
        available, their PSD files) to a Relion star file and link it
        from the project root.

        NOTE: legacy Python 2 code (print statements below).
        """
        inputCTF = self.inputCTF.get()

        if self.micrographSource == 0: # same as CTF estimation
            ctfMicSet = inputCTF.getMicrographs()
        else:
            ctfMicSet = self.inputMicrographs.get()

        # In-memory set: items are only collected to write the star file
        micSet = SetOfMicrographs(filename=':memory:')

        psd = inputCTF.getFirstItem().getPsdFile()
        hasPsd = psd and os.path.exists(psd)

        if hasPsd:
            psdPath = self._getPath('PSD')
            pwutils.makePath(psdPath)
            print "Writing PSD files to %s" % psdPath

        for ctf in inputCTF:
            # Get the corresponding micrograph
            mic = ctfMicSet[ctf.getObjId()]
            if mic is None:
                print("Skipping CTF id: %s, it is missing from input "
                      "micrographs. " % ctf.getObjId())
                continue

            micFn = mic.getFileName()
            if not os.path.exists(micFn):
                print "Skipping micrograph %s, it does not exists. " % micFn
                continue

            mic2 = mic.clone()
            mic2.setCTF(ctf)
            if hasPsd:
                # Copy the PSD next to the star file with a canonical name
                psdFile = ctf.getPsdFile()
                newPsdFile = join(psdPath, '%s_psd.mrc' % mic.getMicName())
                if not os.path.exists(psdFile):
                    print "PSD file %s does not exits" % psdFile
                    print "Skipping micrograph %s" % micFn
                    continue
                pwutils.copyFile(psdFile, newPsdFile)
                ctf.setPsdFile(newPsdFile)
            micSet.append(mic2)

        starFile = self._getPath(self.CTF_STAR_FILE % self.getObjId())
        print "Writing set: %s" % inputCTF
        print " to: %s" % starFile

        # Derived acquisition values are kept on self because
        # preprocessMicrograph (used below) reads them per row.
        acq = ctfMicSet.getAcquisition()
        self.samplingRate = ctfMicSet.getSamplingRate()
        mag = acq.getMagnification()
        self.detectorPixelSize = 1e-4 * self.samplingRate * mag

        writeSetOfMicrographs(micSet, starFile,
                              preprocessImageRow=self.preprocessMicrograph)

        # Let's create a link from the project roots to facilitate the import
        # of the star file into a Relion project
        pwutils.createLink(starFile, os.path.basename(starFile))
예제 #12
0
    def _createFilsXmlFile(self, templateFile, outDir, isSource=True):
        """ Copy the fils xml template into *outDir* and fill in the
        geometry fields from the protocol parameters.

        :param templateFile: path of the template xml file.
        :param outDir: destination directory for the edited copy.
        :param isSource: True to use the source-side parameters (_S
            constants), False for the target-side ones (_T constants).
        """
        EUCLIDEAN_DIST = 'eu_dst'
        GEODESIC_DIST = 'geo_dst'
        GEODESIC_LEN = 'geo_len'
        FIL_SINU = 'sin'

        # Copy the template xml to extra
        filename = join(outDir, basename(templateFile))
        copyFile(templateFile, filename)

        # Map each xml element name to the [low, high, sign] protocol
        # attribute names holding the corresponding values.
        geomDict = OrderedDict()
        if isSource:
            self._xmlSources = filename
            segLabel = encodePresegArea(self.segLabelS.get())
            geomDict[EUCLIDEAN_DIST] = [
                MIN_EUC_DIST_S, MAX_EUC_DIST_S, EUC_RANGE_S
            ]
            geomDict[GEODESIC_DIST] = [
                MIN_GEO_DIST_S, MAX_GEO_DIST_S, GEO_RANGE_S
            ]
            geomDict[GEODESIC_LEN] = [
                MIN_GEO_LEN_S, MAX_GEO_LEN_S, GEO_LEN_RANGE_S
            ]
            geomDict[FIL_SINU] = [MIN_FIL_SINU_S, MAX_FIL_SINU_S, SINU_RANGE_S]
        else:
            self._xmlTargets = filename
            segLabel = encodePresegArea(self.segLabelT.get())
            geomDict[EUCLIDEAN_DIST] = [
                MIN_EUC_DIST_T, MAX_EUC_DIST_T, EUC_RANGE_T
            ]
            geomDict[GEODESIC_DIST] = [
                MIN_GEO_DIST_T, MAX_GEO_DIST_T, GEO_RANGE_T
            ]
            geomDict[GEODESIC_LEN] = [
                MIN_GEO_LEN_T, MAX_GEO_LEN_T, GEO_LEN_RANGE_T
            ]
            # Fixed copy-paste bug: this branch previously used
            # SINU_RANGE_S; every other target entry uses the _T constant.
            geomDict[FIL_SINU] = [MIN_FIL_SINU_T, MAX_FIL_SINU_T, SINU_RANGE_T]

        # Edit the corresponding fields
        xmlTree = ET.parse(filename)
        rootElement = xmlTree.getroot()
        mb_slice = rootElement.findall("mb_slice")
        mb_slice = mb_slice[0]
        mb_slice.find('side').text = str(segLabel)

        for key, valList in geomDict.items():
            for el in mb_slice.findall(key):
                if el.attrib['id'] == 'low':
                    el.text = str(getattr(self, valList[0]).get())
                elif el.attrib['id'] == 'high':
                    el.text = str(getattr(self, valList[1]).get())
                elif el.attrib['id'] == 'sign':
                    el.text = self._decodeRangeValue(
                        getattr(self, valList[2]).get())

        # Write the modified xml file.
        xmlTree.write(filename, encoding='UTF-8', xml_declaration=True)
예제 #13
0
    def writeCtfStarStep(self):
        """ Export the input CTF set (with its micrographs and, when
        available, their PSD files) to a Relion star file and link it
        from the project root.

        NOTE: legacy Python 2 code (print statements below).
        """
        inputCTF = self.inputCTF.get()

        if self.micrographSource == 0:  # same as CTF estimation
            ctfMicSet = inputCTF.getMicrographs()
        else:
            ctfMicSet = self.inputMicrographs.get()

        # In-memory set: items are only collected to write the star file
        micSet = SetOfMicrographs(filename=':memory:')

        psd = inputCTF.getFirstItem().getPsdFile()
        hasPsd = psd and os.path.exists(psd)

        if hasPsd:
            psdPath = self._getPath('PSD')
            pwutils.makePath(psdPath)
            print "Writing PSD files to %s" % psdPath

        for ctf in inputCTF:
            # Get the corresponding micrograph
            mic = ctfMicSet[ctf.getObjId()]
            if mic is None:
                print(
                    "Skipping CTF id: %s, it is missing from input "
                    "micrographs. " % ctf.getObjId())
                continue

            micFn = mic.getFileName()
            if not os.path.exists(micFn):
                print "Skipping micrograph %s, it does not exists. " % micFn
                continue

            mic2 = mic.clone()
            mic2.setCTF(ctf)
            if hasPsd:
                # Copy the PSD next to the star file with a canonical name
                psdFile = ctf.getPsdFile()
                newPsdFile = join(psdPath, '%s_psd.mrc' % mic.getMicName())
                if not os.path.exists(psdFile):
                    print "PSD file %s does not exits" % psdFile
                    print "Skipping micrograph %s" % micFn
                    continue
                pwutils.copyFile(psdFile, newPsdFile)
                ctf.setPsdFile(newPsdFile)
            micSet.append(mic2)

        starFile = self._getPath(self.CTF_STAR_FILE % self.getObjId())
        print "Writing set: %s" % inputCTF
        print " to: %s" % starFile

        writeSetOfMicrographs(micSet,
                              starFile,
                              preprocessImageRow=self.preprocessMicrograph)

        # Let's create a link from the project roots to facilitate the import
        # of the star file into a Relion project
        pwutils.createLink(starFile, os.path.basename(starFile))
예제 #14
0
    def importMicrographs(self, pattern, suffix, voltage, sphericalAberration,
                          amplitudeContrast):
        """ Copy (or link) images matching the filename pattern into the
        project and register the acquisition parameters.

        :param pattern: glob pattern of the input files.
        :param suffix: suffix for the created SetOfMicrographs.
        :param voltage: acquisition voltage.
        :param sphericalAberration: spherical aberration.
        :param amplitudeContrast: amplitude contrast.
        :return: the created and persisted image set.
        """
        filePaths = glob(pwutils.expandPattern(pattern))

        imgSet = self._createSetOfMicrographs(suffix=suffix)
        acquisition = imgSet.getAcquisition()
        # Setting Acquisition properties
        acquisition.setVoltage(voltage)
        acquisition.setSphericalAberration(sphericalAberration)
        acquisition.setAmplitudeContrast(amplitudeContrast)

        # Call a function that should be implemented by each subclass
        self._setOtherPars(imgSet)

        img = imgSet.ITEM_TYPE()
        size = len(filePaths)

        filePaths.sort()

        for i, fn in enumerate(filePaths):
            dst = self._getExtraPath(basename(fn))
            if self.copyToProj:
                pwutils.copyFile(fn, dst)
            else:
                pwutils.createLink(fn, dst)

            # NOTE(review): the original code carried an unreachable
            # multi-image-per-file branch (its counter was hard-coded to
            # 1), an unused ImageHandler instance and an unused outFiles
            # list — all removed; one image per file is assumed, as
            # before. Wire up ImageHandler.getDimensions here if stacks
            # must be supported.
            img.cleanObjId()
            img.setFileName(dst)
            # Fill the micName if img is a Micrograph.
            self._fillMicName(img, fn, pattern)
            imgSet.append(img)

            sys.stdout.write("\rImported %d/%d" % (i + 1, size))
            sys.stdout.flush()

        print("\n")

        imgSet.write()

        return imgSet
예제 #15
0
    def createReport(self):
        fnTex = "report.tex"
        fhTex = open(self._getExtraPath(fnTex), "w")
        template = """
\\documentclass[12pt]{article}
\\usepackage{amsmath,amsthm,amssymb,amsfonts} 
\\usepackage{graphicx}
\\usepackage{pdfpages}
\\DeclareGraphicsExtensions{.pdf,.png,.jpg}
\\begin{document}
\\title{User Report}
\\author{Created by Scipion}
\\maketitle
"""
        fhTex.write(template)
        
        fnDir = self.filesPath.get()

        if not os.path.isdir(fnDir):
            fnDir = os.path.basename(fnDir)

        for fileName in sorted(glob.glob(os.path.join(fnDir, "*"))):
            fnDest = os.path.basename(fileName).lower()
            fnDest = fnDest.replace(" ", "_")
            fnDest = fnDest.replace(":", "_")
            fnDest = fnDest.replace(";", "_")
            pwutils.copyFile(fileName, self._getExtraPath(fnDest))

            if fnDest.endswith(".tex") or fnDest.endswith(".txt"):
                fhTex.write("\\input{%s}\n" % fnDest)
                fhTex.write("\n")
            elif fnDest.endswith(".png") or fnDest.endswith(".jpg"):
                fhTex.write("\\begin{center}\n")
                fhTex.write("\\includegraphics[width=14cm]{%s}\n" % fnDest)
                fhTex.write("\\end{center}\n")
                fhTex.write("\n")
            elif fnDest.endswith(".pdf"):
                fhTex.write("\\includepdf[pages=-]{%s}\n" % fnDest)
                fhTex.write("\\clearpage\n")
                fhTex.write("\n")
        
        template = """ 
\\end{document}
"""
        fhTex.write(template)
        fhTex.close()

        args = "-interaction=nonstopmode " + fnTex
        self.runJob(PDFLATEX, args, cwd=self._getExtraPath())

        fnPDF = self._getExtraPath("report.pdf")

        if os.path.exists(fnPDF):
            pwutils.moveFile(fnPDF, self._getPath("report.pdf"))
        else:
            raise Exception("PDF file was not produced.")
예제 #16
0
def copyOrLinkFileName(imgRow, prefixDir, outputDir, copyFiles=False):
    """ Ensure the image referenced by imgRow exists in outputDir
    (copied or linked from prefixDir) and rewrite the row's
    RLN_IMAGE_NAME to point at it.
    """
    index, imgPath = relionToLocation(imgRow.getValue(md.RLN_IMAGE_NAME))
    newName = os.path.join(outputDir, os.path.basename(imgPath))

    if not os.path.exists(newName):
        srcName = os.path.join(prefixDir, imgPath)
        # Copy when requested, otherwise create a link
        transfer = pwutils.copyFile if copyFiles else pwutils.createLink
        transfer(srcName, newName)

    imgRow.setValue(md.RLN_IMAGE_NAME, locationToRelion(index, newName))
    def _importFromFolderStep(self):
        """ Copy the Xmipp .pos files from the import folder into extra
        to simulate a particle picking run (testing purposes only).
        """
        extraDir = self._getExtraPath()

        # Copy every file found in the import folder
        for srcFile in pwutils.getFiles(self.importFolder.get()):
            pwutils.copyFile(srcFile, extraDir)

        self.registerCoords(extraDir, readFromExtra=True)
예제 #18
0
    def _importFromFolderStep(self):
        """ Simulate a particle picking run by copying the Xmipp .pos
        files from the import folder; intended for testing only.
        """
        extraDir = self._getExtraPath()
        importedFiles = pwutils.getFiles(self.importFolder.get())

        for fileName in importedFiles:
            pwutils.copyFile(fileName, extraDir)

        self.registerCoords(extraDir, readFromExtra=True)
예제 #19
0
 def initIterStep(self, iterN):
     """ Prepare files and directories for the current iteration.

     For iteration 1 the input 3D reference is converted to mrc and used
     to seed every class; for later iterations the merged parameter file
     is split per block and the previous iteration's volumes are reused
     as references.

     :param iterN: current iteration number (1-based).
     """
     self._createIterWorkingDir(iterN) # create the working directory for the current iteration.
     prevIter = iterN - 1
     
     if iterN==1:
         vol = self.input3DReference.get()
         
         imgFn = self._getFileName('particles')
         volFn = self._getFileName('init_vol')
         refVol = self._getFileName('ref_vol', iter=iterN) # reference volume of the step.
         #TODO check if the input is already a single mrc stack
         self.writeParticlesByMic(imgFn)
         em.ImageHandler().convert(vol.getLocation(), volFn) # convert the reference volume into a mrc volume
         copyFile(volFn, refVol)  #Copy the initial volume in the current directory.
         
     # NOTE: volFn (used below when iterN == 1) is only assigned in the
     # branch above, so both iterN == 1 paths are consistent.
     for ref in self._allRefs():
         refVol = self._getFileName('ref_vol_class', iter=iterN, ref=ref) # reference volume of the step.
         iterVol =  self._getFileName('iter_vol_class', iter=iterN, ref=ref) # refined volumes of the step
         if iterN == 1:
             copyFile(volFn, iterVol)  #Copy the initial volume in current directory.
         else:
             self._splitParFile(iterN, ref, self.cpuList[ref-1])
             prevIterVol = self._getFileName('iter_vol_class', iter=prevIter, ref=ref) # volumes of the previous iteration
             copyFile(prevIterVol, refVol)   #Copy the reference volume as refined volume.
             copyFile(refVol, iterVol)   #Copy the reference volume as refined volume.
예제 #20
0
    def classifyStep(self, imcFile, numberOfFactors, numberOfClasses):
        """ Run the classification script on the given IMC file after
        copying it into the working directory.

        Note: numberOfClasses is accepted for interface compatibility
        but not used in this step.
        """
        # Bring the file into the working directory; it could also be a link
        imcLocalFile = basename(imcFile)
        copyFile(imcFile, self._getPath(imcLocalFile))
        self.info("Copied file '%s' to '%s' " % (imcFile, imcLocalFile))

        # Spider automatically adds _IMC to the ca-pca result file
        imcBase = removeExt(imcLocalFile).replace('_IMC', '')

        self._params.update({
            'x27': numberOfFactors,
            '[cas_prefix]': imcBase,
        })
        self._updateParams()

        self.runTemplate(self.getScript(), self.getExt(), self._params)
    def _createPickingXmlFile(self, templateFile, outDir):
        """ Copy the picking xml template into *outDir* and fill the
        side and cont fields from the protocol parameters.
        """
        # Place a copy of the template in the output directory
        filename = join(outDir, basename(templateFile))
        self._xmlSlices = filename
        copyFile(templateFile, filename)

        # Parse the copy and edit the relevant elements in place
        xmlTree = ET.parse(filename)
        slices = xmlTree.getroot().findall("mb_slice")
        firstSlice = slices[0]
        firstSlice.find(SIDE).text = str(
            encodePresegArea(getattr(self, SIDE).get()))
        firstSlice.find(CONT).text = self._decodeContValue(
            getattr(self, CONT).get())

        # Persist the modified xml file
        xmlTree.write(filename, encoding='UTF-8', xml_declaration=True)
예제 #22
0
    def _loadHosts(self, hosts):
        """ Loads hosts configuration from hosts file. """
        projHosts = self.getPath(PROJECT_CONFIG, PROJECT_CONFIG_HOSTS)

        if hosts is not None:
            # An explicit hosts file was given: keep a project-local copy
            pwutils.copyFile(hosts, projHosts)
            hostsFile = hosts
        elif os.path.exists(projHosts):
            # Prefer the project's own .config/hosts.conf when present
            hostsFile = projHosts
        else:
            # Fall back to the global file plus the local user config
            localDir = os.path.dirname(os.environ['SCIPION_LOCAL_CONFIG'])
            hostsFile = [os.environ['SCIPION_HOSTS'],
                         os.path.join(localDir, 'hosts.conf')]

        self._hosts = pwconfig.loadHostsConf(hostsFile)
예제 #23
0
    def _loadProtocols(self, protocolsConf):
        """ Load protocol configuration from a .conf file. """
        projProtConf = self.getPath(PROJECT_CONFIG, PROJECT_CONFIG_PROTOCOLS)

        if protocolsConf is not None:
            # An explicit conf file was given: keep a project-local copy
            pwutils.copyFile(protocolsConf, projProtConf)
            protConf = protocolsConf
        elif os.path.exists(projProtConf):
            # Prefer the project's own .config/protocols.conf when present
            protConf = projProtConf
        else:
            # Fall back to the global file plus the local user config
            localDir = os.path.dirname(os.environ['SCIPION_LOCAL_CONFIG'])
            protConf = [os.environ['SCIPION_PROTOCOLS'],
                        os.path.join(localDir, 'protocols.conf')]

        self._protocolViews = pwconfig.loadProtocolsConf(protConf)
예제 #24
0
def runProjectionMatching(self, iterN, refN, args, **kwargs):
    """ Loop over all CTF groups and launch a projection matching for each one.
    Note: Iterate ctf groups in reverse order to have same order as 
          in add_to docfiles from angular_class_average. #FIXME: check why reverse order is needed

    NOTE: Python 2 source (print statement below).

    :param iterN: iteration number, used to resolve file names.
    :param refN: reference number, used to resolve file names.
    :param args: extra command-line arguments appended verbatim.
    :param kwargs: forwarded to self.runJob.
    """
    projMatchRootName = self._getFileName('projMatchRootNames',
                                          iter=iterN,
                                          ref=refN)
    refname = self._getFileName('projectLibraryStk', iter=iterN, ref=refN)

    numberOfCtfGroups = self.numberOfCtfGroups.get()
    #     ctfGroupName = self._getPath(self.ctfGroupDirectory, '%(ctfGroupRootName)s')
    #remove output metadata
    cleanPath(projMatchRootName)

    for ctfN in reversed(list(self.allCtfGroups())):
        self._log.info('CTF group: %d/%d' % (ctfN, numberOfCtfGroups))
        # The placeholders below (inputdocfile, outputname, refname) are
        # filled via '% locals()' when the final command line is built,
        # so the local variable names must match the format keys.
        ctfArgs = ' -i %(inputdocfile)s -o %(outputname)s --ref %(refname)s'

        inputdocfile = self._getBlockFileName(
            ctfBlockName, ctfN, self.docFileInputAngles[iterN - 1])
        outputname = self._getBlockFileName(ctfBlockName, ctfN,
                                            projMatchRootName)
        # Swap in this CTF group's sampling file under the generic name.
        baseTxtFile = removeExt(refname)
        neighbFile = baseTxtFile + '_sampling.xmd'
        cleanPath(neighbFile)
        neighbFileb = baseTxtFile + '_group' + str(ctfN).zfill(
            self.FILENAMENUMBERLENGTH) + '_sampling.xmd'
        copyFile(neighbFileb, neighbFile)
        print "copied file ", neighbFileb, "to", neighbFile

        threads = self.numberOfThreads.get()
        trhArgs = ' --mem %(mem)s --thr %(thr)s'
        thrParams = {
            'mem': self.availableMemory.get() * threads,
            'thr': threads,
        }

        # Only pass per-group CTF stacks when the reference itself was
        # CTF corrected for this iteration.
        if self.doCTFCorrection and self._referenceIsCtfCorrected[iterN]:
            ctfArgs += ' --ctf %s' % self._getBlockFileName(
                '', ctfN, self._getFileName('stackCTFs'))

        progArgs = ctfArgs % locals() + args + trhArgs % thrParams
        self.runJob('xmipp_angular_projection_matching', progArgs, **kwargs)
    def copyOrLinkBinary(self,
                         imgRow,
                         label,
                         basePath,
                         destBasePath,
                         copyFiles=False):
        """ Bring the binary referenced by imgRow[label] under destBasePath,
        preserving its relative layout, and update the row to point at the
        new location.

        :param copyFiles: when False (default) a link is created instead of
            a full copy.
        """
        index, imgPath = relionToLocation(imgRow.get(label))
        # Rebuild the relative name (directory + base) of the file.
        relName = os.path.join(os.path.dirname(imgPath),
                               os.path.basename(imgPath))
        destDir = os.path.join(destBasePath, os.path.dirname(imgPath))
        os.makedirs(destDir, exist_ok=True)
        destFile = os.path.join(destBasePath, relName)
        if not os.path.exists(destFile):
            srcFile = os.path.join(basePath, imgPath)
            if copyFiles:
                pwutils.copyFile(srcFile, destFile)
            else:
                pwutils.createLink(srcFile, destFile)

        imgRow.set(label, locationToRelion(index, destFile))
예제 #26
0
 def _mergeAllParFiles(self, iterN, ref, numberOfBlocks):
     """ Merge all per-block parameter files created in a refineIterStep
     into the single per-class output file.

     Lines starting with 'C' (comment/header rows in the .par format) are
     skipped during the merge. When there is only one block, the block
     file is copied verbatim.

     :param iterN: iteration number used to resolve file names.
     :param ref: reference/class number used to resolve file names.
     :param numberOfBlocks: number of per-block files to merge.
     """
     file2 = self._getFileName('output_par_class', iter=iterN, ref=ref)
     if numberOfBlocks != 1:
         # FIX: use context managers so the handles are closed even if an
         # error occurs mid-merge (original leaked them on exception).
         with open(file2, 'w+') as f2:
             for block in range(1, numberOfBlocks + 1):
                 file1 = self._getFileName('output_par_block_class',
                                           block=block, iter=iterN, ref=ref)
                 with open(file1) as f1:
                     for line in f1:
                         if not line.startswith('C'):
                             f2.write(line)
     else:
         file1 = self._getFileName('output_par_block_class', block=1,
                                   iter=iterN, ref=ref)
         copyFile(file1, file2)
예제 #27
0
    def createOutputStep(self, objId):
        """ Parse the copied input file and create one PKPDExperiment
        output per table found in it.

        :param objId: step input id (not referenced directly in the body).
        """
        # Keep a copy of the input file inside the run folder.
        fnFile = os.path.basename(self.inputFile.get())
        copyFile(self.inputFile.get(), self._getPath(fnFile))

        tables = self.readTables(self._getPath(fnFile))

        for tableName, sampleNames, allT, allMeasurements in tables:
            self.experiment = PKPDExperiment()
            self.experiment.general["title"] = self.title.get()
            self.experiment.general["comment"] = self.comment.get()

            # Add variables
            if not self.addVar(self.experiment, self.tVar.get()):
                raise Exception("Cannot process time variable")
            if not self.addVar(self.experiment, self.xVar.get()):
                raise Exception("Cannot process measurement variable")
            # Variable name is the first ';'-separated token.
            tvarName = self.tVar.get().split(';')[0].replace(';', '')
            xvarName = self.xVar.get().split(';')[0].replace(';', '')

            # Create the samples
            for sampleName in sampleNames:
                self.experiment.samples[sampleName] = PKPDSample()
                self.experiment.samples[sampleName].parseTokens(
                    [sampleName], self.experiment.variables,
                    self.experiment.doses, self.experiment.groups)
                self.experiment.samples[sampleName].addMeasurementPattern(
                    [sampleName, tvarName, xvarName])
                samplePtr = self.experiment.samples[sampleName]
                # HACK: exec builds the dynamically named measurement
                # attribute (measurement_<tvarName>). Acceptable only
                # because tvarName/allT come from protocol parameters,
                # not untrusted external input — TODO confirm.
                exec("samplePtr.measurement_%s=%s" % (tvarName, allT))

            # Fill the samples
            for i in range(len(allT)):
                for j in range(len(sampleNames)):
                    samplePtr = self.experiment.samples[sampleNames[j]]
                    exec('samplePtr.measurement_%s.append("%s")' %
                         (xvarName, allMeasurements[i][j]))

            self.experiment.write(
                self._getPath("experiment%s.pkpd" % tableName))
            self.experiment._printToStream(sys.stdout)
            self._defineOutputs(
                **{"outputExperiment%s" % tableName: self.experiment})
예제 #28
0
    def createFinalFilesStep(self):
        """ Collect the last-iteration model and particle data of every
        run level into single 'final' model/data star files, renaming each
        class map to its run-level id. """
        # -----metadata to save all final models-------
        finalModel = self._getFileName('finalModel')
        finalModelMd = self._getMetadata()

        # -----metadata to save all final particles-----
        finalData = self._getFileName('finalData')

        fn = self._getFileName('rawFinalData')
        print("FN: ", fn)  # NOTE(review): debug print left in place
        tableIn = Table(fileName=fn, tableName='particles')
        cols = [str(c) for c in tableIn.getColumnNames()]
        ouTable = Table(columns=cols, tableName='particles')

        for rLev in self._getRLevList():
            it = self._lastIter(rLev)
            modelFn = self._getFileName('model', iter=it,
                                        lev=self._level, rLev=rLev)
            modelMd = self._getMetadata('model_classes@' + modelFn)

            # Re-point the model's reference image at a map named by the
            # run-level id, and copy the map to that name.
            refLabel = md.RLN_MLMODEL_REF_IMAGE
            imgRow = md.getFirstRow(modelMd)
            fn = imgRow.getValue(refLabel)

            mapId = self._getRunLevId(rLev=rLev)
            newMap = self._getMapById(mapId)
            imgRow.setValue(refLabel, newMap)
            copyFile(fn, newMap)
            self._mapsDict[fn] = mapId

            imgRow.addToMd(finalModelMd)

            dataFn = self._getFileName('data', iter=it,
                                       lev=self._level, rLev=rLev)

            # Re-tag every particle of this run level with its class.
            pTable = Table()
            for row in pTable.iterRows(dataFn, tableName='particles'):
                newRow = row._replace(rlnClassNumber=rLev)
                ouTable.addRow(*newRow)

        self.writeStar(finalData, ouTable)
        finalModelMd.write('model_classes@' + finalModel)
예제 #29
0
    def createOutputStep(self, atomStructPaths):
        """ Copy each atomic structure into the extra folder and register
        the resulting set as the protocol output.

        :param atomStructPaths: iterable of paths to structure files.
        :raises Exception: if any of the paths does not exist.
        """
        outputSet = emobj.SetOfAtomStructs().create(self._getPath())
        for srcPath in atomStructPaths:
            if not exists(srcPath):
                raise Exception("Atomic structure not found at *%s*" %
                                srcPath)

            dstPath = abspath(self._getExtraPath(basename(srcPath)))

            # Copy only when the file is not already at its destination.
            if str(srcPath) != str(dstPath):
                pwutils.copyFile(srcPath, dstPath)

            outputSet.append(emobj.AtomStruct(filename=relpath(dstPath)))

        self._defineOutputs(**{self._OUTNAME: outputSet})
예제 #30
0
def runFilterVolumeStep(self, iterN, refN, constantToAddToFiltration):
    """ Low-pass filter the reconstructed volume of (iterN, refN), or just
    copy it unchanged when the cutoff is above 0.5 (Nyquist).

    NOTE(review): 'filterInPxAt' is only assigned inside the
    'if self.useFscForFilter:' branch — when that flag is False the
    comparison below raises NameError. Confirm the intended fallback
    before relying on this path.
    NOTE: Python 2 source (print statement).
    """
    reconstructedVolume = self._getFileName('reconstructedFileNamesIters', iter=iterN, ref=refN)
    reconstructedFilteredVolume = self.reconstructedFilteredFileNamesIters[iterN][refN]
    if self.useFscForFilter:
        # -1 marks 'not set': derive the cutoff from the FSC resolution.
        if self._fourierMaxFrequencyOfInterest[iterN+1] == -1:
            fourierMaxFrequencyOfInterest = self.resolSam / self._getFourierMaxFrequencyOfInterest(iterN, refN)
            print "el valor de la resolucion es :", self._getFourierMaxFrequencyOfInterest(iterN, refN)
            filterInPxAt = fourierMaxFrequencyOfInterest + constantToAddToFiltration
        else:
            filterInPxAt = constantToAddToFiltration

    # Cutoffs beyond 0.5 mean no filtering is applicable: copy as-is.
    if (filterInPxAt > 0.5):
        copyFile(reconstructedVolume, reconstructedFilteredVolume)
    else:
        args = ' -i %(volume)s -o %(filteredVol)s --fourier low_pass %(filter)s'
        params = {'volume': reconstructedVolume,
                  'filteredVol': reconstructedFilteredVolume,
                  'filter' : filterInPxAt
                  }
        self.runJob("xmipp_transform_filter", args % params)
예제 #31
0
    def mergeClassesStep(self):
        """ Produce the final data/model star files: either regroup the
        classes by clustering the final maps (doGrouping) or simply copy
        the raw final files. """
        if self.doGrouping:
            from cryomethods.functions import NumpyImgHandler
            npIh = NumpyImgHandler()
            makePath(self._getLevelPath(self._level))
            listVol = self._getFinalMaps()
            matrix = npIh.getAllNpList(listVol, 2)
            # labels[k] is the cluster assigned to original class k+1.
            labels = self._clusteringData(matrix)

            clsChange = 0
            prevStar = self._getFileName('rawFinalData')
            pTable = Table()
            origStar = self._getFileName('input_star', lev=1, rLev=1)
            opticsTable = Table(fileName=origStar, tableName='optics')
            print("OPTABLE: ", origStar, opticsTable.size())
            # Rows are iterated sorted by rlnClassNumber; each time the
            # remapped class changes, the accumulated rows are flushed to
            # a per-class star file and a fresh table is started.
            # NOTE(review): 'fn'/'ouTable' stay unbound if the particles
            # table is empty (final writeStar would raise NameError), and
            # two non-adjacent original classes mapping to the same new
            # class would overwrite the earlier file — confirm clustering
            # guarantees contiguous runs.
            for row in pTable.iterRows(prevStar, key="rlnClassNumber",
                                       tableName='particles'):
                clsPart = row.rlnClassNumber
                newClass = labels[clsPart - 1] + 1
                newRow = row._replace(rlnClassNumber=newClass)

                if not newClass == clsChange:
                    if not clsChange == 0:
                        self.writeStar(fn, ouTable, opticsTable)
                    clsChange = newClass
                    fn = self._getFileName('input_star', lev=self._level,
                                           rLev=newClass)
                    tableIn = Table(fileName=prevStar, tableName='particles')
                    cols = [str(c) for c in tableIn.getColumnNames()]
                    ouTable = Table(columns=cols, tableName='particles')
                ouTable.addRow(*newRow)
            print("mergeClassesStep ouTable.size: ", ouTable.size())
            # Flush the last accumulated class.
            self.writeStar(fn, ouTable, opticsTable)

        else:
            # No regrouping: final files are the raw ones, copied.
            prevData = self._getFileName('rawFinalData')
            finalData = self._getFileName('finalData')
            prevModel = self._getFileName('rawFinalModel')
            finalModel = self._getFileName('finalModel')
            copyFile(prevData, finalData)
            copyFile(prevModel, finalModel)
    def alignParticlesStep(self, innerRadius, outerRadius):
        """ Run the pairwise.msa alignment script on the input particles.

        :param innerRadius: inner alignment radius passed to the script.
        :param outerRadius: outer alignment radius passed to the script.
        """
        xdim = self.inputParticles.get().getDimensions()[0]

        scriptParams = {
            '[idim-header]': xdim,
            '[cg-option]': self.cgOption.get(),
            '[inner-rad]': innerRadius,
            '[outer-rad]': outerRadius,
            '[search-range]': self.searchRange.get(),
            '[step-size]': self.stepSize.get(),
            '[selection_list]': self._params['particlesSel'],
            '[unaligned_image]': self._params['particles'] + '@******',
            '[nummps]': self.numberOfThreads.get(),
        }
        self._params.update(scriptParams)

        # Keep a project-local copy of the script template, then run it.
        scriptTemplate = Plugin.getScript('mda/center1.msa')
        localScript = pwutils.replaceBaseExt(scriptTemplate, self.getExt())
        pwutils.copyFile(scriptTemplate, self._getPath(localScript))
        self.runTemplate(self.getScript(), self.getExt(), self._params)
    def testAffinityProp(self):
        """ Cluster the volumes with affinity propagation, copy each volume
        into the MAPS directory, and write per-cluster chimera command
        lines plus an average map per class. """
        import os
        from cryomethods.functions import MlMethods, NumpyImgHandler
        Plugin.setEnviron()
        volList = self._getVolList()
        ml = MlMethods()
        npIh = NumpyImgHandler()

        dictNames = {}
        groupDict = {}
        matrix = npIh.getAllNpList(volList, 2)

        labels = ml.doSklearnAffProp(matrix)
        print(labels)

        if labels is not None:
            # FIX: the original concatenated '...MAPS' + basename(vol)
            # without a path separator, writing 'MAPS<name>' files into
            # the parent directory instead of inside MAPS/.
            destDir = ('/home/josuegbl/PROCESSING/TESLA/projects/'
                       'RNC_HTLnd2/MAPS')
            for vol, label in zip(volList, labels):
                dictNames[vol] = label
                copyFile(vol, os.path.join(destDir, basename(vol)))
            # Invert the mapping: cluster label -> list of volumes.
            for key, value in sorted(dictNames.items()):
                groupDict.setdefault(value, []).append(key)

            # FIX: context manager guarantees the file is closed even if
            # averaging fails (original left the handle open on error).
            with open('volumes_clustered.txt', 'w') as f:
                counter = 0
                for value in groupDict.values():
                    f.write('chimera %s\n' % ' '.join(value))
                    counter += 1
                    avgFn = 'map_average_class_%02d.mrc' % counter
                    avgNp, _ = npIh.getAverageMap(value)
                    npIh.saveMrc(avgNp, avgFn)
예제 #34
0
    def test_sidesplitter(self):
        """ Run the SIDESPLITTER protocol after a faked relion auto-refine:
        the refine protocol's expected output files are replaced with the
        test volumes, its output objects are patched accordingly, then the
        sidesplitter protocol is launched and its output validated. """
        protRef, protMask = self._createRef3DProtBox("auto-refine",
                                                     ProtRelionRefine3D)
        protRef._createFilenameTemplates()
        # Drop any ':ext'-style suffix from the template file names.
        volPath = protRef._getFileName('finalvolume', ref3d=1).split(':')[0]
        volHalf1 = protRef._getFileName('final_half1_volume',
                                        ref3d=1).split(':')[0]
        volHalf2 = protRef._getFileName('final_half2_volume',
                                        ref3d=1).split(':')[0]

        # Place the test data where the refine protocol would have
        # written its results.
        copyFile(self.volFn, volPath)
        copyFile(self.half1Fn, volHalf1)
        copyFile(self.half2Fn, volHalf2)

        protRef.outputVolume.setFileName(volPath)
        protRef.outputVolume.setHalfMaps([volHalf1, volHalf2])
        project = protRef.getProject()
        project._storeProtocol(protRef)

        print(magentaStr("\n==> Testing sidesplitter - after refine 3d:"))
        sidesplitterProt = self.newProtocol(ProtSideSplitter,
                                            protRefine=protRef,
                                            mask=protMask.outputMask)
        sidesplitterProt.setObjLabel('sidesplitter after Auto-refine')

        self.launchProtocol(sidesplitterProt)
        self._validations(sidesplitterProt.outputVolume1, 60, 3)
예제 #35
0
 def alignParticlesStep(self, innerRadius, outerRadius):
     """ Execute the pairwise.msa script to align the particles.

     :param innerRadius: inner alignment radius passed to the script.
     :param outerRadius: outer alignment radius passed to the script.
     """
     particles = self.inputParticles.get()
     # Image x-dimension, used to fill the script header.
     xdim = particles.getDimensions()[0]
     
     self._params.update({
                          '[idim-header]': xdim,
                          '[cg-option]': self.cgOption.get(),
                          '[inner-rad]': innerRadius,
                          '[outer-rad]': outerRadius,
                          '[search-range]': self.searchRange.get(),
                          '[step-size]': self.stepSize.get(),
                          '[selection_list]': self._params['particlesSel'],
                          '[unaligned_image]': self._params['particles'] + '@******',
                          '[nummps]': self.numberOfThreads.get(),
                         })
     
     # Copy the script template into the run folder, then execute it.
     copy1Script = getScript('mda/center1.msa')
     newScript = pwutils.replaceBaseExt(copy1Script, self.getExt())
     pwutils.copyFile(copy1Script, self._getPath(newScript))
     self.runTemplate(self.getScript(), self.getExt(), self._params)
    def classifyStep(self, imcFile, numberOfFactors, numberOfClasses):
        """ Run the classification script on the CA/PCA result file.

        The IMC/SEQ file is first copied into the working directory (it
        may be a link elsewhere); numberOfClasses is not referenced here.
        """
        localName = basename(imcFile)
        copyFile(imcFile, self._getPath(localName))
        self.info("Copied file '%s' to '%s' " % (imcFile, localName))

        # Spider appends _IMC (or _SEQ) to the ca-pca result name; the
        # kmeans.msa script was modified so it no longer adds the suffix
        # itself, hence we pass both the full base name and the
        # suffix-stripped prefix.
        casFile = removeExt(localName)
        casPrefix = casFile.replace('_IMC', '').replace('_SEQ', '')

        self._params.update({
            'x27': numberOfFactors,
            '[cas_prefix]': casPrefix,
            '[cas_file]': casFile,
        })
        self._updateParams()

        self.runTemplate(self.getScript(), self.getExt(), self._params)
예제 #37
0
    def deformStep(self, inputVolFn, refVolFn, i, j, step_id):
        """ Deform volume i onto volume j with xmipp and record their
        correlation.

        Volume selection: for index 0 the original (extra-path) volume is
        used; otherwise the previously aligned volume from tmp. Per-step
        copies named by step_id avoid clashes between concurrent steps and
        are cleaned up at the end.

        :param inputVolFn: file name of the volume to deform.
        :param refVolFn: file name of the reference volume.
        :param i: index of the input volume.
        :param j: index of the reference volume.
        :param step_id: unique id used to name the temporary copies.
        """
        if j == 0:
            refVolFn_aux = self._getExtraPath(
                os.path.basename(
                    os.path.splitext(refVolFn)[0] + self.OUTPUT_SUFFIX %
                    (j + 1)))
        else:
            refVolFn_aux = self._getTmpPath(self.ALIGNED_VOL % (j + 1))
        if i == 0:
            fnOut_aux = self._getExtraPath(
                os.path.basename(
                    os.path.splitext(inputVolFn)[0] + self.OUTPUT_SUFFIX %
                    (i + 1)))
        else:
            fnOut_aux = self._getTmpPath(self.ALIGNED_VOL % (i + 1))
        # Work on per-step copies so parallel steps cannot overwrite each
        # other's inputs.
        refVolFn = self._getTmpPath("reference_%d.mrc" % step_id)
        fnOut = self._getTmpPath("input_%d.mrc" % step_id)
        copyFile(refVolFn_aux, refVolFn)
        copyFile(fnOut_aux, fnOut)
        fnOut2 = self._getTmpPath('vol%dDeformedTo%d.mrc' % (i + 1, j + 1))

        params = ' -i %s -r %s -o %s --l1 %d --l2 %d --sigma "%s" --oroot %s --regularization %f' %\
                 (fnOut, refVolFn, fnOut2, self.l1.get(), self.l2.get(), self.sigma.get(),
                  self._getExtraPath('Pair_%d_%d' % (i, j)), self.penalization.get())
        if self.newRmax != 0:
            params = params + ' --Rmax %d' % self.newRmax

        # GPU and CPU variants of the same xmipp program.
        if self.useGpu.get():
            self.runJob("xmipp_cuda_volume_deform_sph", params)
        else:
            params = params + ' --thr 1'
            self.runJob("xmipp_volume_deform_sph", params)

        self.computeCorr(fnOut2, refVolFn, i, j)

        # Remove the per-step temporaries.
        cleanPath(fnOut)
        cleanPath(refVolFn)
        cleanPath(fnOut2)
    def classifyStep(self, imcFile, numberOfFactors, numberOfClasses):
        """ Apply the selected filter to particles. 
        Create the set of particles.

        :param imcFile: CA/PCA result file (_IMC or _SEQ suffix).
        :param numberOfFactors: factors to use (script parameter x27).
        :param numberOfClasses: not referenced in this body — presumably
            consumed via self._params/_updateParams; TODO confirm.
        """
        # Copy file to working directory, it could be also a link
        imcLocalFile = basename(imcFile)
        copyFile(imcFile, self._getPath(imcLocalFile))
        self.info("Copied file '%s' to '%s' " % (imcFile, imcLocalFile))
        # Spider automatically add _IMC to the ca-pca result file
        # JMRT: I have modify the kmeans.msa script to not add _IMC
        # automatically, it can be also used with _SEQ suffix,
        # so we will pass the whole cas_file
        imcBase = removeExt(imcLocalFile)#.replace('_IMC', '')
        imcPrefix = imcBase.replace('_IMC', '').replace('_SEQ', '')

        # x30 carries the thread count to the script.
        self._params.update({'x27': numberOfFactors,
                             'x30': self.numberOfThreads.get(),
                             '[cas_prefix]': imcPrefix,
                             '[cas_file]': imcBase,
                             })
        self._updateParams()

        self.runTemplate(self.getScript(), self.getExt(), self._params)
    def _splitParFile(self, iterN, ref, numberOfBlocks):
        """ Split the merged parameter file of the previous iteration back
        into per-block input files for this iteration.

        Each block file receives every 'C' header/comment line plus the
        particle rows up to that block's last particle number (rows are
        assumed ordered by particle number, as implied by the early
        break). With a single block the previous file is just copied.
        """
        prevIter = iterN - 1
        file1 = self._getFileName('output_par_class', iter=prevIter, ref=ref)
        if numberOfBlocks != 1:
            for block in range(1, numberOfBlocks + 1):
                file2 = self._getFileName('input_par_block_class',
                                          prevIter=prevIter,
                                          iter=iterN,
                                          ref=ref,
                                          block=block)
                _, finalPart = self._particlesInBlock(block, numberOfBlocks)

                # FIX: context managers close both files even if parsing
                # fails mid-way (original leaked handles on exception).
                with open(file1) as f1, open(file2, 'w+') as f2:
                    for line in f1:
                        if line.startswith('C'):
                            f2.write(line)
                        else:
                            # First whitespace token is the particle
                            # number; stop once past this block's range.
                            numPart = int(''.join(line.split()[:1]))
                            if numPart <= finalPart:
                                f2.write(line)
                            else:
                                break
        else:
            file2 = self._getFileName('input_par_block_class',
                                      prevIter=prevIter,
                                      iter=iterN,
                                      ref=ref,
                                      block=1)
            copyFile(file1, file2)
예제 #40
0
File: masks.py  Project: coocoky/scipion
    def importMaskStep(self, path, samplingRate):
        """ Copy mask from maskPath.

        :param path: source image file for the mask.
        :param samplingRate: sampling rate to assign to the mask.
        """
        self.info("Using mask path: '%s'" % path)

        # Copy the image file into the project
        dst = self._getExtraPath(basename(path))
        pwutils.copyFile(path, dst)

        # Retrieve image dimensions
        imgh = ImageHandler()
        _, _, z, n = imgh.getDimensions(dst)

        # Create a 2D or 3D Mask, consider the case of n>1
        # as the case of some volume maps in mrc format
        if z > 1 or n > 1:
            mask = VolumeMask()
        else:
            mask = Mask()

        mask.setFileName(dst)
        mask.setSamplingRate(samplingRate)

        self._defineOutputs(outputMask=mask)
예제 #41
0
    def generateReportImages(self, firstThumbIndex=0, micScaleFactor=6):
        """ Function to generate thumbnails for the report. Uses data from
        self.thumbPaths.

        ===== Params =====
        - firstThumbIndex: index from which we start generating thumbnails
        - micScaleFactor: how much to reduce in size the micrographs.

        """
        ih = ImageHandler()

        numMics = len(self.thumbPaths[MIC_PATH])

        for i in range(firstThumbIndex, numMics):
            print('Generating images for mic %d' % (i+1))
            # mic thumbnails: skip work when the file already exists
            # (supports incremental report regeneration).
            dstImgPath = join(self.reportDir, self.thumbPaths[MIC_THUMBS][i])
            if not exists(dstImgPath):
                if self.micThumbSymlinks:
                    pwutils.copyFile(self.thumbPaths[MIC_PATH][i], dstImgPath)
                else:
                    ih.computeThumbnail(self.thumbPaths[MIC_PATH][i],
                                        dstImgPath, scaleFactor=micScaleFactor,
                                        flipOnY=True)

            # shift plots are copied verbatim (already images).
            if SHIFT_THUMBS in self.thumbPaths:
                dstImgPath = join(self.reportDir, self.thumbPaths[SHIFT_THUMBS][i])
                if not exists(dstImgPath):
                    pwutils.copyFile(self.thumbPaths[SHIFT_PATH][i], dstImgPath)

            # PSD thumbnails: only generated when no CTF protocol has
            # already produced them; raw '.psd' files need a convertPSD
            # pass before thumbnailing.
            if PSD_THUMBS in self.thumbPaths:
                if self.ctfProtocol is None:
                    srcImgPath = self.thumbPaths[PSD_PATH][i]
                    dstImgPath = join(self.reportDir, self.thumbPaths[PSD_THUMBS][i])
                    if not exists(dstImgPath) and srcImgPath is not None:
                        if srcImgPath.endswith('psd'):
                            psdImg1 = ih.read(srcImgPath)
                            psdImg1.convertPSD()
                            psdImg1.write(dstImgPath)
                            ih.computeThumbnail(dstImgPath, dstImgPath,
                                                scaleFactor=1, flipOnY=True)
                        else:
                            pwutils.copyFile(srcImgPath, dstImgPath)
                else:
                    dstImgPath = join(self.reportDir, self.thumbPaths[PSD_THUMBS][i])
                    if not exists(dstImgPath):
                        ih.computeThumbnail(self.thumbPaths[PSD_PATH][i],
                                            dstImgPath, scaleFactor=1, flipOnY=True)

        return
예제 #42
0
def create_service_project(request):
    """ AJAX view that creates a pre-configured 'my first map' service
    project: import averages, three initial-volume estimators (ransac,
    eman, significant), an align-volumes join and a validation protocol.

    When 'testData' is supplied in the query string, the matching dataset
    averages are imported immediately; otherwise the import protocol is
    only saved for the user to complete. Returns an empty javascript
    HTTP response either way.
    """
    if request.is_ajax():  # NOTE(review): is_ajax() removed in Django >= 4 — confirm target version
        import os
        from pyworkflow.object import Pointer
        from pyworkflow.em.protocol import ProtUnionSet, ProtImportAverages
        from pyworkflow.em.packages.xmipp3 import XmippProtRansac, XmippProtReconstructSignificant, XmippProtAlignVolumeForWeb
        from pyworkflow.em.packages.eman2 import EmanProtInitModel
        from pyworkflow.em.packages.simple import ProtPrime

        # Create a new project
        projectName = request.GET.get('projectName')

        # Filename to use as test data 
        testDataKey = request.GET.get('testData')

        #customMenu = os.path.join(os.path.dirname(os.environ['SCIPION_PROTOCOLS']), 'menu_initvolume.conf')

        manager = getServiceManager('myfirstmap')
        writeCustomMenu(manager.protocols)
        project = manager.createProject(projectName, runsView=1, 
                                        hostsConf=manager.hosts,
                                        protocolsConf=manager.protocols
                                        ) 

        # Service projects are temporary: expire after 14 days.
        project.getSettings().setLifeTime(14)
        project.saveSettings()
        #copyFile(customMenu, project.getPath('.config', 'protocols.conf'))

        # 1. Import averages

        # If using test data execute the import averages run
        # options are set in 'project_utils.js'
        dsMDA = DataSet.getDataSet('initial_volume')

        if testDataKey :
            fn = dsMDA.getFile(testDataKey)
            newFn = join(project.uploadPath, basename(fn))
            copyFile(fn, newFn)

            label_import = 'import averages ('+ testDataKey +')'
            protImport = project.newProtocol(ProtImportAverages, objLabel=label_import)
            protImport.filesPath.set(newFn)
            protImport.samplingRate.set(1.)
            # Run synchronously so its output can feed the next protocols.
            project.launchProtocol(protImport, wait=True)
        else:
            protImport = project.newProtocol(ProtImportAverages, objLabel='import averages')
            project.saveProtocol(protImport)


        # 2a. Ransac 
        protRansac = project.newProtocol(XmippProtRansac)
        protRansac.setObjLabel('xmipp - ransac')
        protRansac.inputSet.set(protImport)
        protRansac.inputSet.setExtended('outputAverages')
        if testDataKey :
            setProtocolParams(protRansac, testDataKey)
        project.saveProtocol(protRansac)

        # 2b. Eman 
        protEmanInitVol = project.newProtocol(EmanProtInitModel)
        protEmanInitVol.setObjLabel('eman - initial vol')
        protEmanInitVol.inputSet.set(protImport)
        protEmanInitVol.inputSet.setExtended('outputAverages')
        if testDataKey :
            setProtocolParams(protEmanInitVol, testDataKey)
        project.saveProtocol(protEmanInitVol)

        # 2c. Significant 
        protSignificant = project.newProtocol(XmippProtReconstructSignificant)
        protSignificant.setObjLabel('xmipp - significant')
        protSignificant.inputSet.set(protImport)
        protSignificant.inputSet.setExtended('outputAverages')
        if testDataKey :
            setProtocolParams(protSignificant, testDataKey)
        project.saveProtocol(protSignificant)

        # 3. Join result volumes
        # Pointers with extended attributes defer resolution of each
        # protocol's output object until it actually exists.
        p1 = Pointer()
        p1.set(protRansac)
        p1.setExtended('outputVolumes')

        p2 = Pointer()
        p2.set(protEmanInitVol)
        p2.setExtended('outputVolumes')

        p3 = Pointer()
        p3.set(protSignificant)
        p3.setExtended('outputVolume')

        protJoin = project.newProtocol(XmippProtAlignVolumeForWeb)
        protJoin.setObjLabel('align volumes')
        protJoin.inputVolumes.append(p1)
        protJoin.inputVolumes.append(p2)
        protJoin.inputVolumes.append(p3)
#         protJoin.inputVolumes.append(p4)
        project.saveProtocol(protJoin)

        # NOTE(review): XmippProtValidateNonTilt is not among the local
        # imports above — presumably imported at module level; confirm.
        protValidate = project.newProtocol(XmippProtValidateNonTilt)
        protValidate.setObjLabel('validate nontilt')
        protValidate.inputVolumes.set(protJoin)
        protValidate.inputVolumes.setExtended('outputVolumes')
        protValidate.inputParticles.set(protImport)
        protValidate.inputParticles.setExtended('outputAverages')
        protValidate.numberOfThreads.set(8)
        if testDataKey :
            setProtocolParams(protValidate, testDataKey)
#         protJoin.inputVolumes.append(p4)
        project.saveProtocol(protValidate)


    return HttpResponse(mimetype='application/javascript')
    def createOutputStep(self, objId):
        """ Build a PKPDExperiment from the protocol's text parameters
        (variables, vias, doses, dose-to-sample assignments) plus the
        copied input file, and register it as output.

        Unparseable lines are reported and skipped; if anything failed,
        no output is produced.

        :param objId: step input id (not referenced directly in the body).
        """
        # Keep a copy of the input file inside the run folder.
        fnFile = os.path.basename(self.inputFile.get())
        copyFile(self.inputFile.get(),self._getPath(fnFile))

        self.experiment = PKPDExperiment()
        self.experiment.general["title"]=self.title.get()
        self.experiment.general["comment"]=self.comment.get()

        ok = True

        # Read the variables
        # Multi-line params use ';;' as record separator, ';' for fields.
        self.listOfVariables = []
        for line in self.variables.get().replace('\n',';;').split(';;'):
            tokens = line.split(';')
            if len(tokens)!=5:
                print("Skipping variable: ",line)
                ok = False
                continue
            varname = tokens[0].strip()
            self.listOfVariables.append(varname)
            self.experiment.variables[varname] = PKPDVariable()
            self.experiment.variables[varname].parseTokens(tokens)

        # Read vias
        for line in self.vias.get().replace('\n',';;').split(';;'):
            if line!="":
                tokens = line.split(';')
                if len(tokens)<2:
                    print("Skipping via: ",line)
                    ok = False
                    continue
                vianame = tokens[0].strip()
                self.experiment.vias[vianame] = PKPDVia()
                self.experiment.vias[vianame].parseTokens(tokens)

        # Read the doses
        for line in self.doses.get().replace('\n',';;').split(';;'):
            if line!="":
                tokens = line.split(';')
                if len(tokens)<5:
                    print("Skipping dose: ",line)
                    ok = False
                    continue
                dosename = tokens[0].strip()
                self.experiment.doses[dosename] = PKPDDose()
                self.experiment.doses[dosename].parseTokens(tokens,self.experiment.vias)

        # Read the sample doses
        for line in self.dosesToSamples.get().replace('\n',';;').split(';;'):
            try:
                tokens = line.split(';')
                samplename = tokens[0].strip()
                # Second token, when present, is the dose assignment.
                if len(tokens)>1:
                    tokens[1]="dose="+tokens[1]
                self.experiment.samples[samplename] = PKPDSample()
                self.experiment.samples[samplename].parseTokens(tokens, self.experiment.variables, self.experiment.doses,
                                                                self.experiment.groups)
            except Exception as e:
                ok = False
                print("Problem with line: ",line,str(e))

        if ok:
            # Read the measurements
            self.readTextFile()
            self.experiment.write(self._getPath("experiment.pkpd"))
            self.experiment._printToStream(sys.stdout)
            self._defineOutputs(outputExperiment=self.experiment)
예제 #44
0
    def test_SqliteMapper(self):
        """Round-trip a variety of object types through SqliteMapper.

        Inserts scalars, a Complex, Pointers, a CsvList, plain/pointer
        lists and relations into a fresh sqlite db, commits, then re-reads
        them with a second mapper and asserts equality.  Also checks that
        a legacy (version 0) gold database is read correctly and is
        upgraded to schema version 1 (Relations table gains the
        object_parent_extended / object_child_extended columns).
        """
        fn = self.getOutputPath("basic.sqlite")
        fnGoldCopy = self.getOutputPath('gold.sqlite')
        fnGold = self.modelGoldSqlite

        # Python-3 print() calls; the file's other blocks already use them.
        print(">>> Using db: ", fn)
        print("        gold: ", fnGold)
        print("   gold copy: ", fnGoldCopy)

        # Work on a copy so the pristine gold db keeps schema version 0.
        pwutils.copyFile(fnGold, fnGoldCopy)

        mapper = SqliteMapper(fn)
        # Insert a Complex
        c = Complex.createComplex()  # real = 1, imag = 1
        mapper.insert(c)
        # Insert an Integer
        i = Integer(1)
        mapper.insert(i)
        # Insert two Boolean
        b = Boolean(False)
        b2 = Boolean(True)
        mapper.insert(b)
        mapper.insert(b2)
        # Test storing pointers
        p = Pointer()
        p.set(c)
        mapper.insert(p)

        # Store csv list
        strList = ['1', '2', '3']
        csv = CsvList()
        csv += strList
        mapper.insert(csv)

        # Test normal List: insert while empty, then update with items,
        # to exercise both code paths in the mapper.
        iList = List()
        mapper.insert(iList)  # Insert the list when empty

        i1 = Integer(4)
        i2 = Integer(3)
        iList.append(i1)
        iList.append(i2)

        mapper.update(iList)  # now update with some items inside

        pList = PointerList()
        p1 = Pointer()
        p1.set(b)
        p2 = Pointer()
        p2.set(b2)
        pList.append(p1)
        pList.append(p2)

        mapper.store(pList)

        # Test to add relations
        relName = 'testRelation'
        creator = c
        mapper.insertRelation(relName, creator, i, b)
        mapper.insertRelation(relName, creator, i, b2)

        mapper.insertRelation(relName, creator, b, p)
        mapper.insertRelation(relName, creator, b2, p)

        # Save changes to file; a freshly created db must be at version 1.
        mapper.commit()
        self.assertEqual(1, mapper.db.getVersion())

        # Intentionally keep gold.sqlite as version 0 to check
        # backward compatibility
        db = SqliteDb()
        db._createConnection(fnGold, timeout=1000)
        print("Checking old version is properly read")
        self.assertEqual(0, db.getVersion())
        colNamesGold = [u'id', u'parent_id', u'name', u'classname',
                        u'value', u'label', u'comment', u'object_parent_id',
                        u'object_child_id', u'creation']
        colNames = [col[1] for col in db.getTableColumns('Relations')]
        self.assertEqual(colNamesGold, colNames)

        # Reading test: opening the copy with a mapper should upgrade it.
        mapper2 = SqliteMapper(fnGoldCopy, globals())
        print("Checking that Relations table is updated and version to 1")
        self.assertEqual(1, mapper2.db.getVersion())
        # Check that the new column is properly added after updated to version 1
        colNamesGold += [u'object_parent_extended', u'object_child_extended']
        colNames = [col[1] for col in mapper2.db.getTableColumns('Relations')]
        self.assertEqual(colNamesGold, colNames)

        # 'firstInteger' instead of ambiguous name 'l' (PEP 8 E741).
        firstInteger = mapper2.selectByClass('Integer')[0]
        self.assertEqual(firstInteger.get(), 1)

        c2 = mapper2.selectByClass('Complex')[0]
        self.assertTrue(c.equalAttributes(c2))

        b = mapper2.selectByClass('Boolean')[0]
        self.assertTrue(not b.get())

        p = mapper2.selectByClass('Pointer')[0]
        self.assertEqual(c, p.get())

        csv2 = mapper2.selectByClass('CsvList')[0]
        self.assertTrue(list.__eq__(csv2, strList))

        # Iterate over all objects, both eagerly and lazily.
        allObj = mapper2.selectAll()
        iterAllObj = mapper2.selectAll(iterate=True)

        for a1, a2 in zip(allObj, iterAllObj):
            # Note compare the scalar objects, which have a well-defined comparison
            if isinstance(a1, Scalar):
                self.assertEqual(a1, a2)

        # Test relations
        childs = mapper2.getRelationChilds(relName, i)
        parents = mapper2.getRelationParents(relName, p)
        # In this case both childs and parents should be the same;
        # distinct loop names avoid clobbering the earlier c / p objects.
        for child, parent in zip(childs, parents):
            self.assertEqual(child, parent,
                             "Childs of object i, should be the parents of object p")

        relations = mapper2.getRelationsByCreator(creator)
        for row in relations:
            print(row)