def testManagedPolicy(self):
    """A conary-managed group policy that reports errors must raise PolicyError."""
    self.addComponent('foo:doc', '1.0.0', filePrimer = 1)
    self.addComponent('foo:runtime', '1.0.0', filePrimer = 2)
    self.addCollection('foo', '1.0.0', [':doc', ':runtime'])
    policyStr = """
from conary.build import policy

class AlwaysError(policy.GroupEnforcementPolicy):
    def doProcess(self, recipe):
        self.recipe.reportErrors("Automatic error")
"""
    policyPath = os.path.join(self.cfg.root, 'policy', 'errpolicy.py')
    # we're effectively creating /tmp/_/root/tmp/_/root/policy/...
    # we're doing this so that the system db in /tmp/_/root will
    # match the absolute path of the actual policy file.
    self.addComponent('errorpolicy:runtime',
            fileContents = [(policyPath, policyStr)])
    self.updatePkg('errorpolicy:runtime')
    try:
        policyDirs = self.cfg.policyDirs
        self.cfg.policyDirs = [os.path.dirname(policyPath)]
        enforceManagedPolicy = self.cfg.enforceManagedPolicy
        self.cfg.enforceManagedPolicy = True
        util.mkdirChain(os.path.dirname(policyPath))
        f = open(policyPath, 'w')
        f.write(policyStr)
        f.close()
        # policy is managed (owned by errorpolicy:runtime), so it runs and
        # its reported error surfaces as a PolicyError
        self.assertRaises(policy.PolicyError, self.build,
                simpleGroupRecipe, 'GroupSimpleAdd')
    finally:
        self.cfg.enforceManagedPolicy = enforceManagedPolicy
        self.cfg.policyDirs = policyDirs
        util.rmtree(os.path.dirname(policyPath))
def testMissingKey(self):
    """extractPublicKeys must raise RuntimeError when signature keys are missing."""
    DummyRepos.findTrove = lambda *args, **kwargs: (('', '', ''),)
    d = DummyIso()
    csdir = tempfile.mkdtemp()
    logFd, logFile = tempfile.mkstemp()
    # redirect stderr into the temp log so expected error output stays quiet
    oldErr = os.dup(sys.stderr.fileno())
    os.dup2(logFd, sys.stderr.fileno())
    os.close(logFd)
    ChangeSetFromFile = changeset.ChangeSetFromFile
    Trove = trove.Trove
    try:
        f = open(os.path.join(csdir, 'test.ccs'), 'w')
        f.write('')
        f.close()
        changeset.ChangeSetFromFile = DummyChangeSet
        trove.Trove = DummyTrove
        try:
            d.extractPublicKeys('', '', csdir)
        except RuntimeError:
            pass
        else:
            self.fail('Missing keys did not raise runtime error')
    finally:
        # restore patched module globals and the real stderr
        trove.Trove = Trove
        changeset.ChangeSetFromFile = ChangeSetFromFile
        os.dup2(oldErr, sys.stderr.fileno())
        os.close(oldErr)
        util.rmtree(csdir)
        util.rmtree(logFile)
def testPreferXZoverUNLZMA(self):
    """util.LZMAFile should prefer xz when both xz and unlzma are on PATH."""
    # CNY-3231
    # Make sure if both xz and unlzma are present, that we prefer xz
    workDir = tempfile.mkdtemp(prefix="utiltest-")
    oldPath = os.getenv('PATH')
    xzPath = os.path.join(workDir, "xz")
    unlzmaPath = os.path.join(workDir, "unlzma")
    dumbFilePath = os.path.join(workDir, "some-file")
    # both fake decompressors simply cat their input through
    scriptContents = "#!/bin/bash\n\n/bin/cat"
    file(xzPath, "w").write(scriptContents)
    file(unlzmaPath, "w").write(scriptContents)
    data = "Feed dog to cat"
    file(dumbFilePath, "w").write(data)
    os.chmod(xzPath, 0755)
    os.chmod(unlzmaPath, 0755)
    try:
        os.environ['PATH'] = workDir
        decompressor = util.LZMAFile(file(dumbFilePath))
        self.assertEqual(decompressor.read(), data)
        decompressor.close()
        # Make sure we prefer xz over unlzma
        self.assertEqual(decompressor.executable, xzPath)
        # But if xz is not available, we can use unlzma
        os.unlink(xzPath)
        decompressor = util.LZMAFile(file(dumbFilePath))
        self.assertEqual(decompressor.read(), data)
        decompressor.close()
        self.assertEqual(decompressor.executable, unlzmaPath)
    finally:
        os.environ['PATH'] = oldPath
        util.rmtree(workDir)
def testXmlLogWriter(self):
    """Exercise XmlLogWriter: free text, newlines, CRs, and buildreq reporting."""
    tmpDir = tempfile.mkdtemp()
    try:
        logPath = os.path.join(tmpDir, 'log')
        writer = logger.XmlLogWriter(logPath)
        writer.start()
        writer.freetext('message 1')
        writer.newline()
        writer.freetext('message 2')
        writer.carriageReturn()
        writer.newline()
        writer.freetext('message 3')
        writer.carriageReturn()
        writer.reportMissingBuildRequires('foo:runtime bar:lib')
        writer.close()
        data = open(logPath).read()
    finally:
        util.rmtree(tmpDir)
    lines = data.splitlines()
    self.assertEquals(len(lines), 9)
    self.assertEquals(lines[-1], '</log>')
    # every body line must be a well-formed <record> element
    recordsMatch = min(x.startswith('<record>') and x.endswith('</record>')
            for x in lines[2:-1])
    self.assertEquals(recordsMatch, True)
    # BUG FIX: the original asserted on a bare generator expression, which
    # is always truthy and could never fail.  Actually verify that the
    # missing-buildrequires record was emitted.
    self.assertTrue(any(
            'missingBuildRequires</descriptor><level>WARNING</level>'
            '<message>foo:runtime bar:lib</message>' in x
            for x in lines),
            'missing buildRequires record not found in log')
def deleteProject(self, projectId, projectFQDN, commit=True):
    """Delete a project's repository, its contents dirs, and its Projects row.

    Rolls back the database transaction on any failure and re-raises;
    commits only when commit is true.
    """
    try:
        # try deleteing the repository
        self.reposDB.delete(projectFQDN)
        for contentsDir in self.cfg.reposContentsDir.split():
            contentsDir = contentsDir % projectFQDN
            if os.path.isdir(contentsDir):
                util.rmtree(contentsDir)
            # If the parent dir is empty, delete that too.
            # (e.g. /srv/rbuilder/repos/hostname.rbuilder.com)
            parentDir = os.path.dirname(os.path.normpath(contentsDir))
            if os.path.isdir(parentDir) and not os.listdir(parentDir):
                try:
                    os.rmdir(parentDir)
                except OSError:
                    pass
        # try removing the project
        cu = self.db.cursor()
        cu.execute("DELETE FROM Projects WHERE projectId=?", projectId)
    except:
        self.db.rollback()
        raise
    else:
        if commit:
            self.db.commit()
def getRecipe(self, subpath):
    """Return a dictionary of file contents at the given subpath.

    Checks out the source tree into a scratch directory, reads every file
    directly under subpath, and returns {name: contents}.  Raises
    RuntimeError if subpath is missing or a symlink escapes the checkout.
    """
    assert self.revision
    # Update the local repository cache.
    workDir = tempfile.mkdtemp()
    try:
        prefix = self.checkout(workDir, subpath) or ''
        # Read in all the files for the requested subpath
        subDir = os.path.join(workDir, prefix, subpath)
        if not os.path.isdir(subDir):
            raise RuntimeError(
                    "sourceTree %s does not exist or is not a directory"
                    % subpath)
        files = {}
        for name in os.listdir(subDir):
            filePath = os.path.realpath(os.path.join(subDir, name))
            # BUG FIX: compare against workDir + os.sep so that a sibling
            # path merely sharing workDir as a string prefix (e.g.
            # /tmp/abc vs /tmp/abcdef) cannot pass the containment check.
            if not filePath.startswith(workDir + os.sep):
                raise RuntimeError(
                        "Illegal symlink %s points outside checkout: %s"
                        % (os.path.join(subpath, name), filePath))
            with open(filePath, 'rb') as fobj:
                files[name] = fobj.read()
        return files
    finally:
        util.rmtree(workDir)
def testCompileExpression1(self):
    """Filter attributes of a group policy must default to empty/None."""
    self.addComponent('foo:doc', '1.0.0', filePrimer = 1)
    self.addComponent('foo:runtime', '1.0.0', filePrimer = 2)
    self.addCollection('foo', '1.0.0', [':doc', ':runtime'])
    policyStr = """
from conary.build import policy

class ValidateFilters(policy.GroupEnforcementPolicy):
    def do(self):
        assert self.exceptions is None, \
            "exceptions: '%s' is not None" % self.exceptions
        assert self.inclusions is None, \
            "inclusions: '%s' is not None" % self.inclusions
        assert self.exceptionFilters == [], \
            "exceptionFilters: '%s' is not []" % self.exceptionFilters
        assert self.inclusionFilters == [], \
            "inclusionFilters: '%s' is not []" % self.inclusionFilters
"""
    tmpDir = tempfile.mkdtemp()
    try:
        policyDirs = self.cfg.policyDirs
        self.cfg.policyDirs = [tmpDir]
        self.registerPolicy(tmpDir, policyStr)
        self.build(simpleGroupRecipe, 'GroupSimpleAdd')
    finally:
        self.cfg.policyDirs = policyDirs
        util.rmtree(tmpDir)
def testWrite(self):
    """Smoke-test InstallableIso.write() with all heavy helpers stubbed out."""
    class FakeTreeGenerator(object):
        # stub out every tree-generator entry point write() touches
        parsePackageData = lambda *args, **kwargs: None
        extractChangeSets = lambda *args, **kwargs: None
        writeCsList = lambda *args, **kwargs: None
        writeGroupCs = lambda *args, **kwargs: None
    tmpDir = tempfile.mkdtemp()
    getArchFlavor = installable_iso.getArchFlavor
    splitDistro = splitdistro.splitDistro
    try:
        splitdistro.splitDistro = lambda *args, **kwargs: None
        installable_iso.getArchFlavor = lambda *args, **kwargs: \
                deps.Flavor()
        g = self.getHandler(buildtypes.INSTALLABLE_ISO)
        g._setupTrove = lambda *args, **kwargs: None
        g.extractChangeSets = lambda *args, **kwargs: FakeTreeGenerator()
        g.retrieveTemplates = lambda *args, **kwargs: (tmpDir, 38)
        g.prepareTemplates = lambda *args, **kwargs: (tmpDir, 38)
        g.extractMediaTemplate = lambda *args, **kwargs: None
        g.extractPublicKeys = lambda *args, **kwargs: None
        g.setupKickstart = lambda *args, **kwargs: None
        g.writeProductImage = lambda *args, **kwargs: None
        g.buildIsos = lambda *args, **kwargs: None
        g.baseFlavor = deps.Flavor()
        g.status = self.status
        g.jobData['name'] = 'test build'
        g.troveName = 'test'
        g.maxIsoSize = 650 * 1024 * 1024
        g.buildOVF10 = False
        g.write()
    finally:
        # restore patched module-level functions
        splitdistro.splitDistro = splitDistro
        installable_iso.getArchFlavor = getArchFlavor
        util.rmtree(tmpDir)
def clean_roots(self):
    """Remove every legacy contents root; these are no longer used."""
    rootsDir = os.path.join(self.cfg.basePath, 'roots')
    for entry in os.listdir(rootsDir):
        log.info("Deleting old contents root %s", entry)
        rmtree(os.path.join(rootsDir, entry))
def testUnmanagedPolicy2(self):
    """An unmanaged policy must be rejected when enforceManagedPolicy is set."""
    self.addComponent('foo:doc', '1.0.0', filePrimer = 1)
    self.addComponent('foo:runtime', '1.0.0', filePrimer = 2)
    self.addCollection('foo', '1.0.0', [':doc', ':runtime'])
    policyStr = """
from conary.build import policy

class AlwaysError(policy.GroupEnforcementPolicy):
    def doProcess(self, recipe):
        self.recipe.reportErrors("Automatic error")
"""
    tmpDir = tempfile.mkdtemp()
    try:
        policyDirs = self.cfg.policyDirs
        self.cfg.policyDirs = [tmpDir]
        enforceManagedPolicy = self.cfg.enforceManagedPolicy
        self.cfg.enforceManagedPolicy = True
        self.registerPolicy(tmpDir, policyStr)
        # this will fail with CookError because we shouldn't be allowed
        # to use a policy that's not managed by conary
        self.assertRaises(cook.CookError, self.build,
                simpleGroupRecipe, 'GroupSimpleAdd')
    finally:
        self.cfg.enforceManagedPolicy = enforceManagedPolicy
        self.cfg.policyDirs = policyDirs
        util.rmtree(tmpDir)
def testBuildIsos(self):
    """buildIsos should issue the expected number of external calls."""
    basedir = tempfile.mkdtemp()
    popen = os.popen
    rename = os.rename
    # tested function changes dirs.
    cwd = os.getcwd()
    try:
        # fake a disc-size probe and capture renames as file touches
        os.popen = lambda *args, **kwargs: StringIO.StringIO('734003201')
        os.rename = lambda a, b: self.touch(b)
        topdir = os.path.join(basedir, 'topdir')
        self.touch(os.path.join(topdir, 'images', 'boot.iso'))
        disc1 = os.path.join(basedir, 'disc1')
        util.mkdirChain(disc1)
        disc2 = os.path.join(basedir, 'disc2')
        self.touch(os.path.join(disc2, 'isolinux', 'isolinux.bin'))
        util.mkdirChain(os.path.join(basedir, 'junk'))
        g = self.getHandler(buildtypes.INSTALLABLE_ISO)
        g.basefilename = 'testcase'
        g.jobData['name'] = 'test build'
        g.jobData['project'] = {}
        g.jobData['project']['name'] = 'test project'
        g.buildIsos(topdir)
        self.failIf(len(self.callLog) != 4, "incorrect number of calls")
    finally:
        os.chdir(cwd)
        os.popen = popen
        os.rename = rename
        util.rmtree(basedir)
def testQueue(self):
    """ChrootQueue: slot limits, reuse of finished chroots, bad-chroot marking."""
    root = self.cfg.root
    root0 = root + '/foo'
    root1 = root + '/foo-1'
    root2 = root + '/foo-2'
    root3 = root + '/foo-3'
    queue = rootmanager.ChrootQueue(root, 2) # limit of two chroots
    self.assertEquals(queue.requestSlot('foo', [], True), (None, root0))
    self.assertEquals(queue.requestSlot('foo', [], True), (None, root1))
    # both slots taken -> no more room
    self.assertEquals(queue.requestSlot('foo', [], True), None)
    util.mkdirChain(root0)
    queue.chrootFinished(root0)
    self.assertEquals(sorted(queue.listOldChroots()), [root0])
    # finished chroot root0 is offered for reuse while root2 is assigned
    self.assertEquals(queue.requestSlot('foo', [], True), (root0, root2))
    util.mkdirChain(root2)
    util.rmtree(root0)
    queue.deleteChroot(root0)
    self.assertEquals(queue.requestSlot('foo', [], True), None)
    queue.markBadChroot(root2)
    # we can't reuse root2 anymore - it's marked as bad. But that means
    # it's no longer using a space, so we can add a chroot
    self.assertEquals(queue.requestSlot('foo', [], True), (None, root0))
    self.assertEquals(queue.requestSlot('foo', [], True), None)
    def _shorten(x):
        return x[len(root)+1:]
    self.assertEquals(sorted(queue.listChroots()),
            [_shorten(x) for x in (root0, root1)])
    self.assertEquals(sorted(queue.listOldChroots()), [])
def testImageGroupPolicyRun(self):
    """ImageGroupEnforcementPolicy runs only for image groups."""
    self.addComponent('foo:doc', '1.0.0', filePrimer = 1)
    self.addComponent('foo:runtime', '1.0.0', filePrimer = 2)
    self.addCollection('foo', '1.0.0', [':doc', ':runtime'])
    policyStr = """
from conary.build import policy

class AlwaysFails(policy.ImageGroupEnforcementPolicy):
    def doTroveSet(self, troveSet):
        raise RuntimeError, "doTroveSet should not have been called"
"""
    recipeStr = """
class ImageGroup(GroupRecipe):
    name = 'group-fitlers'
    version = '1.0'
    clearBuildRequires()

    def setup(r):
        r.add('foo')
"""
    tmpDir = tempfile.mkdtemp()
    try:
        policyDirs = self.cfg.policyDirs
        self.cfg.policyDirs = [tmpDir]
        self.registerPolicy(tmpDir, policyStr)
        # a non-image group will pass
        self.build(simpleGroupRecipe, 'GroupSimpleAdd')
        # a image group will fail
        self.assertRaises(RuntimeError, self.build, recipeStr, 'ImageGroup')
    finally:
        self.cfg.policyDirs = policyDirs
        util.rmtree(tmpDir)
def testImageGroupPolicyRun(self):
    """ImageGroupEnforcementPolicy runs only for image groups."""
    self.addComponent('foo:doc', '1.0.0', filePrimer=1)
    self.addComponent('foo:runtime', '1.0.0', filePrimer=2)
    self.addCollection('foo', '1.0.0', [':doc', ':runtime'])
    policyStr = """
from conary.build import policy

class AlwaysFails(policy.ImageGroupEnforcementPolicy):
    def doTroveSet(self, troveSet):
        raise RuntimeError, "doTroveSet should not have been called"
"""
    recipeStr = """
class ImageGroup(GroupRecipe):
    name = 'group-fitlers'
    version = '1.0'
    clearBuildRequires()

    def setup(r):
        r.add('foo')
"""
    tmpDir = tempfile.mkdtemp()
    try:
        policyDirs = self.cfg.policyDirs
        self.cfg.policyDirs = [tmpDir]
        self.registerPolicy(tmpDir, policyStr)
        # a non-image group will pass
        self.build(simpleGroupRecipe, 'GroupSimpleAdd')
        # a image group will fail
        self.assertRaises(RuntimeError, self.build, recipeStr, 'ImageGroup')
    finally:
        self.cfg.policyDirs = policyDirs
        util.rmtree(tmpDir)
def testCheckoutShadow(self):
    """'checkout' of an upstream package needs --shadow; re-checkout reuses it."""
    self.openRepository()
    self.addComponent('simple:source',
            [('simple.recipe', recipes.simpleRecipe)])
    self.addComponent('simple:runtime')
    self.addCollection('simple', [':runtime'])
    trv = self.addCollection('group-dist', ['simple'])
    self.initProductDirectory('foo')
    os.chdir('foo/devel')
    # plain checkout must fail and explain the available options
    txt = self.runCommand('checkout simple', exitCode=1)
    expectedText = '\n'.join((
        'error: The upstream source provides a version of this package.',
        'Please specify:',
        ' --shadow to shadow this package',
        ' --derive to derive from it',
        ' --new to replace it with a new version',
        ''))
    assert txt == expectedText
    txt = self.runCommand('checkout simple --shadow')
    self.assertEquals(txt, "Shadowed package 'simple' in './simple'\n")
    os.chdir('simple')
    assert('@NEW@' not in open('CONARY').read())
    trv = self.findAndGetTrove('simple:source=localhost@foo:foo-1-devel')
    self.assertEquals(str(trv.getVersion()),
            '/localhost@rpl:linux//foo:foo-1-devel/1.0-1')
    os.chdir('..')
    util.rmtree('simple')
    # a second checkout finds the existing shadow
    txt = self.runCommand('checkout simple')
    self.assertEquals(txt,
            "Checked out existing package 'simple' in './simple'\n")
    os.chdir('simple')
    assert('@NEW@' not in open('CONARY').read())
def testUnmanagedPolicy2(self):
    """An unmanaged policy must be rejected when enforceManagedPolicy is set."""
    self.addComponent('foo:doc', '1.0.0', filePrimer=1)
    self.addComponent('foo:runtime', '1.0.0', filePrimer=2)
    self.addCollection('foo', '1.0.0', [':doc', ':runtime'])
    policyStr = """
from conary.build import policy

class AlwaysError(policy.GroupEnforcementPolicy):
    def doProcess(self, recipe):
        self.recipe.reportErrors("Automatic error")
"""
    tmpDir = tempfile.mkdtemp()
    try:
        policyDirs = self.cfg.policyDirs
        self.cfg.policyDirs = [tmpDir]
        enforceManagedPolicy = self.cfg.enforceManagedPolicy
        self.cfg.enforceManagedPolicy = True
        self.registerPolicy(tmpDir, policyStr)
        # this will fail with CookError because we shouldn't be allowed
        # to use a policy that's not managed by conary
        self.assertRaises(cook.CookError, self.build,
                simpleGroupRecipe, 'GroupSimpleAdd')
    finally:
        self.cfg.enforceManagedPolicy = enforceManagedPolicy
        self.cfg.policyDirs = policyDirs
        util.rmtree(tmpDir)
def testCompileExpression1(self):
    """Filter attributes of a group policy must default to empty/None."""
    self.addComponent('foo:doc', '1.0.0', filePrimer=1)
    self.addComponent('foo:runtime', '1.0.0', filePrimer=2)
    self.addCollection('foo', '1.0.0', [':doc', ':runtime'])
    policyStr = """
from conary.build import policy

class ValidateFilters(policy.GroupEnforcementPolicy):
    def do(self):
        assert self.exceptions is None, \
            "exceptions: '%s' is not None" % self.exceptions
        assert self.inclusions is None, \
            "inclusions: '%s' is not None" % self.inclusions
        assert self.exceptionFilters == [], \
            "exceptionFilters: '%s' is not []" % self.exceptionFilters
        assert self.inclusionFilters == [], \
            "inclusionFilters: '%s' is not []" % self.inclusionFilters
"""
    tmpDir = tempfile.mkdtemp()
    try:
        policyDirs = self.cfg.policyDirs
        self.cfg.policyDirs = [tmpDir]
        self.registerPolicy(tmpDir, policyStr)
        self.build(simpleGroupRecipe, 'GroupSimpleAdd')
    finally:
        self.cfg.policyDirs = policyDirs
        util.rmtree(tmpDir)
def extractMediaTemplate(self, topdir):
    """Install the 'media-template' trove in a scratch root and copy it into topdir."""
    tmpRoot = tempfile.mkdtemp(dir=constants.tmpDir)
    try:
        client = self.getConaryClient(\
                tmpRoot, getArchFlavor(self.baseFlavor).freeze())
        log.info("extracting ad-hoc content from " \
                "media-template=%s" % client.cfg.installLabelPath[0].asString())
        uJob = self._getUpdateJob(client, "media-template")
        if uJob:
            client.applyUpdate(uJob, callback = self.callback)
            log.info("success: copying media template data to unified tree")
            # copy content into unified tree root. add recurse and no-deref
            # flags to command. following symlinks is really bad in this case.
            oldTemplateDir = os.path.join(tmpRoot,
                    'usr', 'lib', 'media-template')
            if os.path.exists(oldTemplateDir):
                call('cp', '-R', '--no-dereference', oldTemplateDir, topdir)
            for tDir in ('all', 'disc1'):
                srcDir = os.path.join(tmpRoot, tDir)
                destDir = os.path.join(topdir, 'media-template2')
                if os.path.exists(srcDir):
                    util.mkdirChain(destDir)
                    call('cp', '-R', '--no-dereference', srcDir, destDir)
        else:
            log.info("media-template not found on repository")
    finally:
        util.rmtree(tmpRoot, ignore_errors = True)
def testFmtdLog(self):
    """Formatted XML log: descriptor stack handling, levels, proper closure."""
    tmpDir = tempfile.mkdtemp()
    try:
        origHandlers = log.fmtLogger.handlers[:]
        logPath = os.path.join(tmpDir, 'log.xml')
        log.openFormattedLog(logPath)
        log.pushLogDescriptor('foo')
        self.captureOutput(log.debug, 'debug message')
        log.pushLogDescriptor('bar')
        self.captureOutput(log.info, 'info message')
        log.popLogDescriptor()
        self.captureOutput(log.warning, 'warning message')
        log.popLogDescriptor()
        self.captureOutput(log.error, 'error message')
        # leave a dangling descriptor; close must clear the stack
        log.pushLogDescriptor('bad_descriptor')
        hdlr = [x for x in log.fmtLogger.handlers \
                if x not in origHandlers][0]
        hdlr.close()
        log.fmtLogger.handlers.remove(hdlr)
        data = open(logPath).read().splitlines()
        self.assertFalse('bad_descriptor' in data[-2],
                "descriptor stack wasn't cleared on log close.")
        self.assertEquals(data[-1], '</log>')
        self.assertEquals(len(data), 9)
        self.assertSubstring('<level>DEBUG</level>', data[2])
        self.assertSubstring('<message>begin log</message>', data[2])
        self.assertSubstring('<level>DEBUG</level>', data[-2])
        self.assertSubstring('<message>end log</message>', data[-2])
    finally:
        util.rmtree(tmpDir)
def testBuildIsosFailure(self):
    """buildIsos must raise RuntimeError when expected outputs never appear."""
    basedir = tempfile.mkdtemp()
    popen = os.popen
    rename = os.rename
    # tested function changes dirs.
    cwd = os.getcwd()
    try:
        os.popen = lambda *args, **kwargs: StringIO.StringIO('734003201')
        # rename is a no-op here, so the renamed iso never materializes
        os.rename = lambda a, b: None
        topdir = os.path.join(basedir, 'topdir')
        self.touch(os.path.join(topdir, 'images', 'boot.iso'))
        disc1 = os.path.join(basedir, 'disc1')
        util.mkdirChain(disc1)
        g = self.getHandler(buildtypes.INSTALLABLE_ISO)
        g.basefilename = ''
        g.jobData['name'] = 'test build'
        g.jobData['project'] = {}
        g.jobData['project']['name'] = 'test project'
        g.jobData['project']['hostname'] = 'test'
        self.assertRaises(RuntimeError, g.buildIsos, topdir)
    finally:
        os.chdir(cwd)
        os.popen = popen
        os.rename = rename
        util.rmtree(basedir)
def testExtractChangeSets(self):
    """extractChangeSets wiring, with fake conary client and tree generator."""
    class FakeTreeGenerator(object):
        parsePackageData = lambda *args, **kwargs: None
        extractChangeSets = lambda *args, **kwargs: None
    class FakeClient(object):
        # cfg points back at the instance so attribute lookups resolve
        def __init__(x, root):
            x.root = root
            x.cfg = x
            x.installLabelPath = [versions.Label('test.rpath.local@rpl:1')]
        createChangeSet = lambda *args, **kwargs: None
    csdir = tempfile.mkdtemp()
    clientVersion = 38
    getArchFlavor = installable_iso.getArchFlavor
    TreeGenerator = gencslist.TreeGenerator
    try:
        gencslist.TreeGenerator = lambda *args, **kwargs: \
                FakeTreeGenerator()
        installable_iso.getArchFlavor = lambda x: deps.Flavor()
        g = self.getHandler(buildtypes.INSTALLABLE_ISO)
        g.baseFlavor = deps.Flavor()
        g.troveFlavor = deps.Flavor()
        g.getConaryClient = lambda root, *args, **kwargs: FakeClient(root)
        g.troveName = 'test'
        g.callback = installable_iso.Callback(self.status)
        g.extractChangeSets(csdir, clientVersion)
    finally:
        gencslist.TreeGenerator = TreeGenerator
        installable_iso.getArchFlavor = getArchFlavor
        util.rmtree(csdir)
def write(self):
    """Build the raw filesystem image(s), optionally wrap in OVF 1.0, and post output."""
    sizes = self.getImageSize(realign=0)
    finalImage = os.path.join(self.outputDir,
            self.basefilename + ".fs.tar.gz")
    images = self.makeFSImage(sizes)
    self.status("Compressing filesystem images")
    self.gzip(self.workingDir, finalImage)
    if self.buildOVF10:
        self.diskFilePath = images["/"]
        self.diskFileName = os.path.split(self.diskFilePath)[1]
        self.status("Building OVF 1.0 package")
        diskFileGzipPath = self.gzip(self.diskFilePath,
                os.path.join(self.outputDir, self.diskFileName + ".gz"))
        # working dir no longer needed once the disk is compressed
        util.rmtree(self.workingDir)
        self.ovaPath = self.createOvf(imageName=self.basefilename,
                imageDescription=self.jobData["description"],
                diskFormat=constants.RAWFS,
                diskFilePath=diskFileGzipPath,
                diskCapacity=sizes["/"],
                diskCompressed=True,
                workingDir=self.workDir,
                outputDir=self.outputDir)
        self.outputFileList.append((self.ovaPath,
                "Raw Filesystem %s" % constants.OVFIMAGETAG))
    self.outputFileList.append((finalImage, "Raw Filesystem Image"))
    self.postOutput(self.outputFileList)
def makeFSImage(self, sizes):
    """Create, mount, and populate one filesystem image per mount point.

    Returns a dict mapping mount point -> image file path.  Unmounting
    and root cleanup always happen, even on failure.
    """
    root = self.workDir + "/root"
    try:
        # create an image file per mount point
        imgFiles = {}
        for mountPoint, req in self.mountDict.items():
            size = sizes[mountPoint]
            tag = mountPoint.replace("/", "")
            tag = tag and tag or "root"
            imgFiles[mountPoint] = path = self.mntPointFileName(mountPoint)
            log.info("Creating mount point %s at %s with size %d bytes",
                    mountPoint, path, size)
            fs = self.makeBlankFS(path, req.fstype, size, fsLabel=req.name)
            self.addFilesystem(mountPoint, fs)
        self.mountAll()
        # Install image contents.
        self.installFileTree(root)
    finally:
        try:
            self.umountAll()
            util.rmtree(root, ignore_errors=True)
        except:
            # best-effort teardown; log but don't mask the original error
            log.logger.exception("Error unmounting partitions:")
    return imgFiles
def testCheckoutShadow(self):
    """'checkout' of an upstream package needs --shadow; re-checkout reuses it."""
    self.openRepository()
    self.addComponent('simple:source',
            [('simple.recipe', recipes.simpleRecipe)])
    self.addComponent('simple:runtime')
    self.addCollection('simple', [':runtime'])
    trv = self.addCollection('group-dist', ['simple'])
    self.initProductDirectory('foo')
    os.chdir('foo/devel')
    # plain checkout must fail and explain the available options
    txt = self.runCommand('checkout simple', exitCode=1)
    expectedText = '\n'.join(
        ('error: The upstream source provides a version of this package.',
         'Please specify:',
         ' --shadow to shadow this package',
         ' --derive to derive from it',
         ' --new to replace it with a new version',
         ''))
    assert txt == expectedText
    txt = self.runCommand('checkout simple --shadow')
    self.assertEquals(txt, "Shadowed package 'simple' in './simple'\n")
    os.chdir('simple')
    assert ('@NEW@' not in open('CONARY').read())
    trv = self.findAndGetTrove('simple:source=localhost@foo:foo-1-devel')
    self.assertEquals(str(trv.getVersion()),
            '/localhost@rpl:linux//foo:foo-1-devel/1.0-1')
    os.chdir('..')
    util.rmtree('simple')
    # a second checkout finds the existing shadow
    txt = self.runCommand('checkout simple')
    self.assertEquals(
        txt, "Checked out existing package 'simple' in './simple'\n")
    os.chdir('simple')
    assert ('@NEW@' not in open('CONARY').read())
def testUpdateMissingKey(self): fingerprint = '95B457D16843B21EA3FC73BBC7C32FC1F94E405E' # supply the pass phrase for our private key keyCache = openpgpkey.getKeyCache() keyCache.getPrivateKey(fingerprint, '111111') self.cfg.signatureKey = fingerprint self.addQuickTestComponent("test:doc", "1.0-1-1") signtrove.signTroves(self.cfg, ["test:doc"]) repos = self.openRepository() # utterly prevent the keycache from knowing about the key, # but give it a place to store a keyserver retrieved key. newKeyCache = openpgpkey.OpenPGPKeyFileCache() tmpPath = mkdtemp() pubRing = self.cfg.pubRing self.cfg.pubRing = [tmpPath + '/pubring.gpg'] newKeyCache.publicPaths = self.cfg.pubRing keyCacheCallback = openpgpkey.KeyCacheCallback(repos, self.cfg) newKeyCache.setCallback(keyCacheCallback) openpgpkey.setKeyCache(newKeyCache) try: self.updatePkg(self.rootDir, "test:doc") newKeyCache.getPublicKey(fingerprint) finally: self.cfg.pubRing = pubRing openpgpkey.setKeyCache(keyCache) util.rmtree(tmpPath)
def initdb(self, meta):
    """Create a new postgres cluster at the given location."""
    log.info("Initializing PostgreSQL %s cluster", meta.version)
    assert not os.path.exists(meta.dataDir)
    self.loadPrivs(user=self.user)
    parentDir = os.path.dirname(meta.dataDir)
    if not os.path.isdir(parentDir):
        os.makedirs(parentDir)
    # build the cluster in a temp dir, then rename into place atomically
    tempDir = tempfile.mkdtemp(dir=parentDir)
    try:
        os.chown(tempDir, self.uidgid[0], self.uidgid[1])
        # initdb must run as the postgres user
        self.dropPrivs()
        cluster = postgres_major_migrate.Postmaster(dataDir=tempDir,
                binDir=meta.binDir, port=65000,
                logPath='/tmp/postgres-initdb.log')
        cluster.initdb()
        self.restorePrivs()
        self.updateMeta(meta)
        os.rename(tempDir, meta.dataDir)
    finally:
        # best-effort cleanup if the rename never happened
        try:
            if os.path.isdir(tempDir):
                try:
                    self.restorePrivs()
                except:
                    traceback.print_exc()
                log.info("Cleaning up temporary target dir")
                cny_util.rmtree(tempDir)
        except:
            traceback.print_exc()
def testCompressedLogs(self):
    """XmlHandler must write identical content to plain, gzip, and bz2 logs."""
    tmpDir = tempfile.mkdtemp()
    try:
        bz2Path = os.path.join(tmpDir, 'log.xml.bz2')
        bz2Hdlr = xmllog.XmlHandler(bz2Path)
        self.logger.addHandler(bz2Hdlr)
        gzPath = os.path.join(tmpDir, 'log.xml.gz')
        gzHdlr = xmllog.XmlHandler(gzPath)
        self.logger.addHandler(gzHdlr)
        logPath = os.path.join(tmpDir, 'log.xml')
        logHdlr = xmllog.XmlHandler(logPath)
        self.logger.addHandler(logHdlr)
        self.logger.info('test')
        bz2Hdlr.close()
        gzHdlr.close()
        logHdlr.close()
        self.logger.handlers.remove(bz2Hdlr)
        self.logger.handlers.remove(gzHdlr)
        self.logger.handlers.remove(logHdlr)
        # only inspect the first two lines. they won't have timestamps
        logData = open(logPath).read().splitlines()[:2]
        gzData = gzip.GzipFile(gzPath, 'r').read().splitlines()[:2]
        bzData = bz2.BZ2File(bz2Path, 'r').read().splitlines()[:2]
        self.assertFalse(not logData, "expected log content")
        self.assertEquals(logData, gzData)
        self.assertEquals(logData, bzData)
    finally:
        util.rmtree(tmpDir)
def testEmptyGrubName(self):
    '''
    Make sure grub title falls back to defaults if /etc/issue exists
    but is empty.

    Tests: RBL-2333
    '''
    tmpDir = tempfile.mkdtemp()
    try:
        handler = self.getHandler(buildtypes.RAW_HD_IMAGE)
        self.touch(os.path.join(tmpDir, 'sbin', 'grub'))
        # empty /etc/issue: title must fall back to the project name
        self.touch(os.path.join(tmpDir, 'etc', 'issue'))
        installer = self._getInstaller(tmpDir, handler=handler, kind='grub')
        installer.setup()
        installer.install()
        f = open(os.path.join(tmpDir, 'etc', 'grub.conf'))
        data = f.read()
        f.close()
        self.failUnless(handler.jobData['project']['name'] in data,
                'grub title not taken from job data')
    finally:
        util.rmtree(tmpDir)
def testManagedPolicy(self):
    """A conary-managed group policy that reports errors must raise PolicyError."""
    self.addComponent('foo:doc', '1.0.0', filePrimer=1)
    self.addComponent('foo:runtime', '1.0.0', filePrimer=2)
    self.addCollection('foo', '1.0.0', [':doc', ':runtime'])
    policyStr = """
from conary.build import policy

class AlwaysError(policy.GroupEnforcementPolicy):
    def doProcess(self, recipe):
        self.recipe.reportErrors("Automatic error")
"""
    policyPath = os.path.join(self.cfg.root, 'policy', 'errpolicy.py')
    # we're effectively creating /tmp/_/root/tmp/_/root/policy/...
    # we're doing this so that the system db in /tmp/_/root will
    # match the absolute path of the actual policy file.
    self.addComponent('errorpolicy:runtime',
            fileContents=[(policyPath, policyStr)])
    self.updatePkg('errorpolicy:runtime')
    try:
        policyDirs = self.cfg.policyDirs
        self.cfg.policyDirs = [os.path.dirname(policyPath)]
        enforceManagedPolicy = self.cfg.enforceManagedPolicy
        self.cfg.enforceManagedPolicy = True
        util.mkdirChain(os.path.dirname(policyPath))
        f = open(policyPath, 'w')
        f.write(policyStr)
        f.close()
        # policy is managed (owned by errorpolicy:runtime), so it runs and
        # its reported error surfaces as a PolicyError
        self.assertRaises(policy.PolicyError, self.build,
                simpleGroupRecipe, 'GroupSimpleAdd')
    finally:
        self.cfg.enforceManagedPolicy = enforceManagedPolicy
        self.cfg.policyDirs = policyDirs
        util.rmtree(os.path.dirname(policyPath))
def testWriteProductImage(self):
    """writeProductImage should run the expected sequence of external commands."""
    class FakeClient(object):
        # cfg points back at the instance so attribute lookups resolve
        def __init__(x):
            x.cfg = x
            x.installLabelPath = [versions.Label('test.rpath.local@rpl:1')]
        setUpdateCallback = lambda *args, **kwargs: None
        applyUpdate = lambda *args, **kwargs: None
    class DummyImages(object):
        processImages = lambda *args, **kwargs: None
        __init__ = lambda *args, **kwargs: None
    topdir = tempfile.mkdtemp()
    AnacondaImages = installable_iso.AnacondaImages
    unlink = os.unlink
    try:
        self.touch(os.path.join(topdir, 'isolinux', 'test.msg'))
        self.touch(os.path.join(topdir, 'isolinux', 'isolinux.cfg'))
        os.unlink = lambda *args, **kwargs: None
        installable_iso.AnacondaImages = DummyImages
        g = self.getHandler(buildtypes.INSTALLABLE_ISO)
        g.callback = installable_iso.Callback(self.status)
        g.jobData['name'] = 'test build'
        g.baseTrove = 'baseTrove'
        g.baseFlavor = deps.Flavor()
        g.getConaryClient = lambda *args, **kwargs: FakeClient()
        g._getUpdateJob = lambda *args, **kwargs: True
        g._getLabelPath = lambda *args, **kwargs: ""
        g.writeProductImage(topdir, 'x86')
        self.failUnlessEqual([x[0] for x in self.callLog],
                ['sed', 'tar', 'tar', 'tar', 'tar', '/sbin/mkfs.cramfs'])
    finally:
        os.unlink = unlink
        installable_iso.AnacondaImages = AnacondaImages
        util.rmtree(topdir)
def testPolicyAttributes(self):
    """preProcess runs before test(); a False test() must skip do()."""
    self.addComponent('foo:doc', '1.0.0', filePrimer=1)
    self.addComponent('foo:runtime', '1.0.0', filePrimer=2)
    self.addCollection('foo', '1.0.0', [':doc', ':runtime'])
    policyStr = """
from conary.build import policy

class ValidateFilters(policy.GroupEnforcementPolicy):
    def preProcess(self):
        self.preprocess = True

    def test(self):
        assert 'preprocess' in self.__dict__ and self.preprocess
        # returning False indicates test failed
        return False

    def do(self):
        raise RuntimeError, "self.test() should have aborted doProcess"
"""
    tmpDir = tempfile.mkdtemp()
    try:
        policyDirs = self.cfg.policyDirs
        self.cfg.policyDirs = [tmpDir]
        self.registerPolicy(tmpDir, policyStr)
        self.build(simpleGroupRecipe, 'GroupSimpleAdd')
    finally:
        self.cfg.policyDirs = policyDirs
        util.rmtree(tmpDir)
def testPolicyAttributes(self):
    """preProcess runs before test(); a False test() must skip do()."""
    self.addComponent('foo:doc', '1.0.0', filePrimer = 1)
    self.addComponent('foo:runtime', '1.0.0', filePrimer = 2)
    self.addCollection('foo', '1.0.0', [':doc', ':runtime'])
    policyStr = """
from conary.build import policy

class ValidateFilters(policy.GroupEnforcementPolicy):
    def preProcess(self):
        self.preprocess = True

    def test(self):
        assert 'preprocess' in self.__dict__ and self.preprocess
        # returning False indicates test failed
        return False

    def do(self):
        raise RuntimeError, "self.test() should have aborted doProcess"
"""
    tmpDir = tempfile.mkdtemp()
    try:
        policyDirs = self.cfg.policyDirs
        self.cfg.policyDirs = [tmpDir]
        self.registerPolicy(tmpDir, policyStr)
        self.build(simpleGroupRecipe, 'GroupSimpleAdd')
    finally:
        self.cfg.policyDirs = policyDirs
        util.rmtree(tmpDir)
def delete(self, name):
    """Drop the repository database for *name* and remove its on-disk files."""
    dbName = self.translate(name)
    mysqlPath = self._getTemplate()[1] % 'mysql'
    cu = dbstore.connect(mysqlPath, 'mysql').cursor()
    cu.execute("DROP DATABASE %s" % dbName)
    # remove whatever files the server left behind for this database
    util.rmtree(mysqlPath + dbName, ignore_errors=True)
def cacheUpdateJob(self, applyList, updJob):
    """Freeze *updJob* into the job cache, replacing any stale entry."""
    cacheDir = self._getJobCachePath(applyList)
    # drop any previously cached job before re-creating the directory
    if os.path.exists(cacheDir):
        util.rmtree(cacheDir)
    os.mkdir(cacheDir)
    updJob.freeze(cacheDir)
def testOva(self):
    """Tar up the subdir, delete the source tree, then run the archive tests."""
    workdir = self.makeDirectoryStructure()
    archivePath = os.path.join(workdir, 'archive.tar.gz')
    subprocess.call(['tar', 'cf', archivePath, '-C', workdir, 'subdir'])
    # remove the originals so the tests can only pass via the archive
    util.rmtree(os.path.join(workdir, "subdir"))
    self._runTests(archivePath)
def makeBlankFS(self, image, fsType, size, fsLabel=None):
    """Create a sparse image file of *size* bytes and format a filesystem on it."""
    if os.path.exists(image):
        util.rmtree(image)
    util.mkdirChain(os.path.split(image)[0])
    # write only the final 4KiB block so the file is sparse but full-sized
    lastBlock = (size / 4096) - 1
    logCall("dd if=/dev/zero of=%s count=1 seek=%d bs=4096"
            % (image, lastBlock))
    fs = bootable_image.Filesystem(image, fsType, size, fsLabel=fsLabel)
    fs.format()
    return fs
def extractPublicKeys(self, keyDir, topdir, csdir):
    """Collect the OpenPGP keys for every signature found in csdir's changesets.

    Fetches each fingerprint's key from its trove's trailing-label
    repository, imports them into a scratch gpg home, and exports the
    combined keyring to topdir/public_keys.gpg.  Raises RuntimeError
    listing the troves whose keys could not be found.
    """
    self.status('Extracting Public Keys')
    homeDir = tempfile.mkdtemp(dir = constants.tmpDir)
    tmpRoot = tempfile.mkdtemp(dir = constants.tmpDir)
    try:
        client = self.getConaryClient( \
                tmpRoot, getArchFlavor(self.baseFlavor).freeze())
        fingerprints = {}
        fpTrovespecs = {}
        for filename in [x for x in os.listdir(csdir)
                if x.endswith('.ccs')]:
            cs = changeset.ChangeSetFromFile(
                    os.path.join(csdir, filename))
            troves = [trove.Trove(x) for x in cs.iterNewTroveList()]
            for trv in troves:
                label = trv.version.v.trailingLabel()
                for sig in trv.troveInfo.sigs.digitalSigs.iter():
                    # remember which troves need each fingerprint, for
                    # the error message below
                    tspecList = fpTrovespecs.get(sig[0], set())
                    tspecList.add('%s=%s[%s]' % (trv.getName(),
                            str(trv.getVersion()),
                            str(trv.getFlavor())))
                    fpTrovespecs[sig[0]] = tspecList
                    if fingerprints.has_key(label):
                        if sig[0] not in fingerprints[label]:
                            fingerprints[label].append(sig[0])
                    else:
                        fingerprints.update({label: [sig[0]]})
        missingKeys = []
        # BUG FIX: the loop variable used to be named 'fingerprints',
        # shadowing the dict being iterated; use a distinct name.
        for label, fpList in fingerprints.items():
            for fp in fpList:
                try:
                    key = client.repos.getAsciiOpenPGPKey(label, fp)
                    fd, fname = tempfile.mkstemp(dir = constants.tmpDir)
                    os.close(fd)
                    fd = open(fname, 'w')
                    fd.write(key)
                    fd.close()
                    call('gpg', '--home', homeDir,
                            '--trust-model', 'always',
                            '--import', fname)
                    os.unlink(fname)
                except openpgpfile.KeyNotFound:
                    missingKeys.append(fp)
        if missingKeys:
            errorMessage = 'The following troves do not have keys in ' \
                    'their associated repositories:\n'
            for fingerprint in missingKeys:
                errorMessage += '%s requires %s\n' % \
                        (', '.join(fpTrovespecs[fingerprint]), fingerprint)
            raise RuntimeError(errorMessage)
        call('gpg', '--home', homeDir, '--export',
                '--no-auto-check-trustdb', '-o',
                os.path.join(topdir, 'public_keys.gpg'))
    finally:
        util.rmtree(homeDir, ignore_errors = True)
        util.rmtree(tmpRoot, ignore_errors = True)
def writeAnnotatedFiles(self, files): if os.path.exists(self._annotatePath): util.rmtree(self._annotatePath) annotatePath = self._annotatePath coverage = self.getCoverage() coverage.annotate(files, self._annotatePath, baseDirs=self._baseDirs) if annotatePath.startswith(os.getcwd() + '/'): annotatePath = '.' + annotatePath[len(os.getcwd()):] print print '*** %s file(s) annotated in %s' % (len(files), annotatePath)
def doUpdate(cfg, changeSpecs, **kwargs):
    """Apply an update for ``changeSpecs``, threading kwargs to _updateTroves().

    Spec arguments that are readable changeset files on disk are turned
    into ``fromChangesets`` entries plus the specs of their primary
    troves.  When restarting a previous update (``restartInfo``), the
    apply list is rebuilt downstream and the restart directory is
    removed once the update completes.
    """
    callback = kwargs.get('callback', None)
    if not callback:
        callback = callbacks.UpdateCallback(trustThreshold=cfg.trustThreshold)
        kwargs['callback'] = callback
    else:
        callback.setTrustThreshold(cfg.trustThreshold)

    syncChildren = kwargs.get('syncChildren', False)
    syncUpdate = kwargs.pop('syncUpdate', False)
    restartInfo = kwargs.get('restartInfo', None)

    # Sync operations implicitly install troves missing from the system.
    if syncChildren or syncUpdate:
        installMissing = True
    else:
        installMissing = False
    kwargs['installMissing'] = installMissing

    fromChangesets = []
    for path in kwargs.pop('fromFiles', []):
        cs = changeset.ChangeSetFromFile(path)
        fromChangesets.append(cs)
    kwargs['fromChangesets'] = fromChangesets

    # Look for items which look like files in the applyList and convert
    # them into fromChangesets w/ the primary sets
    for item in changeSpecs[:]:
        if not os.access(item, os.R_OK):
            continue
        try:
            cs = changeset.ChangeSetFromFile(item)
        except Exception:
            # FIX: was a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit.  Unparsable files are simply
            # treated as ordinary trove specs.
            continue
        fromChangesets.append(cs)
        changeSpecs.remove(item)
        for troveTuple in cs.getPrimaryTroveList():
            changeSpecs.append(trovetup.TroveTuple(*troveTuple).asString())

    if restartInfo:
        # We don't care about applyList, we will set it later
        applyList = None
    else:
        keepExisting = kwargs.get('keepExisting')
        updateByDefault = kwargs.get('updateByDefault', True)
        applyList = cmdline.parseChangeList(changeSpecs, keepExisting,
                                            updateByDefault,
                                            allowChangeSets=True)

    _updateTroves(cfg, applyList, **kwargs)
    # Clean up after ourselves
    if restartInfo:
        util.rmtree(restartInfo, ignore_errors=True)
def cacheUpdateJob(self, applyList, updJob):
    """Freeze ``updJob`` into the cache directory keyed by ``applyList``.

    Any previously cached job for the same apply list is discarded
    before the new one is frozen in its place.
    """
    jobPath = self._getJobCachePath(applyList)
    log.info("jobPath %s", jobPath)
    if os.path.exists(jobPath):
        log.info("deleting the JobPath %s ", jobPath)
        util.rmtree(jobPath)
        log.info("end deleting the JobPath %s ", jobPath)
    log.info("making the logPath ")
    os.mkdir(jobPath)
    log.info("freeze JobPath")
    updJob.freeze(jobPath)
    log.info("end freeze JobPath")
def testListChroots(self):
    """A chroot recorded in the db is listed only while it exists on disk."""
    self.openRmakeRepository()
    client = self.startRmakeServer()
    srcTrv, srcCs = self.Component('foo:source')
    job = self.newJob(srcTrv)
    buildTrv = job.iterTroves().next()
    buildTrv.creatingChroot('_local_', 'foo')
    db = self.openRmakeDatabase()
    assert([chroot.path for chroot in db.listChroots()] == ['foo'])
    chrootDir = os.path.join(self.rmakeCfg.buildDir, 'chroots', 'foo')
    # The client only reports chroots whose directories exist.
    util.mkdirChain(chrootDir)
    assert([chroot.path for chroot in client.listChroots()] == ['foo'])
    util.rmtree(chrootDir)
    assert([chroot.path for chroot in client.listChroots()] == [])
def testCreate(self):
    """ExplicitManifest.create writes each recorded path on its own line."""
    recipe = self.getRecipe()
    man = manifest.ExplicitManifest('foo', recipe)
    recordedPath = os.path.sep + os.path.join(self.destdir, 'foo')
    man.recordPaths(recordedPath)
    man.manifestsDir = tempfile.mkdtemp()
    try:
        # Redirect the manifest file into our scratch directory.
        man.manifestFile = os.path.join(
            man.manifestsDir, os.path.basename(man.manifestFile))
        man.create()
        manifestData = open(man.manifestFile).read()
        self.assertEquals('/foo\n', manifestData)
    finally:
        util.rmtree(man.manifestsDir)
def updateAll(cfg, **kwargs):
    """Update every trove on the system (the updateall command).

    Pops presentation-only flags off kwargs, then either prints what
    would be updated (showItems) or hands the full apply list to
    _updateTroves().  Returns early (None) in display-only modes.
    """
    showItems = kwargs.pop('showItems', False)
    restartInfo = kwargs.get('restartInfo', None)
    migrate = kwargs.pop('migrate', False)
    modelArg = kwargs.pop('model', False)
    modelFile = kwargs.get('systemModelFile', None)
    model = kwargs.get('systemModel', None)
    infoArg = kwargs.get('info', False)
    # System-model systems refresh version snapshots in the model itself
    # rather than computing an update job from installed troves.
    if model and modelFile and modelFile.exists() and restartInfo is None:
        model.refreshVersionSnapshots()
        if modelArg:
            # --model: just print the (refreshed) model and stop.
            model.write(sys.stdout)
            sys.stdout.flush()
            return None
    # migrate mode installs missing troves and removes not-by-default ones.
    kwargs['installMissing'] = kwargs['removeNotByDefault'] = migrate
    if 'callback' not in kwargs or not kwargs.get('callback'):
        kwargs['callback'] = UpdateCallback(cfg)
    # load trove cache only if --info provided
    kwargs['loadTroveCache'] = infoArg
    client = conaryclient.ConaryClient(cfg)
    # We want to be careful not to break the old style display, for whoever
    # might have a parser for that output.
    withLongDisplay = (cfg.fullFlavors or cfg.fullVersions or cfg.showLabels)
    formatter = UpdateAllFormatter()
    if restartInfo or (model and modelFile and modelFile.exists()):
        # Restart / model-driven updates build their apply list later.
        updateItems = []
        applyList = None
    else:
        if showItems and withLongDisplay:
            updateItems = client.getUpdateItemList()
            dcfg = display.DisplayConfig()
            dcfg.setTroveDisplay(fullFlavors = cfg.fullFlavors,
                                 fullVersions = cfg.fullVersions,
                                 showLabels = cfg.showLabels)
            formatter = display.TroveTupFormatter(dcfg)
        else:
            updateItems = client.fullUpdateItemList()
        applyList = [ (x[0], (None, None), x[1:], True) for x in updateItems ]
    if showItems:
        # Display-only mode: print the candidate troves and stop.
        for (name, version, flavor) in sorted(updateItems,
                                              key=lambda x:x[0]):
            print formatter.formatNVF(name, version, flavor)
        return
    _updateTroves(cfg, applyList, **kwargs)
    # Clean up after ourselves
    if restartInfo:
        util.rmtree(restartInfo, ignore_errors=True)
def testRemove(self):
    """util.remove('<dir>/*') deletes files but warns and keeps subdirs."""
    # test removing a file while keeping the subdir
    d = tempfile.mkdtemp()
    fn = os.sep.join((d, 'hello'))
    f = open(fn, 'w')
    f.write('hello')
    # FIX: close the handle (the old code leaked it, leaving the write
    # unflushed while util.remove ran).
    f.close()
    subdir = os.sep.join((d, 'subdir'))
    os.mkdir(subdir)
    self.logFilter.add()
    util.remove(os.sep.join((d, '*')))
    self.logFilter.remove()
    self.logFilter.compare(('warning: Not removing directory %s' %subdir))
    assert(not os.path.exists(fn) and os.path.isdir(subdir))
    util.rmtree(d)
def testLoad(self):
    """A loaded manifest regexp honors the recipe's path translations."""
    recipe = self.getRecipe()
    recipe._pathTranslations.append(('/foo', '/bar'))
    man = manifest.ExplicitManifest('foo', recipe)
    recordedPath = os.path.sep + os.path.join(self.destdir, 'foo')
    man.recordPaths(recordedPath)
    man.manifestsDir = tempfile.mkdtemp()
    try:
        # Redirect the manifest file into our scratch directory.
        man.manifestFile = os.path.join(
            man.manifestsDir, os.path.basename(man.manifestFile))
        man.create()
        regexp = man.load()
    finally:
        util.rmtree(man.manifestsDir)
    # '/foo' was translated to '/bar', so only '/bar' may match.
    self.assertFalse(regexp.match('/foo'))
    self.assertFalse(not regexp.match('/bar'))
def testLexerOperation(self):
    """Lexer drops marker lines and close() flushes buffered text."""
    marker = '43227894372910'
    scratchDir = tempfile.mkdtemp()
    try:
        lexer = logger.Lexer(marker)
        logPath = os.path.join(scratchDir, 'log')
        writer = logger.FileLogWriter(logPath)
        writer.start()
        lexer.registerCallback(writer.handleToken)
        lexer.scan('test\n43227894372910 foo\ntext 2\n')
        lexer.close()
        data = open(logPath).read()
        # test that close flushed the lexer
        self.assertEquals(data, 'testtext 2\n')
    finally:
        util.rmtree(scratchDir)
def main(args=None):
    """Check out, or build a plan from, the repo pinned in revision.txt.

    args: command-line arguments; defaults to sys.argv[1:], evaluated at
    call time.  (FIX: the old signature used ``args=sys.argv[1:]`` as
    the default, which captured sys.argv once at import time.)
    """
    if args is None:
        args = sys.argv[1:]
    cfg = config.openPlan(None, systemOnly=True)
    parser = optparse.OptionParser()
    parser.add_option('--base-uri')
    parser.add_option('--repo')
    parser.add_option('--plan')
    parser.add_option('--checkout')
    options, args = parser.parse_args(args)
    if not cfg.wmsBase:
        if options.base_uri:
            cfg.wmsBase = options.base_uri
        else:
            parser.error("Please set wmsBase option in /etc/bobrc or ~/.bobrc")
    if not options.repo:
        parser.error("--repo option must be set")
    if not options.plan and not options.checkout:
        parser.error("Must set one of --checkout or --plan")
    rf = RevisionFile()
    tip = rf.revs.get(options.repo)
    path = options.repo
    if not tip:
        # Fall back to matching the basename of each known repo path.
        for path, tip in rf.revs.items():
            if os.path.basename(path) == options.repo:
                break
        else:
            sys.exit("repo %s not in revision.txt" % options.repo)
    repo = wms.WmsRepository(cfg, path=path)
    repo.revision = tip['id']
    repo.branch = tip['branch']
    repo.revIsExact = True
    if options.checkout:
        checkoutDir = os.path.abspath(options.checkout)
        if os.path.exists(checkoutDir):
            util.rmtree(checkoutDir)
        # Check out into the parent, then rename to the requested name.
        parent = os.path.dirname(checkoutDir)
        prefix = repo.checkout(parent)
        os.rename(os.path.join(parent, prefix), checkoutDir)
    else:
        planDir = tempfile.mkdtemp(dir='.')
        try:
            prefix = repo.checkout(planDir)
            plan = os.path.join(planDir, prefix, options.plan)
            return bob_main.main([plan])
        finally:
            util.rmtree(planDir)
def testRecurseDirectoryList(self):
    """recurseDirectoryList yields files (and optionally dirs) in order,
    without following symlinks."""
    # (name, type[, link target]) where type is D=dir, F=file, L=symlink
    dirstruct = [
        ('a1', 'F'),
        ('d1', 'D'),
        ('d1/f11', 'F'),
        ('d1/f12', 'F'),
        ('d1/f13', 'L', '/tmp'),
        ('d1/f14', 'L', '/dev/null'),
        ('d12', 'F'),
        ('d2', 'D'),
        ('d2/d21', 'D'),
        ('d2/d21/d31', 'D'),
        ('f3', 'F'),
    ]
    topdir = tempfile.mkdtemp()
    # Create the directory structure
    for tup in dirstruct:
        fname, ftype = tup[:2]
        fullfname = os.path.join(topdir, fname)
        if ftype == 'D':
            os.mkdir(fullfname)
            continue
        if ftype == 'F':
            # FIX: close the handle immediately (the old code leaked
            # one open file descriptor per created file).
            open(fullfname, "w+").close()
            continue
        # Link
        linksrc = tup[2]
        os.symlink(linksrc, fullfname)
    # Without dirs: files and symlinks only, depth-first sorted.
    expected = ['a1', 'd1/f11', 'd1/f12', 'd1/f13', 'd1/f14', 'd12', 'f3']
    expected = [os.path.join(topdir, f) for f in expected]
    actual = [f for f in util.recurseDirectoryList(topdir)]
    self.assertEqual(actual, expected)
    # With dirs: every entry plus topdir itself, in traversal order.
    expected = [
        'a1', 'd1', 'd1/f11', 'd1/f12', 'd1/f13', 'd1/f14', 'd12',
        'd2', 'd2/d21', 'd2/d21/d31', 'f3'
    ]
    expected = [os.path.join(topdir, f) for f in expected]
    expected[0:0] = [topdir]
    actual = [f for f in util.recurseDirectoryList(topdir, withDirs=True)]
    self.assertEqual(actual, expected)
    # Cleanup
    util.rmtree(topdir)
def testCopyTree(self):
    """copytree works both with and without a trailing slash on the source."""
    # test copying tree with different syntaxes
    d = tempfile.mkdtemp()
    subdir = os.sep.join((d, 'subdir'))
    os.mkdir(subdir)
    fn = os.sep.join((subdir, 'hello'))
    f = open(fn, 'w')
    f.write('hello')
    # FIX: close the handle (the old code leaked it).
    f.close()
    d2 = tempfile.mkdtemp()
    subdir2 = os.sep.join((d2, 'subdir'))
    fn2 = os.sep.join((subdir2, 'hello'))
    util.copytree(subdir, d2)
    assert(os.path.isdir(subdir2) and os.path.exists(fn2))
    util.rmtree(subdir2)
    # Trailing slash form must behave identically.
    util.copytree(subdir + '/', d2)
    assert(os.path.isdir(subdir2) and os.path.exists(fn2))
    util.rmtree(d)
    # FIX: also remove the destination tree (d2 was leaked before).
    util.rmtree(d2)
def testDontWalkReferencedImageGroups(self):
    """An image-group policy must not descend into a referenced image
    group (group-foo) that is excepted from the policy."""
    self.addComponent('foo:runtime')
    self.addCollection('foo', strongList=['foo:runtime'])
    self.addCollection('group-foo', strongList=['foo'], imageGroup=True)
    # Recipe adds group-foo but excludes it from RecordPolicy.
    recipeStr = """
class GroupUncooked(GroupRecipe):
    name = 'group-uncooked'
    version = '1'
    clearBuildRequires()
    imageGroup = True

    def setup(r):
        r.add('foo')
        r.add('group-foo')
        r.RecordPolicy(exceptions = 'group-foo')
"""
    # Test policy that logs every trove set it is asked to process;
    # '%s' is filled in below with the log file path.
    policyStr = """
from conary.build import policy

class RecordPolicy(policy.ImageGroupEnforcementPolicy):
    def __init__(self, *args, **kwargs):
        self.outputFile = open('%s', 'w')
        policy.ImageGroupEnforcementPolicy.__init__(self, *args, **kwargs)

    def doTroveSet(self, troveSet):
        self.outputFile.write(str(troveSet) + '\\n')
        self.outputFile.flush()

    def __del__(self):
        self.outputFile.close()
"""
    outputDir = tempfile.mkdtemp()
    tmpDir = tempfile.mkdtemp()
    try:
        # Point the build at our scratch policy dir for the duration.
        policyDirs = self.cfg.policyDirs
        self.cfg.policyDirs = [tmpDir]
        outputFile = os.path.join(outputDir, 'log.txt')
        self.registerPolicy(tmpDir, policyStr % outputFile)
        grp = self.build(recipeStr, "GroupUncooked")
        data = open(outputFile).read()
        # group-foo was excepted, so the policy must never have seen it.
        self.assertFalse("group-foo" in data,
                         "this trove should not have been mentioned")
    finally:
        util.rmtree(tmpDir)
        util.rmtree(outputDir)
        self.cfg.policyDirs = policyDirs
def testPluginLoading(self):
    """End-to-end PluginManager test: import failures become warnings,
    duplicate plugin names are skipped, dotfiles and missing dirs are
    ignored, package-style plugins load, hooks dispatch to every plugin,
    and plugins can be unloaded."""
    d = tempfile.mkdtemp(prefix='rmake-plugintest')
    d2 = tempfile.mkdtemp(prefix='rmake-plugintest2')
    try:
        self.writeFile(d + '/fail.py', failedPlugin)
        self.writeFile(d + '/pass.py', plugin1)
        # this second pass.py is later on the directory list and
        # so should get skipped.
        self.writeFile(d2 + '/pass.py', failedPlugin)
        self.writeFile(d2 + '/toomany.py', tooManyPlugins)
        # dotfiles must not be treated as plugins
        self.writeFile(d + '/.backup', 'badplugincontents')
        # a package-style (directory) plugin with submodules
        compDir = d2 + '/comp'
        util.mkdirChain(compDir)
        self.writeFile(compDir + '/__init__.py', complicatedPlugin)
        self.writeFile(compDir + '/part1.py', complicatedPlugin_part1)
        self.writeFile(compDir + '/part2.py', complicatedPlugin_part2)
        # a nonexistent plugin dir must be silently skipped
        mgr = pluginlib.PluginManager(
            [d, d2, d2 + 'somedirthatdoesntexist'])
        self.logFilter.add()
        mgr.loadPlugins()
        # exactly two import failures: fail.py and toomany.py
        assert (len(self.logFilter.records) == 2)
        self.logFilter.records.sort()
        expected = [
            "warning: Failed to import plugin %s/fail.py: "
            "name 'b' is not defined" % d,
            "warning: Failed to import plugin %s/toomany.py: "
            "Can only define one plugin in a plugin module" % d2,
        ]
        expected.sort()
        for record, exp in zip(self.logFilter.records, expected):
            assert record.startswith(
                exp), "%s does not start with %s" % (record, exp)
        # call function foo for all hooks; each call increments the
        # plugins' internal counters, which show up in the output
        rc, txt = self.captureOutput(mgr.callHook, 'all', 'foo')
        rc, txt2 = self.captureOutput(mgr.callHook, 'all', 'foo')
        assert (txt == 'comp: 1\nblah: 1\n')
        assert (txt2 == 'comp: 2\nblah: 2\n')
        # after unloading, only the remaining plugin responds
        mgr.unloadPlugin('comp')
        rc, txt3 = self.captureOutput(mgr.callHook, 'all', 'foo')
        assert (txt3 == 'blah: 3\n')
    finally:
        util.rmtree(d)
        util.rmtree(d2)
def testRegularFileContents(self):
    """RegularFile accepts both a StringIO and a real file as contents."""
    foo = filetypes.RegularFile(contents=StringIO('foo1'))
    fileObj = foo.get(pathId)
    f = foo.getContents()
    self.assertEquals(f.read(), 'foo1')
    tmpDir = tempfile.mkdtemp()
    try:
        tmpPath = os.path.join(tmpDir, 'foo.txt')
        f = open(tmpPath, 'w')
        f.write('foo2')
        f.close()
        # FIX: keep the input handle in its own name so it can be
        # closed; the old code rebound 'f' over it and leaked it.
        srcFile = open(tmpPath)
        try:
            foo = filetypes.RegularFile(contents=srcFile)
            f = foo.getContents()
            self.assertEquals(f.read(), 'foo2')
        finally:
            # Close only after getContents() has been consumed.
            srcFile.close()
    finally:
        util.rmtree(tmpDir)