Example #1
    def testManagedPolicy(self):
        self.addComponent('foo:doc', '1.0.0', filePrimer=1)
        self.addComponent('foo:runtime', '1.0.0', filePrimer=2)
        self.addCollection('foo', '1.0.0', [':doc', ':runtime'])

        policyStr = """
from conary.build import policy
class AlwaysError(policy.GroupEnforcementPolicy):
    def doProcess(self, recipe):
        self.recipe.reportErrors("Automatic error")
"""

        policyPath = os.path.join(self.cfg.root, 'policy', 'errpolicy.py')
        # we're effectively creating /tmp/_/root/tmp/_/root/policy/...
        # we're doing this so that the system db in /tmp/_/root will
        # match the absolute path of the actual policy file.
        self.addComponent('errorpolicy:runtime',
                          fileContents=[(policyPath, policyStr)])
        self.updatePkg('errorpolicy:runtime')

        try:
            policyDirs = self.cfg.policyDirs
            self.cfg.policyDirs = [os.path.dirname(policyPath)]
            enforceManagedPolicy = self.cfg.enforceManagedPolicy
            self.cfg.enforceManagedPolicy = True
            util.mkdirChain(os.path.dirname(policyPath))
            f = open(policyPath, 'w')
            f.write(policyStr)
            f.close()
            self.assertRaises(policy.PolicyError, self.build,
                              simpleGroupRecipe, 'GroupSimpleAdd')
        finally:
            self.cfg.enforceManagedPolicy = enforceManagedPolicy
            self.cfg.policyDirs = policyDirs
            util.rmtree(os.path.dirname(policyPath))
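Note: util.mkdirChain in these examples is Conary's helper (conary.lib.util.mkdirChain) for creating a directory along with any missing parents, without failing when the directory already exists. A minimal pure-Python sketch of that behavior, shown here only for orientation and not taken from the Conary sources:

import errno
import os

def mkdir_chain(*paths):
    # Create each path and any missing parent directories; an
    # already-existing directory is not treated as an error.
    for path in paths:
        try:
            os.makedirs(path)
        except OSError, err:
            if err.errno != errno.EEXIST:
                raise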
Example #2
 def write_xml(self, data, subdir=None):
     if subdir:
         util.mkdirChain(subdir)
         os.chdir(subdir)
     for filename, xml in data.iteritems():
         with open(filename, 'w') as f:
             f.write(xml)        
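write_xml above leaves the process working directory changed to subdir after it returns. A hypothetical variant, not taken from the original project, that keeps the working directory intact by joining subdir into each output path:

 def write_xml(self, data, subdir=None):
     if subdir:
         util.mkdirChain(subdir)
     for filename, xml in data.iteritems():
         # Join the subdirectory into the path instead of chdir'ing into it.
         path = os.path.join(subdir, filename) if subdir else filename
         with open(path, 'w') as f:
             f.write(xml)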
Example #3
    def testVirtualHardwareVersion(self):
        img = self.img
        Mocked = set(['mkfs.ext3', 'tune2fs', ])
        origLogCall = raw_hd_image.logCall
        logCallArgs = []
        def mockLogCall(cmd, **kw):
            logCallArgs.append((cmd, kw))
            if ((isinstance(cmd, list) and cmd[0] in Mocked)
                    or cmd.startswith('mount') or cmd.startswith('umount')
                    or cmd.startswith('chroot')):
                return
            return origLogCall(cmd, **kw)
        self.mock(raw_hd_image, 'logCall', mockLogCall)
        self.mock(bootable_image, 'logCall', mockLogCall)
        self.mock(bootable_image.loophelpers, 'logCall', mockLogCall)
        mknodArgs = []
        def mockMknod(*args):
            mknodArgs.append(args)
        self.mock(os, 'mknod', mockMknod)

        chmodArgs = []
        def mockChmod(*args):
            chmodArgs.append(args)
        self.mock(os, 'chmod', mockChmod)

        util.mkdirChain(os.path.join(img.root, "root"))
        file(os.path.join(img.root, "root", "conary-tag-script.in"), "w").write(
                "echo nothing here")
        util.mkdirChain(img.changesetDir)

        mock.mockMethod(img.downloadChangesets)
        mock.mockMethod(img.postOutput)
        mock.mockMethod(img.loadRPM)
        mock.mock(bootable_image.Filesystem, '_get_uuid')
        bootable_image.Filesystem._get_uuid()._mock.setDefaultReturn('abc123')
        self.mock(img, 'updateGroupChangeSet', lambda x: None)
        img.write()
Example #4
    def testRefreshRecipe(self):
        self.cfg.sourceSearchDir = self.workDir + '/source'
        self.buildCfg.sourceSearchDir = self.workDir + '/source'
        util.mkdirChain(self.cfg.sourceSearchDir)
        autoSourceFile = self.cfg.sourceSearchDir + '/autosource'
        self.writeFile(autoSourceFile, 'contents\n')
        self.makeSourceTrove('auto', autoSourceRecipe)
        os.chdir(self.workDir)
        self.checkout('auto')
        os.chdir('auto')

        self.writeFile(autoSourceFile, 'contents2\n')
        self.refresh()

        repos = self.openRmakeRepository()
        helper = self.getRmakeHelper()
        (n,v,f) = self.captureOutput(buildcmd.getTrovesToBuild,
                                     self.buildCfg,
                                     helper.getConaryClient(),
                                     ['auto.recipe'], message='foo')[0][0]
        trv = repos.getTrove(n,v,f)
        filesToGet = []
        for pathId, path, fileId, fileVersion in trv.iterFileList():
            if path == 'autosource':
                filesToGet.append((fileId, fileVersion))
        contents = repos.getFileContents(filesToGet)[0]
        assert(contents.get().read() == 'contents2\n')
Example #5
    def testManagedPolicy(self):
        self.addComponent('foo:doc', '1.0.0', filePrimer = 1)
        self.addComponent('foo:runtime', '1.0.0', filePrimer = 2)
        self.addCollection('foo', '1.0.0', [':doc', ':runtime'])

        policyStr = """
from conary.build import policy
class AlwaysError(policy.GroupEnforcementPolicy):
    def doProcess(self, recipe):
        self.recipe.reportErrors("Automatic error")
"""

        policyPath = os.path.join(self.cfg.root, 'policy', 'errpolicy.py')
        # we're effectively creating /tmp/_/root/tmp/_/root/policy/...
        # we're doing this so that the system db in /tmp/_/root will
        # match the absolute path of the actual policy file.
        self.addComponent('errorpolicy:runtime',
                fileContents = [(policyPath, policyStr)])
        self.updatePkg('errorpolicy:runtime')

        try:
            policyDirs = self.cfg.policyDirs
            self.cfg.policyDirs = [os.path.dirname(policyPath)]
            enforceManagedPolicy = self.cfg.enforceManagedPolicy
            self.cfg.enforceManagedPolicy = True
            util.mkdirChain(os.path.dirname(policyPath))
            f = open(policyPath, 'w')
            f.write(policyStr)
            f.close()
            self.assertRaises(policy.PolicyError, self.build,
                    simpleGroupRecipe, 'GroupSimpleAdd')
        finally:
            self.cfg.enforceManagedPolicy = enforceManagedPolicy
            self.cfg.policyDirs = policyDirs
            util.rmtree(os.path.dirname(policyPath))
Example #6
    def testSet(self):
        util.mkdirChain(self.top + "/first")
        util.mkdirChain(self.top + "/second")
        first = DataStore(self.top + "/first")
        second = DataStore(self.top + "/second")

        self._testDataStore(DataStoreSet(first, second))
Example #7
 def open(self, path, mode):
     if isinstance(path, int):
         logfd = path
     else:
         util.mkdirChain(os.path.dirname(path))
         logfd = os.open(path, os.O_CREAT | os.O_APPEND | os.O_WRONLY)
     self.fd = logfd
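One detail about the os.open call above: when O_CREAT actually creates the file, its permission bits come from os.open's optional third argument, which defaults to 0777 (masked by the process umask). A caller wanting predictable permissions on a freshly created log file would pass a mode explicitly, for example:

         logfd = os.open(path, os.O_CREAT | os.O_APPEND | os.O_WRONLY, 0644)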
Example #8
File: shadow.py Project: sassoftware/bob
def tempSourceTrove(recipePath, package, helper):
    from conary import state
    from conary import checkin
    from conary import trove
    from conary.lib import util as cnyutil
    pkgname = package.name.split(':')[0]
    nvf, cs = _makeSourceTrove(package, helper)
    targetDir = os.path.join(os.path.dirname(recipePath), pkgname)
    cnyutil.mkdirChain(targetDir)
    sourceStateMap = {}
    pathMap = {}
    conaryStateTargets = {}
    troveCs = cs.getNewTroveVersion(*nvf)
    trv = trove.Trove(troveCs)
    sourceState = state.SourceState(nvf[0], nvf[1], nvf[1].branch())
    if trv.getFactory():
        sourceState.setFactory(trv.getFactory())
    conaryState = state.ConaryState(helper.cfg.context, sourceState)
    sourceStateMap[trv.getNameVersionFlavor()] = sourceState
    conaryStateTargets[targetDir] = conaryState
    for (pathId, path, fileId, version) in troveCs.getNewFileList():
        pathMap[(nvf, path)] = (targetDir, pathId, fileId, version)
    # Explode changeset contents.
    checkin.CheckoutExploder(cs, pathMap, sourceStateMap)
    # Write out CONARY state files.
    for targetDir, conaryState in conaryStateTargets.iteritems():
        conaryState.write(targetDir + '/CONARY')
    return trv, targetDir
Example #9
 def vcInit(self):
     util.mkdirChain(self.srcRepoPath)
     os.system('cd %s; git init --bare;'
               ' git config user.name author;'
               ' git config user.email [email protected];'
               ' git add . foo; git commit -a -m "initialized"' %
               self.srcRepoPath)
Example #10
def tempSourceTrove(recipePath, package, helper):
    from conary import state
    from conary import checkin
    from conary import trove
    from conary.lib import util as cnyutil
    pkgname = package.name.split(':')[0]
    nvf, cs = _makeSourceTrove(package, helper)
    targetDir = os.path.join(os.path.dirname(recipePath), pkgname)
    cnyutil.mkdirChain(targetDir)
    sourceStateMap = {}
    pathMap = {}
    conaryStateTargets = {}
    troveCs = cs.getNewTroveVersion(*nvf)
    trv = trove.Trove(troveCs)
    sourceState = state.SourceState(nvf[0], nvf[1], nvf[1].branch())
    if trv.getFactory():
        sourceState.setFactory(trv.getFactory())
    conaryState = state.ConaryState(helper.cfg.context, sourceState)
    sourceStateMap[trv.getNameVersionFlavor()] = sourceState
    conaryStateTargets[targetDir] = conaryState
    for (pathId, path, fileId, version) in troveCs.getNewFileList():
        pathMap[(nvf, path)] = (targetDir, pathId, fileId, version)
    # Explode changeset contents.
    checkin.CheckoutExploder(cs, pathMap, sourceStateMap)
    # Write out CONARY state files.
    for targetDir, conaryState in conaryStateTargets.iteritems():
        conaryState.write(targetDir + '/CONARY')
    return trv, targetDir
Example #11
 def vcInit(self):
     util.mkdirChain(os.path.dirname(self.srcRepoPath))
     os.system('svnadmin create %s' % self.srcRepoPath)
     os.system('svn -q mkdir file://localhost%s/tags --message message' %
               self.srcRepoPath)
     os.system('svn -q mkdir file://localhost%s/trunk --message message' %
               self.srcRepoPath)
Example #12
 def touch(self, fn):
     d = os.path.dirname(fn)
     if not os.path.exists(fn):
         util.mkdirChain(d)
         f = open(fn, 'w')
         f.write('')
         f.close()
Example #13
File: local.py Project: pombreda/spanner
 def __init__(self, uri, branch, cache='_cache'):
     self.uri = uri
     self.branch = branch
     dirPath = self.uri.split('//', 1)[-1]
     dirPath = dirPath.replace('/', '_')
     self.repoDir = os.path.join(cache, dirPath, 'local')
     conary_util.mkdirChain(self.repoDir)
Example #14
    def addScsiModules(self):
        # FIXME: this part of the code needs a rewrite, because any
        # bootable image type / distro combination may need different
        # drivers to be specified here.  It's not a simple True/False.
        # Also, 'Raw HD Image' means QEMU/KVM to me, but someone else
        # might be using it with another environment.
        filePath = self.filePath("etc/modprobe.conf")
        if self.jobData["buildType"] == buildtypes.AMI:
            moduleList = ["xenblk"]
        else:
            moduleList = ["mptbase", "mptspi"]

        if is_SUSE(self.root):
            filePath = filePath + ".local"
            if self.jobData["buildType"] == buildtypes.RAW_HD_IMAGE:
                self.scsiModules = True
                moduleList = ["piix"]

        if not self.scsiModules:
            return

        if not os.path.exists(filePath):
            log.warning("%s not found while adding scsi modules" % os.path.basename(filePath))

        util.mkdirChain(os.path.split(filePath)[0])
        f = open(filePath, "a")
        if os.stat(filePath)[6]:
            f.write("\n")
        for idx in range(0, len(moduleList)):
            f.write("alias scsi_hostadapter%s %s\n" % (idx and idx or "", moduleList[idx]))
        f.close()
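With the non-SUSE module list above, the loop appends one alias line per entry, leaving the first unnumbered (an idx of 0 formats as the empty string), so the appended block looks like:

alias scsi_hostadapter mptbase
alias scsi_hostadapter1 mptspi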
Example #15
 def setup(self):
     defaults = util.joinPaths(self.image_root, 'etc', 'default', 'grub')
     util.mkdirChain(os.path.dirname(defaults))
     if not os.path.exists(defaults) or not os.lstat(defaults).st_size:
         with open(defaults, 'w') as f_defaults:
             print >> f_defaults, '# Defaults set by rBuilder'
             print >> f_defaults, 'GRUB_DISABLE_RECOVERY=true'
Example #16
 def testGetCapsulesTroveList(self):
     # make sure that getCapsulesTroveList is at least not removed...
     from conary.lib import util
     d = tempfile.mkdtemp()
     util.mkdirChain(d + '/var/lib/conarydb/conarydb/')
     db = database.Database(d, '/var/lib/conarydb/conarydb')
     db.getCapsulesTroveList(db.iterAllTroves())
Example #17
 def _mount_dev(self):
     # Temporarily bind-mount the jobslave /dev into the chroot so
     # grub2-install can see the loop device it's targeting.
     logCall("mount -o bind /dev %s/dev" %  self.image_root)
     # /etc/grub.d/10_linux tries to find the backing device for loop
     # devices, on the assumption that it's a block device with cryptoloop
     # on top. Replace losetup with a stub while running mkconfig so it
     # keeps the loop device name and all the right UUIDs get emitted.
     losetup = util.joinPaths(self.image_root, '/sbin/losetup')
     os.rename(losetup, losetup + '.bak')
     with open(losetup, 'w') as f_losetup:
         print >> f_losetup, '#!/bin/sh'
         print >> f_losetup, 'echo "$1"'
     os.chmod(losetup, 0755)
     # In order for the root device to be detected as a FS UUID and not
     # /dev/loop0 there needs to be a link in /dev/disk/by-uuid, which
     # doesn't happen with the jobmaster's containerized environment.
     link_path = None
     if self.root_device.uuid:
         link_path = util.joinPaths(self.image_root, '/dev/disk/by-uuid',
                 self.root_device.uuid)
         util.mkdirChain(os.path.dirname(link_path))
         util.removeIfExists(link_path)
         os.symlink(self.root_device.devPath, link_path)
     try:
         yield
     finally:
         try:
             if link_path:
                 os.unlink(link_path)
             os.rename(losetup + '.bak', losetup)
             logCall("umount %s/dev" %  self.image_root)
         except:
             pass
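_mount_dev above yields exactly once inside a try/finally, which is the shape of a generator meant to be driven as a context manager (presumably via contextlib.contextmanager; the decorator is not shown here, so that is an assumption). A small self-contained sketch of the same setup/teardown pattern:

import subprocess
from contextlib import contextmanager

@contextmanager
def bind_mount(src, dst):
    # Bind-mount src over dst, then undo it when the with-block exits.
    subprocess.check_call(['mount', '-o', 'bind', src, dst])
    try:
        yield
    finally:
        subprocess.check_call(['umount', dst])

# usage:
# with bind_mount('/dev', image_root + '/dev'):
#     ...run grub2-install against the image root...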
Example #18
        def _install(jobList):
            self.cfg.flavor = []
            openpgpkey.getKeyCache().setPublicPath(self.cfg.root +
                                                   '/root/.gnupg/pubring.gpg')
            openpgpkey.getKeyCache().setPrivatePath(self.cfg.root +
                                                    '/root/.gnupg/secring.gpg')
            self.cfg.pubRing = [self.cfg.root + '/root/.gnupg/pubring.gpg']
            client = conaryclient.ConaryClient(self.cfg)
            client.setUpdateCallback(self.callback)
            if self.csCache:
                changeSetList = self.csCache.getChangeSets(
                    client.getRepos(), jobList, callback=self.callback)
            else:
                changeSetList = []

            updJob = client.newUpdateJob()
            try:
                client.prepareUpdateJob(updJob,
                                        jobList,
                                        keepExisting=False,
                                        resolveDeps=False,
                                        recurse=False,
                                        checkPathConflicts=False,
                                        fromChangesets=changeSetList,
                                        migrate=True)
            except conaryclient.update.NoNewTrovesError:
                # since we're migrating, this simply means there were no
                # operations to be performed
                pass
            else:
                util.mkdirChain(self.cfg.root + '/root')
                client.applyUpdate(updJob,
                                   replaceFiles=True,
                                   tagScript=self.cfg.root +
                                   '/root/tagscripts')
Example #19
    def testQueue(self):
        root = self.cfg.root
        root0 = root + '/foo'
        root1 = root + '/foo-1'
        root2 = root + '/foo-2'
        root3 = root + '/foo-3'

        queue = rootmanager.ChrootQueue(root, 2) # limit of two chroots
        self.assertEquals(queue.requestSlot('foo', [], True),
                          (None, root0))
        self.assertEquals(queue.requestSlot('foo', [], True),
                          (None, root1))
        self.assertEquals(queue.requestSlot('foo', [], True), None)


        util.mkdirChain(root0)
        queue.chrootFinished(root0)
        self.assertEquals(sorted(queue.listOldChroots()), [root0])
        self.assertEquals(queue.requestSlot('foo', [], True), (root0, root2))
        util.mkdirChain(root2)
        util.rmtree(root0)
        queue.deleteChroot(root0)
        self.assertEquals(queue.requestSlot('foo', [], True), None)

        queue.markBadChroot(root2)
        # we can't reuse root2 anymore - it's marked as bad.  But that means 
        # it's no longer using a space, so we can add a chroot
        self.assertEquals(queue.requestSlot('foo', [], True), (None, root0))
        self.assertEquals(queue.requestSlot('foo', [], True), None)

        def _shorten(x):
            return x[len(root)+1:]
        self.assertEquals(sorted(queue.listChroots()), [_shorten(x) for x in (root0, root1)])
        self.assertEquals(sorted(queue.listOldChroots()), [])
Example #20
    def setup(self):
        util.mkdirChain(util.joinPaths(self.image_root, 'boot', 'grub'))
        # path to grub stage1/stage2 files in rPL/rLS
        util.copytree(
            util.joinPaths(self.image_root, 'usr', 'share', 'grub', '*', '*'),
            util.joinPaths(self.image_root, 'boot', 'grub'))
        # path to grub files in SLES
        if is_SUSE(self.image_root):
            util.copytree(
                util.joinPaths(self.image_root, 'usr', 'lib', 'grub', '*'),
                util.joinPaths(self.image_root, 'boot', 'grub'))
        if is_UBUNTU(self.image_root):
            # path to grub files in x86 Ubuntu
            util.copytree(
                util.joinPaths(self.image_root, 'usr', 'lib', 'grub', 'i386-pc', '*'),
                util.joinPaths(self.image_root, 'boot', 'grub'))
            # path to grub files in x86_64 Ubuntu
            util.copytree(
                util.joinPaths(self.image_root, 'usr', 'lib', 'grub', 'x86_64-pc', '*'),
                util.joinPaths(self.image_root, 'boot', 'grub'))
        util.mkdirChain(util.joinPaths(self.image_root, 'etc'))

        # Create a stub grub.conf
        self.writeConf()

        # Create the appropriate links
        if self._get_grub_conf() != 'menu.lst':
            os.symlink('grub.conf', util.joinPaths(
                self.image_root, 'boot', 'grub', 'menu.lst'))
            os.symlink('../boot/grub/grub.conf',
                       util.joinPaths(self.image_root, 'etc', 'grub.conf'))
        if is_SUSE(self.image_root):
            self._suse_grub_stub()
Example #21
 def write_xml(self, data, subdir=None):
     if subdir:
         util.mkdirChain(subdir)
         os.chdir(subdir)
     for filename, xml in data.iteritems():
         with open(filename, 'w') as f:
             f.write(xml)
Example #22
    def _addDeviceNodes(self):
        if os.getuid():  # can only make device nodes as root
            util.mkdirChain("%s/dev" % self.root)
            return

        for devNode in self.devNodes:
            os.system("/sbin/MAKEDEV -d %s/dev/ -D /dev -x %s" % (self.root, devNode))
Example #23
    def testUserInfoRecipeCook(self):
        userInfoRecipe = """
class UserInfoRecipe(UserGroupInfoRecipe, BaseRequiresRecipe):
    name = 'userinfo'
    version = '1'
    abstractBaseClass = 1"""
        for stubComp in (
            'bash:runtime',
            'conary-build:lib',
            'conary-build:python',
            'conary-build:runtime',
            'conary:python',
            'conary:runtime',
            'coreutils:runtime',
            'dev:runtime',
            'filesystem:runtime',
            'findutils:runtime',
            'gawk:runtime',
            'grep:runtime',
            'python:lib',
            'python:runtime',
            'sed:runtime',
            'setup:runtime',
            'sqlite:lib',
        ):
            self.addComponent(stubComp)
            self.updatePkg(stubComp)
        laDir = os.path.join(self.cfg.lookaside, 'userinfo')
        util.mkdirChain(laDir)
        open(os.path.join(laDir, 'userinfo.recipe'), 'w').write('')
        self.cfg.baseClassDir = '/usr/share/conary/baseclasses'
        res = self.buildRecipe(userInfoRecipe, 'UserInfoRecipe')
        self.assertEquals(res[0][0][0], 'userinfo:recipe')
Example #24
File: fsrepos.py Project: tensor5/conary
    def __init__(self, serverNameList, troveStore, contentsDir, repositoryMap,
                 requireSigs = False, paranoidCommits = False):
        self.serverNameList = serverNameList
        self.paranoidCommits = paranoidCommits
        map = dict(repositoryMap)
        for serverName in serverNameList:
            map[serverName] = self
        self.troveStore = troveStore
        self.requireSigs = requireSigs

        storeType, paths = contentsDir
        if storeType == CfgContentStore.LEGACY:
            storeClass = DataStore
        elif storeType == CfgContentStore.SHALLOW:
            storeClass = ShallowDataStore
        elif storeType == CfgContentStore.FLAT:
            storeClass = FlatDataStore
        else:
            raise ValueError("Invalid contentsDir type %r" % (storeType,))

        stores = []
        for path in paths:
            util.mkdirChain(path)
            stores.append(storeClass(path))
        if len(stores) == 1:
            store = stores[0]
        else:
            store = DataStoreSet(*stores)

        DataStoreRepository.__init__(self, dataStore = store)
        AbstractRepository.__init__(self)
Example #25
    def testReadOnly(self):
        fooRun = self.addComponent('foo:runtime', '1',
                                   [('/foo', 'hello world!\n'),
                                    ('/bar', 'goodbye world!\n')])
        fileDict = dict((x[1], (x[2], x[3])) for x in fooRun.iterFileList())
        fooFile = fileDict['/foo']
        barFile = fileDict['/bar']
        repos = self.openRepository()
        cacheDir = self.workDir + '/cache'
        util.mkdirChain(cacheDir)
        store = repocache.RepositoryCache(cacheDir)
        # store it in the cache
        assert (store.getFileContents(
            repos, [fooFile])[0].get().read() == 'hello world!\n')
        store = repocache.RepositoryCache(cacheDir, readOnly=True)
        assert (store.getFileContents(
            repos, [barFile])[0].get().read() == 'goodbye world!\n')
        assert (len(os.listdir(cacheDir)) == 1)  # for /foo

        store.getTroves(repos, [fooRun.getNameVersionFlavor()])
        assert (len(os.listdir(cacheDir)) == 1)  # nothing added

        # now try adding that missing file.  Make sure we get /foo from
        # the cache by removing it from the repository.
        self.resetRepository()
        fooRun = self.addComponent('foo:runtime', '1',
                                   [('/bar', 'goodbye world!\n')])
        store = repocache.RepositoryCache(cacheDir, readOnly=False)
        assert (store.getFileContents(
            repos, [barFile])[0].get().read() == 'goodbye world!\n')
        assert (len(os.listdir(cacheDir)) == 2)  # /bar is now added

        store.getTroves(repos, [fooRun.getNameVersionFlavor()])
        assert (len(os.listdir(cacheDir)) == 3)  # fooRun now added
Example #26
 def touch(self, fn, contents=''):
     if os.path.exists(fn):
         return
     util.mkdirChain(os.path.dirname(fn))
     f = open(fn, 'w')
     f.write(contents)
     f.close()
Example #27
 def touch(self, fn):
     d = os.path.dirname(fn)
     if not os.path.exists(fn):
         util.mkdirChain(d)
         f = open(fn, 'w')
         f.write('')
         f.close()
Example #28
    def getRootFactory(self, cfg, buildReqList, crossReqList, bootstrapReqs,
            buildTrove):
        cfg = copy.deepcopy(cfg)
        cfg.threaded = False

        cfg.logFile = '/var/log/conary'
        cfg.dbPath = '/var/lib/conarydb'

        setArch, targetArch = flavorutil.getTargetArch(buildTrove.flavor)

        if not setArch:
            targetArch = None

        chrootClass = rootfactory.FullRmakeChroot
        util.mkdirChain(self.baseDir)
        copyInConary = (not targetArch
                        and not cfg.strictMode
                        and cfg.copyInConary)

        chroot = chrootClass(buildTrove,
                             self.chrootHelperPath,
                             cfg, self.serverCfg, buildReqList, crossReqList,
                             bootstrapReqs, self.logger,
                             csCache=self.csCache,
                             chrootCache=self.chrootCache,
                             copyInConary=copyInConary)
        buildLogPath = self.serverCfg.getBuildLogPath(buildTrove.jobId)
        chrootServer = rMakeChrootServer(chroot, targetArch,
                chrootQueue=self.queue, useTmpfs=self.serverCfg.useTmpfs,
                buildLogPath=buildLogPath, reuseRoots=cfg.reuseRoots,
                strictMode=cfg.strictMode, logger=self.logger,
                buildTrove=buildTrove, chrootCaps=self.serverCfg.chrootCaps)

        return chrootServer
Example #29
    def __init__(self, serverNameList, troveStore, contentsDir, repositoryMap,
                 requireSigs = False, paranoidCommits = False):
        self.serverNameList = serverNameList
        self.paranoidCommits = paranoidCommits
        map = dict(repositoryMap)
        for serverName in serverNameList:
            map[serverName] = self
        # XXX this client needs to die
        from conary import conarycfg
        self.reposSet = netclient.NetworkRepositoryClient(map,
                                    conarycfg.UserInformation())
        self.troveStore = troveStore

        self.requireSigs = requireSigs
        for dir in contentsDir:
            util.mkdirChain(dir)

        if len(contentsDir) == 1:
            store = DataStore(contentsDir[0])
        else:
            storeList = []
            for dir in contentsDir:
                storeList.append(DataStore(dir))

            store = DataStoreSet(*storeList)

        DataStoreRepository.__init__(self, dataStore = store)
        AbstractRepository.__init__(self)
Example #30
    def testAbortOnErrorInPreScript(self):
        """ This makes sure that the abortOnError option for rollbacks works.
        (CNY-3327).
        """
        failScript = """#!/bin/sh
touch %(root)s/tmp/failed;
exit 5
""" % dict(root=self.rootDir)
        succeedScript = """#!/bin/sh
touch %(root)s/tmp/succeeded
exit 0
"""% dict(root=self.rootDir)
        self.mimicRoot()

        try:
            util.mkdirChain(self.rootDir + '/tmp')

            self.addComponent('foo:runtime', '1.0', filePrimer=1)
            self.addCollection('group-foo', '1.0', [ ('foo:runtime', '1.0' ) ])

            self.addComponent('foo:runtime', '2.0', filePrimer=2)
            self.addCollection('group-foo', '2.0', [ ('foo:runtime', '2.0' ) ],
                               preRollbackScript = rephelp.RollbackScript(
                    script=failScript) )

            self.addComponent('foo:runtime', '3.0', filePrimer=3)
            self.addCollection('group-foo', '3.0', [ ('foo:runtime', '3.0' ) ],
                               preRollbackScript = rephelp.RollbackScript(
                    script=succeedScript) )

            self.updatePkg('group-foo=1.0')
            self.updatePkg('group-foo=2.0')
            self.updatePkg('group-foo=3.0')

            # this one should succeed without problems
            self.rollback(self.rootDir, 2, abortOnError=True)
            # this one should fail with a preScriptError
            self.assertRaises(database.PreScriptError, self.rollback,
                                  self.rootDir, 1, abortOnError=True)

            # now we do it the old way and everything should succeed
            self.addComponent('foo:runtime', '4.0', filePrimer=4)
            self.addCollection('group-foo', '4.0', [ ('foo:runtime', '4.0' ) ],
                               preRollbackScript = rephelp.RollbackScript(
                    script=failScript) )

            self.addComponent('foo:runtime', '5.0', filePrimer=5)
            self.addCollection('group-foo', '5.0', [ ('foo:runtime', '5.0' ) ],
                               preRollbackScript = rephelp.RollbackScript(
                    script=succeedScript ) )

            self.updatePkg('group-foo=4.0')
            self.updatePkg('group-foo=5.0')

            # these should succeed
            self.rollback(self.rootDir, 2)
            self.rollback(self.rootDir, 1)
            self.rollback(self.rootDir, 0)
        finally:
            self.realRoot()
Example #31
def _install():
    coverageLoc = os.environ.get('COVERAGE_PATH', None)
    if not coverageLoc:
        raise RuntimeError, 'cannot find coverage.py!'
    else:
        coverageLoc = coverageLoc + '/coverage.py'

    coverageDir = os.environ.get('COVERAGE_DIR', None)
    if not coverageDir:
        raise RuntimeError, 'COVERAGE_DIR must be set to a path for cache file'
    util.mkdirChain(coverageDir)

    if ('coverage' in sys.modules
        and (sys.modules['coverage'].__file__ == coverageLoc
             or sys.modules['coverage'].__file__ == coverageLoc + 'c')):
        coverage = sys.modules['coverage']
    else:
        coverage = imp.load_source('coverage', coverageLoc)
    the_coverage = coverage.the_coverage
    if hasattr(the_coverage, 'pid') and the_coverage.pid == os.getpid():
        _run(coverage)
        return
    elif hasattr(the_coverage, 'pid'):
        _reset(coverage)

    _installOsWrapper()
    _run(coverage)
    return
Example #32
    def writeDeviceMaps(self):
        # first write a grub device map
        filePath = self.filePath("boot/grub/device.map")
        util.mkdirChain(os.path.dirname(filePath))
        f = open(filePath, "w")
        hd0 = "/dev/sda"
        f.write("\n".join(("# this device map was generated by rBuilder", "(fd0) /dev/fd0", "(hd0) %s" % hd0, "")))
        f.close()

        # next write a blkid cache file
        dev = "/dev/sda1"
        devno = "0x0801"
        # get the uuid of the root filesystem
        p = subprocess.Popen(
            "tune2fs -l /dev/loop0 | grep UUID | awk '{print $3}'",
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        stdout, stderr = p.communicate()
        uuid = stdout.strip()
        root = self.filesystems["/"]
        blkid = '<device DEVNO="%s" TIME="%s" LABEL="%s" ' 'UUID="%s" TYPE="%s">%s</device>\n' % (
            devno,
            int(time.time()),
            root.fsLabel,
            uuid,
            root.fsType,
            dev,
        )
        path = self.createFile("etc/blkid/blkid.tab", blkid)
        os.link(path, self.filePath("etc/blkid.tab"))
Example #33
def _install():
    coverageLoc = os.environ.get('COVERAGE_PATH', None)
    if not coverageLoc:
        raise RuntimeError, 'cannot find coverage.py!'
    else:
        coverageLoc = coverageLoc + '/coverage.py'

    coverageDir = os.environ.get('COVERAGE_DIR', None)
    if not coverageDir:
        raise RuntimeError, 'COVERAGE_DIR must be set to a path for cache file'
    util.mkdirChain(coverageDir)

    if ('coverage' in sys.modules
            and (sys.modules['coverage'].__file__ == coverageLoc
                 or sys.modules['coverage'].__file__ == coverageLoc + 'c')):
        coverage = sys.modules['coverage']
    else:
        coverage = imp.load_source('coverage', coverageLoc)
    the_coverage = coverage.the_coverage
    if hasattr(the_coverage, 'pid') and the_coverage.pid == os.getpid():
        _run(coverage)
        return
    elif hasattr(the_coverage, 'pid'):
        _reset(coverage)

    _installOsWrapper()
    _run(coverage)
    return
Example #34
 def testGetDaemon(self):
     raise testsuite.SkipTestException('Fails in bamboo')
     daemonClass = self.generateDaemonClass()
     util.mkdirChain(self.workDir + '/var/log')
     util.mkdirChain(self.workDir + '/var/lock')
     d = daemonClass()
     rv, txt = self.captureOutput(d.main, ['./daemontest', 'start'])
     assert (not rv)
     err, txt = self.captureOutput(d.main, ['./daemontest', 'start'],
                                   _returnException=True)
     assert (isinstance(err, SystemExit))
     assert (err.code == 1)
     assert (re.match(
         '[0-9:]+ - \[foobar\] - error: Daemon already running as pid [0-9]+',
         txt))
     pid = d.getPidFromLockFile()
     rv, txt = self.captureOutput(d.main, ['./daemontest', 'stop'])
     err, txt = self.captureOutput(d.main, ['./daemontest', 'stop'],
                                   _returnException=True)
     txt = open(self.workDir + '/var/log/foobar.log').read()
     assert (re.search(
         "[0-9/]+ [0-9:]+ [A-Z]* - \[foobar\] - warning: unable to open lockfile for reading: %s/var/lock/foobar.pid \(\[Errno 2\] No such file or directory: '%s/var/lock/foobar.pid'\)\n"
         "[0-9/]+ [0-9:]+ [A-Z]* - \[foobar\] - error: could not kill foobar: no pid found.\n"
         % (self.workDir, self.workDir), txt))
     assert (isinstance(err, SystemExit))
     assert (err.code == 1)
Example #35
    def extractMediaTemplate(self, topdir):
        tmpRoot = tempfile.mkdtemp(dir=constants.tmpDir)
        try:
            client = self.getConaryClient(\
                tmpRoot, getArchFlavor(self.baseFlavor).freeze())

            log.info("extracting ad-hoc content from " \
                  "media-template=%s" % client.cfg.installLabelPath[0].asString())
            uJob = self._getUpdateJob(client, "media-template")
            if uJob:
                client.applyUpdate(uJob, callback = self.callback)
                log.info("success: copying media template data to unified tree")

                # copy content into unified tree root. add recurse and no-deref
                # flags to command. following symlinks is really bad in this case.
                oldTemplateDir = os.path.join(tmpRoot,
                                              'usr', 'lib', 'media-template')
                if os.path.exists(oldTemplateDir):
                    call('cp', '-R', '--no-dereference', oldTemplateDir, topdir)
                for tDir in ('all', 'disc1'):
                    srcDir = os.path.join(tmpRoot, tDir)
                    destDir = os.path.join(topdir, 'media-template2')
                    if os.path.exists(srcDir):
                        util.mkdirChain(destDir)
                        call('cp', '-R', '--no-dereference', srcDir, destDir)
            else:
                log.info("media-template not found on repository")
        finally:
            util.rmtree(tmpRoot, ignore_errors = True)
Example #36
    def testReadOnly(self):
        fooRun = self.addComponent('foo:runtime', '1',
                                    [('/foo', 'hello world!\n'),
                                     ('/bar', 'goodbye world!\n')])
        fileDict = dict((x[1], (x[2], x[3])) for x in fooRun.iterFileList())
        fooFile = fileDict['/foo']
        barFile = fileDict['/bar']
        repos = self.openRepository()
        cacheDir = self.workDir + '/cache'
        util.mkdirChain(cacheDir)
        store = repocache.RepositoryCache(cacheDir)
        # store it in the cache
        assert(store.getFileContents(repos, [fooFile])[0].get().read()
                == 'hello world!\n')
        store = repocache.RepositoryCache(cacheDir, readOnly=True)
        assert(store.getFileContents(repos, [barFile])[0].get().read() 
                == 'goodbye world!\n')
        assert(len(os.listdir(cacheDir)) == 1) # for /foo

        store.getTroves(repos, [fooRun.getNameVersionFlavor()])
        assert(len(os.listdir(cacheDir)) == 1) # nothing added

        # now try adding that missing file.  Make sure we get /foo from
        # the cache by removing it from the repository.
        self.resetRepository()
        fooRun = self.addComponent('foo:runtime', '1',
                                   [('/bar', 'goodbye world!\n')])
        store = repocache.RepositoryCache(cacheDir, readOnly=False)
        assert(store.getFileContents(repos, [barFile])[0].get().read()
                == 'goodbye world!\n')
        assert(len(os.listdir(cacheDir)) == 2) # /bar is now added

        store.getTroves(repos, [fooRun.getNameVersionFlavor()])
        assert(len(os.listdir(cacheDir)) == 3) # fooRun now added
Example #37
    def restore(self,
                fileContents,
                root,
                target,
                journal=None,
                nameLookup=True,
                **kwargs):
        util.removeIfExists(target)

        if not journal and os.getuid(): return target

        util.mkdirChain(os.path.dirname(target))

        if journal:
            journal.mknod(root, target, self.lsTag, self.devt.major(),
                          self.devt.minor(), self.inode.perms(),
                          self.inode.owner(), self.inode.group())
        else:
            if self.lsTag == 'c':
                flags = stat.S_IFCHR
            else:
                flags = stat.S_IFBLK
            os.mknod(target, flags,
                     os.makedev(self.devt.major(), self.devt.minor()))

            return File.restore(self,
                                root,
                                target,
                                journal=journal,
                                nameLookup=nameLookup,
                                **kwargs)
        return target
Example #38
    def doMounts(self):
        """
        Mount contents, scratch, devices, etc. This is after the filesystem was
        unshared, so there's no need to ever unmount these -- when the
        container exits, they will be obliterated.
        """
        containerMounts = set()
        for resource, path, readOnly in self.mounts:
            target = os.path.join(self.path, path)
            mkdirChain(target)
            containerMounts.add(target)
            mountRes = resource.mount(target, readOnly)
            mountRes.release()
        for fstype, path in [('proc', '/proc'), ('sysfs', '/sys')]:
            path = self.path + path
            containerMounts.add(path)
            mount(fstype, path, fstype)

        # Try to umount stuff not in this container to avoid blocking those
        # things from being umounted, especially in the case of other
        # jobslaves' scratch disks if they are mounted in the outer OS.
        otherMounts = set()
        for line in open('/proc/mounts'):
            path = line.split()[1]
            otherMounts.add(path)
        otherMounts -= containerMounts
        otherMounts.discard('/')
        for path in otherMounts:
            log.debug("Unmounting %s", path)
            logCall(["/bin/umount", "-dn", path], ignoreErrors=True)
Example #39
    def sanityCheckForStart(self):
        currUser = pwd.getpwuid(os.getuid()).pw_name
        cfgPaths = ['logDir', 'lockDir', 'serverDir']
        socketPath = self.getSocketPath()
        if socketPath:
            if not os.access(os.path.dirname(socketPath), os.W_OK):
                log.error(
                    'cannot write to socketPath directory at %s - cannot start server'
                    % os.path.dirname(socketPath))
                sys.exit(1)

        ret = self._sanityCheckForSSL()
        if ret:
            sys.exit(ret)

        cfgPaths = ['buildDir', 'logDir', 'lockDir', 'serverDir']
        for path in cfgPaths:
            if not os.path.exists(self[path]):
                log.error(
                    '%s does not exist, expected at %s - cannot start server' %
                    (path, self[path]))
                sys.exit(1)
            if not os.access(self[path], os.W_OK):
                log.error(
                    'user "%s" cannot write to %s at %s - cannot start server'
                    % (currUser, path, self[path]))
                sys.exit(1)

        if self.useResolverCache:
            util.mkdirChain(self.getResolverCachePath())
Example #40
File: files.py Project: sweptr/conary
    def restore(self,
                fileContents,
                root,
                target,
                journal=None,
                sha1=None,
                nameLookup=True,
                **kwargs):

        keepTempfile = kwargs.get('keepTempfile', False)
        destTarget = target

        if fileContents is not None:
            # this is first to let us copy the contents of a file
            # onto itself; the unlink helps that to work
            src = fileContents.get()
            inFd = None

            if fileContents.isCompressed() and hasattr(src, '_fdInfo'):
                # inFd is None if we can't figure this information out
                # (for _LazyFile for instance)
                (inFd, inStart, inSize) = src._fdInfo()

            path, name = os.path.split(target)
            if not os.path.isdir(path):
                util.mkdirChain(path)

            # Uncompress to a temporary file, using the accelerated
            # implementation if possible.
            if inFd is not None and util.sha1Uncompress is not None:
                actualSha1, tmpname = util.sha1Uncompress(
                    inFd, inStart, inSize, path, name)
            else:
                if fileContents.isCompressed():
                    src = gzip.GzipFile(mode='r', fileobj=src)
                tmpfd, tmpname = tempfile.mkstemp(name, '.ct', path)
                try:
                    d = digestlib.sha1()
                    f = os.fdopen(tmpfd, 'w')
                    util.copyfileobj(src, f, digest=d)
                    f.close()
                    actualSha1 = d.digest()
                except:
                    os.unlink(tmpname)
                    raise

            if keepTempfile:
                # Make a hardlink "copy" for the caller to use
                destTarget = tmpname + '.ptr'
                os.link(tmpname, destTarget)
            try:
                os.rename(tmpname, target)
            except OSError, err:
                if err.args[0] != errno.EISDIR:
                    raise
                os.rmdir(target)
                os.rename(tmpname, target)

            if (sha1 is not None and sha1 != actualSha1):
                raise Sha1Exception(target)
Example #41
    def testRefreshRecipe(self):
        self.cfg.sourceSearchDir = self.workDir + '/source'
        self.buildCfg.sourceSearchDir = self.workDir + '/source'
        util.mkdirChain(self.cfg.sourceSearchDir)
        autoSourceFile = self.cfg.sourceSearchDir + '/autosource'
        self.writeFile(autoSourceFile, 'contents\n')
        self.makeSourceTrove('auto', autoSourceRecipe)
        os.chdir(self.workDir)
        self.checkout('auto')
        os.chdir('auto')

        self.writeFile(autoSourceFile, 'contents2\n')
        self.refresh()

        repos = self.openRmakeRepository()
        helper = self.getRmakeHelper()
        (n, v, f) = self.captureOutput(buildcmd.getTrovesToBuild,
                                       self.buildCfg,
                                       helper.getConaryClient(),
                                       ['auto.recipe'],
                                       message='foo')[0][0]
        trv = repos.getTrove(n, v, f)
        filesToGet = []
        for pathId, path, fileId, fileVersion in trv.iterFileList():
            if path == 'autosource':
                filesToGet.append((fileId, fileVersion))
        contents = repos.getFileContents(filesToGet)[0]
        assert (contents.get().read() == 'contents2\n')
Example #42
 def testGetCapsulesTroveList(self):
     # make sure that getCapsulesTroveList is at least not removed...
     from conary.lib import util
     d = tempfile.mkdtemp()
     util.mkdirChain(d + '/var/lib/conarydb/conarydb/')
     db = database.Database(d, '/var/lib/conarydb/conarydb')
     db.getCapsulesTroveList(db.iterAllTroves())
Example #43
    def testGzip(self):
        workdir = os.path.join(self.workDir, "archive")
        util.mkdirChain(workdir)
        archivePath = os.path.join(workdir, "file")
        f = file(archivePath, "w+")
        content = "0123456789abcdef"
        for i in range(1024):
            f.write(content)
        f.close()
        cmd = [ 'gzip', archivePath ]
        subprocess.call(cmd)
        archivePath += '.gz'
        a = Archive(archivePath, self._log)
        a.extract()
        self.failUnlessEqual(sorted(x.name for x in a),
            [ 'file' ])
        self.failUnlessEqual(sorted(x.size for x in a),
            [ 16384 ])
        member = list(a)[0]
        fobj = a.extractfile(member)

        self.failUnlessEqual(fobj.size, 16384)
        self.failUnlessEqual(fobj.read(16), "0123456789abcdef")
        self.failUnlessEqual(fobj.tell(), 16)
        fobj.seek(1)
        self.failUnlessEqual(fobj.tell(), 1)
        fobj.close()
Example #44
 def open(self, path, mode):
     if isinstance(path, int):
         logfd = path
     else:
         util.mkdirChain(os.path.dirname(path))
         logfd = os.open(path, os.O_CREAT | os.O_APPEND | os.O_WRONLY)
     self.fd = logfd
Example #45
 def testBuildIsosFailure(self):
     basedir = tempfile.mkdtemp()
     popen = os.popen
     rename = os.rename
     # tested function changes dirs.
     cwd = os.getcwd()
     try:
         os.popen = lambda *args, **kwargs: StringIO.StringIO('734003201')
         os.rename = lambda a, b: None
         topdir = os.path.join(basedir, 'topdir')
         self.touch(os.path.join(topdir, 'images', 'boot.iso'))
         disc1 = os.path.join(basedir, 'disc1')
         util.mkdirChain(disc1)
         g = self.getHandler(buildtypes.INSTALLABLE_ISO)
         g.basefilename = ''
         g.jobData['name'] = 'test build'
         g.jobData['project'] = {}
         g.jobData['project']['name'] = 'test project'
         g.jobData['project']['hostname'] = 'test'
         self.assertRaises(RuntimeError, g.buildIsos, topdir)
     finally:
         os.chdir(cwd)
         os.popen = popen
         os.rename = rename
         util.rmtree(basedir)
Example #46
File: files.py Project: tensor5/conary
 def restore(self, fileContents, root, target, journal=None, nameLookup=True,
             **kwargs):
     util.removeIfExists(target)
     util.mkdirChain(os.path.dirname(target))
     os.mkfifo(target)
     return File.restore(self, root, target, journal=journal,
         nameLookup=nameLookup, **kwargs)
Example #47
 def testBuildIsos(self):
     basedir = tempfile.mkdtemp()
     popen = os.popen
     rename = os.rename
     # tested function changes dirs.
     cwd = os.getcwd()
     try:
         os.popen = lambda *args, **kwargs: StringIO.StringIO('734003201')
         os.rename = lambda a, b: self.touch(b)
         topdir = os.path.join(basedir, 'topdir')
         self.touch(os.path.join(topdir, 'images', 'boot.iso'))
         disc1 = os.path.join(basedir, 'disc1')
         util.mkdirChain(disc1)
         disc2 = os.path.join(basedir, 'disc2')
         self.touch(os.path.join(disc2, 'isolinux', 'isolinux.bin'))
         util.mkdirChain(os.path.join(basedir, 'junk'))
         g = self.getHandler(buildtypes.INSTALLABLE_ISO)
         g.basefilename = 'testcase'
         g.jobData['name'] = 'test build'
         g.jobData['project'] = {}
         g.jobData['project']['name'] = 'test project'
         g.buildIsos(topdir)
         self.failIf(len(self.callLog) != 4, "incorrect number of calls")
     finally:
         os.chdir(cwd)
         os.popen = popen
         os.rename = rename
         util.rmtree(basedir)
Example #48
        def _install(jobList):
            self.cfg.flavor = []
            openpgpkey.getKeyCache().setPublicPath(
                                     self.cfg.root + '/root/.gnupg/pubring.gpg')
            openpgpkey.getKeyCache().setPrivatePath(
                                self.cfg.root + '/root/.gnupg/secring.gpg')
            self.cfg.pubRing = [self.cfg.root + '/root/.gnupg/pubring.gpg']
            client = conaryclient.ConaryClient(self.cfg)
            client.setUpdateCallback(self.callback)
            if self.csCache:
                changeSetList = self.csCache.getChangeSets(client.getRepos(),
                                                           jobList,
                                                           callback=self.callback)
            else:
                changeSetList = []

            updJob = client.newUpdateJob()
            try:
                client.prepareUpdateJob(updJob,
                    jobList, keepExisting=False, resolveDeps=False,
                    recurse=False, checkPathConflicts=False,
                    fromChangesets=changeSetList,
                    migrate=True)
            except conaryclient.update.NoNewTrovesError:
                # since we're migrating, this simply means there were no
                # operations to be performed
                pass
            else:
                util.mkdirChain(self.cfg.root + '/root')
                client.applyUpdate(updJob, replaceFiles=True,
                                   tagScript=self.cfg.root + '/root/tagscripts')
Example #49
 def touch(self, fn, contents = ''):
     if os.path.exists(fn):
         return
     util.mkdirChain(os.path.dirname(fn))
     f = open(fn, 'w')
     f.write(contents)
     f.close()
Example #50
 def _getFile(self, cfg, fieldName, configFile):
     if fieldName not in cfg:
         return None, None
     fieldVal = cfg[fieldName]
     configFile = os.path.join(self.id.rootDir, configFile)
     util.mkdirChain(os.path.dirname(configFile))
     return fieldVal, file(configFile, "w")
Example #51
File: files.py Project: tensor5/conary
 def restore(self, fileContents, root, target, journal=None, nameLookup=True,
             **kwargs):
     util.removeIfExists(target)
     util.mkdirChain(os.path.dirname(target))
     os.symlink(self.target(), target)
     # utime() follows symlinks and Linux currently does not implement
     # lutimes()
     return File.restore(self, root, target, skipMtime=True, journal=journal,
         nameLookup=nameLookup, **kwargs)
Example #52
    def _addDeviceNodes(self):
        if os.getuid():  # can only make device nodes as root
            util.mkdirChain('%s/dev' % self.root)
            return

        for devNode in self.devNodes:
            os.system("/sbin/MAKEDEV -d %s/dev/ -D /dev -x %s" %
                      (self.root, devNode))
Example #53
 def _touchShadow(self):
     # Create shadow files with owner-writable permissions before RPM can
     # create them with no permissions. (RMK-1079)
     etc = os.path.join(self.root, 'etc')
     util.mkdirChain(etc)
     for name in (etc + '/shadow', etc + '/gshadow'):
         open(name, 'a').close()
         os.chmod(name, 0600)