Example #1
    def testFixMultilibPathsLegacyCompat(self):
        # prove that conary-policy will work with older versions of conary
        r = self.getLegacyRecipe()
        path = os.path.join(os.path.sep, 'lib', 'foo.so')
        filePath = util.joinPaths(self.destdir, path)
        newPath = os.path.join(self.destdir, 'libXX', 'foo.so')
        symlinkPath = os.path.join(os.path.sep, 'lib', 'symlink.so')

        self.touch(filePath, contents = '\x7fELF fake of course...')
        os.symlink('foo.so', util.joinPaths(self.destdir, symlinkPath))

        r.macros.lib = 'libXX'
        r.FixupMultilibPaths()
        # we want to run just this policy
        m = magic.Magic(path, self.destdir)
        m.name = 'ELF'
        r.magic = {}
        r.magic[path] = m
        r.magic[symlinkPath] = m
        r._policyMap['FixupMultilibPaths'].test = lambda *args, **kwargs: True
        r._policyMap['FixupMultilibPaths'].dirmap = \
                {'/usr/lib' : '/usr/libXX', '/lib' : '/libXX'}
        self.captureOutput(r._policyMap['FixupMultilibPaths'].doProcess, r)

        # prove the policy did move the file
        self.assertFalse(not os.path.exists(newPath))
        self.assertFalse(os.path.exists(filePath))
        self.assertFalse(os.path.exists(util.joinPaths(self.destdir, symlinkPath)))
        self.assertFalse(not os.path.exists(util.joinPaths(self.destdir,
            'libXX', 'symlink.so')))

        # we expect that the move wasn't recorded
        self.assertEquals(r._pathTranslations, [])
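
Every example on this page passes an absolute path as a later argument to util.joinPaths (e.g. util.joinPaths(self.destdir, path) with path built from os.path.sep), relying on it to splice the pieces together rather than discard the prefix the way os.path.join would. Below is a minimal sketch of that joining behaviour, assuming only the contract visible in these calls; it is not conary's actual implementation.

# Minimal illustration of the joinPaths contract the examples rely on:
# unlike os.path.join, an absolute later component does not discard the
# components before it.
import os

def joinPaths(*components):
    # Concatenate with '/' and let normpath collapse duplicate separators,
    # so joinPaths('/destdir', '/etc/foo') yields '/destdir/etc/foo'.
    path = os.path.normpath(os.sep.join(components))
    # POSIX normpath preserves a leading '//'; collapse it to a single '/'.
    if path.startswith(os.sep * 2):
        path = path[1:]
    return path

Under this sketch, joinPaths(self.destdir, '/lib/foo.so') would return something like '<destdir>/lib/foo.so', which matches how the tests here construct filePath from destdir plus an absolute in-image path.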
Example #2
    def testNormalizeManPages(self):
        r = self.getRecipe()
        path = os.path.join(os.path.sep, r.macros.mandir, 'man1', 'foo.1.gz')
        path2 = os.path.join(os.path.sep,
                r.macros.mandir, 'man1', 'bar.1.bz2')
        filePath = util.joinPaths(self.destdir, path)
        filePath2 = util.joinPaths(self.destdir, path2)
        self.touch(filePath)
        self.touch(filePath2[:-4], contents = 'text')
        g = gzip.open(filePath, 'w')
        g.write('some compressed data')
        g.close()
        self.captureOutput(os.system, 'bzip2 %s' % filePath2[:-4])
        r.NormalizeManPages()
        self.captureOutput(r._policyMap['NormalizeManPages'].doProcess, r)
        # prove the policy did move the file
        self.assertFalse(not os.path.exists(filePath))
        self.assertFalse(not os.path.exists(filePath2[:-4] + '.gz'))
        self.assertFalse(os.path.exists(filePath2))

        # we expect that moves were recorded
        # the order in which foo/bar get evaluated isn't stable; we'll just
        # sort for test purposes
        self.assertEquals(sorted(r._pathTranslations),
                sorted([('/usr/share/man/man1/foo.1.gz',
                    '/usr/share/man/man1/foo.1'),
                 ('/usr/share/man/man1/bar.1.bz2',
                     '/usr/share/man/man1/bar.1'),
                 ('/usr/share/man/man1/bar.1',
                     '/usr/share/man/man1/bar.1.gz'),
                 ('/usr/share/man/man1/foo.1',
                     '/usr/share/man/man1/foo.1.gz')]))
Example #3
    def testNormalizeInfoPages(self):
        r = self.getRecipe()
        path = os.path.join(os.path.sep, r.macros.infodir, 'man1', 'foo.gz')
        filePath = util.joinPaths(self.destdir, path)
        newPath = os.path.join(os.path.sep, r.macros.infodir, 'foo.gz')
        self.touch(filePath)
        g = gzip.open(filePath, 'w')
        g.write('some compressed data')
        g.close()
        r.NormalizeInfoPages()
        m = magic.Magic(path, self.destdir)
        m.name = 'gzip'
        m.contents['compression'] = 5
        r.magic = {}
        r.magic[path] = m
        r.magic[newPath] = m
        r._policyMap['NormalizeInfoPages'].doProcess(r)
        #self.captureOutput(r._policyMap['NormalizeInfoPages'].doProcess, r)
        # prove the policy did move the file
        self.assertFalse(os.path.exists(filePath))
        self.assertFalse(not os.path.exists(util.joinPaths(self.destdir, newPath)))

        # we expect that moves were recorded
        self.assertEquals(r._pathTranslations,
                [('/usr/share/info/man1/foo.gz', '/usr/share/info/foo.gz')])
Example #4
 def _mount_dev(self):
     # Temporarily bind-mount the jobslave /dev into the chroot so
     # grub2-install can see the loop device it's targeting.
     logCall("mount -o bind /dev %s/dev" %  self.image_root)
     # /etc/grub.d/10_linux tries to find the backing device for loop
     # devices, on the assumption that it's a block device with cryptoloop
     # on top. Replace losetup with a stub while running mkconfig so it
     # keeps the loop device name and all the right UUIDs get emitted.
     losetup = util.joinPaths(self.image_root, '/sbin/losetup')
     os.rename(losetup, losetup + '.bak')
     with open(losetup, 'w') as f_losetup:
         print >> f_losetup, '#!/bin/sh'
         print >> f_losetup, 'echo "$1"'
     os.chmod(losetup, 0755)
     # In order for the root device to be detected as a FS UUID and not
     # /dev/loop0 there needs to be a link in /dev/disk/by-uuid, which
     # doesn't happen with the jobmaster's containerized environment.
     link_path = None
     if self.root_device.uuid:
         link_path = util.joinPaths(self.image_root, '/dev/disk/by-uuid',
                 self.root_device.uuid)
         util.mkdirChain(os.path.dirname(link_path))
         util.removeIfExists(link_path)
         os.symlink(self.root_device.devPath, link_path)
     try:
         yield
     finally:
         try:
             if link_path:
                 os.unlink(link_path)
             os.rename(losetup + '.bak', losetup)
             logCall("umount %s/dev" %  self.image_root)
         except:
             pass
Example #5
    def testNormalizeBz2InfoPages(self):
        r = self.getRecipe()
        path = os.path.join(os.path.sep, r.macros.infodir, 'man1', 'foo.bz2')
        filePath = util.joinPaths(self.destdir, path)
        newPath = os.path.join(os.path.sep, r.macros.infodir, 'foo.gz')
        self.touch(filePath[:-4])
        self.captureOutput(os.system, 'bzip2 %s' % filePath[:-4])

        r.NormalizeInfoPages()
        m = magic.Magic(path, self.destdir)
        m.name = 'bzip'
        m.contents['compression'] = 5
        r.magic = {}
        r.magic[path] = m
        r.magic[newPath] = m
        r.magic[newPath[:-3] + '.bz2'] = m
        r._policyMap['NormalizeInfoPages'].doProcess(r)
        #self.captureOutput(r._policyMap['NormalizeInfoPages'].doProcess, r)
        # prove the policy did move the file
        self.assertFalse(os.path.exists(filePath))
        self.assertFalse(not os.path.exists(util.joinPaths(self.destdir, newPath)))

        # we expect that moves were recorded
        self.assertEquals(r._pathTranslations,
                [('/usr/share/info/man1/foo.bz2',
                    '/usr/share/info/foo.bz2'),
                 ('/usr/share/info/foo.bz2',
                     '/usr/share/info/foo.gz')])
Example #6
    def _correctInterp(self, m, path):
        destdir = self.recipe.macros.destdir
        d = util.joinPaths(destdir, path)

        interp = m.contents['interpreter']
        interpDir = os.path.dirname(interp)
        interpBase = os.path.basename(interp)

        found = False

        if not os.path.exists('/'.join((destdir, interp))) and not os.path.exists(interp):
            # try to remove the 'local' part
            if '/local/' in interp:
                normalized = interp.replace('/local', '')
                if os.path.exists('/'.join((destdir, normalized))) or os.path.exists(normalized):
                    found = True
                if not found:
                    candidates = (
                                self.recipe.macros.bindir,
                                self.recipe.macros.sbindir,
                                self.recipe.macros.essentialbindir,
                                self.recipe.macros.essentialsbindir,
                                )
                    for i in candidates:
                        if os.path.exists('/'.join((destdir, i, interpBase))):
                            normalized = util.joinPaths(i, interpBase)
                            found = True
                            break
                    if not found:
                        #try to find in '/bin', '/sbin', '/usr/bin', '/usr/sbin'
                        for i in '/usr/bin', '/bin', '/usr/sbin', '/sbin':
                            normalized = '/'.join((i, interpBase))
                            if os.path.exists(normalized):
                                found = True
                                break
                        if not found:
                            self.warn('The interpreter path %s in %s does not exist!', interp, path)

        # If the interp has symlinks along its dir path, rewrite to the real
        # path. Do not rewrite the name of the interpreter itself as that might
        # change the behavior of the program.
        if (not found and interp.startswith('/')
                and os.path.realpath(interpDir) != interpDir):
            normalized = '/'.join((os.path.realpath(interpDir), interpBase))
            found = True

        if found:
            line = m.contents['line']
            normalized = line.replace(interp, normalized)
            self._changeInterpLine(d, '#!' + normalized + '\n')
            self.info('changing %s to %s in %s',
                      line, normalized, path)

        return found
Example #7
 def doFile(self, filename):
     source = util.joinPaths(self.builddir, filename)
     dest = util.joinPaths(self.destdir, filename)
     if os.path.exists(dest):
         return
     if not util.isregular(source):
         # will not be a directory, but might be a symlink or something
         return
     util.mkdirChain(os.path.dirname(dest))
     shutil.copy2(source, dest)
     os.chmod(dest, 0644)
     # this file should not be counted as making package non-empty
     self.recipe._autoCreatedFileCount += 1
Example #8
    def testConfigAndRemoveRollback(self):
        self.addComponent("foo:runtime", "1.0", fileContents=[("/foo", "foo1.0"), ("/etc/foo", "foo1.0\n")])
        self.addComponent("foo:runtime", "1.1", fileContents=[("/foo", "foo1.0"), ("/etc/foo", "foo2.1\n")])
        self.addComponent("bar:runtime", "1.0", fileContents=[("/foo", "bar1.0")])

        self.updatePkg("foo:runtime=1.0")
        self.verifyFile(util.joinPaths(self.rootDir, "/foo"), "foo1.0")
        self.writeFile(util.joinPaths(self.rootDir, "/etc/foo"), "hmm\nfoo1.0\n")
        self.updatePkg(["foo:runtime", "bar:runtime"], replaceFiles=True)

        self.rollback(self.rootDir, 1)
        self.verifyFile(util.joinPaths(self.rootDir, "/etc/foo"), "hmm\nfoo1.0\n")
        self.verifyFile(util.joinPaths(self.rootDir, "/foo"), "foo1.0")
Example #9
    def doFile(self, path):
        if hasattr(self.recipe, '_getCapsulePathsForFile'):
            if self.recipe._getCapsulePathsForFile(path):
                return

        d = self.macros.destdir
        destlen = len(d)
        l = util.joinPaths(d, path)
        if not os.path.islink(l):
            m = self.recipe.magic[path]
            if m and m.name == 'ELF' and 'soname' in m.contents:
                if os.path.basename(path) == m.contents['soname']:
                    target = m.contents['soname']+'.something'
                else:
                    target = m.contents['soname']
                self.warn(
                    '%s is not a symlink but probably should be a link to %s',
                    path, target)
            return

        # store initial contents
        sopath = util.joinPaths(os.path.dirname(l), os.readlink(l))
        so = util.normpath(sopath)
        # find final file
        while os.path.islink(l):
            l = util.normpath(util.joinPaths(os.path.dirname(l),
                                             os.readlink(l)))

        p = util.joinPaths(d, path)
        linkpath = l[destlen:]
        m = self.recipe.magic[linkpath]

        if m and m.name == 'ELF' and 'soname' in m.contents:
            if so == linkpath:
                self.info('%s is final path, soname is %s;'
                    ' soname usually is symlink to specific implementation',
                    linkpath, m.contents['soname'])
            soname = util.normpath(util.joinPaths(
                        os.path.dirname(sopath), m.contents['soname']))
            s = soname[destlen:]
            try:
                os.stat(soname)
                if not os.path.islink(soname) and s not in self.nonSymlinkWarn:
                    self.nonSymlinkWarn.add(s)
                    self.info('%s has soname %s; best practice is that the'
                              ' filename that matches the soname is a symlink:'
                              ' soname -> soname.minorversion',
                              s, m.contents['soname'])
            except OSError:
                # the missing file case will be fixed up by other policy
                pass
Example #10
def copytree(source, dest, exceptions=None):
    if not exceptions:
        exceptions = []
    for root, dirs, files in os.walk(source):
        root = root.replace(source, "")
        for f in files:
            if not [x for x in exceptions if re.match(x, util.joinPaths(root, f))]:
                copyfile(util.joinPaths(source, root, f), util.joinPaths(dest, root, f))
        for d in dirs:
            this_dir = util.joinPaths(dest, root, d)
            if not os.path.exists(this_dir) and not [x for x in exceptions if re.match(x, util.joinPaths(root, d))]:
                os.mkdir(this_dir)
                dStat = os.stat(util.joinPaths(source, root, d))
                os.chmod(this_dir, dStat[stat.ST_MODE])
Example #11
    def addPluggableRequirements(self, path, fullpath, pkgFiles, macros):
        d = macros.destdir
        f = util.joinPaths(d, path)
        if not os.path.islink(f):
            return
        self._openDb()

        fullpath = util.joinPaths(d, path)
        contents = os.readlink(fullpath)
        if not contents.startswith(os.path.sep):
            # contents is normally a relative symlink thanks to
            # the RelativeSymlinks policy. If it's not, then we have an
            # absolute symlink, and we'll just use it directly
            contents = util.joinPaths(os.path.dirname(fullpath), contents)
        if contents.startswith(d):
            contents = contents[len(d):]
        if os.path.exists(util.joinPaths(d, contents)):
            # the file is provided by the destdir, don't search for it
            return

        troves = self.db.iterTrovesByPath(contents)
        if not troves:
            # If there's a file, conary doesn't own it. Either way,
            # DanglingSymlinks will fire an error.
            return
        trv = troves[0]

        fileDep = deps.parseDep('file: %s' % contents)
        troveDep = deps.parseDep('trove: %s' % trv.getName())

        provides = trv.getProvides()
        if provides.satisfies(fileDep):
            self._addRequirement(path, contents, [], pkgFiles,
                    deps.FileDependencies)
            self.recipe.DanglingSymlinks(exceptions = re.escape(path),
                    allowUnusedFilters = True)
            if trv.getName() not in self.recipe.buildRequires:
                self.recipe.reportMissingBuildRequires(trv.getName())
        elif provides.satisfies(troveDep):
            self._addRequirement(path, trv.getName(), [], pkgFiles,
                    deps.TroveDependencies)
            # warn that a file dep would be better, but we'll settle for a
            # dep on the trove that contains the file
            self.warn("'%s' does not provide '%s', so a requirement on the " \
                    "trove itself was used to satisfy dangling symlink: %s"  %\
                    (trv.getName(), fileDep, path))
            self.recipe.DanglingSymlinks(exceptions = re.escape(path),
                    allowUnusedFilters = True)
            if trv.getName() not in self.recipe.buildRequires:
                self.recipe.reportMissingBuildRequires(trv.getName())
Example #12
 def _moveToInfoRoot(self, file):
     infofilespath = '%(destdir)s/%(infodir)s' %self.macros
     fullfile = util.joinPaths(infofilespath, file)
     if os.path.isdir(fullfile):
         for subfile in os.listdir(fullfile):
             self._moveToInfoRoot(util.joinPaths(file, subfile))
         shutil.rmtree(fullfile)
     elif os.path.dirname(fullfile) != infofilespath:
         destPath = util.joinPaths(infofilespath,
                                   os.path.basename(fullfile))
         shutil.move(fullfile, destPath)
         try:
             self.recipe.recordMove(fullfile, destPath)
         except AttributeError:
             pass
Example #13
 def _extractLayer(self, unpackDir, tarFile):
     util.mkdirChain(unpackDir)
     # Walk the files in the tar file, looking for .wh.*
     tf = tarfile.open(tarFile)
     toDeleteAfter = set()
     for tinfo in tf:
         bname = os.path.basename(tinfo.name)
         if bname.startswith('.wh.') and tinfo.mode == 0:
             util.rmtree(util.joinPaths(unpackDir,
                 os.path.dirname(tinfo.name), bname[4:]),
                     ignore_errors=True)
             toDeleteAfter.add(util.joinPaths(unpackDir, tinfo.name))
     logCall(["tar", "-C", unpackDir, "-xf", tarFile])
     for fname in toDeleteAfter:
         util.removeIfExists(fname)
Example #14
    def testNormalizePkgConfigLegacy(self):
        r = self.getLegacyRecipe()
        path = os.path.join(os.path.sep, r.macros.datadir, 'pkgconfig', 'foo')
        filePath = util.joinPaths(self.destdir, path)
        newPath = util.joinPaths(self.destdir,
                r.macros.libdir, 'pkgconfig', 'foo')
        self.touch(filePath)
        r.NormalizePkgConfig()
        self.captureOutput(r._policyMap['NormalizePkgConfig'].doProcess, r)
        # prove the policy did move the file
        self.assertFalse(not os.path.exists(newPath))
        self.assertFalse(os.path.exists(filePath))

        # we expect that no move was recorded
        self.assertEquals(r._pathTranslations, [])
Example #15
    def _correctEnv(self, m, path):
        destdir = self.recipe.macros.destdir
        d = util.joinPaths(destdir, path)

        interp = m.contents['interpreter']
        if interp.find('/bin/env') != -1: #finds /usr/bin/env too...
            line = m.contents['line']
            # rewrite to not have env
            wordlist = [ x for x in line.split() ]
            if len(wordlist) == 1:
                self.error("Interpreter is not given for %s in %s", wordlist[0], path)
                return
            wordlist.pop(0) # get rid of env
            # first look in package
            fullintpath = util.checkPath(wordlist[0], root=destdir)
            if fullintpath == None:
                # then look on installed system
                fullintpath = util.checkPath(wordlist[0])
            if fullintpath == None:
                self.error("Interpreter %s for file %s not found, could not convert from /usr/bin/env syntax", wordlist[0], path)
                return False

            wordlist[0] = fullintpath

            self._changeInterpLine(d, '#!'+" ".join(wordlist)+'\n')
            self.info('changing %s to %s in %s',
                        line, " ".join(wordlist), path)
            return True
        return False
Example #16
    def install_mbr(self, root_dir, mbr_device, size):
        """
        Install grub into the MBR.
        """
        if not os.path.exists(util.joinPaths(self.image_root, self.grub_path)):
            log.info("grub not found. skipping setup.")
            return

        #  Assumed:
        # * raw hdd image at mbr_device is bind mounted at root_dir/disk.img
        # * The size requested is an integer multiple of the cylinder size
        bytesPerCylinder = self.geometry.bytesPerCylinder
        assert not (size % bytesPerCylinder), "The size passed in here must be cylinder aligned"
        cylinders = size / bytesPerCylinder

        # IMPORTANT: Use "rootnoverify" here, since some versions of grub
        # have trouble test-mounting the partition inside disk1.img (RBL-8193)
        grubCmds = "device (hd0) /disk.img\n" \
                   "geometry (hd0) %d %d %d\n" \
                   "rootnoverify (hd0,0)\n" \
                   "setup (hd0)" % (cylinders,
                        self.geometry.heads, self.geometry.sectors)

        logCall('echo -e "%s" | '
                'chroot %s sh -c "%s --no-floppy --batch"'
                % (grubCmds, root_dir, self.grub_path))
Example #17
    def doFile(self, path):
        if hasattr(self.recipe, '_getCapsulePathsForFile'):
            if self.recipe._getCapsulePathsForFile(path):
                return

        d = self.macros.destdir
        fullpath = util.joinPaths(d, path)
        if not (os.path.isfile(fullpath) and util.isregular(fullpath)):
            return
        f = file(fullpath)
        lines = f.readlines()
        f.close()
        foundChkconfig = False
        for line in lines:
            line = line.strip()
            if not line:
                continue
            if not line.startswith('#'):
                # chkconfig tag must come before any uncommented lines
                break
            if line.find('chkconfig:') != -1:
                foundChkconfig = True
                break
            if line.find('### BEGIN INIT INFO') != -1:
                foundChkconfig = True
                break
        if not foundChkconfig:
            self.warn("initscript %s must contain chkconfig information before any uncommented lines", path)
Example #18
    def doFile(self, path):
        if hasattr(self.recipe, '_getCapsulePathsForFile'):
            if self.recipe._getCapsulePathsForFile(path):
                return

        # heuristic parsing of a manpage filename to figure out its category
        mandir = self.recipe.macros.mandir
        d = self.recipe.macros.destdir
        basename = os.path.basename(path)

        stripped = basename
        if basename.endswith('.gz'):
            stripped =  basename.split('.', 2)[-2]

        if '.' not in stripped:
            self.logError(path)
            return

        num = stripped.split('.', 2)[-1]
        if len(num) > 1 or not num.isdigit():
            self.logError(path)
            return

        newPath = util.joinPaths(d, mandir, 'man'+num, basename)
        self.warn('Moving %s to %s', path, newPath)
        os.renames(d+path, newPath)
        try:
            self.recipe.recordMove(d + path, newPath)
        except AttributeError:
            pass
Example #19
 def __init__(self, rbuilderUrl, user, pw, handle):
     _AbstractRbuilderClient.__init__(self, rbuilderUrl, user, pw, handle)
     scheme, _, _, host, port, path, _, _ = util.urlSplit(rbuilderUrl)
     path = util.joinPaths(path, 'api')
     self._url = util.urlUnsplit(
             (scheme, user, pw, host, port, path, None, None))
     self._api = None
Example #20
 def setup(self):
     defaults = util.joinPaths(self.image_root, 'etc', 'default', 'grub')
     util.mkdirChain(os.path.dirname(defaults))
     if not os.path.exists(defaults) or not os.lstat(defaults).st_size:
         with open(defaults, 'w') as f_defaults:
             print >> f_defaults, '# Defaults set by rBuilder'
             print >> f_defaults, 'GRUB_DISABLE_RECOVERY=true'
Example #21
    def doFile(self, path):
        if hasattr(self.recipe, '_getCapsulePathsForFile'):
            if self.recipe._getCapsulePathsForFile(path):
                return

        d = util.joinPaths(self.recipe.macros.destdir, path)
        mode = os.lstat(d)[stat.ST_MODE]
        if stat.S_ISLNK(mode):
            # we'll process whatever this is pointing to whenever we
            # get there.
            return
        if not (mode & 0200):
            os.chmod(d, mode | 0200)
        f = file(d, 'r+')
        l = f.readlines()
        l = [x.replace('/lib/security/$ISA/', '') for x in l]
        stackRe = re.compile('(.*)required.*pam_stack.so.*service=(.*)')
        def removeStack(line):
            m = stackRe.match(line)
            if m:
                return '%s include %s\n'%(m.group(1), m.group(2))
            return line
        l = [removeStack(x) for x in l]
        f.seek(0)
        f.truncate(0) # we may have shrunk the file, avoid garbage
        f.writelines(l)
        f.close()
        os.chmod(d, mode)
Example #22
    def remove(self, trv):
        SingleCapsuleOperation.remove(self, trv)

        # make sure everything was erased which should have been; RPM's
        # shared file handling means it may not erase things which we think
        # ought to be
        for trv in self.removes:
            dbFileObjs = self.db.getFileVersions(
                        [ (x[0], x[2], x[3]) for x in trv.iterFileList() ] )

            for (pathId, path, fileId, version), fileObj in \
                    itertools.izip(trv.iterFileList(), dbFileObjs):
                hasCapsule = trv.troveInfo.capsule.type() or False
                fullPath = util.joinPaths(self.root,  path)
                if not os.path.exists(fullPath):
                    continue

                if (fileObj.flags.isCapsuleAddition()):
                    # this was added to the package outside of the RPM;
                    # we don't have any responsibility for it
                    continue
                elif (fileObj.hasContents and
                      trove.conaryContents(hasCapsule, pathId, fileObj)):
                    # this content isn't part of the capsule; remember to put
                    # it back when RPM is done
                    self.preservePath(path)
                    continue

                fsFileObj = files.FileFromFilesystem(fullPath, pathId,
                                                     possibleMatch = fileObj)
                self.fsJob._remove(fsFileObj, path, fullPath,
                                   'removing rpm owned file %s',
                                   ignoreMissing = True)
Example #23
    def loadPlugins(self):
        """
        Determine which capsule plugins are relevant to this system, and load
        them.

        This uses a simple test such as the existence of a directory to
        determine whether each plugin is useful. At some point the contents of
        the conary database should also be factored in, so that deleting the
        capsule target database and running a sync should erase all of those
        capsule troves.
        """
        db = self._db()
        for kind, (module, className, checkFunc
                ) in self.availablePlugins.iteritems():
            if kind in self._loadedPlugins:
                continue
            if isinstance(checkFunc, basestring):
                path = util.joinPaths(db.root, checkFunc)
                try:
                    if not os.stat(path).st_size:
                        continue
                except OSError:
                    continue
            else:
                if not checkFunc():
                    continue
            __import__(module)
            cls = getattr(sys.modules[module], className)
            self._loadedPlugins[kind] = cls(db)
Example #24
def downloads_get(request):
    if not url_sign.verify_request(request.cfg, request):
        return web_exc.HTTPForbidden("Authorization for this request has "
                "expired or is not valid")
    sha1 = request.matchdict['sha1']
    dlfiles = request.db.query(DownloadFile).filter_by(file_sha1=sha1).all()
    if not dlfiles:
        return web_exc.HTTPNotFound()
    if 'cust_id' in request.matchdict:
        # URL is bound to a specific customer so re-check the entitlement, both
        # to make sure it is still valid and to check the client IP against any
        # GeoIP filters.
        if not request.filterFiles(dlfiles,
                cust_id=request.matchdict['cust_id']):
            log.warning("Rejected download after revalidating entitlement")
            return web_exc.HTTPForbidden()
    dlfile = dlfiles[0]
    path = joinPaths(request.cfg.downloadDir, dlfile.file_sha1)
    try:
        response = FileResponse(path, request=request,
                content_type='application/octet-stream')
    except (OSError, IOError), err:
        if err.args[0] == errno.ENOENT:
            log.warning("Download file %s is missing (basename='%s')", path,
                    dlfile.file_basename)
            return web_exc.HTTPNotFound()
        raise
Example #25
 def install(self):
     """Generate grub2 configs"""
     cfgname = '/boot/grub2/grub.cfg'
     util.mkdirChain(os.path.dirname(util.joinPaths(self.image_root, cfgname)))
     with self._mount_dev():
         logCall('chroot %s grub2-mkconfig -o %s' % (self.image_root, cfgname))
     rdgen = dracut.DracutGenerator(self.image_root)
     rdgen.generateFromGrub2()
Example #26
    def hasSystemModel(self):
        """
        Returns True if the system is modeled using a System Model

        @rtype: bool
        """
        modelPath = util.joinPaths(self.cfg.root, self.cfg.modelPath)
        return os.path.exists(modelPath)
Example #27
    def _correctInterp(self, m, path):
        destdir = self.recipe.macros.destdir
        d = util.joinPaths(destdir, path)

        interp = m.contents['interpreter']
        interpBase = os.path.basename(interp)

        found = False

        if not os.path.exists('/'.join((destdir, interp))) and not os.path.exists(interp):
            # try to remove the 'local' part
            if '/local/' in interp:
                normalized = interp.replace('/local', '')
                if os.path.exists('/'.join((destdir, normalized))) or os.path.exists(normalized):
                    found = True
                if not found:
                    candidates = (
                                self.recipe.macros.bindir,
                                self.recipe.macros.sbindir,
                                self.recipe.macros.essentialbindir,
                                self.recipe.macros.essentialsbindir,
                                )
                    for i in candidates:
                        if os.path.exists('/'.join((destdir, i, interpBase))):
                            normalized = util.joinPaths(i, interpBase)
                            found = True
                            break
                    if not found:
                        #try to find in '/bin', '/sbin', '/usr/bin', '/usr/sbin'
                        for i in '/usr/bin', '/bin', '/usr/sbin', '/sbin':
                            normalized = '/'.join((i, interpBase))
                            if os.path.exists(normalized):
                                found = True
                                break
                        if not found:
                            self.warn('The interpreter path %s in %s does not exist!', interp, path)
       
        if found:
            line = m.contents['line']
            normalized = line.replace(interp, normalized)
            self._changeInterpLine(d, '#!' + normalized + '\n')
            self.info('changing %s to %s in %s',
                      line, normalized, path)

        return found
Example #28
    def fileChanged(self, path):
        """
        check to see if the file has changed
        @param path: the path to check
        @return: FILE_MISSING, FILE_CHANGED, FILE_UNCHANGED, FILE_NEW
        @rtype: int
        """
        newPath = util.joinPaths(self.macros.destdir, path)
        if not util.exists(newPath):
            return self.FILE_MISSING

        from conary.build.recipe import RECIPE_TYPE_CAPSULE
        if self.recipe._recipeType == RECIPE_TYPE_CAPSULE:
            if not os.path.isfile(newPath):
                # for capsules we get everything but contents from
                # the capsule header
                return self.FILE_UNCHANGED

            # For derived capsule recipes we use the exploder to provide the old
            # sha1. For regular capsule recipes we use the capsuleFileSha1s map
            # to provide the old sha1.
            oldSha1=None
            if os.path.islink(newPath):
                oldSha1 = os.readlink(newPath)
            elif hasattr(self.recipe,'exploder'):
                oldf = self.recipe.exploder.fileObjMap.get(path,None)
                if oldf and oldf.hasContents:
                    oldSha1 = oldf.contents.sha1()
            else:
                capPaths = self.recipe._getCapsulePathsForFile(path)
                if not capPaths:
                    return self.FILE_NEW
                oldSha1 = self.recipe.capsuleFileSha1s[capPaths[0]][path]

            if oldSha1:
                if os.path.islink(newPath):
                    newSha1 = os.readlink(newPath)
                else:
                    newSha1 = sha1helper.sha1FileBin(newPath)
                if oldSha1 == newSha1:
                    return self.FILE_UNCHANGED
                return self.FILE_CHANGED
            return self.FILE_NEW

        oldMtime = self.recipe._derivedFiles.get(path, None)
        if os.path.islink(newPath):
            # symlinks are special, we compare the target of the link
            # instead of the mtime
            newMtime = os.readlink(newPath)
        else:
            newMtime = os.lstat(newPath).st_mtime
        if oldMtime:
            if oldMtime == newMtime:
                return self.FILE_UNCHANGED
            return self.FILE_CHANGED
        return self.FILE_NEW
Example #29
 def generateOne(self, kver, rdPath):
     if not os.path.exists(util.joinPaths(self.image_root, '/sbin/btrfs')):
         self.DRACUT_MODULES.discard('btrfs')
     args = ['/usr/sbin/chroot', self.image_root, '/sbin/dracut',
             '--force',
             '--add=' + ' '.join(self.DRACUT_MODULES),
             '--add-drivers=' + ' '.join(self.MODULES),
             ]
     args.extend([rdPath, kver])
     logCall(args)
Example #30
    def do(self):
        e = '%(destdir)s/%(sysconfdir)s/X11/app-defaults' % self.macros
        if not os.path.isdir(e):
            return

        x = '%(destdir)s/%(x11prefix)s/lib/X11/app-defaults' % self.macros
        self.warn('app-default files misplaced in'
                  ' %(sysconfdir)s/X11/app-defaults' % self.macros)
        if os.path.islink(x):
            util.remove(x)
        util.mkdirChain(x)
        for file in os.listdir(e):
            util.rename(util.joinPaths(e, file),
                        util.joinPaths(x, file))
            try:
                self.recipe.recordMove(util.joinPaths(e, file),
                        util.joinPaths(x, file))
            except AttributeError:
                pass
Example #31
    def _merge(self):
        changeSet = ChangeSet()
        deleteDirs = set()
        doCommit = False
        # If this is not None then all ephemeral sources will still be fetched
        # but will be placed in this directory instead.
        if self.helper.plan.ephemeralSourceDir:
            ephDir = self.helper.makeEphemeralDir()
        else:
            ephDir = None

        def _addFile(path, contents, isText):
            if path in oldFiles:
                # Always recycle pathId if available.
                pathId, _, oldFileId, oldFileVersion = oldFiles[path]
            else:
                pathId = hashlib.md5(path).digest()
                oldFileId = oldFileVersion = None

            fileHelper = filetypes.RegularFile(contents=contents,
                    config=isText)
            fileStream = fileHelper.get(pathId)
            fileStream.flags.isSource(set=True)
            fileId = fileStream.fileId()

            # If the fileId matches, recycle the fileVersion too.
            if fileId == oldFileId:
                fileVersion = oldFileVersion
            else:
                fileVersion = newTrove.getVersion()

            filesToAdd[fileId] = (fileStream, fileHelper.contents, isText)
            newTrove.addFile(pathId, path, fileVersion, fileId)

        for package, (recipeText, recipeObj), oldTrove in zip(
                self.packages, self.recipes, self.oldTroves):

            filesToAdd = {}
            oldFiles = {}
            if oldTrove is not None:
                for pathId, path, fileId, fileVer in oldTrove.iterFileList():
                    oldFiles[path] = (pathId, path, fileId, fileVer)
            newTrove = Trove(package.name, package.nextVersion, deps.Flavor())
            newTrove.setFactory(package.targetConfig.factory)

            # Add upstream files to new trove. Recycle pathids from the old
            # version.
            # LAZY: assume that everything other than the recipe is binary.
            # Conary has a magic module, but it only accepts filenames!
            for path, contents in package.recipeFiles.iteritems():
                isText = path == package.getRecipeName()
                _addFile(path, contents, isText)

            # Collect requested auto sources from the recipe. Unknown recipe
            # types will not be loaded, so recipeObj will be the class; assume
            # these have no sources.
            if not inspect.isclass(recipeObj):
                recipeFiles = dict((os.path.basename(x.getPath()), x)
                    for x in recipeObj.getSourcePathList())
                newFiles = set(x[1] for x in newTrove.iterFileList())

                needFiles = set(recipeFiles) - newFiles
                for autoPath in needFiles:
                    source = recipeFiles[autoPath]
                    if (autoPath in oldFiles
                            and not self.helper.plan.refreshSources
                            and not source.ephemeral):
                        # File exists in old version.
                        pathId, path, fileId, fileVer = oldFiles[autoPath]
                        newTrove.addFile(pathId, path, fileVer, fileId)
                        continue

                    if source.ephemeral and not ephDir:
                        continue

                    # File doesn't exist; need to create it.
                    if source.ephemeral:
                        laUrl = lookaside.laUrl(source.getPath())
                        tempDir = joinPaths(ephDir,
                                os.path.dirname(laUrl.filePath()))
                        mkdirChain(tempDir)
                    else:
                        tempDir = tempfile.mkdtemp()
                        deleteDirs.add(tempDir)
                    snapshot = _getSnapshot(self.helper, package, source,
                            tempDir)

                    if not source.ephemeral and snapshot:
                        autoPathId = hashlib.md5(autoPath).digest()
                        autoObj = FileFromFilesystem(snapshot, autoPathId)
                        autoObj.flags.isAutoSource(set=True)
                        autoObj.flags.isSource(set=True)
                        autoFileId = autoObj.fileId()

                        autoContents = filecontents.FromFilesystem(snapshot)
                        filesToAdd[autoFileId] = (autoObj, autoContents, False)
                        newTrove.addFile(autoPathId, autoPath,
                            newTrove.getVersion(), autoFileId)

            # If the old and new troves are identical, just use the old one.
            if oldTrove and _sourcesIdentical(
                    oldTrove, newTrove, [self.oldChangeSet, filesToAdd]):
                package.setDownstreamVersion(oldTrove.getVersion())
                log.debug('Skipped %s=%s', oldTrove.getName(),
                        oldTrove.getVersion())
                continue

            # Add files and contents to changeset.
            for fileId, (fileObj, fileContents, cfgFile) in filesToAdd.items():
                changeSet.addFileContents(fileObj.pathId(), fileObj.fileId(),
                    ChangedFileTypes.file, fileContents, cfgFile)
                changeSet.addFile(None, fileObj.fileId(), fileObj.freeze())

            # Create a changelog entry.
            changeLog = ChangeLog(
                name=self.helper.cfg.name, contact=self.helper.cfg.contact,
                message=self.helper.plan.commitMessage + '\n')
            newTrove.changeChangeLog(changeLog)

            # Calculate trove digests and add the trove to the changeset
            newTrove.invalidateDigests()
            newTrove.computeDigests()
            newTroveCs = newTrove.diff(None, absolute=True)[0]
            changeSet.newTrove(newTroveCs)
            doCommit = True

            package.setDownstreamVersion(newTrove.getVersion())
            log.debug('Created %s=%s', newTrove.getName(), newTrove.getVersion())

        if doCommit:
            cook.signAbsoluteChangesetByConfig(changeSet, self.helper.cfg)
            f = tempfile.NamedTemporaryFile(dir=os.getcwd(), suffix='.ccs',
                    delete=False)
            f.close()
            changeSet.writeToFile(f.name)
            try:
                self.helper.getRepos().commitChangeSet(changeSet)
            except:
                log.error("Error committing changeset to repository, "
                        "failed changeset is saved at %s", f.name)
                raise
            else:
                os.unlink(f.name)

        for path in deleteDirs:
            shutil.rmtree(path)
Example #32
    def doFile(self, path):
        if hasattr(self.recipe, '_getCapsulePathsForFile'):
            if self.recipe._getCapsulePathsForFile(path):
                return

        destdir = self.macros.destdir
        fullpath = util.joinPaths(destdir, path)
        mode = os.lstat(fullpath)[stat.ST_MODE]
        m = self.recipe.magic[path]
        if stat.S_ISREG(mode) and (not m or
                                   (m.name != "ELF" and m.name != "ar")):
            self.warn("non-object file with library name %s", path)
            return
        basename = os.path.basename(path)
        currentsubtree = self.currentsubtree % self.macros
        targetdir = self.dirmap[currentsubtree]
        # we want to append whatever path came after the currentsubtree -
        # e.g. if the original path is /usr/lib/subdir/libfoo.a,
        # we still need to add the /subdir/
        targetdir += os.path.dirname(path[len(currentsubtree):])
        target = util.joinPaths(targetdir, basename)
        fulltarget = util.joinPaths(destdir, target)
        if os.path.exists(fulltarget):
            tmode = os.lstat(fulltarget)[stat.ST_MODE]
            tm = self.recipe.magic[target]
            if (not stat.S_ISREG(mode) or not stat.S_ISREG(tmode)):
                # one or both might be symlinks, in which case we do
                # not want to touch this
                return
            if ('abi' in m.contents and 'abi' in tm.contents
                    and m.contents['abi'] != tm.contents['abi']):
                # path and target both exist and are of different abis.
                # This means that this is actually a multilib package
                # that properly contains both lib and lib64 items,
                # and we shouldn't try to fix them.
                return
            raise policy.PolicyError(
                "Conflicting library files %s and %s installed" %
                (path, target))
        self.warn('file %s found in wrong directory, attempting to fix...',
                  path)
        util.mkdirChain(destdir + targetdir)
        if stat.S_ISREG(mode):
            util.rename(destdir + path, fulltarget)
            try:
                self.recipe.recordMove(destdir + path, fulltarget)
            except AttributeError:
                pass
        else:
            # we should have a symlink that may need the contents changed
            contents = os.readlink(fullpath)
            if contents.find('/') == -1:
                # simply rename
                util.rename(destdir + path, destdir + target)
                try:
                    self.recipe.recordMove(destdir + path, fulltarget)
                except AttributeError:
                    pass
            else:
                # need to change the contents of the symlink to point to
                # the new location of the real file
                contentdir = os.path.dirname(contents)
                contenttarget = os.path.basename(contents)
                olddir = os.path.dirname(path)
                if contentdir.startswith('/'):
                    # absolute path
                    if contentdir == olddir:
                        # no need for a path at all, change to local relative
                        os.symlink(contenttarget, destdir + target)
                        os.remove(fullpath)
                        return
                if not contentdir.startswith('.'):
                    raise policy.PolicyError(
                        'Multilib: cannot fix relative path %s in %s -> %s\n'
                        'Library files should be in %s' %
                        (contentdir, path, contents, targetdir))
                # now deal with ..
                # first, check for relative path that resolves to same dir
                i = contentdir.find(olddir)
                if i != -1:
                    dotlist = contentdir[:i].split('/')
                    dirlist = contentdir[i + 1:].split('/')
                    if len(dotlist) == len(dirlist):
                        # no need for a path at all, change to local relative
                        os.symlink(contenttarget, destdir + target)
                        os.remove(fullpath)
                        return
                raise policy.PolicyError(
                    'Multilib: cannot fix relative path %s in %s -> %s\n'
                    'Library files should be in %s' %
                    (contentdir, path, contents, targetdir))
Example #33
 def candidatePaths(self):
     d = self.recipe.macros.destdir
     for path in self.candidates.keys():
         fullpath = util.joinPaths(d, path)
         if os.path.exists(fullpath):
             yield (path, self.candidates[path])
Example #34
    def doFile(self, path):
        if hasattr(self.recipe, '_getCapsulePathsForFile'):
            if self.recipe._getCapsulePathsForFile(path):
                return

        d = self.macros.destdir
        f = util.joinPaths(d, path)
        if not os.path.islink(f):
            return

        recipe = self.recipe
        contents = os.readlink(f)
        if contents[0] == '/':
            self.warn(
                'Absolute symlink %s points to %s,'
                ' should probably be relative', path, contents)
            return
        abscontents = util.joinPaths(os.path.dirname(path), contents)
        # now resolve any intermediate symlinks
        dl = len(os.path.realpath(d))
        abscontents = os.path.realpath(d + abscontents)[dl:]
        ap = recipe.autopkg
        if abscontents in ap.pathMap:
            if ap.findComponent(abscontents) != ap.findComponent(path) and \
               not path.endswith('.so') and \
               not ap.findComponent(path).getName().endswith(':test'):
                # warn about suspicious cross-component symlink
                fromPkg = ap.findComponent(path)
                targetPkg = ap.findComponent(abscontents)

                found = False
                for depClass, dep in fromPkg.requires.iterDeps():
                    d = deps.DependencySet()
                    d.addDep(depClass, dep)
                    if targetPkg.provides.satisfies(d):
                        found = True
                        break

                if not found:
                    self.warn('symlink %s points from package %s to %s', path,
                              ap.findComponent(path).getName(),
                              ap.findComponent(abscontents).getName())
        else:
            for targetFilter, requirement in self.targetFilters:
                if targetFilter.match(abscontents):
                    # contents are an exception
                    self.info('allowing special dangling symlink %s -> %s',
                              path, contents)
                    if requirement:
                        self.info(
                            'automatically adding requirement'
                            ' %s for symlink %s', requirement, path)
                        # Requires has already run, touch this up
                        pkg = ap.findComponent(path)
                        if path not in pkg.requiresMap:
                            pkg.requiresMap[path] = deps.DependencySet()
                        pkg.requiresMap[path].addDep(
                            deps.TroveDependencies,
                            deps.Dependency(requirement, []))
                        f = pkg.getFile(path)
                        f.requires.set(pkg.requiresMap[path])
                        pkg.requires.union(f.requires())
                    return
            for pathName in recipe.autopkg.pathMap:
                if pathName.startswith(abscontents):
                    # a link to a subdirectory of a file that is
                    # packaged is still OK; this test is expensive
                    # and almost never needed, so put off till last
                    return
            self.error("Dangling symlink: %s points to non-existent %s (%s)" %
                       (path, contents, abscontents))
            # now that an error has been logged, we need to get rid of the file
            # so the rest of policy won't barf trying to access a file which
            # doesn't *really* exist (CNP-59)
            os.unlink(self.recipe.macros.destdir + path)
Example #35
    def _addPhantomContents(self, changeSet, trv, header):
        """Fabricate files for the given RPM header"""
        for (path, owner, group, mode, size, rdev, flags, vflags, linkto,
             mtime) in itertools.izip(
                 header[rpmhelper.OLDFILENAMES],
                 header[rpmhelper.FILEUSERNAME],
                 header[rpmhelper.FILEGROUPNAME],
                 header[rpmhelper.FILEMODES],
                 header[rpmhelper.FILESIZES],
                 header[rpmhelper.FILERDEVS],
                 header[rpmhelper.FILEFLAGS],
                 header[rpmhelper.FILEVERIFYFLAGS],
                 header[rpmhelper.FILELINKTOS],
                 header[rpmhelper.FILEMTIMES],
             ):
            fullPath = util.joinPaths(self.root, path)
            fakestat = FakeStat(mode,
                                0,
                                None,
                                1,
                                owner,
                                group,
                                size,
                                mtime,
                                mtime,
                                mtime,
                                st_rdev=rdev,
                                linkto=linkto)
            pathId = os.urandom(16)

            # Adapted from conary.build.source.addCapsule.doRPM
            kind = 'regular'
            if flags & rpmhelper.RPMFILE_GHOST:
                kind = 'initial'
            elif flags & (rpmhelper.RPMFILE_CONFIG
                          | rpmhelper.RPMFILE_MISSINGOK
                          | rpmhelper.RPMFILE_NOREPLACE):
                if size:
                    kind = 'config'
                else:
                    kind = 'initial'
            elif vflags:
                if (stat.S_ISREG(mode)
                        and not (vflags & rpmhelper.RPMVERIFY_FILEDIGEST)
                        or (stat.S_ISLNK(mode)
                            and not (vflags & rpmhelper.RPMVERIFY_LINKTO))):
                    kind = 'initial'
            # Ignore failures trying to sha1 missing/inaccessible files as long
            # as those files are flagged initial contents (ghost)
            fileStream = files.FileFromFilesystem(fullPath,
                                                  pathId,
                                                  statBuf=fakestat,
                                                  sha1FailOk=True)
            if kind == 'config':
                fileStream.flags.isConfig(set=True)
            elif kind == 'initial':
                fileStream.flags.isInitialContents(set=True)
            else:
                assert kind == 'regular'

            # From conary.build.capsulepolicy.Payload
            if (isinstance(fileStream, files.RegularFile)
                    and not fileStream.flags.isConfig()
                    and not (fileStream.flags.isInitialContents()
                             and not fileStream.contents.size())):
                fileStream.flags.isEncapsulatedContent(set=True)

            fileId = fileStream.fileId()
            trv.addFile(pathId, path, trv.getVersion(), fileId)
            changeSet.addFile(None, fileId, fileStream.freeze())
            # Config file contents have to go into the database, so snag the
            # contents from the filesystem and put them in the changeset.
            if (fileStream.hasContents
                    and not fileStream.flags.isEncapsulatedContent()):
                if fileStream.contents.sha1() == sha1helper.sha1Empty:
                    # Missing/ghost config file. Hopefully it is supposed to be
                    # empty, but even if not then the fake SHA-1 will be the
                    # SHA-1 of the empty string since there's no hint of what
                    # it was supposed to be.
                    contents = filecontents.FromString('')
                else:
                    contents = filecontents.FromFilesystem(fullPath)
                changeSet.addFileContents(
                    pathId,
                    fileId,
                    contType=changeset.ChangedFileTypes.file,
                    contents=contents,
                    cfgFile=fileStream.flags.isConfig(),
                )
Example #36
 def preProcess(self):
     m = self.recipe.macros
     self.builddir = m.builddir
     self.destdir = util.joinPaths(m.destdir, m.thisdocdir)
Example #37
    def doFile(self, path):
        if hasattr(self.recipe, '_getCapsulePathsForFile'):
            if self.recipe._getCapsulePathsForFile(path):
                return

        m = self.recipe.magic[path]
        if not m:
            return
        # FIXME: should be:
        #if (m.name == "ELF" or m.name == "ar") and \
        #   m.contents['hasDebug']):
        # but this has to wait until ewt writes debug detection
        # for archives as well as elf files
        if (m.name == "ELF" and m.contents['hasDebug']) or \
           (m.name == "ar"):
            oldmode = None
            fullpath = self.dm.destdir + path
            mode = os.lstat(fullpath)[stat.ST_MODE]
            if mode & 0600 != 0600:
                # need to be able to read and write the file to strip it
                oldmode = mode
                os.chmod(fullpath, mode | 0600)
            if self.debuginfo and m.name == 'ELF' and not path.endswith('.o'):

                dir = os.path.dirname(path)
                b = os.path.basename(path)
                if not b.endswith('.debug'):
                    b += '.debug'

                debuglibdir = '%(destdir)s%(debuglibdir)s' % self.dm + dir
                debuglibpath = util.joinPaths(debuglibdir, b)
                if os.path.exists(debuglibpath):
                    return

                self._openDb()
                if (_findProgPath(
                        self.macros.debugedit, self.db, self.recipe,
                        error=False) and _findProgPath(self.macros.strip,
                                                       self.db,
                                                       self.recipe,
                                                       error=False)):

                    # null-separated AND terminated list, so we need to throw
                    # away the last (empty) item before updating self.debugfiles
                    self.debugfiles |= set(
                        util.popen(
                            '%(debugedit)s -b %(topbuilddir)s -d %(debugsrcdir)s'
                            ' -l /dev/stdout ' % self.dm +
                            fullpath).read().split('\x00')[:-1])
                    util.mkdirChain(debuglibdir)
                    util.execute('%s -f %s %s' %
                                 (self.dm.strip, debuglibpath, fullpath))

            else:
                self._openDb()
                if m.name == 'ar' or path.endswith('.o'):
                    # just in case strip is eu-strip, which segfaults
                    # whenever it touches an ar archive, and seems to
                    # break some .o files
                    if _findProgPath(self.macros.strip_archive,
                                     self.db,
                                     self.recipe,
                                     error=False):
                        util.execute('%(strip_archive)s ' % self.dm + fullpath)
                else:
                    if _findProgPath(self.macros.strip,
                                     self.db,
                                     self.recipe,
                                     error=False):
                        util.execute('%(strip)s ' % self.dm + fullpath)

            del self.recipe.magic[path]
            if oldmode is not None:
                os.chmod(fullpath, oldmode)
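
One detail worth noting in the example above: debugedit's -l /dev/stdout output is a NUL-separated and NUL-terminated list, so splitting on '\x00' leaves a trailing empty string that has to be dropped before updating self.debugfiles. A small illustration of just that parsing step, with made-up output:

# Made-up debugedit-style output: entries separated and terminated by NUL,
# so a plain split yields a trailing empty string.
raw = 'src/main.c\x00src/util.c\x00include/util.h\x00'

entries = raw.split('\x00')        # [..., 'include/util.h', '']
source_files = set(entries[:-1])   # drop the empty trailing item

assert source_files == {'src/main.c', 'src/util.c', 'include/util.h'}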
Example #38
    def install(self, flags, troveCs):
        ACTION_RESTORE = 1
        ACTION_SKIP = 2
        ACTION_CONFLICT = 3

        rc = SingleCapsuleOperation.install(self, flags, troveCs)
        if rc is None:
            # parent class thinks we should just ignore this troveCs; I'm
            # not going to argue with it (it's probably because the capsule
            # hasn't changed)
            return None

        (oldTrv, trv) = rc
        trvInfo = troveCs.getNewNameVersionFlavor()
        oldTrvInfo = troveCs.getOldNameVersionFlavor()
        hasCapsule = troveCs.hasCapsule()

        # Updates the fsJob metadata for installing the current trove.
        # It assumes files are replaced on install, and complains if something
        # is in the way unless the appropriate flags are set. This is a very
        # much simplified version of FilesystemJob._singleTrove() which maps
        # out a complete install strategy for native packages. Note that
        # we walk all of the files in this trove, not just the new files
        # or the changed files, because RPM installs all of the files.
        toRestore = []

        changedByPathId = dict((x[0], x) for x in troveCs.getChangedFileList())

        # things which aren't changed, new, or removed are unchanged
        unchangedByPathId = (set(x[0] for x in trv.iterFileList()) -
                             set(changedByPathId.iterkeys()) -
                             set(x[0] for x in troveCs.getNewFileList()) -
                             set(troveCs.getOldFileList()))

        l = []
        for oldFileInfo in troveCs.getChangedFileList():
            oldFileId, oldVersion = oldTrv.getFile(oldFileInfo[0])[1:3]
            l.append((oldFileInfo[0], oldFileId, oldVersion))

        for unchangedPathId in unchangedByPathId:
            unchangedFileId, unchangedFileVersion = \
                                    trv.getFile(unchangedPathId)[1:3]
            l.append((unchangedPathId, unchangedFileId, unchangedFileVersion))

        fileObjs = self.db.getFileVersions(l)
        fileObjsByPathId = dict([(x[0], y)
                                 for x, y in itertools.izip(l, fileObjs)])

        for fileInfo in trv.iterFileList():
            pathId, path, fileId, version = fileInfo

            if os.path.dirname(path) in self.netSharedPath:
                # we do nothing. really. nothing.
                #
                # we don't back it up. we don't mark it as removed in
                # our database. we don't look for conflicts. nothing.
                continue

            if pathId in changedByPathId:
                oldFileId = oldTrv.getFile(pathId)[1]
                fileChange = self.changeSet.getFileChange(oldFileId, fileId)
                if (oldFileId == fileId):
                    # only the version number changed; we don't need
                    # to merge anything here
                    fileObj = fileObjsByPathId[pathId]
                elif fileChange[0] == '\x01':
                    fileObj = fileObjsByPathId[pathId]
                    fileObj.twm(fileChange, fileObj)
                else:
                    fileObj = files.ThawFile(fileChange, pathId)
            elif pathId in unchangedByPathId:
                fileObj = fileObjsByPathId[pathId]
            else:
                # if it's not changed and it's not unchanged, it must be new
                fileStream = self.changeSet.getFileChange(None, fileId)
                fileObj = files.ThawFile(fileStream, pathId)

            absolutePath = util.joinPaths(self.root, path)

            if (fileObj.flags.isCapsuleAddition()):
                # this was added to the package outside of the RPM; we don't
                # have any responsibility for it
                continue
            elif (trove.conaryContents(hasCapsule, pathId, fileObj)
                  and fileObj.lsTag != 'd'):
                # this content isn't part of the capsule; remember to put
                # it back when RPM is done
                self.preservePath(path, unlink=True)
                continue

            s = util.lstat(absolutePath)
            if not s:
                # there is nothing in the way, so there is nothing which
                # concerns us here. Track the file for later.
                toRestore.append((fileInfo, fileObj))
                continue

            action = ACTION_CONFLICT

            existingOwners = list(
                self.db.iterFindPathReferences(path,
                                               justPresent=True,
                                               withStream=True))

            if existingOwners:
                # Don't complain about files owned by the previous version
                # of this trove.
                l = [x for x in existingOwners if x[0:3] == oldTrvInfo]
                if l:
                    existingOwners.remove(l[0])

                if not existingOwners:
                    action = ACTION_RESTORE
            elif stat.S_ISDIR(s.st_mode) and fileObj.lsTag == 'd':
                # Don't let existing directories stop us from taking over
                # ownership of the directory
                action = ACTION_RESTORE
            elif fileObj.flags.isInitialContents():
                # Initial contents files may be restored on top of things
                # already in the filesystem. They're ghosts or config files
                # and RPM will get the contents right either way, and we
                # should remove them either way.
                action = ACTION_RESTORE

            if action == ACTION_CONFLICT and not existingOwners:
                # Check for "conflicts" that might just be a view across a
                # symlink.
                if self.fsJob.findAliasedRemovals(absolutePath):
                    action = ACTION_RESTORE

            if action == ACTION_CONFLICT and existingOwners:
                if fileId in [x[4] for x in existingOwners]:
                    # The files share the same metadata. Whatever it looks like on
                    # disk, RPM is going to blow it away with the new one.
                    for info in existingOwners:
                        self.fsJob.sharedFile(info[0], info[1], info[2],
                                              info[3])
                    action = ACTION_RESTORE
                elif path.startswith('/usr/share/doc/'):
                    # Mirror the badness that Red Hat patches into RPM
                    # for rhel4 and rhel5
                    action = ACTION_RESTORE
                else:
                    existingFiles = [
                        files.ThawFile(x[5], pathId) for x in existingOwners
                    ]

                    compatibility = [
                        1 for x in existingFiles if fileObj.compatibleWith(x)
                    ]

                    if 1 in compatibility:
                        # files can be shared even though the fileId's
                        # are different
                        for info in existingOwners:
                            self.fsJob.sharedFile(info[0], info[1], info[2],
                                                  info[3])
                        action = ACTION_RESTORE
                    elif 1 in [
                            files.rpmFileColorCmp(x, fileObj)
                            for x in existingFiles
                    ]:
                        # rpm file colors and the default rpm setting for
                        # file color policy make elf64 files silently replace
                        # elf32 files. follow that behavior here.
                        #
                        # no, i'm not making this up
                        #
                        # yes, really
                        action = ACTION_SKIP
                    elif (self._checkReplaceManagedFiles(flags, path) or 1 in [
                            files.rpmFileColorCmp(fileObj, x)
                            for x in existingFiles
                    ]):
                        # The files are different. Bail unless we're supposed
                        # to replace managed files.
                        existingFile = files.FileFromFilesystem(
                            absolutePath, pathId)
                        for info in existingOwners:
                            self.fsJob.userRemoval(
                                fileObj=existingFile,
                                content=filecontents.FromFilesystem(
                                    absolutePath),
                                *info[0:4])
                        action = ACTION_RESTORE
                    else:
                        # it's not up to us to decide if this is a true
                        # conflict; the database layer will do that for
                        # us (see checkPathConflicts)
                        action = ACTION_RESTORE
            elif flags.replaceUnmanagedFiles:
                # we don't own it, but it's on disk. RPM will just write over
                # it and we have the flag saying we're good with that
                action = ACTION_RESTORE

            if action == ACTION_RESTORE:
                # We may proceed, and RPM will replace this file for us. We
                # need to track that it's being restored to avoid conflicts
                # with other restorations though.
                toRestore.append((fileInfo, fileObj))
            elif action == ACTION_CONFLICT:
                # The file exists already, we can't share it, and we're not
                # allowed to overwrite it.
                self._error(
                    errors.FileInWayError(util.normpath(path),
                                          troveCs.getName(),
                                          troveCs.getNewVersion(),
                                          troveCs.getNewFlavor()))
            else:
                assert (action == ACTION_SKIP)
                self.preservePath(path, unlink=False)
                self.fsJob.userRemoval(trv.getName(), trv.getVersion(),
                                       trv.getFlavor(), pathId)

        # toRestore is the list of what is going to be restored. We need to get
        # the fileObjects which will be created so we can track them in the
        # filesystem job. This lets the filesystem job look for resolveable
        # conflicts within this update. We handle newly created files first
        # and files which have changed (so we have to look up the diff)
        # a bit later.
        for fileInfo, fileObj in toRestore:
            # unpack this entry so path and fileId refer to the file being
            # restored, not to leftover values from the loop above
            (pathId, path, fileId, version) = fileInfo
            fullPath = util.joinPaths(self.root, path)
            self.fsJob._restore(fileObj,
                                fullPath,
                                trvInfo,
                                "restoring %s from RPM",
                                restoreFile=False,
                                fileId=fileId)
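
The classification near the top of install() above (changed vs. new vs. removed vs. unchanged files) is plain set arithmetic over pathIds. A toy sketch of the same partition with made-up pathId strings, independent of the trove and changeset objects used in the example:

# Hypothetical pathIds standing in for trv.iterFileList() and the
# changeset's changed/new/old file lists.
all_in_trove = {'p1', 'p2', 'p3', 'p4', 'p5'}
changed = {'p2'}
new = {'p5'}
removed = {'p9'}   # removed pathIds no longer appear in the new trove

# Anything in the trove that is not changed, new, or removed is unchanged,
# which is exactly the set difference the example computes.
unchanged = all_in_trove - changed - new - removed
assert unchanged == {'p1', 'p3', 'p4'}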