Пример #1
0
    def getDbFile(self, dbtype):
        """Attach a cached sqlite database for dbtype (e.g. "primary").

        Creates the per-repo cache directory hierarchy if needed.  Return 1
        when a cached database whose checksum matches the repomd entry was
        attached; otherwise execution continues past this excerpt
        (presumably rebuilding the cache -- the method is truncated here)."""
        # "primary" is loaded on every run; logging it would be noise.
        if dbtype != "primary":
            log.info2("Loading %s for %s...", dbtype, self.reponame)

        cachebase = os.path.join(self.config.cachedir, self.reponame)
        cachepath = os.path.join(self.config.cachedir, self.reponame, "sqlite")

        if not os.path.isdir(cachebase):
            os.makedirs(cachebase)
        if not os.path.isdir(cachepath):
            os.makedirs(cachepath)

        # check existing sqlite db
        dbfilename = os.path.join(cachepath, "%s.xml.gz.sqlite" % dbtype)
        if os.path.exists(dbfilename):
            try:
                csum, db = self.loadCache(dbfilename)
            except sqlite3.Error, e:
                log.error(e)
                csum = None  # force the checksum comparison below to fail
            # Only reuse the cache when its checksum matches repomd.
            if self.repomd.has_key(dbtype) and \
                   self.repomd[dbtype].has_key("checksum") and \
                   csum == self.repomd[dbtype]["checksum"]:
                setattr(self, "_%sdb" % dbtype, db)
                setattr(self, "_%sdb_cursor" % dbtype, db.cursor())
                return 1
Пример #2
0
def parseYumOptions(argv, yum):
    """Parse yum-like config options from argv to config.rpmconfig and RpmYum
    yum.

    Return a (possibly empty) list of non-option operands on success, None on
    error (if a help message should be written).  sys.exit () on --version,
    some invalid arguments or errors in config files."""

    # Argument parsing
    # Short options mirror rpm/yum usage; in the long-option list a
    # trailing "=" marks options that take a value (getopt convention).
    try:
        opts, args = getopt.getopt(argv, "?hvqc:r:yd:R:C", [
            "help", "verbose", "hash", "version", "quiet", "dbpath=", "root=",
            "installroot=", "force", "ignoresize", "ignorearch", "exactarch",
            "justdb", "test", "noconflicts", "fileconflicts", "nodeps",
            "signature", "noorder", "noscripts", "notriggers", "excludedocs",
            "excludeconfigs", "oldpackage", "autoerase", "autoeraseexclude=",
            "servicehack", "installpkgs=", "arch=", "archlist=",
            "checkinstalled", "rusage", "srpmdir=", "enablerepo=",
            "disablerepo=", "nocache", "cachedir=", "exclude=", "obsoletes",
            "noplugins", "diff", "verifyallconfig", "languages=",
            "releaseversion=", "disablerhn"
        ])
    except getopt.error, e:
        # FIXME: all to stderr
        log.error("Error parsing command-line arguments: %s", e)
        return None
Пример #3
0
 def readPrimary(self):
     """Fetch and parse primary.xml.gz for this repository.

     Return 1 on success (or when repomd lists no primary entry),
     0 on failure."""
     # If we have either a local cache of the primary.xml.gz file or if
     # it is already local (nfs or local file system) we can directly
     # use it once cached.
     if self.repomd.has_key("primary"):
         if not self.repomd["primary"].has_key("location"):
             # Report through the logging framework instead of bare
             # print, consistent with error handling in this module.
             log.error("Error primary has no location")
             return 0
         primary = self.repomd["primary"]["location"]
         filename = self.nc.cache(primary, 1)
         if not filename:
             log.error("Error can't find file for primary: %s", primary)
             return 0
         try:
             fd = PyGZIP(filename)
             ip = iterparse(fd, events=("start", "end"))
             ip = iter(ip)
         except IOError:
             log.error("Couldn't parse primary.xml")
             return 0
         self._parse(ip)
     return 1
Пример #4
0
 def readPrimary(self):
     """Parse primary.xml.gz, reusing a checksum-verified local copy.

     When the already-local file's SHA1 checksum matches the repomd
     entry it is used directly; otherwise the file is (re)downloaded
     into the cache.  Return 1 on success (or when repomd lists no
     primary entry), 0 on failure."""
     if not self.repomd.has_key("primary"):
         return 1
     entry = self.repomd["primary"]
     if not entry.has_key("location"):
         return 0
     location = entry["location"]
     (csum, destfile) = self.nc.checksum(location, "sha")
     if entry.has_key("checksum") and csum == entry["checksum"]:
         # Local copy is up to date; no download needed.
         filename = destfile
     else:
         filename = self.nc.cache(location, 1)
     if not filename:
         return 0
     try:
         fd = PyGZIP(filename)
         ip = iter(iterparse(fd, events=("start", "end")))
     except IOError:
         log.error("Couldn't parse primary.xml")
         return 0
     self._parse(ip)
     return 1
Пример #5
0
    def getDbFile(self, dbtype):
        """Attach a cached sqlite database for dbtype (e.g. "primary").

        Return 1 when a cached database whose checksum matches the repomd
        entry was attached; the method continues past this excerpt
        otherwise (truncated here)."""
        # "primary" is loaded on every run; logging it would be noise.
        if dbtype != "primary":
            log.info2("Loading %s for %s...", dbtype, self.reponame)

        cachebase = os.path.join(self.config.cachedir, self.reponame)
        cachepath = os.path.join(self.config.cachedir, self.reponame, "sqlite")

        if not os.path.isdir(cachebase):
            os.makedirs(cachebase)
        if not os.path.isdir(cachepath):
            os.makedirs(cachepath)

        # check existing sqlite db
        dbfilename = os.path.join(cachepath, "%s.xml.gz.sqlite" % dbtype)
        if os.path.exists(dbfilename):
            try:
                csum, db = self.loadCache(dbfilename)
            except sqlite3.Error, e:
                log.error(e)
                csum = None  # force the checksum comparison below to fail
            # Only reuse the cache when its checksum matches repomd.
            if self.repomd.has_key(dbtype) and \
                   self.repomd[dbtype].has_key("checksum") and \
                   csum == self.repomd[dbtype]["checksum"]:
                setattr(self, "_%sdb" % dbtype, db)
                setattr(self, "_%sdb_cursor" % dbtype, db.cursor())
                return 1
Пример #6
0
def parseYumOptions(argv, yum):
    """Parse yum-like config options from argv to config.rpmconfig and RpmYum
    yum.

    Return a (possibly empty) list of non-option operands on success, None on
    error (if a help message should be written).  sys.exit () on --version,
    some invalid arguments or errors in config files."""

    # Argument parsing
    # A trailing "=" in the long-option list marks options taking a value.
    try:
      opts, args = getopt.getopt(argv, "?hvqc:r:yd:R:C",
        ["help", "verbose",
         "hash", "version", "quiet", "dbpath=", "root=", "installroot=",
         "force", "ignoresize", "ignorearch", "exactarch", "justdb", "test",
         "noconflicts", "fileconflicts", "nodeps", "signature", "noorder",
         "noscripts", "notriggers", "excludedocs", "excludeconfigs",
         "oldpackage", "autoerase", "autoeraseexclude=", "servicehack",
         "installpkgs=", "arch=", "archlist=", "checkinstalled", "rusage",
         "srpmdir=", "enablerepo=", "disablerepo=", "nocache", "cachedir=",
         "exclude=", "obsoletes", "noplugins", "diff", "verifyallconfig",
         "languages=", "releaseversion=", "disablerhn"])
    except getopt.error, e:
        # FIXME: all to stderr
        log.error("Error parsing command-line arguments: %s", e)
        return None
Пример #7
0
Файл: io.py Проект: kholia/pyrpm
 def getHeaderByIndexData(self, index, storedata):
     """Parse the header entry described by *index* from storedata.

     Duplicate occurrences of a tag are tolerated when identical;
     differing duplicates are logged and the first value wins.
     Return the cached value for the tag."""
     tag = index[0]
     value = self.__parseTag(index, storedata)
     if not self.hdr.has_key(tag):
         self.hdr[tag] = value
     elif self.hdr[tag] != value:
         log.error("%s: tag %d included twice", self.source, tag)
     return self.hdr[tag]
Пример #8
0
 def getHeaderByIndexData(self, index, storedata):
     """Parse the header entry for index[0] and cache it in self.hdr.

     Return the (first seen) value for the tag; differing duplicates
     are logged as errors."""
     tag = index[0]
     # ignore duplicate entries as long as they are identical
     if self.hdr.has_key(tag):
         if self.hdr[tag] != self.__parseTag(index, storedata):
             log.error("%s: tag %d included twice", self.source, tag)
     else:
         self.hdr[tag] = self.__parseTag(index, storedata)
     return self.hdr[tag]
Пример #9
0
def readRpmPackage(config, source, verify=None, hdronly=None,
                   db=None, tags=None):
    """Read RPM package from source and close it.

    tags, if defined, specifies tags to load.  Raise ValueError on invalid
    data, IOError."""

    pkg = package.RpmPackage(config, source, verify, hdronly, db)
    try:
        pkg.read(tags=tags)
        pkg.close()
    except (IOError, ValueError), e:
        log.error("%s: %s\n", pkg, e)
        return None
Пример #10
0
Файл: io.py Проект: kholia/pyrpm
    def getHeaderByIndex(self, idx, indexdata, storedata):
        """Parse value of tag idx.

        Return (tag number, tag data).  Raise ValueError on invalid data."""

        # Every index entry is four big-endian 32-bit integers; the first
        # one is the tag number, the rest are interpreted by __parseTag.
        off = idx * 16
        index = unpack("!4I", indexdata[off:off + 16])
        tag = index[0]
        value = self.__parseTag(index, storedata)
        if not self.hdr.has_key(tag):
            self.hdr[tag] = value
        elif self.hdr[tag] != value:
            # Duplicates are tolerated only when identical.
            log.error("%s: tag %d included twice", self.source, tag)
        return (tag, self.hdr[tag])
Пример #11
0
    def getHeaderByIndex(self, idx, indexdata, storedata):
        """Parse value of tag idx.

        Return (tag number, tag data).  Raise ValueError on invalid data."""

        # Each 16-byte index entry holds four big-endian 32-bit ints; the
        # first is the tag number, the rest are consumed by __parseTag.
        index = unpack("!4I", indexdata[idx * 16:(idx + 1) * 16])
        tag = index[0]
        # ignore duplicate entries as long as they are identical
        if self.hdr.has_key(tag):
            if self.hdr[tag] != self.__parseTag(index, storedata):
                log.error("%s: tag %d included twice", self.source, tag)
        else:
            self.hdr[tag] = self.__parseTag(index, storedata)
        return (tag, self.hdr[tag])
Пример #12
0
 def readPGPKeys(self):
     """Fetch each configured GPG key URL and parse the contained keys.

     Unreadable or invalid keys are logged and skipped.  (The loop body
     continues past this excerpt; presumably the parsed keys are
     registered below -- truncated here.)"""
     for url in self.key_urls:
         filename = self.nc.cache(url, 1)
         try:
             f = file(filename)
             key_data = f.read()
             f.close()
         except Exception, e:
             log.error("Error reading GPG key %s: %s", filename, e)
             continue
         try:
             # Strip ASCII armor before handing the data to the parser.
             key_data = openpgp.isolateASCIIArmor(key_data)
             keys = openpgp.parsePGPKeys(key_data)
         except Exception, e:
             log.error("Invalid GPG key %s: %s", url, e)
             continue
Пример #13
0
 def readRepoMD(self):
     """Fetch and parse repodata/repomd.xml into self.repomd.

     Return 1 on success, 0 when the file cannot be fetched or parsed."""
     filename = self.nc.cache("repodata/repomd.xml", 1)
     if not filename:
         log.error("Couldn't open repomd.xml")
         return 0
     try:
         fd = open(filename)
         ip = iter(iterparse(fd, events=("start", "end")))
     except IOError:
         log.error("Couldn't parse repomd.xml")
         return 0
     # Create our network cache object
     self.repomd = self._parse(ip)
     return 1
Пример #14
0
 def readPGPKeys(self):
     """Fetch each configured GPG key URL and parse the contained keys.

     Unreadable or invalid keys are logged and skipped (truncated here)."""
     for url in self.key_urls:
         filename = self.nc.cache(url, 1)
         try:
             f = file(filename)
             key_data = f.read()
             f.close()
         except Exception, e:
             log.error("Error reading GPG key %s: %s", filename, e)
             continue
         try:
             # Strip ASCII armor before parsing.
             key_data = openpgp.isolateASCIIArmor(key_data)
             keys = openpgp.parsePGPKeys(key_data)
         except Exception, e:
             log.error("Invalid GPG key %s: %s", url, e)
             continue
Пример #15
0
 def readRepoMD(self):
     """Fetch and parse repodata/repomd.xml into self.repomd.

     Return 1 on success, 0 on fetch or parse failure."""
     # First we try and read the repomd file as a starting point.
     filename = self.nc.cache("repodata/repomd.xml", 1)
     if not filename:
         log.error("Couldn't open repomd.xml")
         return 0
     try:
         fd = open(filename)
         ip = iterparse(fd, events=("start","end"))
         ip = iter(ip)
     except IOError:
         log.error("Couldn't parse repomd.xml")
         return 0
     # Create our network cache object
     self.repomd = self._parse(ip)
     return 1
Пример #16
0
    def createRepo(self):
        """Create repodata metadata for self.source.

        Return 1 on success, 0 on failure.  Assumes self.source is a local file
        system path without schema prefix."""

        # Deferred imports: only needed when actually creating a repo.
        import gzip, libxml2
        log.info1("Pass 1: Parsing package headers for file requires.")
        self.__readDir(self.source, "")
        filename = functions._uriToFilename(self.source)
        datapath = os.path.join(filename, "repodata")
        # Ensure the repodata/ output directory exists.
        if not os.path.isdir(datapath):
            try:
                os.makedirs(datapath)
            except OSError, e:
                log.error("%s: Couldn't create repodata: %s", filename, e)
                return 0
Пример #17
0
    def readRpm(self, key, db, tags):
        """Build an RpmDBPackage from the rpmdb entry stored under key.

        db maps raw keys to header blobs; tags lists the tag names to
        load.  Return None for invalid or special entries (key 0,
        malformed data, gpg-pubkey packages).  The successful-return
        tail is not shown in this excerpt."""
        pkg = RpmDBPackage(self.config, "dummy")
        pkg.reponame = "installed"
        pkg.key = key
        pkg.db = self
        data = db[key]
        try:
            val = unpack("I", key)[0]
        except struct.error:
            log.error("Invalid key %s in rpmdb", repr(key))
            return None

        # Key 0 is not a package entry.
        if val == 0:
            return None

        try:
            (indexNo, storeSize) = unpack("!2I", data[0:8])
        except struct.error:
            log.error("Value for key %s in rpmdb is too short", repr(key))
            return None

        # Each index entry is 16 bytes; the store follows the index area.
        if len(data) < indexNo*16 + 8:
            log.error("Value for key %s in rpmdb is too short", repr(key))
            return None
        indexdata = unpack("!%sI" % (indexNo*4), data[8:indexNo*16+8])
        # Regroup the flat int tuple into 4-int index entries.
        indexes = zip(indexdata[0::4], indexdata[1::4],
                      indexdata[2::4], indexdata[3::4])
        indexdata = {}
        # Keep only entries whose tag number has a known name.
        for idx in indexes:
            if rpmtagname.has_key(idx[0]):
                indexdata[rpmtagname[idx[0]]] = idx
        pkg.indexdata = indexdata

        storedata = data[indexNo*16+8:]
        pkg["signature"] = {}

        # read the tags
        ok = self.readTags(pkg, tags, storedata)
        if not ok:
            return None

        # DON'T add epoch tag, we use pkg.getEpoch() to make it work properly
        #if not pkg.has_key("epoch"):
        #    pkg["epoch"] = [ 0 ]


        if pkg["name"] == "gpg-pubkey":
            return None  # FIXME
            # NOTE(review): the key-import code below is unreachable due
            # to the early return above; left disabled (see FIXME).
            try:
                keys = openpgp.parsePGPKeys(pkg["description"])
            except ValueError, e:
                log.error("Invalid key package %s: %s", pkg["name"], e)
                return None
            for k in keys:
                self.keyring.addKey(k)
            return None
Пример #18
0
    def createRepo(self):
        """Create repodata metadata for self.source.

        Return 1 on success, 0 on failure.  Assumes self.source is a local file
        system path without schema prefix."""

        # Deferred imports: only needed when actually creating a repo.
        import gzip, libxml2
        log.info1("Pass 1: Parsing package headers for file requires.")
        self.__readDir(self.source, "")
        filename = functions._uriToFilename(self.source)
        datapath = os.path.join(filename, "repodata")
        # Ensure the repodata/ output directory exists.
        if not os.path.isdir(datapath):
            try:
                os.makedirs(datapath)
            except OSError, e:
                log.error("%s: Couldn't create repodata: %s", filename, e)
                return 0
Пример #19
0
def readRpmPackage(config,
                   source,
                   verify=None,
                   hdronly=None,
                   db=None,
                   tags=None):
    """Read RPM package from source and close it.

    tags, if defined, specifies tags to load.  Raise ValueError on invalid
    data, IOError."""

    pkg = package.RpmPackage(config, source, verify, hdronly, db)
    try:
        pkg.read(tags=tags)
        pkg.close()
    except (IOError, ValueError), e:
        log.error("%s: %s\n", pkg, e)
        return None
Пример #20
0
 def readComps(self):
     """Read the comps.xml (package group) file, if repomd lists one.

     Return 1 on success or when no group data exists, 0 on failure."""
     # Try to read a comps.xml file if there is any before we parse the
     # primary.xml
     if self.repomd.has_key("group"):
         if not self.repomd["group"].has_key("location"):
             log.error("Couldn't find proper location for comps.xml in repomd")
             return 0
         comps = self.repomd["group"]["location"]
         # Reuse the local copy when its checksum matches repomd.
         (csum, destfile) = self.nc.checksum(comps, "sha")
         if self.repomd["group"].has_key("checksum") and \
                csum == self.repomd["group"]["checksum"]:
             filename = destfile
         else:
             filename = self.nc.cache(comps, 1)
         if not filename:
             return 0
         try:
             self.comps = RpmCompsXML(self.config, filename)
             self.comps.read()
         except IOError:
             return 0
     return 1
Пример #21
0
 def readComps(self):
     """Read the comps.xml (package group) file, if repomd lists one.

     Return 1 on success or when no group data exists, 0 on failure."""
     # Try to read a comps.xml file if there is any before we parse the
     # primary.xml
     if self.repomd.has_key("group"):
         if not self.repomd["group"].has_key("location"):
             log.error(
                 "Couldn't find proper location for comps.xml in repomd")
             return 0
         comps = self.repomd["group"]["location"]
         # Reuse the local copy when its checksum matches repomd.
         (csum, destfile) = self.nc.checksum(comps, "sha")
         if self.repomd["group"].has_key("checksum") and \
                csum == self.repomd["group"]["checksum"]:
             filename = destfile
         else:
             filename = self.nc.cache(comps, 1)
         if not filename:
             return 0
         try:
             self.comps = RpmCompsXML(self.config, filename)
             self.comps.read()
         except IOError:
             return 0
     return 1
Пример #22
0
    def importFilelist(self):
        """Parse filelists.xml.gz if it was not parsed before.

        Return 1 on success, 0 on failure."""

        # We need to have successfully read a repo from one source before we
        # can import it's filelist.
        if not self.is_read:
            return 0
        # Already imported; nothing to do.
        if self.filelist_imported:
            return 1
        # Same as with primary.xml.gz: If we already have a local version and
        # it matches the checksum found in repomd then we don't need to
        # download it again.
        if self.repomd.has_key("filelists"):
            if not self.repomd["filelists"].has_key("location"):
                return 0
            filelists = self.repomd["filelists"]["location"]
            (csum, destfile) = self.nc.checksum(filelists, "sha")
            if self.repomd["filelists"].has_key("checksum") and \
                   csum == self.repomd["filelists"]["checksum"]:
                filename = destfile
            else:
                filename = self.nc.cache(filelists, 1)
            if not filename:
                return 0
            try:
                fd = PyGZIP(filename)
                ip = iterparse(fd, events=("start", "end"))
                ip = iter(ip)
            except IOError:
                log.error("Couldn't parse filelists.xml")
                return 0
            self._parse(ip)
            self.filelist_imported = 1
        return 1
Пример #23
0
    def readTags(self, pkg, tags, storedata=None):
        rpmio = io.RpmFileIO("dummy")

        if storedata is None:
            data = self.packages_db[pkg.key]
            storedata = data[len(pkg.indexdata)*16+8:]

        for tag in tags:
            if pkg.indexdata.has_key(tag):
                index = pkg.indexdata[tag]
            else:
                continue
            try:
                tagval = rpmio.getHeaderByIndexData(index, storedata)
            except ValueError, e:
                log.error("Invalid header entry %s in %s: %s", tag, index, e)
                return 0

            if tag.startswith("install_"):
                pkg["signature"][tag[8:]] = tagval

            if tag == "archivesize":
                pkg["signature"]["payloadsize"] = tagval
            pkg[tag] = tagval
Пример #24
0
    def importFilelist(self):
        """Parse filelists.xml.gz if it was not parsed before.

        Return 1 on success, 0 on failure."""

        # We need to have successfully read a repo from one source before we
        # can import it's filelist.
        if not self.is_read:
            return 0
        # Already imported; nothing to do.
        if self.filelist_imported:
            return 1
        # Same as with primary.xml.gz: If we already have a local version and
        # it matches the checksum found in repomd then we don't need to
        # download it again.
        if self.repomd.has_key("filelists"):
            if not self.repomd["filelists"].has_key("location"):
                return 0
            filelists = self.repomd["filelists"]["location"]
            (csum, destfile) = self.nc.checksum(filelists, "sha")
            if self.repomd["filelists"].has_key("checksum") and \
                   csum == self.repomd["filelists"]["checksum"]:
                filename = destfile
            else:
                filename = self.nc.cache(filelists, 1)
            if not filename:
                return 0
            try:
                fd = PyGZIP(filename)
                ip = iterparse(fd, events=("start","end"))
                ip = iter(ip)
            except IOError:
                log.error("Couldn't parse filelists.xml")
                return 0
            self._parse(ip)
            self.filelist_imported = 1
        return 1
Пример #25
0
class RhnChannelRepoDB(SqliteRepoDB):
    """
    Database for Red Hat Network repositories.
    """

    # RHN login fields that must be present in the up2date login info;
    # they are forwarded as HTTP request headers.
    rhn_needed_headers = ['X-RHN-Server-Id',
                          'X-RHN-Auth-User-Id',
                          'X-RHN-Auth',
                          'X-RHN-Auth-Server-Time',
                          'X-RHN-Auth-Expire-Offset']

    def __init__(self, config, source, buildroot='', channelname='default', nc=None):
        # Collect the RHN auth headers before the base class starts any
        # network activity through self.nc.
        self.http_headers = { }
        self.__setupRhnHttpHeaders()
        SqliteRepoDB.__init__(self, config, source, buildroot, channelname, nc)
        self.nc.setHeaders(self.http_headers, channelname)
        self.nc.setCallback(self.__ncCallback, channelname)
        # Absolute time at which the RHN session expires.
        self.authtime = time.time() + int(float(self.http_headers['X-RHN-Auth-Expire-Offset']))
        # NOTE(review): the four statements below duplicate the tail of
        # __setupRhnHttpHeaders() (already called above) -- presumably
        # harmless, but worth confirming.
        if self.http_headers['X-RHN-Auth-User-Id'] == '':
            self.http_headers['X-RHN-Auth-User-Id'] = '\nX-libcurl-Empty-Header-Workaround: *'
        self.http_headers['Pragma'] = 'no-cache'
        self.http_headers['X-RHN-Transport-Capability'] = 'follow-redirects=3'


    def __setupRhnHttpHeaders(self):
        """ Set up self.http_headers with needed RHN X-RHN-blah headers """

        try:
            li = up2dateAuth.getLoginInfo()
        except up2dateErrors.RhnServerException, e:
            raise yum.Errors.RepoError(str(e))

        # TODO:  do evaluation on li auth times to see if we need to obtain a
        # new session...

        for header in RhnChannelRepoDB.rhn_needed_headers:
            if not li.has_key(header):
                log.error("Missing required login information for RHN: %s" % header)
                raise ValueError
            self.http_headers[header] = li[header]
        # An empty value would make the header be dropped; smuggle a
        # workaround header through instead (libcurl empty-header issue).
        if self.http_headers['X-RHN-Auth-User-Id'] == '':
            self.http_headers['X-RHN-Auth-User-Id'] = '\nX-libcurl-Empty-Header-Workaround: *'
        self.http_headers['Pragma'] = 'no-cache'
        self.http_headers['X-RHN-Transport-Capability'] = 'follow-redirects=3'
Пример #26
0
def getFreeDiskspace(config, operations):
    """Check there is enough disk space for operations, a list of
    (operation, RpmPackage).

    Use RpmConfig config.  Return 1 if there is enough space (with 30 MB
    slack), 0 otherwise (after warning the user).  (The function is
    truncated in this excerpt; the final per-device accounting is not
    shown.)"""

    if config.ignoresize:
        return 1
    freehash = {
    }  # device number => [currently counted free bytes, block size]
    # device number => [minimal encountered free bytes, block size]
    minfreehash = {}
    dirhash = {}  # directory name => device number
    mountpoint = {}  # device number => mount point path
    ret = 1

    if config.buildroot:
        br = config.buildroot
    else:
        br = "/"
    for (op, pkg) in operations:
        if op == OP_UPDATE or op == OP_INSTALL or op == OP_FRESHEN:
            try:
                pkg.reread(config.diskspacetags)
            except Exception, e:
                log.error("Error rereading package: %s: %s", pkg.source, e)
                return 0
        dirnames = pkg["dirnames"]
        if dirnames == None:
            continue
        for dirname in dirnames:
            # Strip trailing slashes (but keep "/" itself).
            while dirname[-1:] == "/" and len(dirname) > 1:
                dirname = dirname[:-1]
            if dirname in dirhash:
                continue
            dnames = []
            devname = br + dirname
            # Walk up the tree until stat() succeeds or a parent with a
            # known device is found; record all visited names.
            while 1:
                dnames.append(dirname)
                try:
                    dev = os.stat(devname).st_dev
                    break
                except OSError:
                    dirname = os.path.dirname(dirname)
                    devname = os.path.dirname(devname)
                    if dirname in dirhash:
                        dev = dirhash[dirname]
                        break
            for d in dnames:
                dirhash[d] = dev
            if dev not in freehash:
                # First time this device is seen: record free bytes
                # (block size * available blocks) and the block size.
                statvfs = os.statvfs(devname)
                freehash[dev] = [statvfs[0] * statvfs[4], statvfs[0]]
                minfreehash[dev] = [statvfs[0] * statvfs[4], statvfs[0]]
            if dev not in mountpoint:
                # Walk upward to find the mount point for this device.
                fulldir = os.path.normpath(br + "/" + dirname)
                while len(fulldir) > 0:
                    if os.path.ismount(fulldir):
                        mountpoint[dev] = dirname
                        break
                    dirname = os.path.dirname(dirname)
                    fulldir = os.path.dirname(fulldir)
        dirindexes = pkg["dirindexes"]
        filesizes = pkg["filesizes"]
        filemodes = pkg["filemodes"]
        if not dirindexes or not filesizes or not filemodes:
            continue
        for i in xrange(len(dirindexes)):
            # Only regular files consume counted space here.
            if not S_ISREG(filemodes[i]):
                continue
            dirname = dirnames[dirindexes[i]]
            while dirname[-1:] == "/" and len(dirname) > 1:
                dirname = dirname[:-1]
            dev = freehash[dirhash[dirname]]
            mdev = minfreehash[dirhash[dirname]]
            # Round the file size up to a whole number of blocks.
            filesize = ((filesizes[i] / dev[1]) + 1) * dev[1]
            if op == OP_ERASE:
                dev[0] += filesize
            else:
                dev[0] -= filesize
            if mdev[0] > dev[0]:
                mdev[0] = dev[0]
        for (dev, val) in minfreehash.iteritems():
            # Less than 30MB space left on device?
            if val[0] < 31457280:
                log.debug1("%s: Less than 30MB of diskspace left on %s",
                           pkg.getNEVRA(), mountpoint[dev])
        pkg.close()
        pkg.clear(ntags=config.nevratags)
Пример #27
0
            if op == OP_ERASE:
                dev[0] += filesize
            else:
                dev[0] -= filesize
            if mdev[0] > dev[0]:
                mdev[0] = dev[0]
        for (dev, val) in minfreehash.iteritems():
            # Less than 30MB space left on device?
            if val[0] < 31457280:
                log.debug1("%s: Less than 30MB of diskspace left on %s",
                          pkg.getNEVRA(), mountpoint[dev])
        pkg.close()
        pkg.clear(ntags=config.nevratags)
    for (dev, val) in minfreehash.iteritems():
        if val[0] < 31457280:
            log.error("%sMB more diskspace required on %s for operation",
                      30 - val[0]/1024/1024, mountpoint[dev])
            ret = 0
    return ret

def int2str(val, binary=True):
    """Convert an integer to a string of the format X[.Y] [SI prefix]"""
    units = "kMGTPEZYND"
    #small_units = "munpfazy"
    if binary:
        divider = 1024
    else:
        divider = 1000

    if val>999:
        mantissa = float(val)
        exponent = -1
Пример #28
0
def getFreeDiskspace(config, operations):
    """Check there is enough disk space for operations, a list of
    (operation, RpmPackage).

    Use RpmConfig config.  Return 1 if there is enough space (with 30 MB
    slack), 0 otherwise (after warning the user)."""

    if config.ignoresize:
        return 1
    freehash = {} # device number => [currently counted free bytes, block size]
    # device number => [minimal encountered free bytes, block size]
    minfreehash = {}
    dirhash = {}                        # directory name => device number
    mountpoint = { }                    # device number => mount point path
    ret = 1

    if config.buildroot:
        br = config.buildroot
    else:
        br = "/"
    for (op, pkg) in operations:
        if op == OP_UPDATE or op == OP_INSTALL or op == OP_FRESHEN:
            try:
                pkg.reread(config.diskspacetags)
            except Exception, e:
                log.error("Error rereading package: %s: %s", pkg.source, e)
                return 0
        dirnames = pkg["dirnames"]
        if dirnames == None:
            continue
        for dirname in dirnames:
            while dirname[-1:] == "/" and len(dirname) > 1:
                dirname = dirname[:-1]
            if dirname in dirhash:
                continue
            dnames = []
            devname = br + dirname
            while 1:
                dnames.append(dirname)
                try:
                    dev = os.stat(devname).st_dev
                    break
                except OSError:
                    dirname = os.path.dirname(dirname)
                    devname = os.path.dirname(devname)
                    if dirname in dirhash:
                        dev = dirhash[dirname]
                        break
            for d in dnames:
                dirhash[d] = dev
            if dev not in freehash:
                statvfs = os.statvfs(devname)
                freehash[dev] = [statvfs[0] * statvfs[4], statvfs[0]]
                minfreehash[dev] = [statvfs[0] * statvfs[4], statvfs[0]]
            if dev not in mountpoint:
                fulldir = os.path.normpath(br+"/"+dirname)
                while len(fulldir) > 0:
                    if os.path.ismount(fulldir):
                        mountpoint[dev] = dirname
                        break
                    dirname = os.path.dirname(dirname)
                    fulldir = os.path.dirname(fulldir)
        dirindexes = pkg["dirindexes"]
        filesizes = pkg["filesizes"]
        filemodes = pkg["filemodes"]
        if not dirindexes or not filesizes or not filemodes:
            continue
        for i in xrange(len(dirindexes)):
            if not S_ISREG(filemodes[i]):
                continue
            dirname = dirnames[dirindexes[i]]
            while dirname[-1:] == "/" and len(dirname) > 1:
                dirname = dirname[:-1]
            dev = freehash[dirhash[dirname]]
            mdev = minfreehash[dirhash[dirname]]
            filesize = ((filesizes[i] / dev[1]) + 1) * dev[1]
            if op == OP_ERASE:
                dev[0] += filesize
            else:
                dev[0] -= filesize
            if mdev[0] > dev[0]:
                mdev[0] = dev[0]
        for (dev, val) in minfreehash.iteritems():
            # Less than 30MB space left on device?
            if val[0] < 31457280:
                log.debug1("%s: Less than 30MB of diskspace left on %s",
                          pkg.getNEVRA(), mountpoint[dev])
        pkg.close()
        pkg.clear(ntags=config.nevratags)
Пример #29
0
    # -----------------------------------------------------------------------

    if install_flag == 1:
        operation = pyrpm.RpmResolver.OP_INSTALL
    elif update_flag == 1:
        operation = pyrpm.RpmResolver.OP_UPDATE
    else: # erase_flag
        operation = pyrpm.RpmResolver.OP_ERASE

    instlist = pyrpm.RpmResolver(installed, operation)

    for r in rpms:
        ret = instlist.append(r)
            
        if ret == pyrpm.RpmResolver.ALREADY_INSTALLED:
            log.error("Package %s is already installed", r.getNEVRA())
            sys.exit(ret)
        elif ret == pyrpm.RpmResolver.OLD_PACKAGE:
            log.info1("%s: A newer package is already installed",
                        r.getNEVRA())
        elif ret == pyrpm.RpmResolver.NOT_INSTALLED:
            log.error("Package %s is not installed", r.getNEVRA())
            sys.exit(ret)
        elif ret == pyrpm.RpmResolver.UPDATE_FAILED:
            log.error("Update of %s failed", r.getNEVRA())
            sys.exit(ret)
        elif ret == pyrpm.RpmResolver.OBSOLETE_FAILED:
            log.error("%s: Uninstall of obsolete failed", r.getNEVRA())
            sys.exit(ret)
        # else: all ok
Пример #30
0
        try:
            if "basenames" in tags and pkg.has_key("oldfilenames"):
                pkg.generateFileNames()
            if "providename" in tags:
                pkg["provides"] = pkg.getProvides()
            if "requirename" in tags:
                pkg["requires"] = pkg.getRequires()
            if "obsoletename" in tags:
                pkg["obsoletes"] = pkg.getObsoletes()
            if "conflictname" in tags:
                pkg["conflicts"] = pkg.getConflicts()
            if "triggername" in tags:
                pkg["triggers"] = pkg.getTriggers()
        except ValueError, e:
            log.error("Error in package %s: %s", pkg.getNEVRA(), e)
            return 0
        return 1

    def write(self):
        """Writing is a no-op for this database; always report success."""
        return 1

    def addPkg(self, pkg):
        """Add pkg to the database; return True on success.

        Also registers obsoleting packages in self.obsoletes_list when
        one is being maintained."""
        # Adding a package invalidates the memoized basenames lookup.
        self.basenames_cache.clear()
        result = self._addPkg(pkg)
        if result and pkg["obsoletes"] and self.obsoletes_list is not None:
            p = self.getPkgById(result)
            self.obsoletes_list.addPkg(p)
        return bool(result)

    def _addPkg(self, pkg):
Пример #31
0
            if op == OP_ERASE:
                dev[0] += filesize
            else:
                dev[0] -= filesize
            if mdev[0] > dev[0]:
                mdev[0] = dev[0]
        for (dev, val) in minfreehash.iteritems():
            # Less than 30MB space left on device?
            if val[0] < 31457280:
                log.debug1("%s: Less than 30MB of diskspace left on %s",
                           pkg.getNEVRA(), mountpoint[dev])
        pkg.close()
        pkg.clear(ntags=config.nevratags)
    for (dev, val) in minfreehash.iteritems():
        if val[0] < 31457280:
            log.error("%sMB more diskspace required on %s for operation",
                      30 - val[0] / 1024 / 1024, mountpoint[dev])
            ret = 0
    return ret


def int2str(val, binary=True):
    """Convert an integer to a string of the format X[.Y] [SI prefix]"""
    units = "kMGTPEZYND"
    #small_units = "munpfazy"
    if binary:
        divider = 1024
    else:
        divider = 1000

    if val > 999:
        mantissa = float(val)