Example #1
    def _cache_channel_packages_short(self, channel_id, key, last_modified):
        """ Caches the short package entries for channel_id """
        # Create a temporary file
        temp_stream = tempfile.TemporaryFile()
        # Always compress the result
        compress_level = 5
        stream = gzip.GzipFile(None, "wb", compress_level, temp_stream)
        writer = xmlWriter.XMLWriter(stream=stream)

        # Fetch packages
        h = rhnSQL.prepare(self._query_get_channel_packages)
        h.execute(channel_id=channel_id)
        package_ids = h.fetchall_dict() or []
        # Sort packages
        package_ids.sort(lambda a, b: cmp(a['package_id'], b['package_id']))

        dumper = SatelliteDumper(writer,
                                 ShortPackagesDumper(writer, package_ids))
        dumper.dump()
        writer.flush()
        # We're done with the stream object
        stream.close()
        del stream
        temp_stream.seek(0, 0)
        # Set the value in the cache. We don't recompress the result since
        # it's already compressed
        rhnCache.set(key,
                     temp_stream.read(),
                     modified=last_modified,
                     compressed=0,
                     raw=1)
        return self._normalize_compressed_stream(temp_stream)
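
The method above compresses the XML exactly once, then hands the bytes to the cache with compressed=0 and raw=1 so they are stored verbatim instead of being gzipped a second time. A minimal sketch of the same trick, assuming rhnCache is importable from spacewalk.common; the key name and payload are hypothetical:

import gzip
import tempfile

from spacewalk.common import rhnCache

key = "xml-stream/demo"  # hypothetical key

# Compress the payload ourselves, exactly once.
temp_stream = tempfile.TemporaryFile()
gz = gzip.GzipFile(None, "wb", 5, temp_stream)
gz.write("<rhn-packages-short/>")
gz.close()
temp_stream.seek(0, 0)

# compressed=0 marks the bytes as already compressed, so the cache stores
# them as-is; raw=1 skips pickling.
rhnCache.set(key, temp_stream.read(), compressed=0, raw=1)

# A reader that asks for compressed=1 should get the decompressed payload.
assert rhnCache.get(key, compressed=1, raw=1) == "<rhn-packages-short/>"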
Example #2
    def test_opening_uncompressed_data_as_compressed(self):
      "Should return None, opening uncompressed data as compressed"
      rhnCache.set(self.key, self.content, raw=1)

      self.assertEqual(None, rhnCache.get(self.key, compressed=1, raw=1))

      self._cleanup(self.key)
Example #4
    def test_opening_raw_data_as_pickled(self):
      "Should return None, opening uncompressed data as compressed"
      rhnCache.set(self.key, "12345", raw=1)

      self.assertEqual(None, rhnCache.get(self.key, raw=0))

      self._cleanup(self.key)
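
The counterpart also holds: a value stored pickled (raw=0) only round-trips when read back with the same flag. A minimal sketch under the same assumptions as the tests above (the key name is hypothetical):

from spacewalk.common import rhnCache

key = "test-pickled"  # hypothetical key
rhnCache.set(key, {"a": 1}, raw=0)           # pickled on disk
assert rhnCache.get(key, raw=0) == {"a": 1}  # unpickled on read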
Example #5
def get(name, modified=None, raw=None, compressed=None):
    # Check to see if the entry is in the database, with the right version
    h = _fetch_cursor(key=name, modified=modified)

    row = h.fetchone_dict()

    if not row:
        # Key not found
        return None

    if modified and row['delta'] != 0:
        # Different version
        log_debug(4, "database cache: different version")
        return None

    if modified is None:
        # The caller doesn't care about the modified time, but we do, since we
        # want to fetch the same version from the disk cache
        modified = row['modified']

    if rhnCache.has_key(name, modified):
        # We have the value
        log_debug(4, "Filesystem cache HIT")
        return rhnCache.get(name, modified=modified, raw=raw)

    log_debug(4, "Filesystem cache MISS")

    # The disk cache doesn't have this key at all, or it's a modified value
    # Fetch the value from the database

    v = row['value']
    # Update the accessed field
    rhnSQL.Procedure("rhn_cache_update_accessed")(name)

    if compressed:
        io = cStringIO.StringIO()

        io.write(rhnSQL.read_lob(v))
        io.seek(0, 0)

        # XXX For about 40M of compressed data sometimes we get:
        # zlib.error: Error -3 while decompressing: incomplete dynamic bit lengths tree
        v = gzip.GzipFile(None, "r", 0, io)

    try:
        data = v.read()
    except (ValueError, IOError, gzip.zlib.error) as e:
        # XXX poking at gzip.zlib may not be that well-advised
        log_error("rhnDatabaseCache: gzip error for key %s: %s" % (name, e))
        # Ignore this entry in the database cache, it has invalid data
        return None

    # We store the data in the database cache, in raw format
    rhnCache.set(name, data, modified=modified, raw=1)

    # Unpickle the data, unless raw access was requested
    if not raw:
        return cPickle.loads(data)

    return data
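
Seen from the caller, this two-tier lookup behaves like a single get(): the database row carries the authoritative timestamp, the filesystem copy is a write-through accelerator, and any stale or corrupt entry degrades to a plain miss. A hypothetical call against the function above:

data = get("rhn-channel-families", modified="20041110001122", raw=1)
if data is None:
    # missing key, different version, or an unreadable gzip LOB --
    # every failure mode collapses into a plain cache miss
    pass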
Example #7
    def _cache_channel_packages_short(self, channel_id, key, last_modified):
        """ Caches the short package entries for channel_id """
        # Create a temporary file
        temp_stream = tempfile.TemporaryFile()
        # Always compress the result
        compress_level = 5
        stream = gzip.GzipFile(None, "wb", compress_level, temp_stream)
        writer = xmlWriter.XMLWriter(stream=stream)

        # Fetch packages
        h = rhnSQL.prepare(self._query_get_channel_packages)
        h.execute(channel_id=channel_id)
        package_ids = h.fetchall_dict() or []
        # Sort packages
        package_ids.sort(lambda a, b: cmp(a['package_id'], b['package_id']))

        dumper = SatelliteDumper(writer,
                                 ShortPackagesDumper(writer, package_ids))
        dumper.dump()
        writer.flush()
        # We're done with the stream object
        stream.close()
        del stream
        temp_stream.seek(0, 0)
        # Set the value in the cache. We don't recompress the result since
        # it's already compressed
        rhnCache.set(key, temp_stream.read(), modified=last_modified,
                     compressed=0, raw=1)
        return self._normalize_compressed_stream(temp_stream)
Example #8
    def test_cache_5(self):
        content = self.content * 10
        timestamp = '20041110001122'

        self._cleanup(self.key)
        rhnCache.set(self.key, content, modified=timestamp)

        self.failUnless(rhnCache.has_key(self.key))
        self.failUnless(rhnCache.has_key(self.key, modified=timestamp))
        self.failIf(rhnCache.has_key(self.key, modified='20001122112233'))
        self._cleanup(self.key)
Example #10
    def test_cache_5(self):
        content = self.content * 10
        timestamp = '20041110001122'

        self._cleanup(self.key)
        rhnCache.set(self.key, content, modified=timestamp)

        self.assertTrue(rhnCache.has_key(self.key))
        self.assertTrue(rhnCache.has_key(self.key, modified=timestamp))
        self.assertFalse(rhnCache.has_key(self.key, modified='20001122112233'))
        self._cleanup(self.key)
Example #11
    def _test(self, key, content, **modifiers):
        # Blow it away
        rhnCache.CACHEDIR = '/tmp/rhn'
        self._cleanup(key)
        rhnCache.set(key, content, **modifiers)
        self.failUnless(rhnCache.has_key(key))
        content2 = rhnCache.get(key, **modifiers)
        self.assertEqual(content, content2)

        self._cleanup(key)
        self.failIf(rhnCache.has_key(key))
        return (key, content)
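
A helper like this is typically driven once per flag combination, since raw, compressed and modified each change the on-disk format. The test method names below are hypothetical; the keyword arguments mirror rhnCache's:

    def test_raw_string(self):
        self._test("files/demo", "0123456789", raw=1)

    def test_compressed_pickle(self):
        self._test("files/demo", {"a": 1}, raw=0, compressed=1)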
Example #13
 def cache_set(self, object_id, value, timestamp=None):
     # Get the key
     key = self._get_key(object_id)
     return rhnCache.set(key,
                         value,
                         modified=timestamp,
                         raw=0,
                         compressed=self._compressed)
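
A plausible read-side counterpart to the wrapper above (the enclosing class is not shown, so this is a sketch): same key derivation, same pickling and compression flags, with the timestamp passed through as modified.

 def cache_get(self, object_id, timestamp=None):
     # Get the key
     key = self._get_key(object_id)
     return rhnCache.get(key,
                         modified=timestamp,
                         raw=0,
                         compressed=self._compressed)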
Example #14
    def no_test_as_streams_1(self):
        "Tests storing and retrieval as streams"
        t = tempfile.TemporaryFile()
        content = self.content * 100
        t.write(content)
        t.seek(0, 0)

        self._cleanup(self.key)
        rhnCache.set(self.key, None, raw=1, stream=t)
        self.failUnless(rhnCache.has_key(self.key))

        ss = rhnCache.get(self.key, as_stream=1)
        self.failUnless(hasattr(ss, "read"))
        content2 = ss.read()

        self.assertEquals(content, content2)
        self._cleanup(self.key)
Example #15
    def _getHeaderFromFile(self, filePath, stat_info=None):
        """ Wraps around common.rhnRepository's method, adding a caching layer
        If stat_info was already passed, don't re-stat the file
        """
        log_debug(3, filePath)
        if not CFG.CACHE_PACKAGE_HEADERS:
            return rhnRepository.Repository._getHeaderFromFile(
                self, filePath, stat_info=stat_info)
        # Ignore stat_info for now - nobody sets it anyway
        stat_info = None
        try:
            stat_info = os.stat(filePath)
        except:
            raise_with_tb(
                rhnFault(
                    17,
                    "Unable to read package %s" % os.path.basename(filePath)),
                sys.exc_info()[2])
        lastModified = stat_info[stat.ST_MTIME]

        # OK, file exists, check the cache
        cache_key = os.path.normpath("headers/" + filePath)
        header = rhnCache.get(cache_key,
                              modified=lastModified,
                              raw=1,
                              compressed=1)
        if header:
            # We're good to go
            log_debug(2, "Header cache HIT for %s" % filePath)
            extra_headers = {
                'X-RHN-Package-Header': os.path.basename(filePath),
            }
            self._set_last_modified(lastModified, extra_headers=extra_headers)
            return header
        log_debug(3, "Header cache MISS for %s" % filePath)
        header = rhnRepository.Repository._getHeaderFromFile(
            self, filePath, stat_info=stat_info)
        if header:
            rhnCache.set(cache_key,
                         header,
                         modified=lastModified,
                         raw=1,
                         compressed=1)
        return header
Example #16
 def cache_set(self, params, value):
     log_debug(4, params)
     last_modified = self._get_last_modified(params)
     key = self._get_key(params)
     if not self.use_database_cache:
         set_cache = rhnCache.set(key, value, modified=last_modified, \
                     raw=1, user='******', group='apache', mode=0755)
         return set_cache
     return rhnDatabaseCache.set(key, value, modified=last_modified, raw=1,
         compressed=1)
Example #17
 def cache_set(self, params, value):
     log_debug(4, params)
     last_modified = self._get_last_modified(params)
     key = self._get_key(params)
     user = '******'
     group = 'apache'
     if rhnLib.isSUSE():
         user = '******'
         group = 'www'
     return rhnCache.set(key, value, modified=last_modified,
                         raw=1, user=user, group=group, mode=int('0755', 8))
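
The int('0755', 8) form is what keeps this snippet portable: the bare 0755 literal seen in the older examples is Python 2 only, while the string parse (and the 0o755 literal) denote the same permission bits everywhere:

assert int('0755', 8) == 0o755 == 493  # rwxr-xr-x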
Example #19
 def cache_set(self, params, value):
     log_debug(4, params)
     last_modified = self._get_last_modified(params)
     key = self._get_key(params)
     return rhnCache.set(key,
                         value,
                         modified=last_modified,
                         raw=1,
                         user='******',
                         group='apache',
                         mode=0755)
Example #20
    def _getHeaderFromFile(self, filePath, stat_info=None):
        """ Wraps around common.rhnRepository's method, adding a caching layer
        If stat_info was already passed, don't re-stat the file
        """
        log_debug(3, filePath)
        if not CFG.CACHE_PACKAGE_HEADERS:
            return rhnRepository.Repository._getHeaderFromFile(self, filePath,
                                                               stat_info=stat_info)
        # Ignore stat_info for now - nobody sets it anyway
        stat_info = None
        try:
            stat_info = os.stat(filePath)
        except:
            raise rhnFault(17, "Unable to read package %s"
                               % os.path.basename(filePath)), None, sys.exc_info()[2]
        lastModified = stat_info[stat.ST_MTIME]

        # OK, file exists, check the cache
        cache_key = os.path.normpath("headers/" + filePath)
        header = rhnCache.get(cache_key, modified=lastModified, raw=1,
                              compressed=1)
        if header:
            # We're good to go
            log_debug(2, "Header cache HIT for %s" % filePath)
            extra_headers = {
                'X-RHN-Package-Header': os.path.basename(filePath),
            }
            self._set_last_modified(lastModified, extra_headers=extra_headers)
            return header
        log_debug(3, "Header cache MISS for %s" % filePath)
        header = rhnRepository.Repository._getHeaderFromFile(self, filePath,
                                                             stat_info=stat_info)
        if header:
            rhnCache.set(cache_key, header, modified=lastModified, raw=1,
                         compressed=1)
        return header
Example #21
    def poll_packages(self, release, server_arch, timestamp=0, uuid=None):
        log_debug(1, release, server_arch, timestamp, uuid)

        # make sure we're dealing with strings here
        release = str(release)
        server_arch = rhnLib.normalize_server_arch(server_arch)
        timestamp = str(timestamp)
        uuid = str(uuid)

        # get a list of acceptable channels
        channel_list = []

        channel_list = rhnChannel.applet_channels_for_uuid(uuid)

        # it's possible the tie between uuid and rhnServer.id wasn't yet
        # made, default to normal behavior
        if not channel_list:
            channel_list = rhnChannel.get_channel_for_release_arch(
                release, server_arch)
            channel_list = [channel_list]
        # bork if no channels returned
        if not channel_list:
            log_debug(
                8, "No channels for release = '%s', arch = '%s', uuid = '%s'" %
                (release, server_arch, uuid))
            return {'last_modified': 0, 'contents': []}

        last_channel_changed_ts = max(
            [a["last_modified"] for a in channel_list])

        # make satellite content override a cache caused by hosted
        last_channel_changed_ts = str(LongType(last_channel_changed_ts) + 1)

        # gotta be careful about channel unsubscriptions...
        client_cache_invalidated = None

        # we return rhnServer.channels_changed for each row
        # in the satellite case, pluck it off the first...
        if "server_channels_changed" in channel_list[0]:
            sc_ts = channel_list[0]["server_channels_changed"]

            if sc_ts and (sc_ts >= last_channel_changed_ts):
                client_cache_invalidated = 1

        if (last_channel_changed_ts <=
                timestamp) and (not client_cache_invalidated):
            # XXX: I hate these freaking return codes that return
            # different members in the dictionary depending on what
            # sort of data you get
            log_debug(3, "Client has current data")
            return {'use_cached_copy': 1}

        # we'll have to return something big - compress
        rhnFlags.set("compress_response", 1)

        # Mark the response as being already XMLRPC-encoded
        rhnFlags.set("XMLRPC-Encoded-Response", 1)

        # next, check the cache if we have something with this timestamp
        label_list = [str(a["id"]) for a in channel_list]
        label_list.sort()
        log_debug(4, "label_list", label_list)
        cache_key = "applet-poll-%s" % string.join(label_list, "-")

        ret = rhnCache.get(cache_key, last_channel_changed_ts)
        if ret:  # we have a good entry with matching timestamp
            log_debug(3, "Cache HIT for", cache_key)
            return ret

        # damn, need to do some real work from chip's requirements:
        # The package list should be an array of hashes with the keys
        # nvre, name, version, release, epoch, errata_advisory,
        # errata_id, with the errata fields being empty strings if the
        # package isn't from an errata.
        ret = {'last_modified': last_channel_changed_ts, 'contents': []}

        # we search for packages only in the allowed channels - build
        # the SQL helper string and dictionary to make the foo IN (
        # list ) constructs use bind variables
        qlist = []
        qdict = {}
        for c in channel_list:
            v = c["id"]
            k = "channel_%s" % v
            qlist.append(":%s" % k)
            qdict[k] = v
        qlist = string.join(qlist, ", ")

        # This query is kind of big. One of these days I'm gonna start
        # pulling them out and transforming them into views. We can
        # also simulate this using several functions exposed out of
        # rhnChannel, but there is no difference in speed because we
        # need to do more than one query; besides, we cache the hell
        # out of it
        h = rhnSQL.prepare("""
        select distinct
            pn.name,
            pe.version,
            pe.release,
            pe.epoch,
            e_sq.errata_advisory,
            e_sq.errata_synopsis,
            e_sq.errata_id
        from
            rhnPackageName pn,
            rhnPackageEVR pe,
            rhnChannelNewestPackage cnp
        left join
            (   select  sq_e.id as errata_id,
                        sq_e.synopsis as errata_synopsis,
                        sq_e.advisory as errata_advisory,
                        sq_ep.package_id
                from
                        rhnErrata sq_e,
                        rhnErrataPackage sq_ep,
                        rhnChannelErrata sq_ce
                where   sq_ce.errata_id = sq_ep.errata_id
                        and sq_ce.errata_id = sq_e.id
                        and sq_ce.channel_id in ( %s )
            ) e_sq
          on cnp.package_id = e_sq.package_id
        where
            cnp.channel_id in ( %s )
        and cnp.name_id = pn.id
        and cnp.evr_id = pe.id
        """ % (qlist, qlist))
        h.execute(**qdict)

        plist = h.fetchall_dict()

        if not plist:
            # We've set XMLRPC-Encoded-Response above
            ret = xmlrpclib.dumps((ret, ), methodresponse=1)
            return ret

        contents = {}

        for p in plist:
            for k in list(p.keys()):
                if p[k] is None:
                    p[k] = ""
            p["nevr"] = "%s-%s-%s:%s" % (p["name"], p["version"], p["release"],
                                         p["epoch"])
            p["nvr"] = "%s-%s-%s" % (p["name"], p["version"], p["release"])

            pkg_name = p["name"]

            if pkg_name in contents:
                stored_pkg = contents[pkg_name]

                s = [
                    stored_pkg["name"], stored_pkg["version"],
                    stored_pkg["release"], stored_pkg["epoch"]
                ]

                n = [p["name"], p["version"], p["release"], p["epoch"]]

                log_debug(7, "comparing vres", s, n)
                if rhn_rpm.nvre_compare(s, n) < 0:
                    log_debug(7, "replacing %s with %s" % (pkg_name, p))
                    contents[pkg_name] = p
                else:
                    # already have a higher vre stored...
                    pass
            else:
                log_debug(7, "initial store for %s" % pkg_name)
                contents[pkg_name] = p

        ret["contents"] = list(contents.values())

        # save it in the cache
        # We've set XMLRPC-Encoded-Response above
        ret = xmlrpclib.dumps((ret, ), methodresponse=1)
        rhnCache.set(cache_key, ret, last_channel_changed_ts)

        return ret
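
Stripped of the SQL, the caching idiom in poll_packages is just a version-stamped lookup: the channel-change timestamp is passed as the modified value, so an entry cached under an older timestamp simply counts as a miss. A sketch with hypothetical key, timestamp and recompute step:

from spacewalk.common import rhnCache

def expensive_query():
    # stand-in for the package/errata query above
    return {"last_modified": "20041110001122", "contents": []}

cache_key = "applet-poll-101-102"      # hypothetical channel ids
ts = "20041110001122"                  # hypothetical version stamp

ret = rhnCache.get(cache_key, ts)      # second positional arg is modified
if ret is None:
    ret = expensive_query()
    rhnCache.set(cache_key, ret, ts)   # third positional arg is modified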
Example #22
        # XXX For about 40M of compressed data sometimes we get:
        # zlib.error: Error -3 while decompressing: incomplete dynamic bit lengths tree
        v = gzip.GzipFile(None, "r", 0, io)

    try:
        data = v.read()
    except (ValueError, IOError, gzip.zlib.error), e:
        # XXX poking at gzip.zlib may not be that well-advised
        log_error("rhnDatabaseCache: gzip error for key %s: %s" % (
            name, e))
        # Ignore this entry in the database cache, it has invalid data
        return None

    # We store the data in the database cache, in raw format
    rhnCache.set(name, data, modified=modified, raw=1)
    
    # Unpickle the data, unless raw access was requested
    if not raw:
        return cPickle.loads(data)

    return data


def delete(name):
    # Uses the stored procedure. Quite simple
    rhnSQL.Procedure("rhn_cache_delete")(name)
    # Delete it from the disk cache too, just in case
    rhnCache.delete(name)
    
# We only set the database cache value
Example #23
    def poll_packages(self, release, server_arch, timestamp = 0, uuid = None):
        log_debug(1, release, server_arch, timestamp, uuid)

        # make sure we're dealing with strings here
        release = str(release)
        server_arch = rhnLib.normalize_server_arch(server_arch)
        timestamp = str(timestamp)
        uuid = str(uuid)

        # get a list of acceptable channels
        channel_list = []

        channel_list = rhnChannel.applet_channels_for_uuid(uuid)

        # it's possible the tie between uuid and rhnServer.id wasn't yet
        # made, default to normal behavior
        if not channel_list:
            channel_list = rhnChannel.get_channel_for_release_arch(release,
                                                                   server_arch)
            channel_list = [channel_list]
        # bork if no channels returned
        if not channel_list:
            log_debug(8, "No channels for release = '%s', arch = '%s', uuid = '%s'" % (
                release, server_arch, uuid))
            return { 'last_modified' : 0, 'contents' : [] }

        last_channel_changed_ts = max(map(lambda a: a["last_modified"], channel_list))

        # make satellite content override a cache caused by hosted
        last_channel_changed_ts = str(long(last_channel_changed_ts) + 1)

        # gotta be careful about channel unsubscriptions...
        client_cache_invalidated = None

        # we return rhnServer.channels_changed for each row
        # in the satellite case, pluck it off the first...
        if channel_list[0].has_key("server_channels_changed"):
            sc_ts = channel_list[0]["server_channels_changed"]

            if sc_ts and (sc_ts >= last_channel_changed_ts):
                client_cache_invalidated = 1

        if (last_channel_changed_ts <= timestamp) and (not client_cache_invalidated):
            # XXX: I hate these freaking return codes that return
            # different members in the dictionary depending on what
            # sort of data you get
            log_debug(3, "Client has current data")
            return { 'use_cached_copy' : 1 }

        # we'll have to return something big - compress
        rhnFlags.set("compress_response", 1)

        # Mark the response as being already XMLRPC-encoded
        rhnFlags.set("XMLRPC-Encoded-Response", 1)

        # next, check the cache if we have something with this timestamp
        label_list = map(lambda a: str(a["id"]), channel_list)
        label_list.sort()
        log_debug(4, "label_list", label_list)
        cache_key = "applet-poll-%s" % string.join(label_list, "-")

        ret = rhnCache.get(cache_key, last_channel_changed_ts)
        if ret: # we have a good entry with matching timestamp
            log_debug(3, "Cache HIT for", cache_key)
            return ret

        # damn, need to do some real work from chip's requirements:
        # The package list should be an array of hashes with the keys
        # nvre, name, version, release, epoch, errata_advisory,
        # errata_id, with the errata fields being empty strings if the
        # package isn't from an errata.
        ret = { 'last_modified' : last_channel_changed_ts, 'contents' : [] }

        # we search for packages only in the allowed channels - build
        # the SQL helper string and dictionary to make the foo IN (
        # list ) constructs use bind variables
        qlist = []
        qdict = {}
        for c in channel_list:
            v = c["id"]
            k = "channel_%s" % v
            qlist.append(":%s" % k)
            qdict[k] = v
        qlist = string.join(qlist, ", ")

        # This query is kind of big. One of these days I'm gonna start
        # pulling them out and transforming them into views. We can
        # also simulate this using several functions exposed out of
        # rhnChannel, but there is no difference in speed because we
        # need to do more than one query; besides, we cache the hell
        # out of it
        h = rhnSQL.prepare("""
        select distinct
            pn.name,
            pe.version,
            pe.release,
            pe.epoch,
            e_sq.errata_advisory,
            e_sq.errata_synopsis,
            e_sq.errata_id
        from
            rhnPackageName pn,
            rhnPackageEVR pe,
            rhnChannelNewestPackage cnp
        left join
            (   select  sq_e.id as errata_id,
                        sq_e.synopsis as errata_synopsis,
                        sq_e.advisory as errata_advisory,
                        sq_ep.package_id
                from
                        rhnErrata sq_e,
                        rhnErrataPackage sq_ep,
                        rhnChannelErrata sq_ce
                where   sq_ce.errata_id = sq_ep.errata_id
                        and sq_ce.errata_id = sq_e.id
                        and sq_ce.channel_id in ( %s )
            ) e_sq
          on cnp.package_id = e_sq.package_id
        where
            cnp.channel_id in ( %s )
        and cnp.name_id = pn.id
        and cnp.evr_id = pe.id
        """ % (qlist, qlist))
        h.execute(**qdict)

        plist = h.fetchall_dict()

        if not plist:
            # We've set XMLRPC-Encoded-Response above
            ret = xmlrpclib.dumps((ret, ), methodresponse=1)
            return ret

        contents = {}

        for p in plist:
            for k in p.keys():
                if p[k] is None:
                    p[k] = ""
            p["nevr"] = "%s-%s-%s:%s" % (
                 p["name"], p["version"], p["release"], p["epoch"])
            p["nvr"] = "%s-%s-%s" % (p["name"], p["version"], p["release"])

            pkg_name = p["name"]

            if contents.has_key(pkg_name):
                stored_pkg = contents[pkg_name]

                s = [ stored_pkg["name"],
                      stored_pkg["version"],
                      stored_pkg["release"],
                      stored_pkg["epoch"] ]

                n = [ p["name"],
                      p["version"],
                      p["release"],
                      p["epoch"] ]

                log_debug(7, "comparing vres", s, n)
                if rhn_rpm.nvre_compare(s, n) < 0:
                    log_debug(7, "replacing %s with %s" % (pkg_name, p))
                    contents[pkg_name] = p
                else:
                    # already have a higher vre stored...
                    pass
            else:
                log_debug(7, "initial store for %s" % pkg_name)
                contents[pkg_name] = p

        ret["contents"] = contents.values()

        # save it in the cache
        # We've set XMLRPC-Encoded-Response above
        ret = xmlrpclib.dumps((ret, ), methodresponse=1)
        rhnCache.set(cache_key, ret, last_channel_changed_ts)

        return ret
Example #24
#
# Copyright (c) 2008--2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
from spacewalk.common import rhnCache

key = "/var/goo/goo"

data = "0123456789" * 1024 * 1024

rhnCache.set(key, data, compressed=1, raw=1)
assert data == rhnCache.get(key, compressed=1, raw=1)

rhnCache.set(key, "12345", raw=1)
# Should return None, opening uncompressed data as compressed
assert None == rhnCache.get(key, compressed=1, raw=1)

# Should return None, opening raw data as pickled
assert None == rhnCache.get(key, raw=0)
Example #25
 def cache_set(self, params, value):
     log_debug(4, params)
     last_modified = self._get_last_modified(params)
     key = self._get_key(params)
     return rhnCache.set(key, value, modified=last_modified, raw=1,
                         user="******", group="apache", mode=0755)
Example #26
    def sync(self, update_repodata=True):
        """Trigger a reposync"""
        failed_packages = 0
        sync_error = 0
        if not self.urls:
            sync_error = -1
        start_time = datetime.now()
        for (repo_id, url, repo_label) in self.urls:
            log(0, "Repo URL: %s" % url)
            plugin = None

            # If the repository uses a uln:// URL, switch to the ULN plugin,
            # overriding the command-line choice
            if url.startswith("uln://"):
                self.repo_plugin = self.load_plugin("uln")

            # pylint: disable=W0703
            try:
                if repo_label:
                    repo_name = repo_label
                else:
                    # use modified relative_url as name of repo plugin, because
                    # it used as name of cache directory as well
                    relative_url = '_'.join(url.split('://')[1].split('/')[1:])
                    repo_name = relative_url.replace("?", "_").replace(
                        "&", "_").replace("=", "_")

                plugin = self.repo_plugin(url,
                                          repo_name,
                                          org=str(self.org_id or ''),
                                          channel_label=self.channel_label)

                if update_repodata:
                    plugin.clear_cache()

                if repo_id is not None:
                    keys = rhnSQL.fetchall_dict("""
                        select k1.key as ca_cert, k2.key as client_cert, k3.key as client_key
                        from rhncontentsource cs inner join
                             rhncontentsourcessl csssl on cs.id = csssl.content_source_id inner join
                             rhncryptokey k1 on csssl.ssl_ca_cert_id = k1.id left outer join
                             rhncryptokey k2 on csssl.ssl_client_cert_id = k2.id left outer join
                             rhncryptokey k3 on csssl.ssl_client_key_id = k3.id
                        where cs.id = :repo_id
                        """,
                                                repo_id=int(repo_id))
                    if keys:
                        ssl_set = get_single_ssl_set(
                            keys, check_dates=self.check_ssl_dates)
                        if ssl_set:
                            plugin.set_ssl_options(ssl_set['ca_cert'],
                                                   ssl_set['client_cert'],
                                                   ssl_set['client_key'])
                        else:
                            raise ValueError(
                                "No valid SSL certificates were found for repository."
                            )

                if not self.no_packages:
                    ret = self.import_packages(plugin, repo_id, url)
                    failed_packages += ret
                    self.import_groups(plugin, url)

                if not self.no_errata:
                    self.import_updates(plugin, url)

                # only for repos obtained from the DB
                if self.sync_kickstart and repo_label:
                    try:
                        self.import_kickstart(plugin, repo_label)
                    except:
                        rhnSQL.rollback()
                        raise
            except Exception:
                e = sys.exc_info()[1]
                log2(0, 0, "ERROR: %s" % e, stream=sys.stderr)
                log2disk(0, "ERROR: %s" % e)
                # pylint: disable=W0104
                sync_error = -1
            if plugin is not None:
                plugin.clear_ssl_cache()
        # Update cache with package checksums
        rhnCache.set(checksum_cache_filename, self.checksum_cache)
        if self.regen:
            taskomatic.add_to_repodata_queue_for_channel_package_subscription(
                [self.channel_label], [], "server.app.yumreposync")
            taskomatic.add_to_erratacache_queue(self.channel_label)
        self.update_date()
        rhnSQL.commit()

        # update permissions
        fileutils.createPath(os.path.join(
            CFG.MOUNT_POINT,
            'rhn'))  # if the directory exists update ownership only
        for root, dirs, files in os.walk(os.path.join(CFG.MOUNT_POINT, 'rhn')):
            for d in dirs:
                fileutils.setPermsPath(os.path.join(root, d), group='apache')
            for f in files:
                fileutils.setPermsPath(os.path.join(root, f), group='apache')
        elapsed_time = datetime.now() - start_time
        log(
            0, "Sync of channel completed in %s." %
            str(elapsed_time).split('.')[0])
        # if there are no global problems, but some packages weren't synced
        if sync_error == 0 and failed_packages > 0:
            sync_error = failed_packages
        return elapsed_time, sync_error
Example #27
 def cache_set(self, object_id, value, timestamp=None):
     # Get the key
     key = self._get_key(object_id)
     return rhnCache.set(key, value, modified=timestamp, raw=0,
         compressed=self._compressed)
Example #28
 def __setitem__(self, key, val):
     rkey = self._compute_key(key)
     return rhnCache.set(rkey, val)
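
A matching read accessor for the dict-style wrapper above would be symmetric (a sketch; the enclosing class is not shown):

 def __getitem__(self, key):
     rkey = self._compute_key(key)
     return rhnCache.get(rkey)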
Example #29
#
# Copyright (c) 2008--2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
# 
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation. 
#
from spacewalk.common import rhnCache

key = "/var/goo/goo"

data = "0123456789" * 1024 * 1024

rhnCache.set(key, data, compressed=1, raw=1)
assert data == rhnCache.get(key, compressed=1, raw=1)

rhnCache.set(key, "12345", raw=1)
# Should return None, opening uncompressed data as compressed
assert None == rhnCache.get(key, compressed=1, raw=1)

# Should return None, opening raw data as pickled
assert None == rhnCache.get(key, raw=0)
Example #30
        io.seek(0, 0)

        # XXX For about 40M of compressed data sometimes we get:
        # zlib.error: Error -3 while decompressing: incomplete dynamic bit lengths tree
        v = gzip.GzipFile(None, "r", 0, io)

    try:
        data = v.read()
    except (ValueError, IOError, gzip.zlib.error), e:
        # XXX poking at gzip.zlib may not be that well-advised
        log_error("rhnDatabaseCache: gzip error for key %s: %s" % (name, e))
        # Ignore this entry in the database cache, it has invalid data
        return None

    # We store the data in the database cache, in raw format
    rhnCache.set(name, data, modified=modified, raw=1)

    # Unpickle the data, unless raw access was requested
    if not raw:
        return cPickle.loads(data)

    return data


def delete(name):
    # Uses the stored procedure. Quite simple
    rhnSQL.Procedure("rhn_cache_delete")(name)
    # Delete it from the disk cache too, just in case
    rhnCache.delete(name)