def is_expired(self):
     """
     Returns true if this cache is expired.
     """
     if self.__expire_time is None:
         return False
     else:
         return LongType(time.time()) >= self.__expire_time
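A minimal sketch of how the expiry logic above can be exercised; ExpiringCache and ttl_secs are hypothetical names, and the snippet assumes Python 2, where LongType is available from the types module.

import time
from types import LongType  # Python 2 only


class ExpiringCache(object):
    """Hypothetical holder for the is_expired() logic above."""

    def __init__(self, ttl_secs=60):
        # Expire ttl_secs from now; None would mean "never expires".
        self.__expire_time = LongType(time.time()) + ttl_secs

    def is_expired(self):
        if self.__expire_time is None:
            return False
        return LongType(time.time()) >= self.__expire_time


cache = ExpiringCache(ttl_secs=0)
print(cache.is_expired())  # True, since the expiry time is already in the past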
Example #2
def _write_gzip_header(self):
    self.fileobj.write('\037\213')        # gzip magic number (0x1f, 0x8b)
    self.fileobj.write('\010')            # compression method: 8 = deflate
    # no flags
    self.fileobj.write('\x00')
    write32u(self.fileobj, LongType(0))   # MTIME field; 0 = no timestamp recorded
    self.fileobj.write('\002')            # XFL: 2 = maximum compression was used
    self.fileobj.write('\377')            # OS byte: 255 = unknown
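The header writer above leans on write32u() for the 4-byte MTIME field. A sketch of that helper, assuming it behaves like the one in the standard-library gzip module:

import struct


def write32u(output, value):
    # Pack the value as an unsigned 32-bit little-endian integer,
    # which is how gzip stores MTIME and the trailer fields.
    output.write(struct.pack("<L", value))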
    def _load_state(self):
        """
        Loads the last hypervisor state from disk.
        """
        # Attempt to open up the cache file.
        cache_file = None
        try:
            cache_file = open(CACHE_DATA_PATH, 'rb')
        except IOError:
            ioe = sys.exc_info()[1]
            # Couldn't open the cache file.  That's ok, there might not be one.
            # We'll only complain if debugging is enabled.
            self._log_debug("Could not open cache file '%s': %s" % \
                               (CACHE_DATA_PATH, str(ioe)))

        # Now, if a previous state was cached, load it.
        state = {}
        if cache_file:
            try:
                state = cPickle.load(cache_file)
            except cPickle.PickleError:
                pe = sys.exc_info()[1]
                # Strange.  Possibly, the file is corrupt.  We'll load an empty
                # state instead.
                self._log_debug("Error occurred while loading state: %s" % \
                                    str(pe))
            except EOFError:
                self._log_debug("Unexpected EOF. Probably an empty file.")

            cache_file.close()

        if state:
            self._log_debug("Loaded state: %s" % repr(state))

            self.__expire_time = LongType(state['expire_time'])

            # If the cache is expired, set the old data to None so we force
            # a refresh.
            if self.is_expired():
                self.__old_domain_data = None
                os.unlink(CACHE_DATA_PATH)
            else:
                self.__old_domain_data = state['domain_data']

        else:
            self.__old_domain_data = None
            self.__expire_time = None
    def _save_state(self):
        """
        Saves the given polling state to disk.
        """
        # First, ensure that the proper parent directory is created.
        cache_dir_path = os.path.dirname(CACHE_DATA_PATH)
        if not os.path.exists(cache_dir_path):
            os.makedirs(cache_dir_path, int('0700', 8))

        state = {}
        state['domain_data'] = self.__new_domain_data
        if self.__expire_time is None or self.is_expired():
            state['expire_time'] = LongType(time.time()) + CACHE_EXPIRE_SECS
        else:
            state['expire_time'] = self.__expire_time

        # Now attempt to open the file for writing.  We'll just overwrite
        # whatever's already there.  Also, let any exceptions bounce out.
        cache_file = open(CACHE_DATA_PATH, "wb")
        cPickle.dump(state, cache_file)
        cache_file.close()
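A minimal round-trip sketch of the state file the two methods above share, assuming the same two-key layout; STATE_PATH and the one-hour TTL are placeholders rather than the module's real CACHE_DATA_PATH and CACHE_EXPIRE_SECS.

import time
import cPickle  # Python 2; use pickle on Python 3
from types import LongType

STATE_PATH = "/tmp/hypervisor-state-demo.cache"  # placeholder path

# Write a state dict with the same keys _save_state() uses.
state = {
    'domain_data': {'demo-domain': {'state': 'running'}},
    'expire_time': LongType(time.time()) + 3600,
}
with open(STATE_PATH, "wb") as cache_file:
    cPickle.dump(state, cache_file)

# Read it back the way _load_state() does.
with open(STATE_PATH, "rb") as cache_file:
    loaded = cPickle.load(cache_file)
print(loaded['expire_time'] > time.time())  # True until the hour is up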
Example #5
    def __convert_properties(self, properties):
        """ This function normalizes and converts the values of some properties to
            format consumable by the server.
        """
        # Attempt to normalize the UUID.
        if PropertyType.UUID in properties:
            uuid = properties[PropertyType.UUID]
            if uuid:
                uuid_as_number = string.atol(uuid, 16)

                if uuid_as_number == 0:
                    # If the UUID is a bunch of null bytes, we will convert it
                    # to None.  This will allow us to interact with the
                    # database properly, since the database assumes a null UUID
                    # when the system is a host.
                    properties[PropertyType.UUID] = None
                else:
                    # Normalize the UUID.  We don't know how it will appear
                    # when it comes from the client, so we'll convert it to a
                    # normal form.
                    # if UUID had leading 0, we must pad 0 again #429192
                    properties[PropertyType.UUID] = "%032x" % uuid_as_number
            else:
                properties[PropertyType.UUID] = None

        # The server only cares about certain types of states.
        if PropertyType.STATE in properties:
            state = properties[PropertyType.STATE]
            properties[PropertyType.STATE] = CLIENT_SERVER_STATE_MAP[state]

        # We must send the memory across as a string because XMLRPC can only
        # handle up to 32 bit numbers.  RAM can easily exceed that limit these
        # days.
        if PropertyType.MEMORY in properties:
            memory = properties[PropertyType.MEMORY]
            properties[PropertyType.MEMORY] = LongType(memory)
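The UUID branch above can be illustrated on its own. This sketch assumes a bare hexadecimal string as input and uses the built-in int() in place of string.atol, which parses hex the same way here:

def normalize_uuid(uuid):
    # Re-pad to 32 hex digits so a UUID with leading zeros keeps its length.
    if not uuid:
        return None
    uuid_as_number = int(uuid, 16)
    if uuid_as_number == 0:
        # An all-zero UUID is treated as "no UUID" (the host case).
        return None
    return "%032x" % uuid_as_number


print(normalize_uuid("0dc45327fd214dca8d0b5ca82e49fd73"))  # leading zero preserved
print(normalize_uuid("0" * 32))                            # None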
Example #6
    def poll_packages(self, release, server_arch, timestamp=0, uuid=None):
        log_debug(1, release, server_arch, timestamp, uuid)

        # make sure we're dealing with strings here
        release = str(release)
        server_arch = rhnLib.normalize_server_arch(server_arch)
        timestamp = str(timestamp)
        uuid = str(uuid)

        # get a list of acceptable channels
        channel_list = []

        channel_list = rhnChannel.applet_channels_for_uuid(uuid)

        # it's possible the tie between uuid and rhnServer.id wasn't yet
        # made, default to normal behavior
        if not channel_list:
            channel_list = rhnChannel.get_channel_for_release_arch(
                release, server_arch)
            channel_list = [channel_list]
        # bork if no channels returned
        if not channel_list:
            log_debug(
                8, "No channels for release = '%s', arch = '%s', uuid = '%s'" %
                (release, server_arch, uuid))
            return {'last_modified': 0, 'contents': []}

        last_channel_changed_ts = max(
            [a["last_modified"] for a in channel_list])

        # bump the timestamp so satellite content overrides any cache left by hosted
        last_channel_changed_ts = str(LongType(last_channel_changed_ts) + 1)

        # gotta be careful about channel unsubscriptions...
        client_cache_invalidated = None

        # we return rhnServer.channels_changed for each row
        # in the satellite case, pluck it off the first...
        if "server_channels_changed" in channel_list[0]:
            sc_ts = channel_list[0]["server_channels_changed"]

            if sc_ts and (sc_ts >= last_channel_changed_ts):
                client_cache_invalidated = 1

        if (last_channel_changed_ts <=
                timestamp) and (not client_cache_invalidated):
            # XXX: I hate these freaking return codes that return
            # different members in the dictionary depending on what
            # sort of data you get
            log_debug(3, "Client has current data")
            return {'use_cached_copy': 1}

        # we'll have to return something big - compress
        rhnFlags.set("compress_response", 1)

        # Mark the response as being already XMLRPC-encoded
        rhnFlags.set("XMLRPC-Encoded-Response", 1)

        # next, check the cache if we have something with this timestamp
        label_list = [str(a["id"]) for a in channel_list]
        label_list.sort()
        log_debug(4, "label_list", label_list)
        cache_key = "applet-poll-%s" % string.join(label_list, "-")

        ret = rhnCache.get(cache_key, last_channel_changed_ts)
        if ret:  # we have a good entry with matching timestamp
            log_debug(3, "Cache HIT for", cache_key)
            return ret

        # damn, need to do some real work from chip's requirements:
        # The package list should be an array of hashes with the keys
        # nvre, name, version, release, epoch, errata_advisory,
        # errata_id, with the errata fields being empty strings if the
        # package isn't from an errata.
        ret = {'last_modified': last_channel_changed_ts, 'contents': []}

        # we search for packages only in the allowed channels - build
        # the SQL helper string and dictionary to make the foo IN (
        # list ) constructs use bind variables
        qlist = []
        qdict = {}
        for c in channel_list:
            v = c["id"]
            k = "channel_%s" % v
            qlist.append(":%s" % k)
            qdict[k] = v
        qlist = string.join(qlist, ", ")

        # This query is kind of big. One of these days I'm gonna start
        # pulling them out and transforming them into views. We can
        # also simulate this using several functions exposed out of
        # rhnChannel, but there is no difference in speed because we
        # need to do more than one query; besides, we cache the hell
        # out of it
        h = rhnSQL.prepare("""
        select distinct
            pn.name,
            pe.version,
            pe.release,
            pe.epoch,
            e_sq.errata_advisory,
            e_sq.errata_synopsis,
            e_sq.errata_id
        from
            rhnPackageName pn,
            rhnPackageEVR pe,
            rhnChannelNewestPackage cnp
        left join
            (   select  sq_e.id as errata_id,
                        sq_e.synopsis as errata_synopsis,
                        sq_e.advisory as errata_advisory,
                        sq_ep.package_id
                from
                        rhnErrata sq_e,
                        rhnErrataPackage sq_ep,
                        rhnChannelErrata sq_ce
                where   sq_ce.errata_id = sq_ep.errata_id
                        and sq_ce.errata_id = sq_e.id
                        and sq_ce.channel_id in ( %s )
            ) e_sq
          on cnp.package_id = e_sq.package_id
        where
            cnp.channel_id in ( %s )
        and cnp.name_id = pn.id
        and cnp.evr_id = pe.id
        """ % (qlist, qlist))
        h.execute(**qdict)

        plist = h.fetchall_dict()

        if not plist:
            # We've set XMLRPC-Encoded-Response above
            ret = xmlrpclib.dumps((ret, ), methodresponse=1)
            return ret

        contents = {}

        for p in plist:
            for k in list(p.keys()):
                if p[k] is None:
                    p[k] = ""
            p["nevr"] = "%s-%s-%s:%s" % (p["name"], p["version"], p["release"],
                                         p["epoch"])
            p["nvr"] = "%s-%s-%s" % (p["name"], p["version"], p["release"])

            pkg_name = p["name"]

            if pkg_name in contents:
                stored_pkg = contents[pkg_name]

                s = [
                    stored_pkg["name"], stored_pkg["version"],
                    stored_pkg["release"], stored_pkg["epoch"]
                ]

                n = [p["name"], p["version"], p["release"], p["epoch"]]

                log_debug(7, "comparing vres", s, n)
                if rhn_rpm.nvre_compare(s, n) < 0:
                    log_debug(7, "replacing %s with %s" % (pkg_name, p))
                    contents[pkg_name] = p
                else:
                    # already have a higher vre stored...
                    pass
            else:
                log_debug(7, "initial store for %s" % pkg_name)
                contents[pkg_name] = p

        ret["contents"] = list(contents.values())

        # save it in the cache
        # We've set XMLRPC-Encoded-Response above
        ret = xmlrpclib.dumps((ret, ), methodresponse=1)
        rhnCache.set(cache_key, ret, last_channel_changed_ts)

        return ret
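The bind-variable IN-list built in the middle of poll_packages() is a small pattern worth isolating. A sketch with made-up channel ids, assuming a driver that accepts :name bind parameters the way rhnSQL does above:

channel_ids = [101, 204, 309]  # made-up ids

qlist = []
qdict = {}
for v in channel_ids:
    k = "channel_%s" % v
    qlist.append(":%s" % k)
    qdict[k] = v
qlist = ", ".join(qlist)

print(qlist)  # :channel_101, :channel_204, :channel_309
print(qdict)  # {'channel_101': 101, 'channel_204': 204, 'channel_309': 309} (order may vary)
# The query is then built with "... in ( %s )" % qlist and run with
# h.execute(**qdict), so the ids travel as bind variables, not literals.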
Example #7
    def populate(self,
                 header,
                 size,
                 checksum_type,
                 checksum,
                 path=None,
                 org_id=None,
                 header_start=None,
                 header_end=None,
                 channels=[]):

        # XXX it seems to me that this is the place where 'source_rpm' is
        # getting set
        for f in list(self.keys()):
            field = f
            if f in self.tagMap:
                field = self.tagMap[f]
                if not field:
                    # Unsupported
                    continue

            # get the db field value from the header
            val = header[field]
            if f == 'build_time':
                if type(val) in (IntType, LongType):
                    # A UNIX timestamp
                    val = gmtime(val)
            if f == 'payload_size':
                if val is None:
                    # use longarchivesize header field for rpms with archive > 4GB
                    if ('longarchivesize'
                            in header) and (header['longarchivesize'] > 0):
                        val = header['longarchivesize']
                elif val < 0:
                    # workaround for older rpms where signed
                    # attributes go negative for size > 2G
                    val = LongType(val) + 2**32
            elif val:
                # Convert to strings
                if isinstance(val, UnicodeType):
                    val = to_string(val)
                else:
                    val = str(val)
            elif val == []:
                val = None
            self[f] = val

        self['package_size'] = size
        self['checksum_type'] = checksum_type
        self['checksum'] = checksum
        self['checksums'] = {checksum_type: checksum}
        self['path'] = path
        self['org_id'] = org_id
        self['header_start'] = header_start
        self['header_end'] = header_end
        self['last_modified'] = localtime(time.time())
        if 'sigmd5' in self:
            if self['sigmd5']:
                self['sigchecksum_type'] = 'md5'
                self['sigchecksum'] = self['sigmd5']
            del self['sigmd5']

        # Fix some of the information up
        vendor = self['vendor']
        if not vendor:
            self['vendor'] = 'Not defined'
        payloadFormat = self['payload_format']
        if payloadFormat is None:
            self['payload_format'] = 'cpio'
        if self['payload_size'] is None:
            self['payload_size'] = 0
        return self
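The payload_size workaround is the only numerically delicate step in populate(). A self-contained sketch of how a negative signed 32-bit size is mapped back to its unsigned value (plain int arithmetic is enough to show the idea; the original wraps the result in LongType):

def fix_signed_size(val):
    # Older rpms store sizes > 2 GiB as negative signed 32-bit integers;
    # adding 2**32 recovers the intended unsigned value.
    if val is not None and val < 0:
        return val + 2**32
    return val


print(fix_signed_size(-1964967296))  # 2330000000, roughly 2.2 GiB
print(fix_signed_size(123456))       # unchanged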
Example #8
def is_host_uuid(uuid):
    # A UUID that is numerically zero marks the host itself; parse the hex
    # string directly instead of going through eval().
    return LongType(uuid, 16) == 0
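A quick usage check, assuming the UUID arrives as a bare 32-digit hex string:

print(is_host_uuid("0" * 32))                            # True: the host itself
print(is_host_uuid("0dc45327fd214dca8d0b5ca82e49fd73"))  # False: a guest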