def dump_channel_packages_short(self, channel_label, last_modified,
                                filepath=None, validate_channels=False,
                                send_headers=False, open_stream=True):
    log_debug(2, channel_label)
    if validate_channels:
        channels = self._validate_channels(channel_labels=[channel_label])
        channel_obj = channels[channel_label]
    else:
        channels = channel_label
        channel_obj = channels

    db_last_modified = int(rhnLib.timestamp(channel_obj["last_modified"]))
    last_modified = int(rhnLib.timestamp(last_modified))
    log_debug(3, "last modified", last_modified, "db last modified",
              db_last_modified)
    if last_modified != db_last_modified:
        raise rhnFault(3013, "The requested channel version does not match"
                             " the upstream version", explain=0)

    channel_id = channel_obj["channel_id"]
    if filepath:
        key = filepath
    else:
        key = "xml-channel-packages/rhn-channel-%d.data" % channel_id

    # Try to get everything off of the cache
    val = rhnCache.get(key, compressed=0, raw=1, modified=last_modified)
    if val is None:
        # Not generated yet
        log_debug(4, "Cache MISS for %s (%s)" % (channel_label, channel_id))
        stream = self._cache_channel_packages_short(channel_id, key,
                                                    last_modified)
    else:
        log_debug(4, "Cache HIT for %s (%s)" % (channel_label, channel_id))
        temp_stream = tempfile.TemporaryFile()
        temp_stream.write(val)
        temp_stream.flush()
        stream = self._normalize_compressed_stream(temp_stream)

    # Copy the results to the output stream; they should already be
    # compressed if compression was requested
    buffer_size = 16384
    # Send the HTTP headers - but don't init the compressed stream since
    # we send the data ourselves
    if send_headers:
        self._send_headers(init_compressed_stream=0)
    if open_stream:
        self._raw_stream = open(key, "w")
    while 1:
        buff = stream.read(buffer_size)
        if not buff:
            break
        try:
            self._raw_stream.write(buff)
        except IOError:
            log_error("Client disconnected prematurely")
            self.close()
            raise ClosedConnectionError, None, sys.exc_info()[2]
    # We're done
    if open_stream:
        self._raw_stream.close()
    return 0
def _check_file_timestamp(self, filename, timestamp):
    if timestamp is None:
        # No timestamp specified
        return 1
    timestamp = rhnLib.timestamp(timestamp)
    file_timestamp = os.stat(filename)[stat.ST_MTIME]
    if timestamp == file_timestamp:
        return 1
    return 0
def has_key(name, modified=None):
    fname = _fname(name)
    if modified is not None:
        modified = timestamp(modified)
    if not os.access(fname, os.R_OK):
        return False
    # the file exists, so os.stat should not raise an exception
    statinfo = os.stat(fname)
    if modified is not None and statinfo[ST_MTIME] != modified:
        return False
    return True
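# A minimal standalone sketch of the same freshness test used by has_key(),
# with the cache-key mapping stripped away (the helper name here is
# hypothetical, not part of this module): a cached file counts as fresh only
# if it is readable and its mtime equals the expected epoch value.
import os
from stat import ST_MTIME

def _is_fresh(path, modified_epoch):
    if not os.access(path, os.R_OK):
        return False
    return os.stat(path)[ST_MTIME] == int(modified_epoch)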
def __init__(self, name, modified=None, user='******', group='root',
             mode=int('0755', 8)):
    if modified:
        self.modified = timestamp(modified)
    else:
        self.modified = None
    self.fname = _fname(name)
    self.fd = self.get_fd(name, user, group, mode)
    self.closed = False
def dump_srpms(self):
    print "Dumping srpms"
    dumper = xmlDiskDumper.SourceRPMDumper(self.options.mountpoint,
                                           server=self.server,
                                           compression=self.compression)
    channels = self._load_channels()
    packages = self._get_channel_object_ids(channels, 'source-packages')
    for package in packages:
        print "Dumping srpm", package
        last_modified = package['last_modified']
        last_modified = rhnLib.timestamp(last_modified)
        dumper.setID(package)
        dumper.set_utime(last_modified)
        dumper.dump(force=self.options.force)
def getObsoletes(self, version):
    """ Returns a list of packages that obsolete other packages """
    log_debug(3, self.channelName, version)
    # Check to see if the version they are requesting is the latest;
    # check the validity of what the client thinks about this channel,
    # or blow up
    self.__check_channel(version)

    obsoletes = rhnChannel.list_obsoletes(self.channelName)

    # Set the transport options
    transportOptions = rhnFlags.get('outputTransportOptions')
    transportOptions['Last-Modified'] = rfc822time(timestamp(version))
    rhnFlags.set("compress_response", 1)
    return obsoletes
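# A minimal sketch of what the rfc822time() helper used above presumably
# produces from the epoch seconds returned by timestamp(): an HTTP-style
# Last-Modified date. This is an assumption about the helper, not its
# actual source.
import time

def _rfc822time_sketch(epoch_seconds):
    # e.g. 1041379200 -> 'Wed, 01 Jan 2003 00:00:00 GMT'
    return time.strftime("%a, %d %b %Y %H:%M:%S GMT",
                         time.gmtime(epoch_seconds))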
def listAllPackagesComplete(self, version):
    """ Creates and/or serves up a cached copy of all the packages for
        this channel, including requires, obsoletes, conflicts, etc.
    """
    log_debug(3, self.channelName, version)
    # Check to see if the version they are requesting is the latest;
    # check the validity of what the client thinks about this channel,
    # or blow up
    self.__check_channel(version)

    packages = rhnChannel.list_all_packages_complete(self.channelName)

    # transport options...
    transportOptions = rhnFlags.get('outputTransportOptions')
    transportOptions['Last-Modified'] = rfc822time(timestamp(version))
    rhnFlags.set("compress_response", 1)
    return packages
def _fetch_cursor(key=None, modified=None):
    if modified is not None:
        modified = timestamp(modified)

    # Computing the number of seconds since Jan 1 1970
    h = rhnSQL.prepare("""
        select c.key_id, c.value,
               nvl((c.modified - TO_DATE('1970-01-01 00:00:00',
                        'YYYY-MM-DD HH24:MI:SS')) * 86400 - :modified,
                   0) delta,
               (c.modified - TO_DATE('1970-01-01 00:00:00',
                        'YYYY-MM-DD HH24:MI:SS')) * 86400 modified
          from rhnCache c
         where c.key_id = LOOKUP_CACHE_KEY(:key)
    """)
    h.execute(key=key, modified=modified)
    return h
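# A worked example of the DATE arithmetic in the query above (an
# illustration, not part of the module): Oracle DATE subtraction yields
# days, so multiplying by 86400 converts a DATE into seconds since the
# 1970-01-01 epoch, and 'delta' is zero exactly when the row's modified
# stamp matches the :modified bind value.
import datetime

_EPOCH = datetime.datetime(1970, 1, 1)

def _oracle_date_to_epoch(dt):
    # Mirrors (c.modified - DATE '1970-01-01') * 86400
    delta = dt - _EPOCH
    return delta.days * 86400 + delta.seconds

assert _oracle_date_to_epoch(datetime.datetime(1970, 1, 2)) == 86400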
def _set_file_timestamp(self, filename, timestamp):
    if timestamp is None:
        return
    timestamp = rhnLib.timestamp(timestamp)
    os.utime(filename, (timestamp, timestamp))
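# Round-trip sketch tying _set_file_timestamp() to _check_file_timestamp()
# above (standalone, with raw epoch seconds instead of rhnLib timestamps):
import os
import stat
import tempfile

fd, path = tempfile.mkstemp()
os.close(fd)
os.utime(path, (1041379200, 1041379200))  # (atime, mtime)
assert os.stat(path)[stat.ST_MTIME] == 1041379200
os.remove(path)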
def _dbtime2timestamp(val):
    return int(rhnLib.timestamp(val))
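# Nearly every snippet here funnels through rhnLib.timestamp(). A minimal
# sketch of such a helper, assuming 'YYYY-MM-DD HH:MI:SS' strings
# interpreted in the local timezone (an assumption about its behavior,
# not the real rhnLib source, which also accepts other input types):
import time

def _timestamp_sketch(val):
    if isinstance(val, (int, float)):
        return val  # already epoch seconds
    tm = time.strptime(str(val), "%Y-%m-%d %H:%M:%S")
    return time.mktime(tm)  # local-time interpretation, DST guessed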
def set(name, value, modified=None, raw=None, compressed=None):
    if modified is not None:
        modified = timestamp(modified)

    if raw:
        val = value
    else:
        val = cPickle.dumps(value, 1)

    if compressed:
        # Since most of the data is kept in memory anyway, don't bother to
        # write it to a temp file at this point - it's probably much smaller
        # anyway
        io = cStringIO.StringIO()
        f = gzip.GzipFile(None, "w", 5, io)
        f.write(val)
        f.close()
        val = io.getvalue()
        io.close()

    data_length = len(val)
    chunk_size = 32512
    chunks = int(math.ceil(float(data_length) / chunk_size))
    # if chunks > 256:
    #     raise Exception, "Data too big"

    plsql_template = r"""
        DECLARE
            PRAGMA AUTONOMOUS_TRANSACTION;
            blob_val BLOB;
            modified_date DATE;
            now DATE := sysdate;
            our_key_id number;
            %s
        BEGIN
            our_key_id := lookup_cache_key(:key);
            BEGIN
                SELECT value INTO blob_val
                  FROM rhnCache
                 WHERE key_id = our_key_id
                   FOR UPDATE OF value;
            EXCEPTION WHEN NO_DATA_FOUND THEN
                -- The entry is not here yet, let's create it
                INSERT INTO rhnCache (key_id, value, created, modified)
                VALUES (our_key_id, EMPTY_BLOB(), sysdate, sysdate)
                RETURNING value INTO blob_val;
            END;
            -- If we want to write less data than currently available,
            -- trim the blob
            IF :data_len < DBMS_LOB.getlength(blob_val) THEN
                DBMS_LOB.TRIM(blob_val, :data_len);
            END IF;
            %s
            -- Now update last_modified and last_accessed
            IF :modified IS NULL THEN
                modified_date := now;
            ELSE
                modified_date := TO_DATE('1970-01-01 00:00:00',
                        'YYYY-MM-DD HH24:MI:SS') + :modified / 86400;
            END IF;
            UPDATE rhnCache
               SET modified = modified_date
             WHERE key_id = our_key_id;
            -- Update accessed too
            UPDATE rhnCacheTimestamps
               SET accessed = now
             WHERE key_id = our_key_id;
            IF SQL%%ROWCOUNT = 0 THEN
                -- No entry in rhnCacheTimestamps; insert it
                INSERT INTO rhnCacheTimestamps (key_id, accessed)
                VALUES (our_key_id, now);
            END IF;
            COMMIT;
        END;
    """
    decl_template = "    arg_%s LONG RAW := :val_%s;"
    dbms_lob_template = "    DBMS_LOB.WRITE(blob_val, %s, %s, arg_%s);"

    indices = range(chunks)
    start_pos = map(lambda x, cs=chunk_size: x * cs + 1, indices)
    sizes = [chunk_size] * (chunks - 1) + \
        ['length(rawtohex(arg_%s)) / 2' % (chunks - 1)]
    query = plsql_template % (
        string.join(
            map(lambda x, y, t=decl_template: t % (x, y), indices, indices),
            "\n"),
        string.join(
            map(lambda x, y, z, t=dbms_lob_template: t % (x, y, z),
                sizes, start_pos, indices),
            "\n"),
    )
    params = {
        'modified': modified,
        'data_len': data_length,
        'key': name,
    }
    for i in indices:
        start = i * chunk_size
        end = (i + 1) * chunk_size
        params['val_%s' % i] = rhnSQL.types.LONG_BINARY(val[start:end])

    h = rhnSQL.prepare(query)
    tries = 3
    while tries:
        tries = tries - 1
        try:
            apply(h.execute, (), params)
        except rhnSQL.SQLSchemaError, e:
            if e.errno == 1:
                # Unique constraint violated - probably someone else was
                # doing the same thing at the same time; try again
                continue
            # Anything else is unexpected - propagate it
            raise
        # No errors - we're done
        break
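# The chunk layout used by set() above, in isolation (illustration only):
# DBMS_LOB.WRITE offsets are 1-based, hence the 'x * chunk_size + 1' start
# positions; the last chunk's size is computed inside PL/SQL as
# length(rawtohex(arg)) / 2 because it is usually shorter than chunk_size.
import math

def _chunk_plan(data_length, chunk_size=32512):
    chunks = int(math.ceil(float(data_length) / chunk_size))
    starts = [i * chunk_size + 1 for i in range(chunks)]
    return chunks, starts

assert _chunk_plan(70000) == (3, [1, 32513, 65025])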
def __init__(self, relative_path, timestamp, file_size):
    self.relative_path = relative_path
    self.timestamp = rhnLib.timestamp(timestamp)
    self.file_size = file_size
    self.full_path = os.path.join(CFG.MOUNT_POINT, self.relative_path)
    self.buffer_size = CFG.BUFFER_SIZE
def _test(self, t, dstshift=0):
    t = int(t)
    tstr = self._str(t)
    t2 = int(rhnLib.timestamp(tstr))
    return (t + dstshift == t2), t, tstr, t2
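# A standalone sketch of the round trip _test() exercises (hypothetical
# helper; the real self._str may format differently): format an epoch value
# as local time, re-parse it, and compare. dstshift lets a case that crosses
# a DST boundary declare the expected one-hour skew.
import time

def _roundtrip_ok(t, dstshift=0):
    tstr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
    t2 = int(time.mktime(time.strptime(tstr, "%Y-%m-%d %H:%M:%S")))
    return t + dstshift == t2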