def _execute_(self, args, kwargs):
    """Oracle-specific execution of the prepared query.

    Collects only the keyword arguments the statement actually binds and
    raises an SQLError if any required bind variable is missing.
    """
    # TODO: args appears unused, raise exception if we see any?
    # Case-insensitive view of the caller-supplied keyword arguments.
    incoming = UserDictCase(kwargs)
    bind_params = {}
    # bindnames() is Oracle specific: it lists the bind variables the
    # prepared statement expects, and every one must be supplied.
    for name in self._real_cursor.bindnames():
        if not incoming.has_key(name):
            # Raise the fault ourselves
            raise sql_base.SQLError(1008, 'Not all variables bound', name)
        bind_params[name] = to_string(incoming[name])
    # cx_Oracle expects the first arg to be the statement and no
    # positional args (None re-uses the already prepared statement):
    try:
        self._real_cursor.execute(None, **bind_params)
    except cx_Oracle.OperationalError:
        e = sys.exc_info()[1]
        raise sql_base.SQLError("Cannot execute SQL statement: %s" % str(e))
    self.description = self._real_cursor.description
    return self._real_cursor.rowcount
def __merge(self, component=None):
    """Merge the config options between the default comp dictionaries
    and the file we're parsing now; cache the result per component.
    """
    if component is None:
        component = self.__component

    merged = UserDictCase()
    comps = parse_comps(component)

    # First layer in the registered defaults for each component.
    for comp in comps:
        if not self.__defaults.has_key(comp):
            warn('key not found in config default dict', comp)
            continue
        merged.update(self.__defaults[comp])

    # Then overlay whatever the parsed config file provides.  We do not
    # force every option in the config file to have a default value
    # registered first; unknown keys are accepted as-is.
    for comp in comps:
        if not self.__parsedConfig.has_key(comp):
            # No such entry in the config file
            continue
        for key, (values, _lineno_) in self.__parsedConfig[comp].items():
            merged[key] = values

    # and now save it
    self.__configs[component] = merged
class Device(GenericDevice):
    """ This is the base Device class that supports instantiation from a
        dictionary. The __init__ takes the dictionary as its argument,
        together with a list of valid fields to recognize and with a mapping
        for dictionary keys into valid field names for self.data

        The fields are required to know what fields we have in the table.
        The mapping allows transformation from whatever comes in to valid
        fields in the table. Looks complicated but it isn't -- gafton
    """
    def __init__(self, fields, dict=None, mapping=None):
        GenericDevice.__init__(self)
        # Pre-seed self.data with None for every known table field.
        x = {}
        for k in fields:
            x[k] = None
        self.data = UserDictCase(x)
        if not dict:
            return
        # make sure we get a UserDictCase to work with
        if type(dict) == type({}):
            dict = UserDictCase(dict)
        if mapping is None or type(mapping) == type({}):
            mapping = UserDictCase(mapping)
        if not isinstance(dict, UserDictCase) or not isinstance(mapping, UserDictCase):
            log_error("Argument passed is not a dictionary", dict, mapping)
            raise TypeError("Argument passed is not a dictionary", dict, mapping)
        # make sure we have a platform
        for k in dict.keys():
            # Normalize empty strings to None.
            if dict[k] == "":
                dict[k] = None
            if self.data.has_key(k):
                self.data[k] = dict[k]
                continue
            if mapping.has_key(k):
                # the mapping dict might tell us to lose some fields:
                # a None mapping value means "silently drop this key"
                if mapping[k] is not None:
                    self.data[mapping[k]] = dict[k]
            else:
                log_error("Unknown HW key =`%s'" % k, dict.dict(), mapping.dict())
                # The try-except is added just so that we can send e-mails
                try:
                    raise KeyError("Don't know how to parse key `%s''" % k, dict.dict())
                except:
                    Traceback(mail=1)
                # Ignore this key
                continue
        # clean up this data: strip whitespace and surrounding quotes
        try:
            for k in self.data.keys():
                if type(self.data[k]) == type("") and len(self.data[k]):
                    self.data[k] = string.strip(self.data[k])
                    if not len(self.data[k]):
                        continue
                    if self.data[k][0] == '"' and self.data[k][-1] == '"':
                        self.data[k] = self.data[k][1:-1]
        except IndexError:
            # Re-raise with the key that failed, preserving the traceback.
            raise_with_tb(IndexError("Can not process data = %s, key = %s"
                                     % (repr(self.data), k)),
                          sys.exc_info()[2])
def __init__(self, dict, db, sql, rowid, cache=None):
    """Initialize the case-insensitive row wrapper.

    dict  -- initial key/value data for this row
    db    -- database handle; must be a sql_base.Database instance
    sql   -- SQL statement associated with this row
    rowid -- identifier of the row this instance represents
    cache -- optional cache object
    """
    UserDictCase.__init__(self, dict)
    # Fail fast on a bogus database handle.
    if not isinstance(db, sql_base.Database):
        raise TypeError("Second argument needs to be a database handle")
    self.__cache = cache
    self.__rowid = rowid
    self.__sql = sql
    self.__db = db
def add_hardware(self, hardware):
    """Add new hardware to this server.

    hardware -- a dict (or UserDictCase) describing one device; its
        "class" entry selects which Device subclass is instantiated.
    Returns 0 on success, -1 when there is nothing usable to add, and
    None for an unknown hardware class (which is reported by e-mail).
    Raises TypeError if the argument is not a dictionary.
    """
    log_debug(4, hardware)
    if not hardware:
        return -1
    if type(hardware) == type({}):
        hardware = UserDictCase(hardware)
    if not isinstance(hardware, UserDictCase):
        log_error("argument type is not hash: %s" % hardware)
        # NOTE: converted from the Python-2-only `raise E, msg` statement
        # form to the call form, which parses on both Python 2 and 3.
        raise TypeError("This function requires a hash as an argument")
    # validation is important
    hw_class = hardware.get("class")
    if hw_class is None:
        return -1
    hw_class = string.lower(hw_class)

    class_type = None

    if hw_class in ["video", "audio", "audio_hd", "usb", "other", "hd",
                    "floppy", "mouse", "modem", "network", "cdrom", "scsi",
                    "unspec", "scanner", "tape", "capture", "raid", "socket",
                    "keyboard", "printer", "firewire", "ide"]:
        class_type = HardwareDevice
    elif hw_class == "cpu":
        class_type = CPUDevice
    elif hw_class == "netinfo":
        class_type = NetworkInformation
    elif hw_class == "memory":
        class_type = MemoryInformation
    elif hw_class == "dmi":
        class_type = DMIInformation
    elif hw_class == "installinfo":
        class_type = InstallInformation
    elif hw_class == "netinterfaces":
        class_type = NetIfaceInformation
    else:
        log_error("UNKNOWN CLASS TYPE `%s'" % hw_class)
        # Same trick: try-except and raise the exception so that Traceback
        # can send the e-mail  (message typo "Unknwon" also fixed here)
        try:
            raise KeyError("Unknown class type `%s' for hardware '%s'" % (
                hw_class, hardware))
        except:
            Traceback(mail=1)
        return

    # create the new device
    new_dev = class_type(hardware)

    if self.__hardware.has_key(class_type):
        _l = self.__hardware[class_type]
    else:
        _l = self.__hardware[class_type] = []
    _l.append(new_dev)
    self.__changed = 1
    return 0
def __init__(self, db, table, hashname, hashval=None):
    """Bind this object to `table`, keyed by the `hashname` column.

    When `hashval` is supplied the matching entry is loaded right away;
    otherwise the object starts out empty (not a "real" row yet).
    """
    UserDictCase.__init__(self)
    if not isinstance(db, sql_base.Database):
        raise rhnException("Argument db is not a database instance", db)
    self.db = db
    self.table = table
    # normalize the key column name to lowercase
    self.hashname = string.lower(hashname)
    self.data = {}  # the data dictionary
    self.real = 0   # is this a real entry (ie, use insert or update)
    # if we have to load an entry already...
    if hashval is not None:
        self.load(hashval)
def __init__(self, req):
    """Set up the XML dump handler around the raw request object.

    req -- the incoming request; used directly as the raw output stream
           for the dump (its content type is forced to octet-stream).
    """
    rhnHandler.__init__(self)
    dumper.XML_Dumper.__init__(self)
    # Outgoing response headers; case-insensitive on the header name.
    self.headers_out = UserDictCase()
    self._raw_stream = req
    self._raw_stream.content_type = 'application/octet-stream'
    # 0 disables compression of the outgoing stream.
    self.compress_level = 0
    # State machine
    self._headers_sent = 0
    self._is_closed = 0
    self._compressed_stream = None
    # Dump functions this handler is willing to dispatch.
    self.functions = [
        'arches',
        'arches_extra',
        'channel_families',
        'channels',
        'get_comps',
        'get_modules',
        'channel_packages_short',
        'packages_short',
        'packages',
        'source_packages',
        'errata',
        'blacklist_obsoletes',
        'product_names',
        'get_rpm',
        'kickstartable_trees',
        'get_ks_file',
        'orgs',
    ]
    self.system_id = None
    # SQL templates; the %s placeholders are filled in later (channel
    # labels, org ids) before the query is prepared.
    self._channel_family_query_template = """
        select cfm.channel_family_id, 0 quantity
          from rhnChannelFamilyMembers cfm,
               rhnChannel c, rhnChannelFamily cf
         where cfm.channel_id = c.id
           and c.label in (%s)
           and cfm.channel_family_id = cf.id
           and cf.label != 'rh-public'
           and (cf.org_id in (%s)
                or cf.org_id is null)
        union
        select id channel_family_id, NULL quantity
          from rhnChannelFamily
         where label = 'rh-public'
    """
    self._channel_family_query_public = """
        select id channel_family_id, 0 quantity
          from rhnChannelFamily
         where org_id in (%s)
            or org_id is null
    """
    self._channel_family_query = None
def __init__(self, fields, dict=None, mapping=None):
    """Instantiate the device from a plain dictionary.

    fields  -- list of table field names; self.data is pre-seeded with
               None for each of them
    dict    -- incoming values; keys not in `fields` are translated via
               `mapping` or reported as unknown
    mapping -- optional key -> field-name translation; a None value for
               a key means "silently drop this key"
    Raises TypeError if dict/mapping are not dictionary-like.
    """
    GenericDevice.__init__(self)
    x = {}
    for k in fields:
        x[k] = None
    self.data = UserDictCase(x)
    if not dict:
        return
    # make sure we get a UserDictCase to work with
    if type(dict) == type({}):
        dict = UserDictCase(dict)
    if mapping is None or type(mapping) == type({}):
        mapping = UserDictCase(mapping)
    if not isinstance(dict, UserDictCase) or \
       not isinstance(mapping, UserDictCase):
        log_error("Argument passed is not a dictionary", dict, mapping)
        raise TypeError("Argument passed is not a dictionary", dict, mapping)
    # make sure we have a platform
    for k in dict.keys():
        if dict[k] == '':
            dict[k] = None
        if self.data.has_key(k):
            self.data[k] = dict[k]
            continue
        if mapping.has_key(k):
            # the mapping dict might tell us to lose some fields
            if mapping[k] is not None:
                self.data[mapping[k]] = dict[k]
        else:
            log_error("Unknown HW key =`%s'" % k,
                      dict.dict(), mapping.dict())
            # The try-except is added just so that we can send e-mails
            try:
                raise KeyError("Don't know how to parse key `%s''" % k,
                               dict.dict())
            except:
                Traceback(mail=1)
            # Ignore this key
            continue
    # clean up this data: strip whitespace and surrounding quotes
    try:
        for k in self.data.keys():
            if type(self.data[k]) == type("") and len(self.data[k]):
                self.data[k] = string.strip(self.data[k])
                if not len(self.data[k]):
                    continue
                if self.data[k][0] == '"' and self.data[k][-1] == '"':
                    self.data[k] = self.data[k][1:-1]
    except IndexError:
        # FIX: the original used the Python-2-only three-argument raise
        # statement (`raise E, msg, tb`), a syntax error on Python 3.
        # Use raise_with_tb as the sibling implementations already do.
        raise_with_tb(IndexError("Can not process data = %s, key = %s"
                                 % (repr(self.data), k)),
                      sys.exc_info()[2])
def _executemany(self, *args, **kwargs):
    """Execute the prepared statement once per "row" of keyword data.

    Each keyword argument maps a bind-variable name to a list of values;
    the lists are re-grouped into one dict per execution.  Returns the
    cursor's rowcount (0 when no keyword data was supplied).
    """
    if not kwargs:
        return 0
    params = UserDictCase(kwargs)
    # Break all the incoming keyword arg lists into individual hashes,
    # one per execution.
    all_kwargs = []
    for key in params.keys():
        # FIX: grow the row list only up to the longest value list seen.
        # The original appended len(params[key]) NEW dicts whenever a
        # longer list was encountered, over-padding all_kwargs with
        # incomplete rows when the lists had unequal lengths.
        while len(all_kwargs) < len(params[key]):
            all_kwargs.append({})
        for i, val in enumerate(params[key]):
            all_kwargs[i][key] = val
    self._real_cursor.executemany(self.sql, all_kwargs)
    self.description = self._real_cursor.description
    rowcount = self._real_cursor.rowcount
    return rowcount
def __init__(self, transfer=0, encoding=0, refreshCallback=None,
             progressCallback=None, use_datetime=None, timeout=None):
    """Initialize the transport.

    transfer/encoding -- transport flag values; validated and stored by
        set_transport_flags (kept as positional parameters for
        backwards compatibility)
    refreshCallback   -- invoked periodically while reading a response
    progressCallback  -- progress reporting hook; bufferSize controls
        how often it fires
    timeout           -- optional socket timeout for the connection
    """
    self._use_builtin_types = False
    self._transport_flags = {'transfer' : 0, 'encoding' : 0}
    self.set_transport_flags(transfer=transfer, encoding=encoding)
    # Extra outgoing headers; case-insensitive on the header name.
    self._headers = UserDictCase()
    self.verbose = 0
    self.connection = None
    self.method = "POST"
    self._lang = None
    self.refreshCallback = refreshCallback
    self.progressCallback = progressCallback
    self.bufferSize = 16384
    # Response state, populated after a request completes.
    self.headers_in = None
    self.response_status = None
    self.response_reason = None
    self._redirected = None
    self._use_datetime = use_datetime
    self.timeout = timeout
class Transport(xmlrpclib.Transport):
    """HTTP transport for XML-RPC with extra headers, transport flags
    (transfer/encoding), redirect detection and progress/refresh
    callbacks layered on top of xmlrpclib.Transport.
    """

    user_agent = "rhn.rpclib.py/%s" % __version__

    def __init__(self, transfer=0, encoding=0, refreshCallback=None,
                 progressCallback=None, use_datetime=None, timeout=None):
        self._use_builtin_types = False
        self._transport_flags = {'transfer': 0, 'encoding': 0}
        self.set_transport_flags(transfer=transfer, encoding=encoding)
        # Extra outgoing headers; case-insensitive on the header name.
        self._headers = UserDictCase()
        self.verbose = 0
        self.connection = None
        self.method = "POST"
        self._lang = None
        self.refreshCallback = refreshCallback
        self.progressCallback = progressCallback
        self.bufferSize = 16384
        # Response state, populated after a request completes.
        self.headers_in = None
        self.response_status = None
        self.response_reason = None
        self._redirected = None
        self._use_datetime = use_datetime
        self.timeout = timeout

    # set the progress callback
    def set_progress_callback(self, progressCallback, bufferSize=16384):
        self.progressCallback = progressCallback
        self.bufferSize = bufferSize

    # set the refresh callback
    def set_refresh_callback(self, refreshCallback):
        self.refreshCallback = refreshCallback

    # set the buffer size
    # The bigger this is, the faster the read is, but the more seldom is the
    # progress callback called
    def set_buffer_size(self, bufferSize):
        if bufferSize is None:
            # No buffer size specified; go with 16k
            bufferSize = 16384
        self.bufferSize = bufferSize

    # set the request method
    def set_method(self, method):
        if method not in ("GET", "POST"):
            raise IOError("Unknown request method %s" % method)
        self.method = method

    # reset the transport options
    def set_transport_flags(self, transfer=None, encoding=None, **kwargs):
        # For backwards compatibility, we keep transfer and encoding as
        # positional parameters (they could come in as kwargs easily)
        self._transport_flags.update(kwargs)
        if transfer is not None:
            self._transport_flags['transfer'] = transfer
        if encoding is not None:
            self._transport_flags['encoding'] = encoding
        self.validate_transport_flags()

    def get_transport_flags(self):
        return self._transport_flags.copy()

    def validate_transport_flags(self):
        # Transfer and encoding are guaranteed to be there
        transfer = self._transport_flags.get('transfer')
        transfer = lookupTransfer(transfer, strict=1)
        self._transport_flags['transfer'] = transfer
        encoding = self._transport_flags.get('encoding')
        encoding = lookupEncoding(encoding, strict=1)
        self._transport_flags['encoding'] = encoding

    # Add arbitrary additional headers.
    def set_header(self, name, arg):
        if type(arg) in [type([]), type(())]:
            # Multivalued header
            self._headers[name] = [str(a) for a in arg]
        else:
            self._headers[name] = str(arg)

    def add_header(self, name, arg):
        """Append one value to a (possibly multivalued) header."""
        if name in self._headers:
            vlist = self._headers[name]
            if not isinstance(vlist, ListType):
                # FIX: promote the scalar value to a list AND store the
                # list back; the original rebound only the local name, so
                # the appended value was silently lost.
                vlist = self._headers[name] = [vlist]
        else:
            vlist = self._headers[name] = []
        vlist.append(str(arg))

    def clear_headers(self):
        self._headers.clear()

    def get_connection(self, host):
        if self.verbose:
            print("Connecting via http to %s" % (host, ))
        if self.timeout:
            return connections.HTTPConnection(host, timeout=self.timeout)
        else:
            return connections.HTTPConnection(host)

    def request(self, host, handler, request_body, verbose=0):
        """Issue the XML-RPC request and return the parsed response
        (or None when the server redirected us).
        """
        # issue XML-RPC request
        # XXX: automatically compute how to send depending on how much data
        # you want to send
        # XXX Deal with HTTP/1.1 if necessary
        self.verbose = verbose

        # implement BASIC HTTP AUTHENTICATION
        host, extra_headers, x509 = self.get_host_info(host)
        if not extra_headers:
            extra_headers = []
        # Establish the connection
        connection = self.get_connection(host)
        # Setting the user agent. Only interesting for SSL tunnels, in any
        # other case the general headers are good enough.
        connection.set_user_agent(self.user_agent)
        if self.verbose:
            connection.set_debuglevel(self.verbose - 1)
        # Get the output object to push data with
        req = Output(connection=connection, method=self.method)
        req.set_transport_flags(**self._transport_flags)

        # Add the extra headers
        req.set_header('User-Agent', self.user_agent)
        for header, value in list(self._headers.items()) + extra_headers:
            # Output.set_header correctly deals with multivalued headers now
            req.set_header(header, value)

        # Content-Type
        req.set_header("Content-Type", "text/xml")
        req.process(request_body)

        # Host and Content-Length are set by HTTP*Connection
        for h in ['Content-Length', 'Host']:
            req.clear_header(h)

        headers, fd = req.send_http(host, handler)

        if self.verbose:
            print("Incoming headers:")
            for header, value in headers.items():
                print("\t%s : %s" % (header, value))

        if fd.status in (301, 302):
            # Remember where we were sent and let the caller retry.
            self._redirected = headers["Location"]
            self.response_status = fd.status
            return None

        # Save the headers
        self.headers_in = headers
        self.response_status = fd.status
        self.response_reason = fd.reason

        return self._process_response(fd, connection)

    def _process_response(self, fd, connection):
        # Now use the Input class in case we get an enhanced response
        resp = Input(self.headers_in, progressCallback=self.progressCallback,
                     bufferSize=self.bufferSize)
        fd = resp.decode(fd)
        if isinstance(fd, InputStream):
            # When the File object goes out of scope, so will the InputStream;
            # that will eventually call the connection's close() method and
            # cleanly reap it
            f = File(fd.fd, fd.length, fd.name, bufferSize=self.bufferSize,
                     progressCallback=self.progressCallback)
            # Set the File's close method to the connection's
            # Note that calling the HTTPResponse's close() is not enough,
            # since the main socket would remain open, and this is
            # particularily bad with SSL
            f.close = connection.close
            return f

        # We can safely close the connection now; if we had an
        # application/octet/stream (for which Input.read passes the original
        # socket object), Input.decode would return an InputStream,
        # so we wouldn't reach this point
        connection.close()
        return self.parse_response(fd)

    # Give back the new URL if redirected
    def redirected(self):
        return self._redirected

    # Rewrite parse_response to provide refresh callbacks
    def parse_response(self, f):
        # read response from input file, and parse it
        p, u = self.getparser()
        while 1:
            response = f.read(1024)
            if not response:
                break
            if self.refreshCallback:
                self.refreshCallback()
            if self.verbose:
                print("body:", repr(response))
            p.feed(response)
        f.close()
        p.close()
        return u.close()

    def setlang(self, lang):
        self._lang = lang
def __init__(self, uri, transport=None, encoding=None, verbose=0,
             proxy=None, username=None, password=None, refreshCallback=None,
             progressCallback=None, timeout=None):
    """Establish a "logical" server connection.

    uri   -- connection point, e.g. scheme://host/handler
    proxy -- optional HTTP proxy spec; may embed credentials, which then
             serve as defaults for username/password
    """
    #
    # First parse the proxy information if available
    #
    if proxy != None:
        (ph, pp, pu, pw) = get_proxy_info(proxy)
        if pp is not None:
            proxy = "%s:%s" % (ph, pp)
        else:
            proxy = ph
        # username and password will override whatever was passed in the
        # URL
        if pu is not None and username is None:
            username = pu
        if pw is not None and password is None:
            password = pw

    self._uri = sstr(uri)
    self._refreshCallback = None
    self._progressCallback = None
    self._bufferSize = None
    self._proxy = proxy
    self._username = username
    self._password = password
    self._timeout = timeout

    # __version__ may carry a trailing release token; use it as the rpc
    # version when present.
    if len(__version__.split()) > 1:
        self.rpc_version = __version__.split()[1]
    else:
        self.rpc_version = __version__

    self._reset_host_handler_and_type()

    if transport is None:
        self._allow_redirect = 1
        transport = self.default_transport(self._type, proxy, username,
                                           password, timeout)
    else:
        #
        # dont allow redirect on unknow transports, that should be
        # set up independantly
        #
        self._allow_redirect = 0

    self._redirected = None
    self.use_handler_path = 1
    self._transport = transport

    self._trusted_cert_files = []
    self._lang = None
    self._encoding = encoding
    self._verbose = verbose
    self.set_refresh_callback(refreshCallback)
    self.set_progress_callback(progressCallback)
    # referer, which redirect us to new handler
    self.send_handler = None
    self._headers = UserDictCase()
def __init__(self, fields, dict=None, mapping=None):
    """Instantiate the device from a plain dictionary.

    fields  -- list of table field names; self.data is pre-seeded with
               None for each of them
    dict    -- incoming values; keys not in `fields` are translated via
               `mapping` or reported as unknown
    mapping -- optional key -> field-name translation; a None value for
               a key means "silently drop this key"
    Raises TypeError if dict/mapping are not dictionary-like.
    """
    GenericDevice.__init__(self)
    x = {}
    for k in fields:
        x[k] = None
    self.data = UserDictCase(x)
    if not dict:
        return
    # make sure we get a UserDictCase to work with
    if type(dict) == type({}):
        dict = UserDictCase(dict)
    if mapping is None or type(mapping) == type({}):
        mapping = UserDictCase(mapping)
    if not isinstance(dict, UserDictCase) or \
       not isinstance(mapping, UserDictCase):
        log_error("Argument passed is not a dictionary", dict, mapping)
        raise TypeError("Argument passed is not a dictionary", dict, mapping)
    # make sure we have a platform
    for k in list(dict.keys()):
        # Normalize empty strings to None.
        if dict[k] == '':
            dict[k] = None
        if self.data.has_key(k):
            self.data[k] = dict[k]
            continue
        if mapping.has_key(k):
            # the mapping dict might tell us to lose some fields
            if mapping[k] is not None:
                self.data[mapping[k]] = dict[k]
        else:
            log_error("Unknown HW key =`%s'" % k,
                      dict.dict(), mapping.dict())
            # The try-except is added just so that we can send e-mails
            try:
                raise KeyError("Don't know how to parse key `%s''" % k,
                               dict.dict())
            except:
                Traceback(mail=1)
            # Ignore this key
            continue
    # clean up this data: strip whitespace and surrounding quotes
    try:
        for k in list(self.data.keys()):
            if type(self.data[k]) == type("") and len(self.data[k]):
                self.data[k] = self.data[k].strip()
                if not len(self.data[k]):
                    continue
                if self.data[k][0] == '"' and self.data[k][-1] == '"':
                    self.data[k] = self.data[k][1:-1]
    except IndexError:
        # Re-raise with the failing key, preserving the traceback.
        raise_with_tb(
            IndexError("Can not process data = %s, key = %s"
                       % (repr(self.data), k)),
            sys.exc_info()[2])
class ServerWrapper(Packages, Hardware, History):
    """ This is a middle class that ties all the subclasses together, plus it
        provides a cleaner way to keep all the wrapper functions in one place.
        The main Server class is based on this one and it looks a little bit
        cleaner that way.
    """

    def __init__(self):
        # Case-insensitive store for the server's own fields (id, name, ...)
        self.server = UserDictCase()
        Packages.__init__(self)
        History.__init__(self)
        Hardware.__init__(self)

    def __repr__(self):
        return "<%s instance>" % (self.__class__,)

    def set_value(self, name, value):
        """ update a value in self.server """
        if name is None or value is None:
            return -1
        self.server[name] = value
        return 0

    ###
    # PACKAGES
    ###
    def add_package(self, entry):
        """ Wrappers for the similar functions from Packages class that
            supplementaly require a valid sysid.
        """
        return Packages.add_package(self, self.server.get("id"), entry)

    def delete_package(self, entry):
        """ see add_package; deletes one package entry for this sysid """
        return Packages.delete_package(self, self.server.get("id"), entry)

    def dispose_packages(self):
        """ drop all package data for this sysid """
        return Packages.dispose_packages(self, self.server["id"])

    def save_packages(self, schedule=1):
        """ wrapper for the Packages.save_packages_byid() which requires the
            sysid
        """
        ret = self.save_packages_byid(self.server["id"], schedule=schedule)
        # this function is primarily called from outside
        # so we have to commit here
        rhnSQL.commit()
        return ret

    ###
    # HARDWARE
    ###
    def delete_hardware(self):
        """ Wrappers for the similar functions from Hardware class """
        return Hardware.delete_hardware(self, self.server.get("id"))

    def save_hardware(self):
        """ wrapper for the Hardware.save_hardware_byid() which requires the
            sysid
        """
        ret = self.save_hardware_byid(self.server["id"])
        # this function is primarily called from outside
        # so we have to commit here
        rhnSQL.commit()
        return ret

    def reload_hardware(self):
        """ wrapper for the Hardware.reload_hardware_byid() which requires
            the sysid
        """
        ret = self.reload_hardware_byid(self.server["id"])
        return ret

    ###
    # HISTORY
    ###
    def save_history(self):
        """ wrapper for save_history_byid() which requires the sysid """
        ret = self.save_history_byid(self.server["id"])
        # this function is primarily called from outside
        # so we have to commit here
        rhnSQL.commit()
        return ret
def __init__(self):
    """Initialize all mixin state; self.server is the case-insensitive
    store for the system's own fields (id, name, ...).
    """
    self.server = UserDictCase()
    Packages.__init__(self)
    History.__init__(self)
    Hardware.__init__(self)
    SolarisPatches.__init__(self)
def __local_GET_handler(self, req):
    """ GETs: authenticate user, and service local GETs.
        if not a local fetch, return None
    """
    log_debug(2, 'request method: %s' % req.method)

    # Early test to check if this is a request the proxy can handle
    # Can we serve this request?
    if req.method != "GET" or not CFG.PKG_DIR:
        # Don't know how to handle this
        return None

    # Tiny-url kickstart requests (for server kickstarts, aka not profiles)
    # have been name munged and we've already sent a HEAD request to the
    # Satellite to get a checksum for the rpm so we can find it in the
    # squid cache.
    # Original url looks like /ty/bSWE7qIq/Packages/policycoreutils-2.0.83
    # -19.39.el6.x86_64.rpm which gets munged to be /ty-cksm/ddb43838ad58
    # d74dc95badef543cd96459b8bb37ff559339de58ec8dbbd1f18b/Packages/polic
    # ycoreutils-2.0.83-19.39.el6.x86_64.rpm
    args = req.path_info.split('/')
    # urlparse returns a ParseResult, index 2 is the path
    if re.search('^' + URI_PREFIX_KS_CHECKSUM, urlparse(self.rhnParent)[2]):
        # We *ONLY* locally cache RPMs for kickstarts
        if len(args) < 3 or args[2] != 'Packages':
            return None
        req_type = 'tinyurl'
        reqident = args[1]
        reqaction = 'getPackage'
        reqparams = [args[-1]]
        self.cachedClientInfo = UserDictCase()
    elif (len(args) > 3 and args[1] == 'dist'):
        # This is a kickstart request
        req_type = 'ks-dist'
        reqident, reqaction, reqparams = self._split_ks_url(req)
        self.cachedClientInfo = UserDictCase()
    else:
        # Some other type of request
        (req_type, reqident, reqaction, reqparams) = self._split_url(req)

    if req_type is None or (req_type not in
                            ['$RHN', 'GET-REQ', 'tinyurl', 'ks-dist']):
        # not a traditional RHN GET (i.e., it is an arbitrary get)
        # XXX: there has to be a more elegant way to do this
        return None

    # kickstarts don't auth...
    if req_type in ['$RHN', 'GET-REQ']:
        # --- AUTH. CHECK:
        # Check client authentication. If not authenticated, throw
        # an exception.
        token = self.__getSessionToken()
        self.__checkAuthSessionTokenCache(token, reqident)

        # Is this channel local?
        # NOTE(review): authChannels entries appear to be sequences whose
        # first four items are (label, version, isBase, isLocal) — confirm
        # against the auth cache producer.
        for ch in self.authChannels:
            channel, _version, _isBaseChannel, isLocalChannel = ch[:4]
            if channel == reqident and str(isLocalChannel) == '1':
                # Local channel
                break
        else:
            # Not a local channel
            return None

    # --- LOCAL GET:
    localFlist = CFG.PROXY_LOCAL_FLIST or []

    if reqaction not in localFlist:
        # Not an action we know how to handle
        return None

    # We have a match; we'll try to serve packages from the local
    # repository
    log_debug(3, "Retrieve from local repository.")
    log_debug(3, req_type, reqident, reqaction, reqparams)
    result = self.__callLocalRepository(req_type, reqident, reqaction,
                                        reqparams)
    if result is None:
        log_debug(3, "Not available locally; will try higher up the chain.")
    else:
        # Signal that we have to XMLRPC encode the response in apacheHandler
        rhnFlags.set("NeedEncoding", 1)

    return result
def add_hardware(self, hardware):
    """ add new hardware """
    log_debug(4, hardware)
    if not hardware:
        return -1
    if type(hardware) == type({}):
        hardware = UserDictCase(hardware)
    if not isinstance(hardware, UserDictCase):
        log_error("argument type is not hash: %s" % hardware)
        raise TypeError("This function requires a hash as an argument")
    # validation is important
    hw_class = hardware.get("class")
    if hw_class is None:
        return -1
    hw_class = string.lower(hw_class)

    # special case: we got info about a virtual host
    # where this system is running on
    if hw_class == "sysinfo":
        SystemInformation(hardware, self)
        return 0
    if hw_class == "machineinfo":
        MachineInformation(self.server["id"], self.server["name"],
                           hardware)
        return 0

    # All the generic device classes share one implementation.
    generic_classes = ("video", "audio", "audio_hd", "usb", "other", "hd",
                       "floppy", "mouse", "modem", "network", "cdrom",
                       "scsi", "unspec", "scanner", "tape", "capture",
                       "raid", "socket", "keyboard", "printer", "firewire",
                       "ide")
    # Everything else dispatches to a dedicated class.
    dispatch = {
        "cpu": CPUDevice,
        "memory": MemoryInformation,
        "dmi": DMIInformation,
        "installinfo": InstallInformation,
        "netinterfaces": NetIfaceInformation,
        "fqdn": FQDNInformation,
    }
    if hw_class in generic_classes:
        class_type = HardwareDevice
    else:
        class_type = dispatch.get(hw_class)

    if class_type is None:
        log_error("UNKNOWN CLASS TYPE `%s'" % hw_class)
        # Same trick: try-except and raise the exception so that Traceback
        # can send the e-mail
        try:
            raise KeyError("Unknown class type `%s' for hardware '%s'" %
                           (hw_class, hardware))
        except:
            Traceback(mail=1)
        return

    # create the new device and file it under its class
    new_dev = class_type(hardware)
    if class_type in self.__hardware:
        _l = self.__hardware[class_type]
    else:
        _l = self.__hardware[class_type] = []
    _l.append(new_dev)
    self.__changed = 1
    return 0
def __init__(self, uri, transport=None, encoding=None, verbose=0,
             proxy=None, username=None, password=None, refreshCallback=None,
             progressCallback=None, timeout=None):
    """Establish a "logical" server connection to `uri`, optionally
    through an HTTP proxy (which may embed default credentials).
    """
    #
    # First parse the proxy information if available
    #
    if proxy != None:
        (ph, pp, pu, pw) = get_proxy_info(proxy)
        if pp is not None:
            proxy = "%s:%s" % (ph, pp)
        else:
            proxy = ph
        # username and password will override whatever was passed in the
        # URL
        if pu is not None and username is None:
            username = pu
        if pw is not None and password is None:
            password = pw

    self._uri = sstr(uri)
    self._refreshCallback = None
    self._progressCallback = None
    self._bufferSize = None
    self._proxy = proxy
    self._username = username
    self._password = password
    self._timeout = timeout

    # __version__ may carry a trailing release token; use it as the rpc
    # version when present.
    if len(__version__.split()) > 1:
        self.rpc_version = __version__.split()[1]
    else:
        self.rpc_version = __version__

    self._reset_host_handler_and_type()

    if transport is None:
        self._allow_redirect = 1
        transport = self.default_transport(self._type, proxy, username,
                                           password, timeout)
    else:
        #
        # dont allow redirect on unknow transports, that should be
        # set up independantly
        #
        self._allow_redirect = 0

    self._redirected = None
    self.use_handler_path = 1
    self._transport = transport

    self._trusted_cert_files = []
    self._lang = None
    self._encoding = encoding
    self._verbose = verbose
    self.set_refresh_callback(refreshCallback)
    self.set_progress_callback(progressCallback)
    # referer, which redirect us to new handler
    self.send_handler=None
    self._headers = UserDictCase()
class Server:
    """uri [,options] -> a logical connection to an XML-RPC server

    uri is the connection point on the server, given as
    scheme://host/target.

    The standard implementation always supports the "http" scheme. If
    SSL socket support is available (Python 2.0), it also supports
    "https".

    If the target part and the slash preceding it are both omitted,
    "/RPC2" is assumed.

    The following options can be given as keyword arguments:

        transport: a transport factory
        encoding: the request encoding (default is UTF-8)
        verbose: verbosity level
        proxy: use an HTTP proxy
        username: username for authenticated HTTP proxy
        password: password for authenticated HTTP proxy

    All 8-bit strings passed to the server proxy are assumed to use
    the given encoding.
    """

    # Default factories, chosen by default_transport() based on the
    # URI scheme and whether a proxy is configured.
    _transport_class = transports.Transport
    _transport_class_https = transports.SafeTransport
    _transport_class_proxy = transports.ProxyTransport
    _transport_class_https_proxy = transports.SafeProxyTransport

    def __init__(self, uri, transport=None, encoding=None, verbose=0,
                 proxy=None, username=None, password=None,
                 refreshCallback=None, progressCallback=None,
                 timeout=None):
        # establish a "logical" server connection

        #
        # First parse the proxy information if available
        #
        if proxy != None:
            (ph, pp, pu, pw) = get_proxy_info(proxy)

            # Rebuild the proxy string as host[:port]
            if pp is not None:
                proxy = "%s:%s" % (ph, pp)
            else:
                proxy = ph

            # username and password will override whatever was passed in the
            # URL
            if pu is not None and username is None:
                username = pu

                if pw is not None and password is None:
                    password = pw

        self._uri = sstr(uri)
        self._refreshCallback = None
        self._progressCallback = None
        self._bufferSize = None
        self._proxy = proxy
        self._username = username
        self._password = password
        self._timeout = timeout

        # rpc_version is the second token of __version__ when it has
        # one (e.g. "name 1.2"), else the whole __version__ string.
        if len(__version__.split()) > 1:
            self.rpc_version = __version__.split()[1]
        else:
            self.rpc_version = __version__

        # Sets self._type/_host/_handler from the URI; must precede
        # default_transport(), which reads self._type.
        self._reset_host_handler_and_type()

        if transport is None:
            self._allow_redirect = 1
            transport = self.default_transport(self._type, proxy, username,
                                               password, timeout)
        else:
            #
            # dont allow redirect on unknow transports, that should be
            # set up independantly
            #
            self._allow_redirect = 0

        self._redirected = None
        self.use_handler_path = 1
        self._transport = transport
        self._trusted_cert_files = []
        self._lang = None
        self._encoding = encoding
        self._verbose = verbose
        self.set_refresh_callback(refreshCallback)
        self.set_progress_callback(progressCallback)
        # referer, which redirect us to new handler
        self.send_handler = None
        self._headers = UserDictCase()

    def default_transport(self, type, proxy=None, username=None, password=None,
                          timeout=None):
        # Pick the transport class for the given scheme ("http"/"https")
        # and optional proxy.  NOTE: parameter `type` shadows the builtin.
        if proxy:
            if type == 'https':
                transport = self._transport_class_https_proxy(proxy,
                                                              proxyUsername=username, proxyPassword=password, timeout=timeout)
            else:
                transport = self._transport_class_proxy(proxy,
                                                        proxyUsername=username, proxyPassword=password, timeout=timeout)
        else:
            if type == 'https':
                transport = self._transport_class_https(timeout=timeout)
            else:
                transport = self._transport_class(timeout=timeout)
        return transport

    def allow_redirect(self, allow):
        # Enable/disable following of HTTP 301/302 in _request().
        self._allow_redirect = allow

    def redirected(self):
        # Return the redirect target of the last request, or None when
        # redirects are disabled.
        if not self._allow_redirect:
            return None
        return self._redirected

    def set_refresh_callback(self, refreshCallback):
        # Keep our copy and propagate to the active transport.
        self._refreshCallback = refreshCallback
        self._transport.set_refresh_callback(refreshCallback)

    def set_buffer_size(self, bufferSize):
        self._bufferSize = bufferSize
        self._transport.set_buffer_size(bufferSize)

    def set_progress_callback(self, progressCallback, bufferSize=16384):
        self._progressCallback = progressCallback
        self._transport.set_progress_callback(progressCallback, bufferSize)

    def _req_body(self, params, methodname):
        # Marshal the call into an XML-RPC request body.
        return xmlrpclib.dumps(params, methodname, encoding=self._encoding)

    def get_response_headers(self):
        # Headers of the last response, or None before any request.
        if self._transport:
            return self._transport.headers_in
        return None

    def get_response_status(self):
        if self._transport:
            return self._transport.response_status
        return None

    def get_response_reason(self):
        if self._transport:
            return self._transport.response_reason
        return None

    def get_content_range(self):
        """Returns a dictionary with three values:
            length: the total length of the entity-body (can be None)
            first_byte_pos: the position of the first byte (zero based)
            last_byte_pos: the position of the last byte (zero based)
            The range is inclusive; that is, a response 8-9/102 means two bytes
        """
        headers = self.get_response_headers()
        if not headers:
            # No request done yet
            return None
        content_range = headers.get('Content-Range')
        if not content_range:
            return None
        arr = filter(None, content_range.split())
        # NOTE(review): indexing the result of filter() is Python 2
        # behavior; under Python 3 this would need list(filter(...)).
        assert arr[0] == "bytes"
        assert len(arr) == 2
        arr = arr[1].split('/')
        assert len(arr) == 2

        brange, total_len = arr
        if total_len == '*':
            # Per RFC, the server is allowed to use * if the length of the
            # entity-body is unknown or difficult to determine
            total_len = None
        else:
            total_len = int(total_len)

        start, end = brange.split('-')
        result = {
            'length'            : total_len,
            'first_byte_pos'    : int(start),
            'last_byte_pos'     : int(end),
        }
        return result

    def accept_ranges(self):
        # Value of the Accept-Ranges header of the last response, if any.
        headers = self.get_response_headers()
        if not headers:
            return None
        if 'Accept-Ranges' in headers:
            return headers['Accept-Ranges']
        return None

    def _reset_host_handler_and_type(self):
        """ Reset the attributes:
            self._host, self._handler, self._type
            according the value of self._uri.
        """
        # get the url
        type, uri = splittype(self._uri)
        if type is None:
            raise MalformedURIError("missing protocol in uri")
        # with a real uri passed in, uri will now contain "//hostname..." so we
        # need at least 3 chars for it to maybe be ok...
        if len(uri) < 3 or uri[0:2] != "//":
            raise MalformedURIError
        # `type` cannot be None here (checked above); kept as-is.
        if type != None:
            self._type = type.lower()
        else:
            self._type = type
        if self._type not in ("http", "https"):
            raise IOError("unsupported XML-RPC protocol")
        self._host, self._handler = splithost(uri)
        if not self._handler:
            self._handler = "/RPC2"

    def _strip_characters(self, *args):
        """ Strip characters, which are not allowed according:
            http://www.w3.org/TR/2006/REC-xml-20060816/#charsets
            From spec:
            Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] |
                     [#x10000-#x10FFFF]
            /* any Unicode character, excluding the surrogate blocks,
               FFFE, and FFFF. */
        """
        # Recurses into tuples, lists and dicts; other objects (and
        # numbers) pass through untouched.
        regexp = r'[\x00-\x09]|[\x0b-\x0c]|[\x0e-\x1f]'
        result = []
        for item in args:
            item_type = type(item)
            if item_type == StringType or item_type == UnicodeType:
                item = re.sub(regexp, '', sstr(item))
            elif item_type == TupleType:
                item = tuple(self._strip_characters(i) for i in item)
            elif item_type == ListType:
                item = [self._strip_characters(i) for i in item]
            elif item_type == DictType or item_type == DictionaryType:
                item = dict([(self._strip_characters(name, val))
                             for name, val in item.items()])
            # else: some object - should take care of himself
            #        numbers - are safe
            result.append(item)

        # Single argument unwraps to the bare value; several arguments
        # come back as a tuple.
        if len(result) == 1:
            return result[0]
        else:
            return tuple(result)

    def _request(self, methodname, params):
        """ Call a method on the remote server
            we can handle redirections. """
        # the loop is used to handle redirections
        redirect_response = 0
        retry = 0

        self._reset_host_handler_and_type()

        while 1:
            if retry >= MAX_REDIRECTIONS:
                raise InvalidRedirectionError(
                    "Unable to fetch requested Package")

            # Clear the transport headers first
            self._transport.clear_headers()
            for k, v in self._headers.items():
                self._transport.set_header(k, v)

            self._transport.add_header("X-Info",
                                       'RPC Processor (C) Red Hat, Inc (version %s)' %
                                       self.rpc_version)
            # identify the capability set of this client to the server
            self._transport.set_header("X-Client-Version", 1)

            if self._allow_redirect:
                # Advertise that we follow redirects
                # changing the version from 1 to 2 to support backward compatibility
                self._transport.add_header("X-RHN-Transport-Capability",
                                           "follow-redirects=3")

            if redirect_response:
                self._transport.add_header('X-RHN-Redirect', '0')
            if self.send_handler:
                self._transport.add_header('X-RHN-Path', self.send_handler)

            request = self._req_body(self._strip_characters(params), methodname)

            try:
                response = self._transport.request(self._host, \
                                                   self._handler, request, verbose=self._verbose)
                save_response = self._transport.response_status
            except xmlrpclib.ProtocolError:
                if self.use_handler_path:
                    raise
                else:
                    # Fall through and inspect the HTTP status below.
                    save_response = sys.exc_info()[1].errcode

            self._redirected = None
            retry += 1
            if save_response == 200:
                # exit redirects loop and return response
                break
            elif save_response not in (301, 302):
                # Retry pkg fetch
                self.use_handler_path = 1
                continue
            # rest of loop is run only if we are redirected (301, 302)
            self._redirected = self._transport.redirected()
            self.use_handler_path = 0
            redirect_response = 1

            if not self._allow_redirect:
                raise InvalidRedirectionError("Redirects not allowed")

            if self._verbose:
                print("%s redirected to %s" % (self._uri, self._redirected))

            typ, uri = splittype(self._redirected)

            if typ != None:
                typ = typ.lower()
            if typ not in ("http", "https"):
                raise InvalidRedirectionError(
                    "Redirected to unsupported protocol %s" % typ)

            #
            # We forbid HTTPS -> HTTP for security reasons
            # Note that HTTP -> HTTPS -> HTTP is allowed (because we compare
            # the protocol for the redirect with the original one)
            #
            if self._type == "https" and typ == "http":
                raise InvalidRedirectionError(
                    "HTTPS redirected to HTTP is not supported")

            self._host, self._handler = splithost(uri)
            if not self._handler:
                self._handler = "/RPC2"

            # Create a new transport for the redirected service and
            # set up the parameters on the new transport
            del self._transport
            self._transport = self.default_transport(typ, self._proxy,
                                                     self._username, self._password, self._timeout)
            self.set_progress_callback(self._progressCallback)
            self.set_refresh_callback(self._refreshCallback)
            self.set_buffer_size(self._bufferSize)
            self.setlang(self._lang)

            if self._trusted_cert_files != [] and \
               hasattr(self._transport, "add_trusted_cert"):
                for certfile in self._trusted_cert_files:
                    self._transport.add_trusted_cert(certfile)
            # Then restart the loop to try the new entry point.

        if isinstance(response, transports.File):
            # Just return the file
            return response

        # an XML-RPC encoded data structure
        if isinstance(response, TupleType) and len(response) == 1:
            response = response[0]

        return response

    def __repr__(self):
        return (
            "<%s for %s%s>" %
            (self.__class__.__name__, self._host, self._handler)
        )

    __str__ = __repr__

    def __getattr__(self, name):
        # magic method dispatcher
        return _Method(self._request, name)

    # note: to call a remote object with an non-standard name, use
    # result getattr(server, "strange-python-name")(args)

    def set_transport_flags(self, transfer=0, encoding=0, **kwargs):
        # For backwards compatibility, we keep transfer and encoding as
        # positional parameters (they could come in as kwargs easily)
        if not self._transport:
            # Nothing to do
            return
        kwargs.update({
            'transfer'  : transfer,
            'encoding'  : encoding,
        })
        self._transport.set_transport_flags(**kwargs)

    def get_transport_flags(self):
        if not self._transport:
            # Nothing to do
            return {}
        return self._transport.get_transport_flags()

    def reset_transport_flags(self):
        # Does nothing
        pass

    # Allow user-defined additional headers.
    def set_header(self, name, arg):
        if type(arg) in [type([]), type(())]:
            # Multivalued header
            self._headers[name] = [str(a) for a in arg]
        else:
            self._headers[name] = str(arg)

    def add_header(self, name, arg):
        if name in self._headers:
            vlist = self._headers[name]
            if not isinstance(vlist, ListType):
                # NOTE(review): the new list is not stored back into
                # self._headers here, so the append below is lost when the
                # existing value was a scalar — looks like a latent bug;
                # confirm against the Transport.add_header twin.
                vlist = [vlist]
        else:
            vlist = self._headers[name] = []
        vlist.append(str(arg))

    # Sets the i18n options
    def setlang(self, lang):
        self._lang = lang
        if self._transport and hasattr(self._transport, "setlang"):
            self._transport.setlang(lang)

    # Sets the CA chain to be used
    def use_CA_chain(self, ca_chain=None):
        raise NotImplementedError("This method is deprecated")

    def add_trusted_cert(self, certfile):
        # Remember the cert so redirected transports get it too.
        self._trusted_cert_files.append(certfile)
        if self._transport and hasattr(self._transport, "add_trusted_cert"):
            self._transport.add_trusted_cert(certfile)

    def close(self):
        if self._transport:
            self._transport.close()
            self._transport = None
class NonAuthenticatedDumper(rhnHandler, dumper.XML_Dumper):
    """Streams satellite-sync export data (XML dumps, RPMs, kickstart
    files) straight onto the HTTP response without client auth.

    Writes go through a small state machine: headers are sent lazily on
    the first write, optionally wrapping the raw stream in gzip, and the
    connection is marked closed after use.
    """
    # pylint: disable=E1101,W0102,W0613,R0902,R0904

    def __init__(self, req):
        rhnHandler.__init__(self)
        dumper.XML_Dumper.__init__(self)
        self.headers_out = UserDictCase()
        self._raw_stream = req
        self._raw_stream.content_type = 'application/octet-stream'
        # 0 disables gzip compression of the response body
        self.compress_level = 0
        # State machine
        self._headers_sent = 0
        self._is_closed = 0
        self._compressed_stream = None
        # Externally callable dump entry points
        self.functions = [
            'arches',
            'arches_extra',
            'channel_families',
            'channels',
            'get_comps',
            'get_modules',
            'channel_packages_short',
            'packages_short',
            'packages',
            'source_packages',
            'errata',
            'blacklist_obsoletes',
            'product_names',
            'get_rpm',
            'kickstartable_trees',
            'get_ks_file',
            'orgs',
        ]
        self.system_id = None
        # Query templates; %s slots are filled in set_channel_family_query()
        self._channel_family_query_template = """
            select cfm.channel_family_id, 0 quantity
              from rhnChannelFamilyMembers cfm,
                   rhnChannel c, rhnChannelFamily cf
             where cfm.channel_id = c.id
               and c.label in (%s)
               and cfm.channel_family_id = cf.id
               and cf.label != 'rh-public'
               and (cf.org_id in (%s)
                   or cf.org_id is null)
             union
            select id channel_family_id, NULL quantity
              from rhnChannelFamily
             where label = 'rh-public'
        """
        self._channel_family_query_public = """
            select id channel_family_id, 0 quantity
              from rhnChannelFamily
             where org_id in (%s)
                or org_id is null
        """
        self._channel_family_query = None

    def _send_headers(self, error=0, init_compressed_stream=1):
        # Lazily emit HTTP headers exactly once; on error paths force
        # uncompressed text/xml so the fault is readable.
        log_debug(4, "is_closed", self._is_closed)
        if self._is_closed:
            raise Exception("Trying to write to a closed connection")
        if self._headers_sent:
            return
        self._headers_sent = 1
        if self.compress_level:
            self.headers_out['Content-Encoding'] = 'gzip'
        # Send the headers
        if error:
            # No compression
            self.compress_level = 0
            self._raw_stream.content_type = 'text/xml'
        for h, v in self.headers_out.items():
            self._raw_stream.headers_out[h] = str(v)
        self._raw_stream.send_http_header()
        # If need be, start gzipping
        if self.compress_level and init_compressed_stream:
            log_debug(4, "Compressing with factor %s" % self.compress_level)
            self._compressed_stream = gzip.GzipFile(None, "wb",
                                                    self.compress_level, self._raw_stream)

    def send(self, data):
        # Push data to the client (through gzip when enabled), sending
        # headers first if not done yet.
        log_debug(3, "Sending %d bytes" % len(data))
        try:
            self._send_headers()
            if self._compressed_stream:
                log_debug(4, "Sending through a compressed stream")
                self._compressed_stream.write(data)
            else:
                self._raw_stream.write(data)
        except IOError:
            log_error("Client appears to have closed connection")
            self.close()
            raise_with_tb(dumper.ClosedConnectionError, sys.exc_info()[2])
        log_debug(5, "Bytes sent", len(data))

    # file-like alias used by the XML writers
    write = send

    def close(self):
        # Flush/close the gzip stream (if any) and mark the connection
        # closed; further writes raise.
        log_debug(2, "Closing")
        if self._is_closed:
            log_debug(3, "Already closed")
            return

        if self._compressed_stream:
            log_debug(5, "Closing a compressed stream")
            try:
                self._compressed_stream.close()
            except IOError:
                e = sys.exc_info()[1]
                # Remote end has closed connection already
                log_error("Error closing the stream", str(e))
            self._compressed_stream = None
        self._is_closed = 1
        log_debug(3, "Closed")

    def set_channel_family_query(self, channel_labels=[]):
        # NOTE(review): mutable default argument; safe here because the
        # list is only read, never mutated.
        if not channel_labels:
            # All null-pwned channel families
            self._channel_family_query = self._channel_family_query_public % self.exportable_orgs
            return self
        self._channel_family_query = self._channel_family_query_template % (
            ', '.join(["'%s'" % x for x in channel_labels]),
            self.exportable_orgs)
        return self

    def _get_channel_data(self, channels):
        # Dump the channels into an in-memory container and reduce the
        # DOM-ish structure to a plain dict.
        writer = ContainerWriter()
        d = ChannelsDumper(writer, params=list(channels.values()))
        d.dump()
        data = writer.get_data()
        # We don't care about <rhn-channels> here
        channel_data = self._cleanse_channels(data[2])
        return channel_data

    def _cleanse_channels(channels_dom):
        # Convert the dumped channel DOM into
        # {label: {'packages': [...], 'ks_trees': [...], ...}}.
        channels = {}
        for dummy, attributes, child_elements in channels_dom:
            channel_label = attributes['label']
            channels[channel_label] = channel_entry = {}

            packages = attributes['packages'].split()
            del attributes['packages']
            # Get rid of the prefix
            prefix = "rhn-package-"
            prefix_len = len(prefix)
            packages = [int(x[prefix_len:]) for x in packages]
            channel_entry['packages'] = packages

            ks_trees = attributes['kickstartable-trees'].split()
            channel_entry['ks_trees'] = ks_trees

            # Clean up to reduce memory footprint if possible
            attributes.clear()

            # tag name to object prefix
            maps = {
                'source-packages': ('source_packages', 'rhn-source-package-'),
                'rhn-channel-errata': ('errata', 'rhn-erratum-'),
            }
            # Now look for package sources
            for tag_name, dummy, celem in child_elements:
                if tag_name not in maps:
                    continue
                field, prefix = maps[tag_name]
                prefix_len = len(prefix)
                # x[1] is the attributes hash; we fetch the id, strip the
                # prefix, then run the remainder through int()
                objects = []
                for dummy, ceattr, dummy in celem:
                    obj_id = ceattr['id']
                    obj_id = int(obj_id[prefix_len:])
                    last_modified = localtime(ceattr['last-modified'])
                    objects.append((obj_id, last_modified))
                channel_entry[field] = objects

            # Clean up to reduce memory footprint if possible
            del child_elements[:]

        return channels
    _cleanse_channels = staticmethod(_cleanse_channels)

    # Dumper functions here
    def dump_channel_families(self):
        log_debug(2)

        h = self.get_channel_families_statement()
        h.execute()

        writer = self._get_xml_writer()
        d = dumper.SatelliteDumper(writer,
                                   exportLib.ChannelFamiliesDumper(writer,
                                                                   data_iterator=h, null_max_members=0,),)
        d.dump()
        writer.flush()
        log_debug(4, "OK")
        self.close()
        return 0

    def dump_channels(self, channel_labels=None):
        log_debug(2)
        channels = self._validate_channels(channel_labels=channel_labels)

        writer = self._get_xml_writer()
        d = dumper.SatelliteDumper(writer,
                                   dumper.ChannelsDumperEx(writer,
                                                           params=list(channels.values())))
        d.dump()
        writer.flush()
        log_debug(4, "OK")
        self.close()
        return 0

    def dump_channel_packages_short(self, channel_label, last_modified):
        # Delegate to the base dumper, writing straight to the response
        # (no intermediate file, channels validated).
        return dumper.XML_Dumper.dump_channel_packages_short(
            self, channel_label, last_modified, filepath=None,
            validate_channels=True, send_headers=True, open_stream=False)

    def _packages(self, packages, prefix, dump_class, sources=0):
        return dumper.XML_Dumper._packages(self, packages, prefix, dump_class,
                                           sources, verify_packages=True)

    def dump_errata(self, errata):
        return dumper.XML_Dumper.dump_errata(self, errata, verify_errata=True)

    def dump_kickstartable_trees(self, kickstart_labels=None):
        return dumper.XML_Dumper.dump_kickstartable_trees(self,
                                                          kickstart_labels,
                                                          validate_kickstarts=True)

    def dump_product_names(self):
        return dumper.XML_Dumper.dump_product_names(self)

    # Exposed entry points (see self.functions); thin wrappers that set
    # up the channel-family query and call the matching dumper.
    def arches(self):
        return self.dump_arches(rpm_arch_type_only=1)

    def arches_extra(self):
        return self.dump_server_group_type_server_arches(rpm_arch_type_only=1)

    def blacklist_obsoletes(self):
        return self.dump_blacklist_obsoletes()

    def product_names(self):
        return self.dump_product_names()

    def channel_families(self, channel_labels=[]):
        self.set_channel_family_query()
        return self.dump_channel_families()

    def channels(self, channel_labels, flags={}):
        if not channel_labels:
            channel_labels = []
        self.set_channel_family_query(channel_labels=channel_labels)
        return self.dump_channels(channel_labels=channel_labels)

    def get_comps(self, channel):
        return self.get_repomd_file(channel, 1)

    def get_modules(self, channel):
        return self.get_repomd_file(channel, 2)

    def channel_packages_short(self, channel_label, last_modified):
        self.set_channel_family_query(channel_labels=[channel_label])
        return self.dump_channel_packages_short(channel_label, last_modified)

    def packages(self, packages=[]):
        self.set_channel_family_query()
        return self.dump_packages(packages=packages)

    def packages_short(self, packages=[]):
        self.set_channel_family_query()
        return self.dump_packages_short(packages=packages)

    def source_packages(self, packages=[]):
        self.set_channel_family_query()
        return self.dump_source_packages(packages=packages)

    def errata(self, errata=[]):
        self.set_channel_family_query()
        return self.dump_errata(errata=errata)

    def orgs(self):
        return self.dump_orgs()

    def kickstartable_trees(self, kickstart_labels=[]):
        self.set_channel_family_query()
        return self.dump_kickstartable_trees(kickstart_labels=kickstart_labels)

    def get_rpm(self, package, channel):
        log_debug(1, package, channel)
        return self._send_package_stream(package, channel)

    def get_repomd_file(self, channel, comps_type_id):
        # Locate the newest comps/modules file for the channel and
        # stream it; raises rhnFault 3015/3016 when missing.
        comps_query = """
            select relative_filename
              from rhnChannelComps
             where channel_id = (
                    select id
                      from rhnChannel
                     where label = :channel_label
                       and comps_type_id = :ctype_id
                   )
             order by id desc
        """
        channel_comps_sth = rhnSQL.prepare(comps_query)
        channel_comps_sth.execute(channel_label=channel,
                                  ctype_id=comps_type_id)
        row = channel_comps_sth.fetchone_dict()
        if not row:
            raise rhnFault(3015, "No comps/modules file for channel [%s]" % channel)
        path = os.path.join(CFG.MOUNT_POINT, row['relative_filename'])
        if not os.path.exists(path):
            log_error("Missing comps/modules file [%s] for channel [%s]" % (path, channel))
            raise rhnFault(3016,
                           "Unable to retrieve comps/modules file for channel [%s]" % channel)
        return self._send_stream(path)

    def get_ks_file(self, ks_label, relative_path):
        # Stream a file from a public (org_id null) kickstart tree.
        log_debug(1, ks_label, relative_path)
        h = rhnSQL.prepare("""
            select base_path
              from rhnKickstartableTree
             where label = :ks_label
               and org_id is null
        """)
        h.execute(ks_label=ks_label)
        row = h.fetchone_dict()
        if not row:
            raise rhnFault(3003, "No such file %s in tree %s" %
                           (relative_path, ks_label))
        path = os.path.join(CFG.MOUNT_POINT, row['base_path'], relative_path)
        if not os.path.exists(path):
            log_error("Missing file for satellite dumper: %s" % path)
            raise rhnFault(3007, "Unable to retrieve file %s in tree %s" %
                           (relative_path, ks_label))
        return self._send_stream(path)

    # Sends a package over the wire
    # prefix is whatever we prepend to the package id (rhn-package- or
    # rhn-source-package-)
    def _send_package_stream(self, package, channel):
        log_debug(3, package, channel)
        path, dummy = self.get_package_path_by_filename(package, channel)

        log_debug(3, "Package path", path)
        if not os.path.exists(path):
            log_error("Missing package (satellite dumper): %s" % path)
            raise rhnFault(3007, "Unable to retrieve package %s" % package)
        return self._send_stream(path)

    # This query is similar to the one above, except that we have already
    # authorized this channel (so no need for server_id)
    _query_get_package_path_by_nvra = rhnSQL.Statement("""
        select distinct
               p.id, p.path
          from rhnPackage p,
               rhnChannelPackage cp,
               rhnChannel c,
               rhnPackageArch pa
         where c.label = :channel
           and cp.channel_id = c.id
           and cp.package_id = p.id
           and p.name_id = LOOKUP_PACKAGE_NAME(:name)
           and p.evr_id = LOOKUP_EVR(:epoch, :version, :release)
           and p.package_arch_id = pa.id
           and pa.label = :arch
    """)

    def get_package_path_by_filename(self, fileName, channel):
        # Parse the NEVRA out of the filename and resolve it to an
        # on-disk path; maps the helper's exceptions onto rhnFault 17.
        log_debug(3, fileName, channel)
        fileName = str(fileName)
        n, e, v, r, a = rhnLib.parseRPMFilename(fileName)

        h = rhnSQL.prepare(self._query_get_package_path_by_nvra)
        h.execute(name=n, version=v, release=r, epoch=e, arch=a, channel=channel)
        try:
            return _get_path_from_cursor(h)
        except InvalidPackageError:
            log_debug(4, "Error", "Non-existent package requested", fileName)
            raise_with_tb(rhnFault(17, _("Invalid RPM package %s requested") % fileName),
                          sys.exc_info()[2])
        except NullPathPackageError:
            e = sys.exc_info()[1]
            package_id = e[0]
            log_error("Package path null for package id", package_id)
            raise_with_tb(rhnFault(17, _("Invalid RPM package %s requested") % fileName),
                          sys.exc_info()[2])
        except MissingPackageError:
            e = sys.exc_info()[1]
            filePath = e[0]
            log_error("Package not found", filePath)
            raise_with_tb(rhnFault(17, _("Package not found")),
                          sys.exc_info()[2])

    # Opens the file and sends the stream
    def _send_stream(self, path):
        # NOTE(review): the file is opened in text mode even though it is
        # served as application/x-rpm; on Python 3 this should almost
        # certainly be open(path, 'rb') — confirm before changing.
        try:
            stream = open(path)
        except IOError:
            e = sys.exc_info()[1]
            if e.errno == 2:
                raise_with_tb(rhnFault(3007, "Missing file %s" % path), sys.exc_info()[2])
            # Let it flow so we can find it later
            raise

        # Determine the file size by seeking to the end
        stream.seek(0, 2)
        file_size = stream.tell()
        stream.seek(0, 0)
        log_debug(3, "Package size", file_size)
        self.headers_out['Content-Length'] = file_size
        self.compress_level = 0
        self._raw_stream.content_type = 'application/x-rpm'
        self._send_headers()
        self.send_rpm(stream)
        return 0

    def send_rpm(self, stream):
        # Copy the stream to the client in 64k chunks, bypassing gzip.
        buffer_size = 65536
        while 1:
            buf = stream.read(buffer_size)
            if not buf:
                break
            try:
                self._raw_stream.write(buf)
            except IOError:
                # client closed the connection?
                log_error("Client appears to have closed connection")
                self.close_rpm()
                raise_with_tb(dumper.ClosedConnectionError, sys.exc_info()[2])
        self.close_rpm()

    def close_rpm(self):
        self._is_closed = 1

    def _respond_xmlrpc(self, data):
        # Marshal
        s = xmlrpclib.dumps((data, ))

        self.headers_out['Content-Length'] = len(s)
        self._raw_stream.content_type = 'text/xml'
        for h, v in self.headers_out.items():
            self._raw_stream.headers_out[h] = str(v)
        self._raw_stream.send_http_header()
        self._raw_stream.write(s)
        return 0
class ServerWrapper(Packages, Hardware, History, SuseData):
    """ This is a middle class that ties all the subclasses together, plus it
        provides a cleaner way to keep all the wrapper functions in one place.
        The main Server class is based on this one and it looks a little bit
        cleaner that way.
    """

    def __init__(self):
        # Shared state: every mixin keys its work off self.server,
        # in particular self.server["id"] (the sysid).
        self.server = UserDictCase()
        Packages.__init__(self)
        History.__init__(self)
        Hardware.__init__(self)
        SuseData.__init__(self)

    def __repr__(self):
        return "<%s instance>" % (self.__class__, )

    def set_value(self, name, value):
        """ update a value in self.server """
        # Rejects None for either side; returns 0 on success, -1 otherwise.
        if name is None or value is None:
            return -1
        self.server[name] = value
        return 0

    ###
    # PACKAGES
    ###
    def add_package(self, entry):
        """ Wrappers for the similar functions from Packages class that
            supplementaly require a valid sysid.
        """
        return Packages.add_package(self, self.server.get("id"), entry)

    def delete_package(self, entry):
        return Packages.delete_package(self, self.server.get("id"), entry)

    def dispose_packages(self):
        return Packages.dispose_packages(self, self.server["id"])

    def save_packages(self, schedule=1):
        """ wrapper for the Packages.save_packages_byid() which requires the
            sysid
        """
        ret = self.save_packages_byid(self.server["id"], schedule=schedule)
        # this function is primarily called from outside
        # so we have to commit here
        rhnSQL.commit()
        return ret

    ###
    # HARDWARE
    ###
    def delete_hardware(self):
        """ Wrappers for the similar functions from Hardware class """
        return Hardware.delete_hardware(self, self.server.get("id"))

    def save_hardware(self):
        """ wrapper for the Hardware.save_hardware_byid() which requires the
            sysid
        """
        ret = self.save_hardware_byid(self.server["id"])
        # this function is primarily called from outside
        # so we have to commit here
        rhnSQL.commit()
        return ret

    def reload_hardware(self):
        """ wrapper for the Hardware.reload_hardware_byid() which requires the
            sysid
        """
        ret = self.reload_hardware_byid(self.server["id"])
        return ret

    ###
    # HISTORY
    ###
    def save_history(self):
        ret = self.save_history_byid(self.server["id"])
        # this function is primarily called from outside
        # so we have to commit here
        rhnSQL.commit()
        return ret

    ###
    ### SUSE PRODUCT DATA
    ###
    def save_suse_products(self):
        # Persist the queued SUSE products and commit (called from outside).
        ret = self.save_suse_products_byid(self.server["id"])
        rhnSQL.commit()
        return ret

    def update_suse_products(self, products):
        # Queue the given products, then persist them immediately.
        self.add_suse_products(products)
        return self.save_suse_products()
def __init__(self):
    """Create the shared server-attribute dict and initialize each
    cooperating mixin explicitly (cooperative super() is not used in
    this code base)."""
    self.server = UserDictCase()
    for mixin in (Packages, History, Hardware, SuseData):
        mixin.__init__(self)
def __checkAuthSessionTokenCache(self, token, channel):
    """ Authentication / authorize the channel

    Validates the client's session token against the proxy's cached
    copy, checks token expiry (allowing for clock skew), and verifies
    the client is authorized for `channel`.  Raises rhnFault 33/34/35
    on failure.
    """
    log_debug(2, token, channel)
    # make sure server-id does not contain path
    self.clientServerId = token['X-RHN-Server-ID'].split("/")[-1]
    cachedToken = self.proxyAuth.get_client_token(self.clientServerId)
    if not cachedToken:
        # maybe client logged in through a different load-balanced proxy;
        # try to update the cache and try again
        cachedToken = self.proxyAuth.update_client_token_if_valid(
            self.clientServerId, token)
        if not cachedToken:
            msg = _("Invalid session key - server ID not found in cache: %s") \
                % self.clientServerId
            log_error(msg)
            raise rhnFault(33, msg)

    self.cachedClientInfo = UserDictCase(cachedToken)

    # Pull the proxy-only bookkeeping fields out of the cached copy so
    # it can be compared field-by-field with the client token.
    clockSkew = self.cachedClientInfo["X-RHN-Auth-Proxy-Clock-Skew"]
    del self.cachedClientInfo["X-RHN-Auth-Proxy-Clock-Skew"]

    # Add the server id
    self.authChannels = self.cachedClientInfo['X-RHN-Auth-Channels']
    del self.cachedClientInfo['X-RHN-Auth-Channels']
    self.cachedClientInfo['X-RHN-Server-ID'] = self.clientServerId
    log_debug(4, 'Retrieved token from cache: %s' % self.cachedClientInfo)

    # Compare the two things
    if not _dictEquals(token, self.cachedClientInfo,
                       ['X-RHN-Auth-Channels']):
        # Maybe the client logged in through a different load-balanced
        # proxy? Check validity of the token the client passed us.
        updatedToken = self.proxyAuth.update_client_token_if_valid(
            self.clientServerId, token)
        # fix up the updated token the same way we did above
        if updatedToken:
            self.cachedClientInfo = UserDictCase(updatedToken)
            clockSkew = self.cachedClientInfo[
                "X-RHN-Auth-Proxy-Clock-Skew"]
            del self.cachedClientInfo["X-RHN-Auth-Proxy-Clock-Skew"]
            self.authChannels = self.cachedClientInfo[
                'X-RHN-Auth-Channels']
            del self.cachedClientInfo['X-RHN-Auth-Channels']
            self.cachedClientInfo['X-RHN-Server-ID'] = \
                self.clientServerId
            log_debug(4, 'Retrieved token from cache: %s' %
                      self.cachedClientInfo)

        if not updatedToken or not _dictEquals(
                token, self.cachedClientInfo, ['X-RHN-Auth-Channels']):
            log_debug(3, "Session tokens different")
            raise rhnFault(33)  # Invalid session key

    # Check the expiration
    serverTime = float(token['X-RHN-Auth-Server-Time'])
    offset = float(token['X-RHN-Auth-Expire-Offset'])
    if time.time() > serverTime + offset + clockSkew:
        log_debug(3, "Session token has expired")
        raise rhnFault(34)  # Session key has expired

    # Only authorized channels are the ones stored in the cache.
    authChannels = [x[0] for x in self.authChannels]
    log_debug(4, "Auth channels: '%s'" % authChannels)
    # Check the authorization
    if channel not in authChannels:
        log_debug(4, "Not subscribed to channel %s; unauthorized" % channel)
        raise rhnFault(35, _('Unauthorized channel access requested.'))
class Transport(xmlrpclib.Transport):
    """RHN-specific XML-RPC transport.

    Extends the stock xmlrpclib Transport with:
      - arbitrary extra request headers (set_header/add_header),
      - transfer/encoding transport flags (compression etc.),
      - progress/refresh callbacks while reading the response,
      - HTTP redirect detection (301/302) without following it,
      - streamed file responses via the Input/InputStream machinery.
    """

    user_agent = "rhn.rpclib.py/%s" % __version__

    def __init__(self, transfer=0, encoding=0, refreshCallback=None,
                 progressCallback=None, use_datetime=None, timeout=None):
        # NOTE: does not call xmlrpclib.Transport.__init__; all state the
        # base class needs is set up here explicitly.
        self._use_builtin_types = False
        self._transport_flags = {'transfer' : 0, 'encoding' : 0}
        self.set_transport_flags(transfer=transfer, encoding=encoding)
        self._headers = UserDictCase()
        self.verbose = 0
        self.connection = None
        self.method = "POST"
        self._lang = None
        self.refreshCallback = refreshCallback
        self.progressCallback = progressCallback
        self.bufferSize = 16384
        # Response bookkeeping, populated by request()
        self.headers_in = None
        self.response_status = None
        self.response_reason = None
        self._redirected = None
        self._use_datetime = use_datetime
        self.timeout = timeout

    # set the progress callback
    def set_progress_callback(self, progressCallback, bufferSize=16384):
        self.progressCallback = progressCallback
        self.bufferSize = bufferSize

    # set the refresh callback
    def set_refresh_callback(self, refreshCallback):
        self.refreshCallback = refreshCallback

    # set the buffer size
    # The bigger this is, the faster the read is, but the more seldom is the
    # progress callback called
    def set_buffer_size(self, bufferSize):
        if bufferSize is None:
            # No buffer size specified; go with 16k
            bufferSize = 16384
        self.bufferSize = bufferSize

    # set the request method
    def set_method(self, method):
        """Set the HTTP request method; only GET and POST are supported."""
        if method not in ("GET", "POST"):
            raise IOError("Unknown request method %s" % method)
        self.method = method

    # reset the transport options
    def set_transport_flags(self, transfer=None, encoding=None, **kwargs):
        # For backwards compatibility, we keep transfer and encoding as
        # positional parameters (they could come in as kwargs easily)
        self._transport_flags.update(kwargs)
        if transfer is not None:
            self._transport_flags['transfer'] = transfer
        if encoding is not None:
            self._transport_flags['encoding'] = encoding
        self.validate_transport_flags()

    def get_transport_flags(self):
        """Return a copy of the current transport flags dict."""
        return self._transport_flags.copy()

    def validate_transport_flags(self):
        # Transfer and encoding are guaranteed to be there
        transfer = self._transport_flags.get('transfer')
        transfer = lookupTransfer(transfer, strict=1)
        self._transport_flags['transfer'] = transfer
        encoding = self._transport_flags.get('encoding')
        encoding = lookupEncoding(encoding, strict=1)
        self._transport_flags['encoding'] = encoding

    # Add arbitrary additional headers.
    def set_header(self, name, arg):
        """Set header `name`, replacing any previous value(s)."""
        if type(arg) in [ type([]), type(()) ]:
            # Multivalued header
            self._headers[name] = [str(a) for a in arg]
        else:
            self._headers[name] = str(arg)

    def add_header(self, name, arg):
        """Append a value to header `name` (multivalued headers)."""
        if name in self._headers:
            vlist = self._headers[name]
            if not isinstance(vlist, ListType):
                vlist = [ vlist ]
        else:
            vlist = self._headers[name] = []
        vlist.append(str(arg))

    def clear_headers(self):
        self._headers.clear()

    def get_connection(self, host):
        """Create and return a plain HTTP connection to `host`."""
        if self.verbose:
            print("Connecting via http to %s" % (host, ))
        if self.timeout:
            return connections.HTTPConnection(host, timeout=self.timeout)
        else:
            return connections.HTTPConnection(host)

    def request(self, host, handler, request_body, verbose=0):
        """Issue the XML-RPC request; returns the parsed response, a File
        for streamed responses, or None if the server redirected (the
        new location is then available via redirected())."""
        # issue XML-RPC request
        # XXX: automatically compute how to send depending on how much data
        # you want to send

        # XXX Deal with HTTP/1.1 if necessary
        self.verbose = verbose

        # implement BASIC HTTP AUTHENTICATION
        host, extra_headers, x509 = self.get_host_info(host)
        if not extra_headers:
            extra_headers = []
        # Establish the connection
        connection = self.get_connection(host)
        # Setting the user agent. Only interesting for SSL tunnels, in any
        # other case the general headers are good enough.
        connection.set_user_agent(self.user_agent)
        if self.verbose:
            connection.set_debuglevel(self.verbose - 1)
        # Get the output object to push data with
        req = Output(connection=connection, method=self.method)
        req.set_transport_flags(**self._transport_flags)

        # Add the extra headers
        req.set_header('User-Agent', self.user_agent)
        for header, value in list(self._headers.items()) + extra_headers:
            # Output.set_header correctly deals with multivalued headers now
            req.set_header(header, value)

        # Content-Type
        req.set_header("Content-Type", "text/xml")
        req.process(request_body)

        # Host and Content-Length are set by HTTP*Connection
        for h in ['Content-Length', 'Host']:
            req.clear_header(h)

        headers, fd = req.send_http(host, handler)

        if self.verbose:
            print("Incoming headers:")
            for header, value in headers.items():
                print("\t%s : %s" % (header, value))

        if fd.status in (301, 302):
            # Redirect: remember where to and bail out; the caller decides
            # whether to follow.
            self._redirected = headers["Location"]
            self.response_status = fd.status
            return None

        # Save the headers
        self.headers_in = headers
        self.response_status = fd.status
        self.response_reason = fd.reason

        return self._process_response(fd, connection)

    def _process_response(self, fd, connection):
        """Decode the raw response; return a File for streamed payloads or
        the parsed XML-RPC result otherwise."""
        # Now use the Input class in case we get an enhanced response
        resp = Input(self.headers_in, progressCallback=self.progressCallback,
                     bufferSize=self.bufferSize)
        fd = resp.decode(fd)
        if isinstance(fd, InputStream):
            # When the File object goes out of scope, so will the InputStream;
            # that will eventually call the connection's close() method and
            # cleanly reap it
            f = File(fd.fd, fd.length, fd.name, bufferSize=self.bufferSize,
                     progressCallback=self.progressCallback)
            # Set the File's close method to the connection's
            # Note that calling the HTTPResponse's close() is not enough,
            # since the main socket would remain open, and this is
            # particularily bad with SSL
            f.close = connection.close
            return f

        # We can safely close the connection now; if we had an
        # application/octet/stream (for which Input.read passes the original
        # socket object), Input.decode would return an InputStream,
        # so we wouldn't reach this point
        connection.close()
        return self.parse_response(fd)

    # Give back the new URL if redirected
    def redirected(self):
        return self._redirected

    # Rewrite parse_response to provide refresh callbacks
    def parse_response(self, f):
        # read response from input file, and parse it
        p, u = self.getparser()
        while 1:
            response = f.read(1024)
            if not response:
                break
            if self.refreshCallback:
                self.refreshCallback()
            if self.verbose:
                print("body:", repr(response))
            p.feed(response)

        f.close()
        p.close()
        return u.close()

    def setlang(self, lang):
        # Stored for subclasses/peers that send Accept-Language
        self._lang = lang
class NonAuthenticatedDumper(rhnHandler, dumper.XML_Dumper):
    """Satellite-export XML-RPC handler that streams dump data (channels,
    packages, errata, kickstart trees, ...) directly to the client over
    the raw request stream, optionally gzip-compressed.

    Write path is a small state machine: headers are sent exactly once
    (_send_headers), after which send()/write() push bytes either through
    a gzip wrapper or straight to the raw stream, until close().
    """
    # pylint: disable=E1101,W0102,W0613,R0902,R0904

    def __init__(self, req):
        rhnHandler.__init__(self)
        dumper.XML_Dumper.__init__(self)
        self.headers_out = UserDictCase()
        self._raw_stream = req
        self._raw_stream.content_type = 'application/octet-stream'
        # 0 disables compression; set per-request before headers go out
        self.compress_level = 0
        # State machine
        self._headers_sent = 0
        self._is_closed = 0
        self._compressed_stream = None
        # XML-RPC functions this handler exposes
        self.functions = [
            'arches',
            'arches_extra',
            'channel_families',
            'channels',
            'get_comps',
            'channel_packages_short',
            'packages_short',
            'packages',
            'source_packages',
            'errata',
            'blacklist_obsoletes',
            'product_names',
            'get_rpm',
            'kickstartable_trees',
            'get_ks_file',
            'orgs',
        ]
        self.system_id = None
        # %s placeholders: channel label list, exportable org ids
        self._channel_family_query_template = """
            select cfm.channel_family_id, 0 quantity
              from rhnChannelFamilyMembers cfm,
                   rhnChannel c, rhnChannelFamily cf
             where cfm.channel_id = c.id
               and c.label in (%s)
               and cfm.channel_family_id = cf.id
               and cf.label != 'rh-public'
               and (cf.org_id in (%s)
                or cf.org_id is null)
            union
            select id channel_family_id, NULL quantity
              from rhnChannelFamily
             where label = 'rh-public'
        """
        self._channel_family_query_public = """
            select id channel_family_id, 0 quantity
              from rhnChannelFamily
             where org_id in (%s)
                or org_id is null
        """
        self._channel_family_query = None

    def _send_headers(self, error=0, init_compressed_stream=1):
        """Send HTTP headers once; on error responses disable compression.
        Optionally set up the gzip stream for subsequent writes."""
        log_debug(4, "is_closed", self._is_closed)
        if self._is_closed:
            raise Exception("Trying to write to a closed connection")
        if self._headers_sent:
            return
        self._headers_sent = 1
        if self.compress_level:
            self.headers_out['Content-Encoding'] = 'gzip'
        # Send the headers
        if error:
            # No compression
            self.compress_level = 0
            self._raw_stream.content_type = 'text/xml'
        for h, v in self.headers_out.items():
            self._raw_stream.headers_out[h] = str(v)
        self._raw_stream.send_http_header()
        # If need be, start gzipping
        if self.compress_level and init_compressed_stream:
            log_debug(4, "Compressing with factor %s" % self.compress_level)
            self._compressed_stream = gzip.GzipFile(None, "wb",
                                                    self.compress_level, self._raw_stream)

    def send(self, data):
        """Write a chunk to the client, sending headers first if needed."""
        log_debug(3, "Sending %d bytes" % len(data))
        try:
            self._send_headers()
            if self._compressed_stream:
                log_debug(4, "Sending through a compressed stream")
                self._compressed_stream.write(data)
            else:
                self._raw_stream.write(data)
        except IOError:
            log_error("Client appears to have closed connection")
            self.close()
            raise_with_tb(dumper.ClosedConnectionError, sys.exc_info()[2])
        log_debug(5, "Bytes sent", len(data))

    # file-like alias so writers can treat this handler as a stream
    write = send

    def close(self):
        """Flush/close the (possibly compressed) stream; idempotent."""
        log_debug(2, "Closing")
        if self._is_closed:
            log_debug(3, "Already closed")
            return

        if self._compressed_stream:
            log_debug(5, "Closing a compressed stream")
            try:
                self._compressed_stream.close()
            except IOError:
                e = sys.exc_info()[1]
                # Remote end has closed connection already
                log_error("Error closing the stream", str(e))

            self._compressed_stream = None

        self._is_closed = 1
        log_debug(3, "Closed")

    def set_channel_family_query(self, channel_labels=[]):
        """Build the channel-family SQL for the given labels (or the public
        query when no labels are given). Returns self for chaining."""
        if not channel_labels:
            # All null-pwned channel families
            self._channel_family_query = self._channel_family_query_public % self.exportable_orgs
            return self

        self._channel_family_query = self._channel_family_query_template % (
            ', '.join(["'%s'" % x for x in channel_labels]),
            self.exportable_orgs)
        return self

    def _get_channel_data(self, channels):
        """Dump the channels into an in-memory DOM and return the cleansed
        per-channel data dict."""
        writer = ContainerWriter()
        d = ChannelsDumper(writer, params=list(channels.values()))
        d.dump()
        data = writer.get_data()
        # We don't care about <rhn-channels> here
        channel_data = self._cleanse_channels(data[2])
        return channel_data

    def _cleanse_channels(channels_dom):
        """Turn the raw channels DOM into a dict:
        label -> {packages, ks_trees, source_packages, errata}."""
        channels = {}
        for dummy, attributes, child_elements in channels_dom:
            channel_label = attributes['label']
            channels[channel_label] = channel_entry = {}

            packages = attributes['packages'].split()
            del attributes['packages']

            # Get rid of the prefix
            prefix = "rhn-package-"
            prefix_len = len(prefix)
            packages = [int(x[prefix_len:]) for x in packages]
            channel_entry['packages'] = packages

            ks_trees = attributes['kickstartable-trees'].split()
            channel_entry['ks_trees'] = ks_trees

            # Clean up to reduce memory footprint if possible
            attributes.clear()

            # tag name to object prefix
            maps = {
                'source-packages': ('source_packages', 'rhn-source-package-'),
                'rhn-channel-errata': ('errata', 'rhn-erratum-'),
            }
            # Now look for package sources
            for tag_name, dummy, celem in child_elements:
                if tag_name not in maps:
                    continue
                field, prefix = maps[tag_name]
                prefix_len = len(prefix)
                # Hmm. x[1] is the attributes hash; we fetch the id and we get
                # rid of the prefix, then we run that through int()
                objects = []
                for dummy, ceattr, dummy in celem:
                    obj_id = ceattr['id']
                    obj_id = int(obj_id[prefix_len:])
                    last_modified = localtime(ceattr['last-modified'])
                    objects.append((obj_id, last_modified))
                channel_entry[field] = objects

            # Clean up to reduce memory footprint if possible
            del child_elements[:]

        return channels
    _cleanse_channels = staticmethod(_cleanse_channels)

    # Dumper functions here
    def dump_channel_families(self):
        """Stream the channel-families dump to the client."""
        log_debug(2)

        h = self.get_channel_families_statement()
        h.execute()

        writer = self._get_xml_writer()
        d = dumper.SatelliteDumper(writer,
                                   exportLib.ChannelFamiliesDumper(writer,
                                                                   data_iterator=h, null_max_members=0,),)
        d.dump()
        writer.flush()
        log_debug(4, "OK")
        self.close()
        return 0

    def dump_channels(self, channel_labels=None):
        """Stream the (validated) channels dump to the client."""
        log_debug(2)
        channels = self._validate_channels(channel_labels=channel_labels)

        writer = self._get_xml_writer()
        d = dumper.SatelliteDumper(writer,
                                   dumper.ChannelsDumperEx(writer,
                                                           params=list(channels.values())))
        d.dump()
        writer.flush()
        log_debug(4, "OK")
        self.close()
        return 0

    def dump_channel_packages_short(self, channel_label, last_modified):
        return dumper.XML_Dumper.dump_channel_packages_short(
            self, channel_label, last_modified, filepath=None,
            validate_channels=True, send_headers=True, open_stream=False)

    def _packages(self, packages, prefix, dump_class, sources=0):
        return dumper.XML_Dumper._packages(self, packages, prefix, dump_class,
                                           sources, verify_packages=True)

    def dump_errata(self, errata):
        return dumper.XML_Dumper.dump_errata(self, errata, verify_errata=True)

    def dump_kickstartable_trees(self, kickstart_labels=None):
        return dumper.XML_Dumper.dump_kickstartable_trees(self,
                                                          kickstart_labels,
                                                          validate_kickstarts=True)

    def dump_product_names(self):
        """Stream the product-names dump to the client."""
        log_debug(4)

        writer = self._get_xml_writer()
        d = dumper.SatelliteDumper(writer, exportLib.ProductNamesDumper(writer))
        d.dump()
        writer.flush()
        self.close()
        return 0

    # --- thin XML-RPC entry points; most just set up the channel-family
    # query and delegate to the corresponding dump_* method ---

    def arches(self):
        return self.dump_arches(rpm_arch_type_only=1)

    def arches_extra(self):
        return self.dump_server_group_type_server_arches(rpm_arch_type_only=1)

    def blacklist_obsoletes(self):
        return self.dump_blacklist_obsoletes()

    def product_names(self):
        return self.dump_product_names()

    def channel_families(self, channel_labels=[]):
        self.set_channel_family_query()
        return self.dump_channel_families()

    def channels(self, channel_labels, flags={}):
        if not channel_labels:
            channel_labels = []
        self.set_channel_family_query(channel_labels=channel_labels)
        return self.dump_channels(channel_labels=channel_labels)

    def get_comps(self, channel):
        return self.get_comps_file(channel)

    def channel_packages_short(self, channel_label, last_modified):
        self.set_channel_family_query(channel_labels=[channel_label])
        return self.dump_channel_packages_short(channel_label, last_modified)

    def packages(self, packages=[]):
        self.set_channel_family_query()
        return self.dump_packages(packages=packages)

    def packages_short(self, packages=[]):
        self.set_channel_family_query()
        return self.dump_packages_short(packages=packages)

    def source_packages(self, packages=[]):
        self.set_channel_family_query()
        return self.dump_source_packages(packages=packages)

    def errata(self, errata=[]):
        self.set_channel_family_query()
        return self.dump_errata(errata=errata)

    def orgs(self):
        return self.dump_orgs()

    def kickstartable_trees(self, kickstart_labels=[]):
        self.set_channel_family_query()
        return self.dump_kickstartable_trees(kickstart_labels=kickstart_labels)

    def get_rpm(self, package, channel):
        log_debug(1, package, channel)
        return self._send_package_stream(package, channel)

    def get_comps_file(self, channel):
        """Stream the latest comps file of `channel` to the client."""
        comps_query = """
            select relative_filename
            from rhnChannelComps
            where channel_id = (
                select id
                from rhnChannel
                where label = :channel_label
            )
            order by id desc
        """
        channel_comps_sth = rhnSQL.prepare(comps_query)
        channel_comps_sth.execute(channel_label=channel)
        row = channel_comps_sth.fetchone_dict()
        if not row:
            raise rhnFault(3015, "No comps file for channel [%s]" % channel)
        path = os.path.join(CFG.MOUNT_POINT, row['relative_filename'])
        if not os.path.exists(path):
            log_error("Missing comps file [%s] for channel [%s]" %
                      (path, channel))
            raise rhnFault(3016, "Unable to retrieve comps file for channel [%s]" % channel)
        return self._send_stream(path)

    def get_ks_file(self, ks_label, relative_path):
        """Stream one file of a (null-org) kickstartable tree."""
        log_debug(1, ks_label, relative_path)
        h = rhnSQL.prepare("""
            select base_path
            from rhnKickstartableTree
            where label = :ks_label
            and org_id is null
        """)
        h.execute(ks_label=ks_label)
        row = h.fetchone_dict()
        if not row:
            raise rhnFault(3003, "No such file %s in tree %s" %
                           (relative_path, ks_label))
        path = os.path.join(CFG.MOUNT_POINT, row['base_path'], relative_path)
        if not os.path.exists(path):
            log_error("Missing file for satellite dumper: %s" % path)
            raise rhnFault(3007, "Unable to retrieve file %s in tree %s" %
                           (relative_path, ks_label))
        return self._send_stream(path)

    # Sends a package over the wire
    # prefix is whatever we prepend to the package id (rhn-package- or
    # rhn-source-package-)
    def _send_package_stream(self, package, channel):
        log_debug(3, package, channel)
        path, dummy = self.get_package_path_by_filename(package, channel)
        log_debug(3, "Package path", path)
        if not os.path.exists(path):
            log_error("Missing package (satellite dumper): %s" % path)
            raise rhnFault(3007, "Unable to retrieve package %s" % package)
        return self._send_stream(path)

    # This query is similar to the one above, except that we have already
    # authorized this channel (so no need for server_id)
    _query_get_package_path_by_nvra = rhnSQL.Statement("""
        select distinct
               p.id, p.path
          from rhnPackage p,
               rhnChannelPackage cp,
               rhnChannel c,
               rhnPackageArch pa
         where c.label = :channel
           and cp.channel_id = c.id
           and cp.package_id = p.id
           and p.name_id = LOOKUP_PACKAGE_NAME(:name)
           and p.evr_id = LOOKUP_EVR(:epoch, :version, :release)
           and p.package_arch_id = pa.id
           and pa.label = :arch
    """)

    def get_package_path_by_filename(self, fileName, channel):
        """Resolve an RPM filename within `channel` to (path, package_id);
        translate lookup errors into rhnFault(17)."""
        log_debug(3, fileName, channel)
        fileName = str(fileName)
        n, e, v, r, a = rhnLib.parseRPMFilename(fileName)

        h = rhnSQL.prepare(self._query_get_package_path_by_nvra)
        h.execute(name=n, version=v, release=r, epoch=e, arch=a, channel=channel)
        try:
            return _get_path_from_cursor(h)
        except InvalidPackageError:
            log_debug(4, "Error", "Non-existent package requested", fileName)
            raise_with_tb(rhnFault(17, _("Invalid RPM package %s requested") % fileName),
                          sys.exc_info()[2])
        except NullPathPackageError:
            e = sys.exc_info()[1]
            package_id = e[0]
            log_error("Package path null for package id", package_id)
            raise_with_tb(rhnFault(17, _("Invalid RPM package %s requested") % fileName),
                          sys.exc_info()[2])
        except MissingPackageError:
            e = sys.exc_info()[1]
            filePath = e[0]
            log_error("Package not found", filePath)
            raise_with_tb(rhnFault(17, _("Package not found")),
                          sys.exc_info()[2])

    # Opens the file and sends the stream
    def _send_stream(self, path):
        try:
            stream = open(path)
        except IOError:
            e = sys.exc_info()[1]
            if e.errno == 2:
                raise_with_tb(rhnFault(3007, "Missing file %s" % path), sys.exc_info()[2])
            # Let it flow so we can find it later
            raise

        # Determine the file size by seeking to the end
        stream.seek(0, 2)
        file_size = stream.tell()
        stream.seek(0, 0)
        log_debug(3, "Package size", file_size)
        self.headers_out['Content-Length'] = file_size
        # Raw file bytes; never compress on top of an RPM payload
        self.compress_level = 0
        self._raw_stream.content_type = 'application/x-rpm'
        self._send_headers()
        self.send_rpm(stream)
        return 0

    def send_rpm(self, stream):
        """Copy `stream` to the raw output in 64k chunks."""
        buffer_size = 65536
        while 1:
            buf = stream.read(buffer_size)
            if not buf:
                break
            try:
                self._raw_stream.write(buf)
            except IOError:
                # client closed the connection?
                log_error("Client appears to have closed connection")
                self.close_rpm()
                raise_with_tb(dumper.ClosedConnectionError, sys.exc_info()[2])
        self.close_rpm()

    def close_rpm(self):
        self._is_closed = 1

    def _respond_xmlrpc(self, data):
        """Send `data` back as a plain XML-RPC response (no compression)."""
        # Marshal
        s = xmlrpclib.dumps((data, ))

        self.headers_out['Content-Length'] = len(s)
        self._raw_stream.content_type = 'text/xml'
        for h, v in self.headers_out.items():
            self._raw_stream.headers_out[h] = str(v)
        self._raw_stream.send_http_header()
        self._raw_stream.write(s)
        return 0
class NonAuthenticatedDumper(rhnHandler, dumper.XML_Dumper):
    """Python 2 variant of the non-authenticated satellite-export handler.

    Streams dump data straight to the raw request stream, optionally
    gzip-compressed; headers are sent exactly once (_send_headers), then
    send()/write() push bytes until close().

    NOTE(review): this uses Python-2-only raise/except syntax
    (`raise E, v` / `except E, e`) and appears to duplicate the
    Python-3-style class of the same name elsewhere in this file —
    confirm which copy is actually in use.
    """
    # pylint: disable=E1101,W0102,W0613,R0902,R0904

    def __init__(self, req):
        rhnHandler.__init__(self)
        dumper.XML_Dumper.__init__(self)
        self.headers_out = UserDictCase()
        self._raw_stream = req
        self._raw_stream.content_type = 'application/octet-stream'
        # 0 disables compression; set per-request before headers go out
        self.compress_level = 0
        # State machine
        self._headers_sent = 0
        self._is_closed = 0
        self._compressed_stream = None
        # XML-RPC functions this handler exposes
        self.functions = [
            'arches',
            'arches_extra',
            'channel_families',
            'channels',
            'get_comps',
            'channel_packages_short',
            'packages_short',
            'packages',
            'source_packages',
            'errata',
            'blacklist_obsoletes',
            'product_names',
            'get_rpm',
            'kickstartable_trees',
            'get_ks_file',
            'orgs',
        ]
        self.system_id = None
        # %s placeholders: channel label list, exportable org ids
        self._channel_family_query_template = """
            select cfm.channel_family_id, 0 quantity
              from rhnChannelFamilyMembers cfm,
                   rhnChannel c, rhnChannelFamily cf
             where cfm.channel_id = c.id
               and c.label in (%s)
               and cfm.channel_family_id = cf.id
               and cf.label != 'rh-public'
               and (cf.org_id in (%s)
                or cf.org_id is null)
            union
            select id channel_family_id, NULL quantity
              from rhnChannelFamily
             where label = 'rh-public'
        """
        self._channel_family_query_public = """
            select id channel_family_id, 0 quantity
              from rhnChannelFamily
             where org_id in (%s)
                or org_id is null
        """
        self._channel_family_query = None

    def _send_headers(self, error=0, init_compressed_stream=1):
        """Send HTTP headers once; on error responses disable compression.
        Optionally set up the gzip stream for subsequent writes."""
        log_debug(4, "is_closed", self._is_closed)
        if self._is_closed:
            raise Exception, "Trying to write to a closed connection"
        if self._headers_sent:
            return
        self._headers_sent = 1
        if self.compress_level:
            self.headers_out['Content-Encoding'] = 'gzip'
        # Send the headers
        if error:
            # No compression
            self.compress_level = 0
            self._raw_stream.content_type = 'text/xml'
        for h, v in self.headers_out.items():
            self._raw_stream.headers_out[h] = str(v)
        self._raw_stream.send_http_header()
        # If need be, start gzipping
        if self.compress_level and init_compressed_stream:
            log_debug(4, "Compressing with factor %s" % self.compress_level)
            self._compressed_stream = gzip.GzipFile(None, "wb",
                                                    self.compress_level, self._raw_stream)

    def send(self, data):
        """Write a chunk to the client, sending headers first if needed."""
        log_debug(3, "Sending %d bytes" % len(data))
        try:
            self._send_headers()
            if self._compressed_stream:
                log_debug(4, "Sending through a compressed stream")
                self._compressed_stream.write(data)
            else:
                self._raw_stream.write(data)
        except IOError:
            log_error("Client appears to have closed connection")
            self.close()
            raise dumper.ClosedConnectionError, None, sys.exc_info()[2]
        log_debug(5, "Bytes sent", len(data))

    # file-like alias so writers can treat this handler as a stream
    write = send

    def close(self):
        """Flush/close the (possibly compressed) stream; idempotent."""
        log_debug(2, "Closing")
        if self._is_closed:
            log_debug(3, "Already closed")
            return

        if self._compressed_stream:
            log_debug(5, "Closing a compressed stream")
            try:
                self._compressed_stream.close()
            except IOError, e:
                # Remote end has closed connection already
                log_error("Error closing the stream", str(e))

            self._compressed_stream = None

        self._is_closed = 1
        log_debug(3, "Closed")
def __init__(self, req): rhnHandler.__init__(self) dumper.XML_Dumper.__init__(self) self.headers_out = UserDictCase() self._raw_stream = req self._raw_stream.content_type = 'application/octet-stream' self.compress_level = 0 # State machine self._headers_sent = 0 self._is_closed = 0 self._compressed_stream = None self.functions = [ 'arches', 'arches_extra', 'channel_families', 'channels', 'get_comps', 'get_modules', 'channel_packages_short', 'packages_short', 'packages', 'source_packages', 'errata', 'blacklist_obsoletes', 'product_names', 'get_rpm', 'kickstartable_trees', 'get_ks_file', 'orgs', 'support_information', 'suse_products', 'suse_product_channels', 'suse_upgrade_paths', 'suse_product_extensions', 'suse_product_repositories', 'scc_repositories', 'suse_subscriptions', 'cloned_channels', ] self.system_id = None self._channel_family_query_template = """ select cfm.channel_family_id, 0 quantity from rhnChannelFamilyMembers cfm, rhnChannel c, rhnChannelFamily cf where cfm.channel_id = c.id and c.label in (%s) and cfm.channel_family_id = cf.id and cf.label != 'rh-public' and (cf.org_id in (%s) or cf.org_id is null) union select id channel_family_id, NULL quantity from rhnChannelFamily where label = 'rh-public' """ self._channel_family_query_public = """ select id channel_family_id, 0 quantity from rhnChannelFamily where org_id in (%s) or org_id is null """ self._channel_family_query = None
def __init__(self): self.server = UserDictCase() Packages.__init__(self) History.__init__(self) Hardware.__init__(self)
class Server: """uri [,options] -> a logical connection to an XML-RPC server uri is the connection point on the server, given as scheme://host/target. The standard implementation always supports the "http" scheme. If SSL socket support is available (Python 2.0), it also supports "https". If the target part and the slash preceding it are both omitted, "/RPC2" is assumed. The following options can be given as keyword arguments: transport: a transport factory encoding: the request encoding (default is UTF-8) verbose: verbosity level proxy: use an HTTP proxy username: username for authenticated HTTP proxy password: password for authenticated HTTP proxy All 8-bit strings passed to the server proxy are assumed to use the given encoding. """ # Default factories _transport_class = transports.Transport _transport_class_https = transports.SafeTransport _transport_class_proxy = transports.ProxyTransport _transport_class_https_proxy = transports.SafeProxyTransport def __init__(self, uri, transport=None, encoding=None, verbose=0, proxy=None, username=None, password=None, refreshCallback=None, progressCallback=None, timeout=None): # establish a "logical" server connection # # First parse the proxy information if available # if proxy != None: (ph, pp, pu, pw) = get_proxy_info(proxy) if pp is not None: proxy = "%s:%s" % (ph, pp) else: proxy = ph # username and password will override whatever was passed in the # URL if pu is not None and username is None: username = pu if pw is not None and password is None: password = pw self._uri = sstr(uri) self._refreshCallback = None self._progressCallback = None self._bufferSize = None self._proxy = proxy self._username = username self._password = password self._timeout = timeout if len(__version__.split()) > 1: self.rpc_version = __version__.split()[1] else: self.rpc_version = __version__ self._reset_host_handler_and_type() if transport is None: self._allow_redirect = 1 transport = self.default_transport(self._type, proxy, username, 
password, timeout) else: # # dont allow redirect on unknow transports, that should be # set up independantly # self._allow_redirect = 0 self._redirected = None self.use_handler_path = 1 self._transport = transport self._trusted_cert_files = [] self._lang = None self._encoding = encoding self._verbose = verbose self.set_refresh_callback(refreshCallback) self.set_progress_callback(progressCallback) # referer, which redirect us to new handler self.send_handler = None self._headers = UserDictCase() def default_transport(self, type, proxy=None, username=None, password=None, timeout=None): if proxy: if type == 'https': transport = self._transport_class_https_proxy( proxy, proxyUsername=username, proxyPassword=password, timeout=timeout) else: transport = self._transport_class_proxy(proxy, proxyUsername=username, proxyPassword=password, timeout=timeout) else: if type == 'https': transport = self._transport_class_https(timeout=timeout) else: transport = self._transport_class(timeout=timeout) return transport def allow_redirect(self, allow): self._allow_redirect = allow def redirected(self): if not self._allow_redirect: return None return self._redirected def set_refresh_callback(self, refreshCallback): self._refreshCallback = refreshCallback self._transport.set_refresh_callback(refreshCallback) def set_buffer_size(self, bufferSize): self._bufferSize = bufferSize self._transport.set_buffer_size(bufferSize) def set_progress_callback(self, progressCallback, bufferSize=16384): self._progressCallback = progressCallback self._transport.set_progress_callback(progressCallback, bufferSize) def _req_body(self, params, methodname): return xmlrpclib.dumps(params, methodname, encoding=self._encoding) def get_response_headers(self): if self._transport: return self._transport.headers_in return None def get_response_status(self): if self._transport: return self._transport.response_status return None def get_response_reason(self): if self._transport: return self._transport.response_reason 
return None def get_content_range(self): """Returns a dictionary with three values: length: the total length of the entity-body (can be None) first_byte_pos: the position of the first byte (zero based) last_byte_pos: the position of the last byte (zero based) The range is inclusive; that is, a response 8-9/102 means two bytes """ headers = self.get_response_headers() if not headers: return None content_range = headers.get('Content-Range') if not content_range: return None arr = filter(None, content_range.split()) assert arr[0] == "bytes" assert len(arr) == 2 arr = arr[1].split('/') assert len(arr) == 2 brange, total_len = arr if total_len == '*': # Per RFC, the server is allowed to use * if the length of the # entity-body is unknown or difficult to determine total_len = None else: total_len = int(total_len) start, end = brange.split('-') result = { 'length': total_len, 'first_byte_pos': int(start), 'last_byte_pos': int(end), } return result def accept_ranges(self): headers = self.get_response_headers() if not headers: return None if 'Accept-Ranges' in headers: return headers['Accept-Ranges'] return None def _reset_host_handler_and_type(self): """ Reset the attributes: self._host, self._handler, self._type according the value of self._uri. """ # get the url type, uri = splittype(self._uri) if type is None: raise MalformedURIError("missing protocol in uri") # with a real uri passed in, uri will now contain "//hostname..." so we # need at least 3 chars for it to maybe be ok... 
if len(uri) < 3 or uri[0:2] != "//": raise MalformedURIError self._type = type.lower() if self._type not in ("http", "https"): raise IOError("unsupported XML-RPC protocol") self._host, self._handler = splithost(uri) if not self._handler: self._handler = "/RPC2" def _strip_characters(self, *args): """ Strip characters, which are not allowed according: http://www.w3.org/TR/2006/REC-xml-20060816/#charsets From spec: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF] /* any Unicode character, excluding the surrogate blocks, FFFE, and FFFF. */ """ regexp = r'[\x00-\x09]|[\x0b-\x0c]|[\x0e-\x1f]' result = [] for item in args: item_type = type(item) if item_type == StringType or item_type == UnicodeType: item = re.sub(regexp, '', sstr(item)) elif item_type == TupleType: item = tuple(self._strip_characters(i) for i in item) elif item_type == ListType: item = [self._strip_characters(i) for i in item] elif item_type == DictType or item_type == DictionaryType: item = dict([(self._strip_characters(name, val)) for name, val in item.items()]) # else: some object - should take care of himself # numbers - are safe result.append(item) if len(result) == 1: return result[0] else: return tuple(result) def _request(self, methodname, params): """ Call a method on the remote server we can handle redirections. 
""" # the loop is used to handle redirections redirect_response = 0 retry = 0 self._reset_host_handler_and_type() while 1: if retry >= MAX_REDIRECTIONS: raise InvalidRedirectionError( "Unable to fetch requested Package") # Clear the transport headers first self._transport.clear_headers() for k, v in self._headers.items(): self._transport.set_header(k, v) self._transport.add_header( "X-Info", 'RPC Processor (C) Red Hat, Inc (version %s)' % self.rpc_version) # identify the capability set of this client to the server self._transport.set_header("X-Client-Version", 1) if self._allow_redirect: # Advertise that we follow redirects #changing the version from 1 to 2 to support backward compatibility self._transport.add_header("X-RHN-Transport-Capability", "follow-redirects=3") if redirect_response: self._transport.add_header('X-RHN-Redirect', '0') if self.send_handler: self._transport.add_header('X-RHN-Path', self.send_handler) request = self._req_body(self._strip_characters(params), methodname) try: response = self._transport.request(self._host, \ self._handler, request, verbose=self._verbose) save_response = self._transport.response_status except xmlrpclib.ProtocolError: if self.use_handler_path: raise else: save_response = sys.exc_info()[1].errcode self._redirected = None retry += 1 if save_response == 200: # exit redirects loop and return response break elif save_response not in (301, 302): # Retry pkg fetch self.use_handler_path = 1 continue # rest of loop is run only if we are redirected (301, 302) self._redirected = self._transport.redirected() self.use_handler_path = 0 redirect_response = 1 if not self._allow_redirect: raise InvalidRedirectionError("Redirects not allowed") if self._verbose: print("%s redirected to %s" % (self._uri, self._redirected)) typ, uri = splittype(self._redirected) if typ != None: typ = typ.lower() if typ not in ("http", "https"): raise InvalidRedirectionError( "Redirected to unsupported protocol %s" % typ) # # We forbid HTTPS -> HTTP for 
security reasons # Note that HTTP -> HTTPS -> HTTP is allowed (because we compare # the protocol for the redirect with the original one) # if self._type == "https" and typ == "http": raise InvalidRedirectionError( "HTTPS redirected to HTTP is not supported") self._host, self._handler = splithost(uri) if not self._handler: self._handler = "/RPC2" # Create a new transport for the redirected service and # set up the parameters on the new transport del self._transport self._transport = self.default_transport(typ, self._proxy, self._username, self._password, self._timeout) self.set_progress_callback(self._progressCallback) self.set_refresh_callback(self._refreshCallback) self.set_buffer_size(self._bufferSize) self.setlang(self._lang) if self._trusted_cert_files != [] and \ hasattr(self._transport, "add_trusted_cert"): for certfile in self._trusted_cert_files: self._transport.add_trusted_cert(certfile) # Then restart the loop to try the new entry point. if isinstance(response, transports.File): # Just return the file return response # an XML-RPC encoded data structure if isinstance(response, TupleType) and len(response) == 1: response = response[0] return response def __repr__(self): return ("<%s for %s%s>" % (self.__class__.__name__, self._host, self._handler)) __str__ = __repr__ def __getattr__(self, name): # magic method dispatcher return _Method(self._request, name) # note: to call a remote object with an non-standard name, use # result getattr(server, "strange-python-name")(args) def set_transport_flags(self, transfer=0, encoding=0, **kwargs): if not self._transport: # Nothing to do return kwargs.update({ 'transfer': transfer, 'encoding': encoding, }) self._transport.set_transport_flags(**kwargs) def get_transport_flags(self): if not self._transport: # Nothing to do return {} return self._transport.get_transport_flags() def reset_transport_flags(self): # Does nothing pass # Allow user-defined additional headers. 
def set_header(self, name, arg):
    """Set (replace) outgoing header `name`.

    A list or tuple value becomes a multivalued header; every value is
    stringified before being stored.
    """
    if type(arg) in (list, tuple):
        # Multivalued header
        self._headers[name] = [str(a) for a in arg]
    else:
        self._headers[name] = str(arg)

def add_header(self, name, arg):
    """Append one value to header `name`.

    An existing scalar value is promoted to a list first.
    """
    if name in self._headers:
        vlist = self._headers[name]
        if not isinstance(vlist, list):
            # BUGFIX: store the promoted list back into the dict --
            # previously the local list was appended to and then
            # discarded, silently dropping the new value.
            vlist = self._headers[name] = [vlist]
    else:
        vlist = self._headers[name] = []
    vlist.append(str(arg))

# Sets the i18n options
def setlang(self, lang):
    """Remember the language and propagate it to the transport, if any."""
    self._lang = lang
    if self._transport and hasattr(self._transport, "setlang"):
        self._transport.setlang(lang)

# Sets the CA chain to be used
def use_CA_chain(self, ca_chain=None):
    """Deprecated; use add_trusted_cert() instead."""
    raise NotImplementedError("This method is deprecated")

def add_trusted_cert(self, certfile):
    """Register a trusted CA cert file, also on the live transport."""
    self._trusted_cert_files.append(certfile)
    if self._transport and hasattr(self._transport, "add_trusted_cert"):
        self._transport.add_trusted_cert(certfile)

def close(self):
    """Close and drop the transport, if one is open."""
    if self._transport:
        self._transport.close()
        self._transport = None
def _querySatelliteForChecksum(self, req):
    """ Sends a HEAD request to the satellite for the purpose of
        obtaining the checksum for the requested resource.  A (status,
        checksum) tuple is returned.  If status is not apache.OK,
        checksum will be None.  If status is OK, and a checksum is not
        returned, the old BZ 158236 behavior will be used.
    """
    # Pick the scheme from the port the client connected on.
    # NOTE(review): assumes 443 <=> HTTPS -- confirm against server config.
    scheme = SCHEME_HTTP
    if req.server.port == 443:
        scheme = SCHEME_HTTPS
    log_debug(6, "Using scheme: %s" % scheme)

    # Initiate a HEAD request to the satellite to retrieve the MD5 sum.
    # Actually, we make the request through our own proxy first, so
    # that we don't accidentally bypass necessary authentication
    # routines.  Since it's a HEAD request, the proxy will forward it
    # directly to the satellite like it would a POST request.
    host = "127.0.0.1"
    port = req.connection.local_addr[1]
    connection = self._createConnection(host, port, scheme)
    if not connection:
        # Couldn't form the connection.  Log an error and revert to the
        # old BZ 158236 behavior.  In order to be as robust as possible,
        # we won't fail here.
        log_error('HEAD req - Could not create connection to %s://%s:%s'
                  % (scheme, host, str(port)))
        return (apache.OK, None)

    # We obtained the connection successfully.  Construct the URL that
    # we'll connect to.
    pingURL = "%s://%s:%s%s" % (scheme, host, str(port), req.uri)
    log_debug(6, "Ping URI: %s" % pingURL)

    # Forward the client's headers, except Range: we want the checksum
    # of the whole file, not of a partial slice.
    hdrs = UserDictCase()
    for k in req.headers_in.keys():
        if k.lower() != 'range':  # we want checksum of whole file
            hdrs[k] = req.headers_in[k]

    log_debug(9, "Using existing headers_in", hdrs)
    connection.request("HEAD", pingURL, None, hdrs)
    log_debug(6, "Connection made, awaiting response.")

    # Get the response.
    response = connection.getresponse()
    log_debug(6, "Received response status: %s" % response.status)
    connection.close()

    if response.status not in (apache.HTTP_OK, apache.HTTP_PARTIAL_CONTENT):
        # Something bad happened.  Return the status back to the client.
        # (typo "reponse" fixed in the log message below)
        log_debug(1, "HEAD req - Received error code in response: %s"
                  % (str(response.status)))
        return (response.status, None)

    # The request was successful.  Dig the checksum out of the headers.
    responseHdrs = response.msg
    if not responseHdrs:
        # No headers?!  This shouldn't happen at all.  But if it does,
        # revert to the old BZ 158236 behavior.
        log_error("HEAD response - No HTTP headers!")
        return (apache.OK, None)

    if not responseHdrs.has_key(HEADER_CHECKSUM):
        # No checksum was provided.  This could happen if a newer
        # proxy is talking to an older satellite.  To keep things
        # running smoothly, we'll just revert to the BZ 158236
        # behavior.
        log_debug(1, "HEAD response - No X-RHN-Checksum field provided!")
        return (apache.OK, None)

    checksum = responseHdrs[HEADER_CHECKSUM]
    return (apache.OK, checksum)
class Device(GenericDevice):

    """ This is the base Device class that supports instantiation from a
        dictionary. the __init__ takes the dictionary as its argument,
        together with a list of valid fields to recognize and with a mapping
        for dictionary keys into valid field names for self.data

        The fields are required to know what fields we have in the table.
        The mapping allows transformation from whatever comes in to valid
        fields in the table Looks complicated but it isn't -- gafton
    """

    def __init__(self, fields, dict=None, mapping=None):
        # fields: valid column names; each starts out as None in self.data
        # dict: incoming hardware data (plain dict or UserDictCase)
        # mapping: optional incoming-key -> column-name translation; a
        #          value of None in the mapping means "drop this key"
        GenericDevice.__init__(self)
        x = {}
        for k in fields:
            x[k] = None
        self.data = UserDictCase(x)
        if not dict:
            return
        # make sure we get a UserDictCase to work with
        if type(dict) == type({}):
            dict = UserDictCase(dict)
        if mapping is None or type(mapping) == type({}):
            # presumably UserDictCase(None) yields an empty dict -- verify
            mapping = UserDictCase(mapping)
        if not isinstance(dict, UserDictCase) or \
           not isinstance(mapping, UserDictCase):
            log_error("Argument passed is not a dictionary", dict, mapping)
            raise TypeError("Argument passed is not a dictionary",
                            dict, mapping)
        # make sure we have a platform
        for k in list(dict.keys()):
            # normalize empty strings to None before storing
            if dict[k] == '':
                dict[k] = None
            if self.data.has_key(k):
                self.data[k] = dict[k]
                continue
            if mapping.has_key(k):
                # the mapping dict might tell us to lose some fields
                if mapping[k] is not None:
                    self.data[mapping[k]] = dict[k]
            else:
                log_error("Unknown HW key =`%s'" % k,
                          dict.dict(), mapping.dict())
                # The try-except is added just so that we can send e-mails
                try:
                    raise KeyError("Don't know how to parse key `%s''" % k,
                                   dict.dict())
                except:
                    # deliberate best-effort: mail the traceback, never fail
                    Traceback(mail=1)
                # Ignore this key
                continue
        # clean up this data: strip whitespace and surrounding quotes
        try:
            for k in list(self.data.keys()):
                if type(self.data[k]) == type("") and len(self.data[k]):
                    self.data[k] = self.data[k].strip()
                    if not len(self.data[k]):
                        continue
                    if self.data[k][0] == '"' and self.data[k][-1] == '"':
                        self.data[k] = self.data[k][1:-1]
        except IndexError:
            # k still holds the key being processed when the error hit
            raise_with_tb(
                IndexError("Can not process data = %s, key = %s"
                           % (repr(self.data), k)),
                sys.exc_info()[2])
def set_info(self, name, value):
    """ set a certain value for the userinfo field. This is BUTT ugly. """
    log_debug(3, name, value)
    # translation from what the client send us to real names of the fields
    # in the tables.
    field_alias = {
        "first_name": "first_names",
        "position": "title",
        "title": "prefix",
    }
    if not name:
        return -1
    name = name.lower()
    if type(value) is str:
        value = value.strip()
    # We have to watch over carefully for different field names
    # being sent from rhn_register
    updated = 0

    # translation
    name = field_alias.get(name, name)

    # Some fields can not have null string values
    if name in ("first_names", "last_name", "prefix",   # personal_info
                "address1", "city", "country"):         # site_info
        # we require something of it
        if not len(str(value)):
            return -1

    # fields in personal_info (and some in site)
    if name in ("last_name", "first_names", "company",
                "phone", "fax", "email", "title"):
        self.info[name] = value[:128]
        updated = 1
    elif name == "prefix":
        known = ["Mr.", "Mrs.", "Ms.", "Dr.", "Hr.", "Sr.", " "]
        # populate a dictionary of valid values
        valids = UserDictCase()
        for entry in known:
            # accept each good value with and without the trailing dot
            valids[entry] = entry
            valids[entry[:-1]] = entry
        # commonly encountered values
        valids["Miss"] = "Miss"
        valids["Herr"] = "Hr."
        valids["Sig."] = "Sr."
        valids["Sir"] = "Mr."
        # normalize whatever the client sent
        if valids.has_key(value):
            self.info["prefix"] = valids[value]
        else:
            log_error("Unknown prefix value `%s'. Assumed `Mr.' instead"
                      % value)
            self.info["prefix"] = "Mr."
        updated = 1

    # fields in site
    if name in ("phone", "fax", "zip"):
        self.site[name] = value[:32]
        updated = 1
    elif name in ("city", "country", "alt_first_names", "alt_last_name",
                  "address1", "address2", "email", "last_name",
                  "first_names"):
        if name == "last_name":
            self.site["alt_last_name"] = value
        elif name == "first_names":
            self.site["alt_first_names"] = value
        else:
            self.site[name] = value[:128]
        updated = 1
    elif name == "state":
        # stupid people put stupid things in here too
        self.site[name] = value[:60]
        updated = 1

    if not updated:
        log_error("SET_INFO: Unknown info `%s' = `%s'" % (name, value))
    return 0
def set_info(self, name, value):
    """ Set a single user-info field, translating client-side field names
        into table column names.  Writes into self.info (personal info)
        and/or self.site (site info).

        Returns 0 on success, -1 when `name` is empty or a required field
        is given an empty value.  This is BUTT ugly.

        NOTE(review): near-identical duplicate of another set_info
        implementation in this file -- candidate for consolidation.
    """
    log_debug(3, name, value)
    # translation from what the client send us to real names of the fields
    # in the tables.
    mapping = {
        "first_name": "first_names",
        "position": "title",
        "title": "prefix"
    }
    if not name:
        return -1
    name = name.lower()
    if type(value) == type(""):
        value = value.strip()
    # We have to watch over carefully for different field names
    # being sent from rhn_register
    changed = 0
    # translation
    if name in mapping.keys():
        name = mapping[name]
    # Some fields can not have null string values
    if name in [
            "first_names", "last_name", "prefix",  # personal_info
            "address1", "city", "country"
    ]:  # site_info
        # we require something of it
        if len(str(value)) == 0:
            return -1
    # fields in personal_info (and some in site)
    if name in [
            "last_name", "first_names", "company", "phone", "fax",
            "email", "title"
    ]:
        self.info[name] = value[:128]
        changed = 1
    elif name == "prefix":
        values = ["Mr.", "Mrs.", "Ms.", "Dr.", "Hr.", "Sr.", " "]
        # Now populate a dictionary of valid values
        valids = UserDictCase()
        for v in values:
            # initialize from good values, with and w/o the dot
            valids[v] = v
            valids[v[:-1]] = v
        # commonly encountered values
        valids["Miss"] = "Miss"
        valids["Herr"] = "Hr."
        valids["Sig."] = "Sr."
        valids["Sir"] = "Mr."
        # Now check it out
        if valids.has_key(value):
            self.info["prefix"] = valids[value]
            changed = 1
        else:
            # unrecognized prefix: fall back to a safe default
            log_error("Unknown prefix value `%s'. Assumed `Mr.' instead"
                      % value)
            self.info["prefix"] = "Mr."
            changed = 1
    # fields in site
    if name in ["phone", "fax", "zip"]:
        self.site[name] = value[:32]
        changed = 1
    elif name in [
            "city", "country", "alt_first_names", "alt_last_name",
            "address1", "address2", "email", "last_name", "first_names"
    ]:
        if name == "last_name":
            self.site["alt_last_name"] = value
            changed = 1
        elif name == "first_names":
            self.site["alt_first_names"] = value
            changed = 1
        else:
            self.site[name] = value[:128]
            changed = 1
    elif name in ["state"]:
        # stupid people put stupid things in here too
        self.site[name] = value[:60]
        changed = 1
    if not changed:
        log_error("SET_INFO: Unknown info `%s' = `%s'" % (name, value))
    return 0
#start_init = time.time()
# (continuation of a test-fixture initializer whose "def" line is above
#  this chunk -- it reads `username`, `password` and `email`)
self.filesuploaded = False
self.options = rhnConfig.initCFG('server')
# BUGFIX: was a Python 2-only print statement (`print self.options`);
# this file already uses the function form elsewhere.
print(self.options)

# Derive unique per-run test credentials unless explicit ones were given
mytime = time.time()
self.test_username = username or ("test_username_%.3f" % mytime)
self.test_password = password or ("test_password_%.3f" % mytime)
self.test_email = email or ("%s@test_domain.com" % self.test_username)
self.channel_arch = 'unittestarch'
self.roles = ['org_admin']

rhnFlags.set('outputTransportOptions', UserDictCase())

# Bring up the full fixture: db, org, user, server, channels, up2date
self._init_db()
self._init_org()
self._init_user(self.roles)
self._init_server()
self._init_channels()
self._init_up2date()

#Sets up database connection
def _init_db(self):
    rhnSQL.initDB()

#creates an org
def _init_org(self):
    self.org_id, self.org_name, self.org_password = \
        misc_functions.create_new_org()