def read_all_fqdns():
    """Resolve the FQDNs of every non-loopback IP address on this host.

    Returns a dict of the form {"class": "FQDN", "name": [fqdn, ...]}.
    Addresses that cannot be reverse-resolved are logged and skipped.
    """
    def flatten_dict(d):
        # Flatten nested dicts into dotted keys, e.g.
        # {"eth0": {"ipaddr": x}} -> {"eth0.ipaddr": x}.
        def items():
            for key, value in d.items():
                if isinstance(value, dict):
                    for subkey, subvalue in flatten_dict(value).items():
                        yield key + "." + subkey, subvalue
                else:
                    yield key, value
        return dict(items())

    fqdns = set()
    ret = {}
    ret["class"] = "FQDN"
    interfaces = flatten_dict(read_network_interfaces())
    ips = []
    # BUGFIX: dict.iteritems() exists only on Python 2; .items() works on
    # both 2 and 3 (and this module's sibling copy already uses .items()).
    for ipv4 in [value for key, value in interfaces.items()
                 if key.endswith('ipaddr') and not key.startswith('lo')]:
        if ipv4:
            ips.append(ipv4)
    for ipv6 in [value for key, value in interfaces.items()
                 if key.endswith('ipv6') and not key.startswith('lo')]:
        if ipv6:
            # ipv6 entries are lists of {'addr': ...} dicts; take the first
            ips.append(ipv6[0]['addr'])
    for ip in ips:
        try:
            fqdns.add(socket.gethostbyaddr(ip)[0])
        except (socket.error, socket.herror, socket.gaierror, socket.timeout) as e:
            up2dateLog.initLog().log_me("Error resolving address: %s\n" % (e))
    ret["name"] = list(fqdns)
    return ret
def writeCachedLogin():
    """
    Pickle loginInfo to a file

    Returns:
    True  -- wrote loginInfo to a pickle file
    False -- did _not_ write loginInfo to a pickle file
    """
    log = up2dateLog.initLog()
    log.log_debug("writeCachedLogin() invoked")
    if not loginInfo:
        log.log_debug("writeCachedLogin() loginInfo is None, so bailing.")
        return False
    data = {'time': time.time(), 'loginInfo': loginInfo}
    pcklDir = os.path.dirname(pcklAuthFileName)
    if not os.access(pcklDir, os.W_OK):
        try:
            os.mkdir(pcklDir)
            # 0700: the auth cache dir must not be readable by other users
            os.chmod(pcklDir, int('0700', 8))
        except OSError:
            # was a bare except; mkdir/chmod only raise OSError
            log.log_me("Unable to write pickled loginInfo to %s" % pcklDir)
            return False
    # with-statement closes the file even if chmod/pickling raises
    # (the original leaked the handle on error)
    with open(pcklAuthFileName, 'wb') as pcklAuth:
        # 0600: the cached auth token is sensitive
        os.chmod(pcklAuthFileName, int('0600', 8))
        pickle.dump(data, pcklAuth)
    expireTime = data['time'] + float(loginInfo['X-RHN-Auth-Expire-Offset'])
    log.log_debug("Wrote pickled loginInfo at ", data['time'],
                  " with expiration of ", expireTime, " seconds.")
    return True
def _initialize_dmi_data():
    """ Initialize _dmi_data unless it already exist and returns it """
    global _dmi_data, _dmi_not_available
    if _dmi_data is None:
        if _dmi_not_available:
            # do not try to initialize it again and again if not available
            return None
        else:
            dmixml = dmidecode.dmidecodeXML()
            dmixml.SetResultType(dmidecode.DMIXML_DOC)
            # Get all the DMI data and prepare a XPath context
            try:
                data = dmixml.QuerySection('all')
                dmi_warn = dmi_warnings()
                if dmi_warn:
                    dmidecode.clear_warnings()
                    log = up2dateLog.initLog()
                    log.log_debug("dmidecode warnings: %s" % dmi_warn)
            except Exception:
                # was a bare except; Exception still covers the DMI decode
                # failure (can happen e.g. in a PV guest) without also
                # swallowing SystemExit/KeyboardInterrupt
                _dmi_not_available = 1
                dmi_warn = dmi_warnings()
                if dmi_warn:
                    dmidecode.clear_warnings()
                return None
            _dmi_data = data.xpathNewContext()
    return _dmi_data
def _getOSVersionAndRelease():
    """Determine the OS product as a (name, version, release) tuple.

    Probes the rpmdb in order: oraclelinux-release, redhat-release, then
    SUSE product metadata.  Raises up2dateErrors.RpmError when no release
    package can be found.  NOTE(review): the for/else blocks below rely on
    Python's loop-else semantics — with no break in the loops, the else
    always runs unless a return fired first.
    """
    osVersionRelease = None
    ts = transaction.initReadOnlyTransaction()
    for h in ts.dbMatch('Providename', "oraclelinux-release"):
        SYSRELVER = 'system-release(releasever)'
        version = sstr(h['version'])
        release = sstr(h['release'])
        # system-release(releasever) overrides the package's own version;
        # the rpm version-release then becomes the "release" field
        if SYSRELVER in (sstr(provide) for provide in h['providename']):
            provides = dict((sstr(n), sstr(v))
                            for n, v in zip(h['providename'], h['provideversion']))
            release = '%s-%s' % (version, release)
            version = provides[SYSRELVER]
        osVersionRelease = (sstr(h['name']), version, release)
        return osVersionRelease
    else:
        for h in ts.dbMatch('Providename', "redhat-release"):
            SYSRELVER = 'system-release(releasever)'
            version = sstr(h['version'])
            release = sstr(h['release'])
            if SYSRELVER in (sstr(provide) for provide in h['providename']):
                provides = dict((sstr(n), sstr(v))
                                for n, v in zip(h['providename'], h['provideversion']))
                release = '%s-%s' % (version, release)
                version = provides[SYSRELVER]
            osVersionRelease = (sstr(h['name']), version, release)
            return osVersionRelease
        else:
            # new SUSE always has a baseproduct link which point to the
            # product file of the first installed product (the OS)
            # all rpms containing a product must provide "product()"
            # search now for the package providing the base product
            baseproduct = '/etc/products.d/baseproduct'
            if os.path.exists(baseproduct):
                bp = os.path.abspath(os.path.join(os.path.dirname(baseproduct),
                                                  os.readlink(baseproduct)))
                for h in ts.dbMatch('Providename', "product()"):
                    if bstr(bp) in h['filenames']:
                        osVersionRelease = (sstr(h['name']), sstr(h['version']),
                                            sstr(h['release']))
                        # zypper requires a exclusive lock on the rpmdb. So we need
                        # to close it here.
                        ts.ts.closeDB()
                        return osVersionRelease
            else:
                # for older SUSE versions we need to search for distribution-release
                # package which also has /etc/SuSE-release file
                for h in ts.dbMatch('Providename', "distribution-release"):
                    osVersionRelease = (sstr(h['name']), sstr(h['version']),
                                        sstr(h['release']))
                    if bstr('/etc/SuSE-release') in h['filenames']:
                        # zypper requires a exclusive lock on the rpmdb. So we need
                        # to close it here.
                        ts.ts.closeDB()
                        return osVersionRelease
            # nothing matched: the system is unidentifiable
            log = up2dateLog.initLog()
            log.log_me("Error: Could not determine what version of Linux you are running. "\
                       "Check if the product is installed correctly. Aborting.")
            raise up2dateErrors.RpmError(
                "Could not determine what version of Linux you "\
                "are running.\nIf you get this error, try running \n\n"\
                "\t\trpm --rebuilddb\n\n")
def updateLoginInfo(timeout=None):
    """Force a fresh login and return the refreshed global loginInfo.

    Raises up2dateErrors.AuthenticationError when no login info could be
    obtained.
    """
    log = up2dateLog.initLog()
    log.log_me("updateLoginInfo() login info")
    # login() refreshes the module-level loginInfo object as a side effect
    login(forceUpdate=True, timeout=timeout)
    if loginInfo:
        return loginInfo
    raise up2dateErrors.AuthenticationError("Unable to authenticate")
def updatePackageProfile(timeout=None):
    """ get a list of installed packages and send it to rhnServer """
    up2dateLog.initLog().log_me("Updating package profile")
    packages = pkgUtils.getInstalledPackageList(getArch=1)
    server = rhnserver.RhnServer(timeout=timeout)
    # older satellites and hosted can't take the extended (hash) profile
    # format, so fall back to the legacy list format for them
    if not server.capabilities.hasCapability('xmlrpc.packages.extended_profile', 2):
        packages = convertPackagesFromHashToList(packages)
    server.registration.update_packages(up2dateAuth.getSystemId(), packages)
def exceptionHandler(type, value, tb):
    """sys.excepthook-style handler: print a short notice to stderr and log the traceback."""
    log = up2dateLog.initLog()
    write = sys.stderr.write
    write(utf8_encode(_("An error has occurred:") + "\n"))
    if hasattr(value, "errmsg"):
        # the client's own error classes carry a user-friendly message
        write(utf8_encode(value.errmsg) + "\n")
    else:
        write(utf8_encode(str(type) + "\n"))
    log.log_exception(type, value, tb)
    write(utf8_encode(_("See /var/log/up2date for more information") + "\n"))
def exceptionHandler(type, value, tb):
    """sys.excepthook-style handler: report the error on stderr and log the traceback."""
    log = up2dateLog.initLog()
    emit = sys.stderr.write
    emit(sstr(_("An error has occurred:") + "\n"))
    if hasattr(value, "errmsg"):
        # prefer the error's own human-readable message when present
        emit(sstr(value.errmsg) + "\n")
    else:
        emit(sstr(str(type) + "\n"))
    log.log_exception(type, value, tb)
    emit(sstr(_("See /var/log/up2date for more information") + "\n"))
def __exceptionHandler(type, value, tb):
    """Fallback exception handler: print a notice to stdout and log the traceback."""
    log = up2dateLog.initLog()
    # parenthesized single-argument print is valid as both a Python 2
    # statement and a Python 3 function call (the bare statement form
    # is a SyntaxError on Python 3)
    print(_("An error has occurred:"))
    if hasattr(value, "errmsg"):
        # client error classes carry a user-friendly message
        print(value.errmsg)
    else:
        print(type)
    log.log_exception(type, value, tb)
    print(_("See /var/log/up2date for more information"))
def _request1(self, methodname, params):
    """Issue an XML-RPC request, failing over through the server list.

    Retries against the next configured serverURL on unexpected errors;
    redirection and protocol faults propagate immediately.  Exits the
    process on a BadStatusLine (server unavailable).
    """
    self.log = up2dateLog.initLog()
    while 1:
        try:
            ret = self._request(methodname, params)
        except rpclib.InvalidRedirectionError:
            raise
        except xmlrpclib.Fault:
            raise
        except httplib.BadStatusLine:
            self.log.log_me("Error: Server Unavailable. Please try later.")
            stdoutMsgCallback(
                _("Error: Server Unavailable. Please try later."))
            sys.exit(-1)
        except:
            # any other failure: fail over to the next server in the list
            server = self.serverList.next()
            if server == None:
                # since just because we failed, the server list could
                # change (aka, firstboot, they get an option to reset the
                # the server configuration) so reset the serverList
                self.serverList.resetServerIndex()
                raise
            msg = "An error occurred talking to %s:\n" % self._host
            msg = msg + "%s\n%s\n" % (sys.exc_info()[0], sys.exc_info()[1])
            msg = msg + "Trying the next serverURL: %s\n" % self.serverList.server()
            self.log.log_me(msg)
            # try a different url
            # use the next serverURL
            parse_res = urlparse.urlsplit(self.serverList.server())
            typ = parse_res[0]  # scheme
            self._host = parse_res[1]  # netloc
            self._handler = parse_res[2]  # path
            typ = typ.lower()
            if typ not in ("http", "https"):
                raise_with_tb(
                    rpclib.InvalidRedirectionError(
                        "Redirected to unsupported protocol %s" % typ))
            self._orig_handler = self._handler
            self._type = typ
            self._uri = self.serverList.server()
            if not self._handler:
                self._handler = "/RPC2"
            self._allow_redirect = 1
            continue
        # if we get this far, we succedded
        break
    return ret
def _testRhnLogin(self):
    """Try to refresh the RHN login; return True on success, False on any
    known login failure (capability mismatch, bad credentials, or a
    server-side error, which is additionally logged)."""
    try:
        up2dateAuth.updateLoginInfo()
        return True
    except up2dateErrors.ServerCapabilityError:
        # parenthesized print works as both a Py2 statement and a Py3
        # function call (the bare statement form breaks on Python 3)
        print(sys.exc_info()[1])
        return False
    except up2dateErrors.AuthenticationError:
        return False
    except up2dateErrors.RhnServerException:
        log = up2dateLog.initLog()
        log.log_me('There was a RhnServerException while testing login:\n')
        log.log_exception(*sys.exc_info())
        return False
def _testRhnLogin(self):
    """Check that we can authenticate against the RHN server.

    Returns True when updateLoginInfo() succeeds, False on any known
    login-related failure; a server-side exception is also logged.
    """
    try:
        up2dateAuth.updateLoginInfo()
        return True
    except up2dateErrors.ServerCapabilityError:
        # surface capability problems to the user on stdout
        print(sys.exc_info()[1])
        return False
    except up2dateErrors.AuthenticationError:
        # bad/expired credentials: fail without extra noise
        return False
    except up2dateErrors.RhnServerException:
        log = up2dateLog.initLog()
        log.log_me('There was a RhnServerException while testing login:\n')
        log.log_exception(*sys.exc_info())
        return False
def _get_dmi_data_from_sysfs(path):
    """Read a single DMI value from sysfs.

    *path* is a dmidecode XML XPath; it is translated to the matching
    attribute file under /sys/devices/virtual/dmi/id.  Returns the stripped
    file content, or "" when the path is unknown or the file is unreadable.
    """
    sysfs_dir = "/sys/devices/virtual/dmi/id"
    # dmidecode XML path -> sysfs attribute file name
    path_map = {
        "/dmidecode/SystemInfo/SystemUUID[not(@unavailable='1')]": "product_uuid",
        "/dmidecode/SystemInfo/SystemUUID": "product_uuid",
        "/dmidecode/BIOSinfo/Vendor": "bios_vendor",
        "/dmidecode/BIOSinfo/Version": "bios_version",
        "/dmidecode/BIOSinfo/ReleaseDate": "bios_date",
        "/dmidecode/SystemInfo/ProductName": "product_name",
        "/dmidecode/SystemInfo/Version": "product_version",
        "/dmidecode/SystemInfo/SerialNumber": "product_serial",
        "/dmidecode/BaseBoardInfo/Manufacturer": "board_vendor",
        "/dmidecode/BaseBoardInfo/SerialNumber": "board_serial",
        "/dmidecode/ChassisInfo/SerialNumber": "chassis_serial",
        "/dmidecode/ChassisInfo/AssetTag": "chassis_asset_tag",
        "/dmidecode/SystemInfo/Manufacturer": "sys_vendor",
        "/dmidecode/SystemInfo/SKUnumber": "product_sku",
        "/dmidecode/SystemInfo/Family": "product_family"
    }
    try:
        value_file = os.path.join(sysfs_dir, path_map[path])
        with open(value_file) as attr:
            return attr.read().strip()
    except KeyError:
        up2dateLog.initLog().log_debug("No mapping for DMI XML path: %s\n" % path)
    except Exception as e:
        up2dateLog.initLog().log_debug(
            "Cannot read DMI value from sysfs: %s. Exception: %s\n" % (path, e))
    return ""
def read_all_fqdns():
    """Collect the FQDNs of all non-loopback addresses on this machine.

    Returns {"class": "FQDN", "name": [fqdn, ...]}; addresses that fail
    reverse resolution are logged and skipped.
    """
    def flatten_dict(d):
        # recursively flatten nested dicts into dotted keys, e.g.
        # {"eth0": {"ipaddr": x}} -> {"eth0.ipaddr": x}
        flat = {}
        for key, value in d.items():
            if isinstance(value, dict):
                for subkey, subvalue in flatten_dict(value).items():
                    flat[key + "." + subkey] = subvalue
            else:
                flat[key] = value
        return flat

    interfaces = flatten_dict(read_network_interfaces())
    ipv4_addrs = [v for k, v in interfaces.items()
                  if k.endswith('ipaddr') and not k.startswith('lo') and v]
    ipv6_entries = [v for k, v in interfaces.items()
                    if k.endswith('ipv6') and not k.startswith('lo') and v]
    # ipv6 entries are lists of {'addr': ...} dicts; take the first address
    ips = ipv4_addrs + [entry[0]['addr'] for entry in ipv6_entries]

    fqdns = set()
    for ip in ips:
        try:
            fqdns.add(socket.gethostbyaddr(ip)[0])
        except (socket.error, socket.herror, socket.gaierror, socket.timeout) as e:
            up2dateLog.initLog().log_me("Error resolving address: %s\n" % (e))
    return {"class": "FQDN", "name": list(fqdns)}
def initiate(kickstart_host, base, extra_append, static_device="",
             system_record="", preserve_files=None, cache_only=False):
    """Kick off a spacewalkkoan-driven kickstart and return its result.

    The default for preserve_files was a mutable list literal, which is
    shared across calls in Python; a None sentinel is equivalent and safe.
    NOTE(review): cache_only is accepted but not forwarded to
    spacewalkkoan.initiate — presumably kept for API compatibility; confirm
    against callers.
    """
    if preserve_files is None:
        preserve_files = []
    log = up2dateLog.initLog()
    log.log_me("initiating spacewalkkoan kickstart")
    return spacewalkkoan.initiate(kickstart_host, base,
                                  extra_append=extra_append,
                                  static_device=static_device,
                                  system_record=system_record,
                                  preserve_files=preserve_files)
def _request1(self, methodname, params):
    """Issue an XML-RPC request, failing over through the server list.

    Retries against the next configured serverURL on unexpected errors;
    redirection and protocol faults propagate immediately.  Exits the
    process on a BadStatusLine (server unavailable).
    """
    self.log = up2dateLog.initLog()
    while 1:
        try:
            ret = self._request(methodname, params)
        except rpclib.InvalidRedirectionError:
            raise
        except xmlrpclib.Fault:
            raise
        except httplib.BadStatusLine:
            self.log.log_me("Error: Server Unavailable. Please try later.")
            stdoutMsgCallback(
                _("Error: Server Unavailable. Please try later."))
            sys.exit(-1)
        except:
            # any other failure: fail over to the next server in the list
            server = self.serverList.next()
            if server == None:
                # since just because we failed, the server list could
                # change (aka, firstboot, they get an option to reset the
                # the server configuration) so reset the serverList
                self.serverList.resetServerIndex()
                raise
            msg = "An error occurred talking to %s:\n" % self._host
            msg = msg + "%s\n%s\n" % (sys.exc_info()[0], sys.exc_info()[1])
            msg = msg + "Trying the next serverURL: %s\n" % self.serverList.server()
            self.log.log_me(msg)
            # try a different url
            # use the next serverURL
            parse_res = urlparse.urlsplit(self.serverList.server())
            typ = parse_res.scheme
            self._host = parse_res.netloc
            self._handler = parse_res.path
            typ = typ.lower()
            if typ not in ("http", "https"):
                raise_with_tb(rpclib.InvalidRedirectionError(
                    "Redirected to unsupported protocol %s" % typ))
            self._orig_handler = self._handler
            self._type = typ
            self._uri = self.serverList.server()
            if not self._handler:
                self._handler = "/RPC2"
            self._allow_redirect = 1
            continue
        # if we get this far, we succedded
        break
    return ret
def readCachedLogin():
    """
    Read pickle info from a file
    Caches authorization info for connecting to the server.

    Returns True when a valid, unexpired cached login was loaded into the
    global loginInfo; False otherwise.
    """
    log = up2dateLog.initLog()
    log.log_debug("readCachedLogin invoked")
    if not os.access(pcklAuthFileName, os.R_OK):
        log.log_debug("Unable to read pickled loginInfo at: %s" % pcklAuthFileName)
        return False
    # with-statement closes the file on every path (the original leaked the
    # handle when pickle raised an unexpected exception)
    with open(pcklAuthFileName, 'rb') as pcklAuth:
        try:
            data = pickle.load(pcklAuth)
        except (EOFError, ValueError):
            log.log_debug("Unexpected EOF. Probably an empty file, \
regenerate auth file")
            return False
    # Check if system_id has changed
    try:
        idVer = rpclib.xmlrpclib.loads(getSystemId())[0][0]['system_id']
        cidVer = "ID-%s" % data['loginInfo']['X-RHN-Server-Id']
        if idVer != cidVer:
            log.log_debug("system id version changed: %s vs %s" % (idVer, cidVer))
            return False
    except Exception:
        # best effort (was a bare except): a malformed systemid must not
        # invalidate an otherwise-usable cache
        pass
    createdTime = data['time']
    li = data['loginInfo']
    currentTime = time.time()
    expireTime = createdTime + float(li['X-RHN-Auth-Expire-Offset'])
    # Check if expired, offset is stored in "X-RHN-Auth-Expire-Offset"
    log.log_debug("Checking pickled loginInfo, currentTime=", currentTime,
                  ", createTime=", createdTime, ", expire-offset=",
                  float(li['X-RHN-Auth-Expire-Offset']))
    if currentTime > expireTime:
        log.log_debug("Pickled loginInfo has expired, created = %s, expire = %s."
                      % (createdTime, expireTime))
        return False
    _updateLoginInfo(li)
    log.log_debug("readCachedLogin(): using pickled loginInfo set to expire at ",
                  expireTime)
    return True
def readCachedLogin():
    """
    Read pickle info from a file
    Caches authorization info for connecting to the server.

    Returns True when a valid, unexpired cached login was loaded into the
    global loginInfo; False otherwise.
    """
    log = up2dateLog.initLog()
    log.log_debug("readCachedLogin invoked")
    if not os.access(pcklAuthFileName, os.R_OK):
        log.log_debug("Unable to read pickled loginInfo at: %s" % pcklAuthFileName)
        return False
    pcklAuth = open(pcklAuthFileName, 'rb')
    try:
        data = pickle.load(pcklAuth)
    except (EOFError, ValueError):
        # empty or truncated cache file: treat as "no cache"
        log.log_debug("Unexpected EOF. Probably an empty file, \
regenerate auth file")
        pcklAuth.close()
        return False
    pcklAuth.close()
    # Check if system_id has changed
    try:
        idVer = rpclib.xmlrpclib.loads(getSystemId())[0][0]['system_id']
        cidVer = "ID-%s" % data['loginInfo']['X-RHN-Server-Id']
        if idVer != cidVer:
            log.log_debug("system id version changed: %s vs %s" % (idVer, cidVer))
            return False
    except:
        # best effort: a malformed systemid must not invalidate the cache
        pass
    createdTime = data['time']
    li = data['loginInfo']
    currentTime = time.time()
    expireTime = createdTime + float(li['X-RHN-Auth-Expire-Offset'])
    #Check if expired, offset is stored in "X-RHN-Auth-Expire-Offset"
    log.log_debug("Checking pickled loginInfo, currentTime=", currentTime,
                  ", createTime=", createdTime, ", expire-offset=",
                  float(li['X-RHN-Auth-Expire-Offset']))
    if (currentTime > expireTime):
        log.log_debug("Pickled loginInfo has expired, created = %s, expire = %s." \
                      %(createdTime, expireTime))
        return False
    _updateLoginInfo(li)
    log.log_debug("readCachedLogin(): using pickled loginInfo set to expire at ", expireTime)
    return True
def get_hal_system_and_smbios():
    """Return the 'system.*' device properties merged with SMBIOS data.

    Returns {} when the property source (gudev or HAL) cannot be read.
    """
    try:
        if using_gudev:
            props = get_computer_info()
        else:
            props = get_hal_computer().GetAllProperties()
    except Exception:
        msg = "Error reading system and smbios information: %s\n" % (sys.exc_info()[1])
        up2dateLog.initLog().log_debug(msg)
        return {}
    # keep only the system.* keys, normalized to unicode
    system_and_smbios = dict((ustr(key), ustr(props[key]))
                             for key in props if key.startswith('system'))
    system_and_smbios.update(get_smbios())
    return system_and_smbios
def update_count(problem_dir):
    """Report the crash count of an ABRT problem directory to the server.

    Returns 1 on success, 0 when the directory has no 'count' file, and -1
    when the path is not a directory or the server lacks the 'abrt'
    capability.
    """
    problem_dir = os.path.normpath(os.path.abspath(problem_dir))
    basename = os.path.basename(problem_dir)
    log = up2dateLog.initLog()

    if not (os.path.exists(problem_dir) and os.path.isdir(problem_dir)):
        log.log_me("The specified path [%s] is not a valid directory." % problem_dir)
        return -1

    rhn = rhnserver.RhnServer()
    if not rhn.capabilities.hasCapability('abrt'):
        return -1

    count_file = os.path.join(problem_dir, 'count')
    if not (os.path.exists(count_file) and os.path.isfile(count_file)):
        log.log_me("The problem directory [%s] does not contain any crash count information." % problem_dir)
        return 0

    rhn.abrt.update_crash_count(up2dateAuth.getSystemId(), basename,
                                _readline(count_file))
    return 1
def login(systemId=None, forceUpdate=False, timeout=None):
    """Authenticate against the up2date server and return loginInfo.

    Uses the cached (pickled) login when available unless forceUpdate is
    set.  On a fresh login the module-level loginInfo is updated and the
    result is written back to the cache.  Returns None when no systemId is
    available.
    """
    log = up2dateLog.initLog()
    log.log_debug("login(forceUpdate=%s) invoked" % (forceUpdate))
    if not forceUpdate and not loginInfo:
        if readCachedLogin():
            return loginInfo
    server = rhnserver.RhnServer(timeout=timeout)
    # send up the capabality info
    headerlist = clientCaps.caps.headerFormat()
    for (headerName, value) in headerlist:
        server.add_header(headerName, value)
    if systemId == None:
        systemId = getSystemId()
    if not systemId:
        return None
    maybeUpdateVersion()
    log.log_me("logging into up2date server")
    li = server.up2date.login(systemId)
    # figure out if were missing any needed caps
    server.capabilities.validate()
    _updateLoginInfo(li)  #update global var, loginInfo
    writeCachedLogin()  #pickle global loginInfo
    if loginInfo:
        log.log_me("successfully retrieved authentication token "
                   "from up2date server")
    log.log_debug("logininfo:", loginInfo)
    return loginInfo
def runTransaction(ts, rpmCallback, transdir=None):
    """Run the prepared rpm transaction *ts*, reporting progress via rpmCallback.

    transdir defaults to the configured storageDir.  Raises DependencyError
    when unresolved dependencies remain, TransactionError when transaction
    elements fail, and RpmError when rpm fails without per-element errors.
    """
    cfg = config.initUp2dateConfig()
    if transdir is None:
        transdir = cfg['storageDir']
    deps = ts.check()
    if deps:
        raise up2dateErrors.DependencyError(
            "Dependencies should have already been resolved, "\
            "but they are not.", deps)
    rc = ts.run(rpmCallback, transdir)
    if rc:
        errors = "\n"
        for e in rc:
            try:
                errors = errors + e[1] + "\n"
            except (TypeError, IndexError):
                # element is not the expected (code, message) pair
                errors = errors + str(e) + "\n"
        raise up2dateErrors.TransactionError(
            "Failed running transaction of packages: %s" % errors, deps=rc)
    elif isinstance(rc, list) and not rc:
        # rc == [] means rpm failed without reporting per-package errors;
        # let the user know whats wrong
        log = up2dateLog.initLog()
        log.log_me("Failed running rpm transaction - %pre %pro failure ?.")
        raise up2dateErrors.RpmError("Failed running rpm transaction")
def Hardware():
    """Probe the system and return a list of hardware description dicts.

    Gathers device info (via gudev or HAL), CPU, memory, DMI, install and
    network data.  Each probe is individually guarded so one failure does
    not abort the whole scan.
    """
    if using_gudev:
        allhw = get_devices()
    else:
        hal_status, dbus_status = check_hal_dbus_status()
        hwdaemon = 1
        if hal_status or dbus_status:
            # if status != 0 haldaemon or messagebus service not running.
            # set flag and dont try probing hardware and DMI info
            # and warn the user.
            log = up2dateLog.initLog()
            msg = "Warning: haldaemon or messagebus service not running. Cannot probe hardware and DMI information.\n"
            log.log_me(msg)
            hwdaemon = 0
        allhw = []
        if hwdaemon:
            try:
                ret = read_hal()
                if ret:
                    allhw = ret
            except:
                # bz253596 : Logging Dbus Error messages instead of printing on stdout
                log = up2dateLog.initLog()
                msg = "Error reading hardware information: %s\n" % (sys.exc_info()[0])
                log.log_me(msg)
    # all others return individual arrays
    # cpu info
    try:
        ret = read_cpuinfo()
        if ret:
            allhw.append(ret)
    except:
        print(_("Error reading cpu information:"), sys.exc_info()[0])
    # memory size info
    try:
        ret = read_memory()
        if ret:
            allhw.append(ret)
    except:
        print(_("Error reading system memory information:"), sys.exc_info()[0])
    cfg = config.initUp2dateConfig()
    if not cfg["skipNetwork"]:
        # minimal networking info
        try:
            ret = read_network()
            if ret:
                allhw.append(ret)
        except:
            print(_("Error reading networking information:"), sys.exc_info()[0])
    # dont like catchall exceptions but theres not
    # really anything useful we could do at this point
    # and its been trouble prone enough
    # minimal DMI info
    try:
        ret = read_dmi()
        if ret:
            allhw.append(ret)
    except:
        # bz253596 : Logging Dbus Error messages instead of printing on stdout
        log = up2dateLog.initLog()
        msg = "Error reading DMI information: %s\n" % (sys.exc_info()[0])
        log.log_me(msg)
    try:
        ret = read_installinfo()
        if ret:
            allhw.append(ret)
    except:
        print(_("Error reading install method information:"), sys.exc_info()[0])
    if not cfg["skipNetwork"]:
        try:
            ret = read_network_interfaces()
            if ret:
                allhw.append(ret)
        except:
            print(_("Error reading network interface information:"), sys.exc_info()[0])
    # all Done.
    return allhw
def Hardware():
    """Probe the system and return a list of hardware description dicts.

    Gathers device info (via gudev or HAL), CPU, memory, DMI, install and
    network data.  Each probe is individually guarded so one failure does
    not abort the whole scan.
    """
    if using_gudev:
        allhw = get_devices()
    else:
        hal_status, dbus_status = check_hal_dbus_status()
        hwdaemon = 1
        if hal_status or dbus_status:
            # if status != 0 haldaemon or messagebus service not running.
            # set flag and dont try probing hardware and DMI info
            # and warn the user.
            log = up2dateLog.initLog()
            msg = "Warning: haldaemon or messagebus service not running. Cannot probe hardware and DMI information.\n"
            log.log_me(msg)
            hwdaemon = 0
        allhw = []
        if hwdaemon:
            try:
                ret = read_hal()
                if ret:
                    allhw = ret
            except:
                # bz253596 : Logging Dbus Error messages instead of printing on stdout
                log = up2dateLog.initLog()
                msg = "Error reading hardware information: %s\n" % (sys.exc_info()[0])
                log.log_me(msg)
    # all others return individual arrays
    # cpu info
    try:
        ret = read_cpuinfo()
        if ret:
            allhw.append(ret)
    except:
        print(_("Error reading cpu information:"), sys.exc_info()[0])
    # memory size info
    try:
        ret = read_memory()
        if ret:
            allhw.append(ret)
    except:
        print(_("Error reading system memory information:"), sys.exc_info()[0])
    cfg = config.initUp2dateConfig()
    if not cfg["skipNetwork"]:
        # minimal networking info
        try:
            ret = read_network()
            if ret:
                allhw.append(ret)
        except:
            print(_("Error reading networking information:"), sys.exc_info()[0])
    # dont like catchall exceptions but theres not
    # really anything useful we could do at this point
    # and its been trouble prone enough
    # minimal DMI info
    try:
        ret = read_dmi()
        if ret:
            allhw.append(ret)
    except:
        # bz253596 : Logging Dbus Error messages instead of printing on stdout
        log = up2dateLog.initLog()
        msg = "Error reading DMI information: %s\n" % (sys.exc_info()[0])
        log.log_me(msg)
    try:
        ret = read_installinfo()
        if ret:
            allhw.append(ret)
    except:
        print(_("Error reading install method information:"), sys.exc_info()[0])
    if not cfg["skipNetwork"]:
        try:
            ret = read_network_interfaces()
            if ret:
                allhw.append(ret)
        except:
            print(_("Error reading network interface information:"), sys.exc_info()[0])
    # all Done.
    return allhw
# Daniel Benamy <*****@*****.**> import sys import os import gettext t = gettext.translation('rhn-client-tools', fallback=True) # Python 3 translations don't have a ugettext method if not hasattr(t, 'ugettext'): t.ugettext = t.gettext _ = t.ugettext sys.path.append("/usr/share/rhn/") from up2date_client import up2dateLog up2dateLog.initLog().set_app_name('rhn_register') from up2date_client import up2dateAuth from up2date_client import rhncli from up2date_client import tui from up2date_client import up2dateErrors class RhnRegister(rhncli.RhnCli): """Runs rhn_register. Can run it in gui or tui mode depending on availablility of gui, DISPLAY environment variable, and --nox switch. """ def __init__(self): super(RhnRegister, self).__init__() self.optparser.add_option("--nox", action="store_true", default=False, help=_("Do not attempt to use X"))
def findHostByRoute():
    """ returns [hostname, intf, intf6]

    Where hostname is you FQDN of this machine.
    And intf is numeric IPv4 address. And intf6 is IPv6 address.

    The hostname is determined from the socket used to reach the server,
    then overridden by /etc/hostname or /etc/sysconfig/network when present.
    """
    cfg = config.initUp2dateConfig()
    sl = config.getServerlURL()
    st = {'https': 443, 'http': 80}
    hostname = None
    intf = None
    intf6 = None
    etchostname = False
    sockethostname = None
    for serverUrl in sl:
        server = serverUrl.split('/')[2]
        servertype = serverUrl.split(':')[0]
        port = st[servertype]
        # connect over both families to learn our outgoing v4 and v6 addresses
        for family in (AF_INET6, AF_INET):
            try:
                s = socket.socket(family)
            except socket.error:
                continue
            if cfg['enableProxy']:
                server_port = config.getProxySetting()
                (server, port) = server_port.split(':')
                port = int(port)
            try:
                s.settimeout(5)
                s.connect((server, port))
                intf_tmp = s.getsockname()[0]
                if family == AF_INET:
                    intf = intf_tmp
                else:
                    intf6 = intf_tmp
                hostname_tmp = socket.getfqdn(intf_tmp)
                # getfqdn returns the address itself when no name resolves
                if hostname_tmp != intf_tmp:
                    hostname = hostname_tmp
                    sockethostname = hostname_tmp
            except socket.error:
                s.close()
                continue
            s.close()
    # Override hostname with the value from /etc/hostname
    if os.path.isfile("/etc/hostname") and os.access("/etc/hostname", os.R_OK):
        hostnameinfo = open("/etc/hostname", "r").readlines()
        # Warn if /etc/hostname contains more than one entry (bsc#929979)
        if len(hostnameinfo) > 1:
            log = up2dateLog.initLog()
            log.log_me("/etc/hostname contains more than one entry! Using first one.")
        if len(hostnameinfo):
            info = hostnameinfo[0]
        else:
            info = ""
        # ".site" is now an allowed TLD. Catch local installations by checking
        # for pseudo TLD ".suse" and require more than the domainname for ".site"
        # so "local.site" still can be identified as local installation (bsc#923990)
        if len(info):
            tmpval = info.strip().split('.')
            if not info.strip().endswith(".suse"):
                if not info.strip().endswith(".site") or len(tmpval) > 2:
                    hostname = info.strip()
                    etchostname = True
    # Override hostname with the one in /etc/sysconfig/network
    # for bz# 457953
    elif os.path.isfile("/etc/sysconfig/network") and os.access("/etc/sysconfig/network", os.R_OK):
        networkinfo = open("/etc/sysconfig/network", "r").readlines()
        for info in networkinfo:
            if not len(info):
                continue
            vals = info.split('=')
            if len(vals) <= 1:
                continue
            if vals[0].strip() == "HOSTNAME":
                # /etc/sysconfig/network is of shell syntax,
                # so values can be quoted
                hostname = ''.join(vals[1:]).strip('"\' \t\n')
                etchostname = False
                break
    # /etc/hostname doesn't contain a fully qualified hostname
    # try to find out the domain
    if etchostname and "." not in hostname:
        fqdn = socket.getfqdn(hostname)
        if "." in fqdn:
            domain = fqdn.split('.', 1)[1]
            aliasfqdn = "{0}.{1}".format(hostname, domain)
            try:
                # only keep the alias FQDN if it actually resolves
                socket.gethostbyname(aliasfqdn)
                hostname = aliasfqdn
            except socket.error:
                hostname = fqdn
                log = up2dateLog.initLog()
                log.log_me("Could not resolve /etc/hostname alias to {0}. Falling back to {1}".format(aliasfqdn, fqdn))
        else:
            log = up2dateLog.initLog()
            log.log_me("Got an invalid FQDN {0} for /etc/hostname. Falling back to {1}".format(fqdn, sockethostname))
            hostname = sockethostname
    if hostname == None or hostname == 'localhost.localdomain':
        hostname = "unknown"
    return hostname, intf, intf6
def doCall(method, *args, **kwargs):
    """Invoke an XML-RPC *method* with retry/backoff and error translation.

    Retries up to cfg["networkRetries"] times on socket errors and on
    incomplete responses (5 second sleep between attempts); translates
    transport and protocol failures into up2dateErrors exceptions.
    """
    log = up2dateLog.initLog()
    log.log_debug("rpcServer: Calling XMLRPC %s" % method.__dict__['_Method__name'])
    cfg = config.initUp2dateConfig()
    ret = None
    attempt_count = 1
    try:
        attempts = int(cfg["networkRetries"])
    except ValueError:
        attempts = 1
    if attempts <= 0:
        attempts = 1
    while 1:
        failure = 0
        ret = None
        try:
            ret = method(*args, **kwargs)
        except KeyboardInterrupt:
            raise_with_tb(
                up2dateErrors.CommunicationError(
                    _("Connection aborted by the user")))
        # if we get a socket error, keep tryingx2
        except (socket.error, SSL.socket_error):
            log.log_me("A socket error occurred: %s, attempt #%s" %
                       (sys.exc_info()[1], attempt_count))
            if attempt_count >= attempts:
                e = sys.exc_info()[1]
                if len(e.args) > 1:
                    raise_with_tb(up2dateErrors.CommunicationError(e.args[1]))
                else:
                    raise_with_tb(up2dateErrors.CommunicationError(e.args[0]))
            else:
                failure = 1
        except httplib.IncompleteRead:
            print("httplib.IncompleteRead")
            raise_with_tb(
                up2dateErrors.CommunicationError("httplib.IncompleteRead"))
        except urllib2.HTTPError:
            e = sys.exc_info()[1]
            msg = "\nAn HTTP error occurred:\n"
            msg = msg + "URL: %s\n" % e.filename
            msg = msg + "Status Code: %s\n" % e.code
            msg = msg + "Error Message: %s\n" % e.msg
            log.log_me(msg)
            raise_with_tb(up2dateErrors.CommunicationError(msg))
        except xmlrpclib.ProtocolError:
            e = sys.exc_info()[1]
            log.log_me("A protocol error occurred: %s , attempt #%s," %
                       (e.errmsg, attempt_count))
            if e.errcode == 404:
                log.log_me("Could not find URL, %s" % (e.url))
                log.log_me("Check server name and/or URL, then retry\n")
            # server-specific error codes travel in the response headers
            (errCode, errMsg) = rpclib.reportError(e.headers)
            reset = 0
            if abs(errCode) == 34:
                log.log_me("Auth token timeout occurred\n errmsg: %s" % errMsg)
                # this calls login, which in tern calls doCall (ie,
                # this function) but login should never get a 34, so
                # should be safe from recursion
                from up2date_client import up2dateAuth
                up2dateAuth.updateLoginInfo()
            # the servers are being throttle to pay users only, catch the
            # exceptions and display a nice error message
            if abs(errCode) == 51:
                log.log_me(_("Server has refused connection due to high load"))
                raise_with_tb(up2dateErrors.CommunicationError(e.errmsg))
            # if we get a 404 from our server, thats pretty
            # fatal... no point in retrying over and over. Note that
            # errCode == 17 is specific to our servers, if the
            # serverURL is just pointing somewhere random they will
            # get a 0 for errcode and will raise a CommunicationError
            if abs(errCode) == 17:
                #in this case, the args are the package string, so lets try to
                # build a useful error message
                if type(args[0]) == type([]):
                    pkg = args[0]
                else:
                    pkg = args[1]
                if type(pkg) == type([]):
                    pkgName = "%s-%s-%s.%s" % (pkg[0], pkg[1], pkg[2], pkg[4])
                else:
                    pkgName = pkg
                msg = "File Not Found: %s\n%s" % (pkgName, errMsg)
                log.log_me(msg)
                raise_with_tb(up2dateErrors.FileNotFoundError(msg))
            if not reset:
                if attempt_count >= attempts:
                    raise_with_tb(up2dateErrors.CommunicationError(e.errmsg))
                else:
                    failure = 1
        except xmlrpclib.ResponseError:
            raise_with_tb(
                up2dateErrors.CommunicationError(
                    "Broken response from the server."))
        if ret != None:
            break
        else:
            failure = 1
        if failure:
            # rest for five seconds before trying again
            time.sleep(5)
            attempt_count = attempt_count + 1
        if attempt_count > attempts:
            raise up2dateErrors.CommunicationError(
                "The data returned from the server was incomplete")
    return ret
def __init__(self, errmsg):
    """Build the plugin error: normalize the message to unicode and
    prefix it for display, keeping a logger handy for subclasses."""
    message = ustr(errmsg)
    PmBaseError.__init__(self, message)
    # premsg is supplied by the concrete error subclass
    self.value = 'rhn-plugin: ' + self.premsg + message
    self.log = up2dateLog.initLog()
def solveDep(self, unknowns, availList, msgCallback=None,
             progressCallback=None, refreshCallback=None):
    """Resolve unknown dependencies against the list of available packages.

    unknowns  -- list of unresolved dependency strings
    availList -- list of available package tuples; slice [:4] is
                 (name, version, release, epoch), index 4 is arch
    Returns (reslist, depToPkg): packages selected to satisfy the deps,
    and a dep -> chosen-package mapping.
    """
    self.cfg = config.initUp2dateConfig()
    self.log = up2dateLog.initLog()
    self.log.log_me("solving dep for: %s" % unknowns)
    self.refreshCallback = refreshCallback
    self.progressCallback = progressCallback
    self.msgCallback = msgCallback
    self.availList = availList
    availList.sort()
    # index the available packages by their (name, version, release, epoch)
    self.availListHash = {}
    for p in self.availList:
        if self.availListHash.has_key(tuple(p[:4])):
            self.availListHash[tuple(p[:4])].append(p)
        else:
            self.availListHash[tuple(p[:4])] = [p]
    # getSolutions fills self.retDict: dep -> list of candidate packages
    self.retDict = {}
    self.getSolutions(unknowns, progressCallback=self.progressCallback,
                      msgCallback=self.msgCallback)
    reslist = []
    self.depToPkg = DictOfLists()
    self.depsNotAvailable = DictOfLists()
    # self.depToPkg = {}
    #FIXME: this should be cached, I dont really need to query the db
    # for this everytime
    self.installedPkgList = rpmUtils.getInstalledPackageList(getArch=1)
    # index installed packages by package name
    self.installedPkgHash = {}
    for pkg in self.installedPkgList:
        if self.installedPkgHash.has_key(pkg[0]):
            self.installedPkgHash[pkg[0]].append(pkg)
        else:
            self.installedPkgHash[pkg[0]] = [pkg]
    # we didnt get any results, bow out...
    if not len(self.retDict):
        return (reslist, self.depToPkg)
    newList = []
    availListNVRE = map(lambda p: p[:4], self.availList)
    failedDeps = []
    solutionPkgs = []
    pkgs = []
    for dep in self.retDict.keys():
        # skip the rest if we didnt get a result
        if len(self.retDict[dep]) == 0:
            continue
        solutions = self.retDict[dep]
        # fixme, grab the first package that satisfies the dep
        # but make sure we match nvre against the list of avail packages
        # so we grab the right version of the package
        # if we only get one soltution, use it.  No point in jumping
        # though other hoops
        if len(solutions) == 1:
            for solution in solutions:
                pkgs.append(solution)
        # we've got more than one possible solution, do some work
        # to figure out if I want one, some, or all of them
        elif len(solutions) > 1:
            # try to install the new version of whatever arch is
            # installed
            solutionsInstalled = self.__getSolutionsInstalled(solutions)
            found = 0
            if len(solutionsInstalled):
                for p in solutionsInstalled:
                    pkgs.append(p)
                    self.depToPkg[dep] = p
                    found = 1
                # NOTE(review): this `break` exits the outer dep loop, not
                # just the handling of this dep -- confirm that is intended.
                if found:
                    break
            # we dont have any of possible solutions installed, pick one
            else:
                # this is where we could do all sort of heuristics to pick
                # best one. For now, grab the first one in the list thats
                # available
                #FIXME: we need to arch score here for multilib/kernel
                # packages that dont have a version installed
                # This tends to happen a lot when isntalling into
                # empty chroots (aka, pick which of the kernels to
                # install).
                # ie, this is the pure heuristic approach...
                shortest = solutions[0]
                for solution in solutions:
                    if len(shortest[0]) > len(solution[0]):
                        shortest = solution
                # if we get this far, its still possible that we have package
                # that is multilib and we need to install both versions of
                # this is a check for that...
                if self.installedPkgHash.has_key(shortest[0]):
                    iList = self.installedPkgHash[shortest[0]]
                    for iPkg in iList:
                        if self.availListHash.has_key(tuple(shortest[:4])):
                            for i in self.availListHash[tuple(shortest[:4])]:
                                if self.cfg['forcedArch']:
                                    arches = self.cfg['forcedArch']
                                    if i[4] in arches:
                                        pkgs.append(i)
                                        self.depToPkg[dep] = i
                                        break
                                else:
                                    # its not the same package we have installed
                                    if iPkg[:5] != i[:5]:
                                        # this arch matches the arch of a
                                        # package installed
                                        if iPkg[4] == i[4]:
                                            pkgs.append(i)
                                            self.depToPkg[dep] = i
                                            break
                # The following works around RHEL-3-era multilib packaging:
                # pick the best arch of a package to solve a dep; if the best
                # arch is already in the transaction and is _not_ solving the
                # dep, fall back to the next best applicable arch.  A second
                # best arch may not be valid at all, in which case it is not
                # used (leaving an unsolved dep).
                if self.availListHash.has_key(tuple(shortest[:4])):
                    avail = self.availListHash[tuple(shortest[:4])]
                    bestArchP = None
                    useNextBestArch = None
                    bestArchP2 = None
                    # a saner approach might be to find the applicable arches,
                    # sort them, and walk over them in order
                    # remove the items with archscore <= 0 (not applicable)
                    app_avail = filter(lambda a: rpm.archscore(a[4]), avail)
                    # sort the items by archscore, most approriate first
                    app_avail.sort(lambda a, b: cmp(
                        rpm.archscore(a[4]), rpm.archscore(b[4])))
                    # e.g. "gnutls" and "libgnutls.so(64bit)" are in the same
                    # set of deps; since gnutls.x86_64 was added for the
                    # "gnutls" dep it is already in selectedPkgs, so skip it
                    # and take the next applicable candidate
                    for i in app_avail:
                        if i in self.selectedPkgs:
                            continue
                        pkgs.append(i)
                        self.depToPkg[dep] = i
                        # we found something, stop iterating over available
                        break
                    # we found something for this dep, stop iterating
                    continue
                else:
                    # FIXME: in an ideal world, I could raise an exception here, but that will break the current gui
                    # NOTE(review): `p` here leaks from an earlier loop and is
                    # not a package chosen for this dep -- looks suspicious;
                    # confirm against upstream behaviour.
                    pkgs.append(p)
                    self.depToPkg[dep] = p
                    # raise UnsolvedDependencyError("Packages %s provide dep %s but are not available for install based on client config" % (pkgs,dep), dep, pkgs )
    for pkg in pkgs:
        self.selectedPkgs.append(pkg)
        # NOTE(review): both branches append, so the NVRE membership test is
        # currently a no-op -- likely a remnant of the skip-list FIXME below.
        if pkg[:4] in availListNVRE:
            newList.append(pkg)
        else:
            newList.append(pkg)
    reslist = newList
    # FIXME: we need to return the list of stuff that was skipped
    # because it wasn't on the available list and present it to the
    # user something like:
    # blippy-1.0-1 requires barpy-2.0-1 but barpy-3.0-1 is already isntalled
    #print "\n\nself.depsNotAvailable"
    #pprint.pprint(self.depsNotAvailable)
    #pprint.pprint(self.depToPkg)
    return (reslist, self.depToPkg)
def doCall(method, *args, **kwargs):
    """Invoke an XML-RPC method with retry handling.

    Calls method(*args, **kwargs) up to cfg["networkRetries"] times,
    sleeping 5 seconds between attempts.  Socket errors are retried;
    an auth-token timeout (server error code 34) triggers a re-login and
    retry; most other failures are re-raised as
    up2dateErrors.CommunicationError (FileNotFoundError for server error
    code 17).  Returns the XML-RPC call result on success.
    """
    log = up2dateLog.initLog()
    # _Method__name is the name-mangled attribute of xmlrpclib._Method
    log.log_debug("rpcServer: Calling XMLRPC %s" % method.__dict__['_Method__name'])
    cfg = config.initUp2dateConfig()
    ret = None
    attempt_count = 1
    # NOTE(review): only ValueError is caught here; presumably the config
    # object always supplies a networkRetries entry -- verify.
    try:
        attempts = int(cfg["networkRetries"])
    except ValueError:
        attempts = 1
    if attempts <= 0:
        attempts = 1
    while 1:
        failure = 0
        ret = None
        try:
            ret = method(*args, **kwargs)
        except KeyboardInterrupt:
            raise_with_tb(up2dateErrors.CommunicationError(_(
                "Connection aborted by the user")))
        # if we get a socket error, keep trying (up to `attempts` times)
        except (socket.error, SSL.socket_error):
            log.log_me("A socket error occurred: %s, attempt #%s" % (
                sys.exc_info()[1], attempt_count))
            if attempt_count >= attempts:
                e = sys.exc_info()[1]
                # socket.error args may be (errno, message) or (message,)
                if len(e.args) > 1:
                    raise_with_tb(up2dateErrors.CommunicationError(e.args[1]))
                else:
                    raise_with_tb(up2dateErrors.CommunicationError(e.args[0]))
            else:
                failure = 1
        except httplib.IncompleteRead:
            print("httplib.IncompleteRead")
            raise_with_tb(up2dateErrors.CommunicationError("httplib.IncompleteRead"))
        except urllib2.HTTPError:
            e = sys.exc_info()[1]
            msg = "\nAn HTTP error occurred:\n"
            msg = msg + "URL: %s\n" % e.filename
            msg = msg + "Status Code: %s\n" % e.code
            msg = msg + "Error Message: %s\n" % e.msg
            log.log_me(msg)
            raise_with_tb(up2dateErrors.CommunicationError(msg))
        except xmlrpclib.ProtocolError:
            e = sys.exc_info()[1]
            log.log_me("A protocol error occurred: %s , attempt #%s," % (
                e.errmsg, attempt_count))
            if e.errcode == 404:
                log.log_me("Could not find URL, %s" % (e.url))
                log.log_me("Check server name and/or URL, then retry\n");
            # reportError extracts the RHN-specific (code, message) pair
            # from the fault response headers
            (errCode, errMsg) = rpclib.reportError(e.headers)
            reset = 0
            if abs(errCode) == 34:
                log.log_me("Auth token timeout occurred\n errmsg: %s" % errMsg)
                # this calls login, which in turn calls doCall (ie,
                # this function) but login should never get a 34, so
                # should be safe from recursion
                from up2date_client import up2dateAuth
                up2dateAuth.updateLoginInfo()
            # the servers are being throttled to pay users only; catch the
            # exception and display a nice error message
            if abs(errCode) == 51:
                log.log_me(_("Server has refused connection due to high load"))
                raise_with_tb(up2dateErrors.CommunicationError(e.errmsg))
            # if we get a 404 from our server, thats pretty
            # fatal... no point in retrying over and over. Note that
            # errCode == 17 is specific to our servers, if the
            # serverURL is just pointing somewhere random they will
            # get a 0 for errcode and will raise a CommunicationError
            if abs(errCode) == 17:
                #in this case, the args are the package string, so lets try to
                # build a useful error message
                if type(args[0]) == type([]):
                    pkg = args[0]
                else:
                    pkg = args[1]
                if type(pkg) == type([]):
                    # package tuple layout: (name, version, release, epoch, arch)
                    pkgName = "%s-%s-%s.%s" % (pkg[0], pkg[1], pkg[2], pkg[4])
                else:
                    pkgName = pkg
                msg = "File Not Found: %s\n%s" % (pkgName, errMsg)
                log.log_me(msg)
                raise_with_tb(up2dateErrors.FileNotFoundError(msg))
            # NOTE(review): `reset` is never set to 1 above, so this branch
            # always runs; even after an auth-token refresh (code 34) the
            # call remains subject to the attempt limit -- confirm intended.
            if not reset:
                if attempt_count >= attempts:
                    raise_with_tb(up2dateErrors.CommunicationError(e.errmsg))
                else:
                    failure = 1
        except xmlrpclib.ResponseError:
            raise_with_tb(up2dateErrors.CommunicationError(
                "Broken response from the server."))
        # a successful call breaks out; a None result counts as a failure
        if ret != None:
            break
        else:
            failure = 1
        if failure:
            # rest for five seconds before trying again
            time.sleep(5)
            attempt_count = attempt_count + 1
        if attempt_count > attempts:
            raise up2dateErrors.CommunicationError("The data returned from the server was incomplete")
    return ret
def logDeltaPackages(pkgs):
    """Log the package-profile delta: what was added and what was removed.

    pkgs -- dict with 'added' and 'removed' package lists.
    """
    logger = up2dateLog.initLog()
    logger.log_me("Adding packages to package profile: %s"
                  % pprint_pkglist(pkgs['added']))
    logger.log_me("Removing packages from package profile: %s"
                  % pprint_pkglist(pkgs['removed']))
def getInstalledPackageList(msgCallback=None, progressCallback=None,
                            getArch=None, getInfo=None):
    """ Return list of packages. Package is hash with keys name, epoch,
        version, release and optionaly arch and cookie
    """
    packages = []
    if msgCallback is not None:
        msgCallback(_("Getting list of packages installed on the system"))
    _ts = transaction.initReadOnlyTransaction()
    # First pass over the rpm db: count headers so progressCallback
    # can be given a total.
    total = 0
    for hdr in _ts.dbMatch():
        if hdr is None:
            break
        total += 1
    # Second pass: build the package hashes.
    idx = 0
    for hdr in _ts.dbMatch():
        if hdr is None:
            break
        # skip entries whose name/version/release are not valid UTF-8
        if not (is_utf8(sstr(hdr['name'])) and is_utf8(sstr(hdr['version']))
                and is_utf8(sstr(hdr['release']))):
            up2dateLog.initLog().log_me(
                "Package with invalid character set found. Skipping: '%s-%s-%s'"
                % (hdr['name'].decode('UTF-8', errors='replace'),
                   hdr['version'].decode('UTF-8', errors='replace'),
                   hdr['release'].decode('UTF-8', errors='replace')))
            continue
        entry = {
            'name': sstr(hdr['name']),
            'epoch': hdr['epoch'],
            'version': sstr(hdr['version']),
            'release': sstr(hdr['release']),
            'installtime': hdr['installtime'],
        }
        # epoch is sent as a string; a missing epoch becomes ""
        entry['epoch'] = "" if entry['epoch'] is None else "%s" % entry['epoch']
        if getArch:
            entry['arch'] = hdr['arch']
            # the arch on gpg-pubkeys is "None"...
            if entry['arch']:
                entry['arch'] = sstr(entry['arch'])
            packages.append(entry)
        elif getInfo:
            if hdr['arch']:
                entry['arch'] = sstr(hdr['arch'])
            if hdr['cookie']:
                entry['cookie'] = sstr(hdr['cookie'])
            packages.append(entry)
        else:
            packages.append(entry)
        if progressCallback is not None:
            progressCallback(idx, total)
        idx += 1
    _ts.ts.closeDB()
    packages.sort(
        key=lambda x: (x['name'], x['epoch'], x['version'], x['release']))
    return packages
def solveDep(self, unknowns, availList, msgCallback = None,
             progressCallback = None, refreshCallback = None):
    """Resolve unknown dependencies against the list of available packages.

    unknowns  -- list of unresolved dependency strings
    availList -- list of available package tuples; slice [:4] is
                 (name, version, release, epoch), index 4 is arch
    Returns (reslist, depToPkg): packages selected to satisfy the deps,
    and a dep -> chosen-package mapping.
    """
    self.cfg = config.initUp2dateConfig()
    self.log = up2dateLog.initLog()
    self.log.log_me("solving dep for: %s" % unknowns)
    self.refreshCallback = refreshCallback
    self.progressCallback = progressCallback
    self.msgCallback = msgCallback
    self.availList = availList
    availList.sort()
    # index the available packages by their (name, version, release, epoch)
    self.availListHash = {}
    for p in self.availList:
        if self.availListHash.has_key(tuple(p[:4])):
            self.availListHash[tuple(p[:4])].append(p)
        else:
            self.availListHash[tuple(p[:4])] = [p]
    # getSolutions fills self.retDict: dep -> list of candidate packages
    self.retDict = {}
    self.getSolutions(unknowns, progressCallback = self.progressCallback,
                      msgCallback = self.msgCallback)
    reslist = []
    self.depToPkg = DictOfLists()
    self.depsNotAvailable = DictOfLists()
    # self.depToPkg = {}
    #FIXME: this should be cached, I dont really need to query the db
    # for this everytime
    self.installedPkgList = rpmUtils.getInstalledPackageList(getArch=1)
    # index installed packages by package name
    self.installedPkgHash = {}
    for pkg in self.installedPkgList:
        if self.installedPkgHash.has_key(pkg[0]):
            self.installedPkgHash[pkg[0]].append(pkg)
        else:
            self.installedPkgHash[pkg[0]] = [pkg]
    # we didnt get any results, bow out...
    if not len(self.retDict):
        return (reslist, self.depToPkg)
    newList = []
    availListNVRE = map(lambda p: p[:4], self.availList)
    failedDeps = []
    solutionPkgs = []
    pkgs = []
    for dep in self.retDict.keys():
        # skip the rest if we didnt get a result
        if len(self.retDict[dep]) == 0:
            continue
        solutions = self.retDict[dep]
        # fixme, grab the first package that satisfies the dep
        # but make sure we match nvre against the list of avail packages
        # so we grab the right version of the package
        # if we only get one soltution, use it.  No point in jumping
        # though other hoops
        if len(solutions) == 1:
            for solution in solutions:
                pkgs.append(solution)
        # we've got more than one possible solution, do some work
        # to figure out if I want one, some, or all of them
        elif len(solutions) > 1:
            # try to install the new version of whatever arch is
            # installed
            solutionsInstalled = self.__getSolutionsInstalled(solutions)
            found = 0
            if len(solutionsInstalled):
                for p in solutionsInstalled:
                    pkgs.append(p)
                    self.depToPkg[dep] = p
                    found = 1
                # NOTE(review): this `break` exits the outer dep loop, not
                # just the handling of this dep -- confirm that is intended.
                if found:
                    break
            # we dont have any of possible solutions installed, pick one
            else:
                # this is where we could do all sort of heuristics to pick
                # best one. For now, grab the first one in the list thats
                # available
                #FIXME: we need to arch score here for multilib/kernel
                # packages that dont have a version installed
                # This tends to happen a lot when isntalling into
                # empty chroots (aka, pick which of the kernels to
                # install).
                # ie, this is the pure heuristic approach...
                shortest = solutions[0]
                for solution in solutions:
                    if len(shortest[0]) > len(solution[0]):
                        shortest = solution
                # if we get this far, its still possible that we have package
                # that is multilib and we need to install both versions of
                # this is a check for that...
                if self.installedPkgHash.has_key(shortest[0]):
                    iList = self.installedPkgHash[shortest[0]]
                    for iPkg in iList:
                        if self.availListHash.has_key(tuple(shortest[:4])):
                            for i in self.availListHash[tuple(shortest[:4])]:
                                if self.cfg['forcedArch']:
                                    arches = self.cfg['forcedArch']
                                    if i[4] in arches:
                                        pkgs.append(i)
                                        self.depToPkg[dep] = i
                                        break
                                else:
                                    # its not the same package we have installed
                                    if iPkg[:5] != i[:5]:
                                        # this arch matches the arch of a
                                        # package installed
                                        if iPkg[4] == i[4]:
                                            pkgs.append(i)
                                            self.depToPkg[dep] = i
                                            break
                # The following works around RHEL-3-era multilib packaging:
                # pick the best arch of a package to solve a dep; if the best
                # arch is already in the transaction and is _not_ solving the
                # dep, fall back to the next best applicable arch.  A second
                # best arch may not be valid at all, in which case it is not
                # used (leaving an unsolved dep).
                if self.availListHash.has_key(tuple(shortest[:4])):
                    avail = self.availListHash[tuple(shortest[:4])]
                    bestArchP = None
                    useNextBestArch = None
                    bestArchP2 = None
                    # a saner approach might be to find the applicable arches,
                    # sort them, and walk over them in order
                    # remove the items with archscore <= 0 (not applicable)
                    app_avail = filter(lambda a: rpm.archscore(a[4]), avail)
                    # sort the items by archscore, most approriate first
                    app_avail.sort(lambda a,b: cmp(rpm.archscore(a[4]),rpm.archscore(b[4])))
                    # e.g. "gnutls" and "libgnutls.so(64bit)" are in the same
                    # set of deps; since gnutls.x86_64 was added for the
                    # "gnutls" dep it is already in selectedPkgs, so skip it
                    # and take the next applicable candidate
                    for i in app_avail:
                        if i in self.selectedPkgs:
                            continue
                        pkgs.append(i)
                        self.depToPkg[dep] = i
                        # we found something, stop iterating over available
                        break
                    # we found something for this dep, stop iterating
                    continue
                else:
                    # FIXME: in an ideal world, I could raise an exception here, but that will break the current gui
                    # NOTE(review): `p` here leaks from an earlier loop and is
                    # not a package chosen for this dep -- looks suspicious;
                    # confirm against upstream behaviour.
                    pkgs.append(p)
                    self.depToPkg[dep] = p
                    # raise UnsolvedDependencyError("Packages %s provide dep %s but are not available for install based on client config" % (pkgs,dep), dep, pkgs )
    for pkg in pkgs:
        self.selectedPkgs.append(pkg)
        # NOTE(review): both branches append, so the NVRE membership test is
        # currently a no-op -- likely a remnant of the skip-list FIXME below.
        if pkg[:4] in availListNVRE:
            newList.append(pkg)
        else:
            newList.append(pkg)
    reslist = newList
    # FIXME: we need to return the list of stuff that was skipped
    # because it wasn't on the available list and present it to the
    # user something like:
    # blippy-1.0-1 requires barpy-2.0-1 but barpy-3.0-1 is already isntalled
    #print "\n\nself.depsNotAvailable"
    #pprint.pprint(self.depsNotAvailable)
    #pprint.pprint(self.depToPkg)
    return (reslist, self.depToPkg)
def getInstalledPackageList(msgCallback = None, progressCallback = None,
                            getArch=None, getInfo = None):
    """ Return list of packages. Package is hash with keys name, epoch,
        version, release and optionaly arch and cookie
    """
    packages = []
    if msgCallback is not None:
        msgCallback(_("Getting list of packages installed on the system"))
    _ts = transaction.initReadOnlyTransaction()
    # First pass over the rpm db: count headers so progressCallback
    # can be given a total.
    total = 0
    for hdr in _ts.dbMatch():
        if hdr is None:
            break
        total += 1
    # Second pass: build the package hashes.
    idx = 0
    for hdr in _ts.dbMatch():
        if hdr is None:
            break
        # skip entries whose name/version/release are not valid UTF-8
        if not (is_utf8(sstr(hdr['name'])) and is_utf8(sstr(hdr['version']))
                and is_utf8(sstr(hdr['release']))):
            up2dateLog.initLog().log_me(
                "Package with invalid character set found. Skipping: '%s-%s-%s'"
                % (hdr['name'].decode('UTF-8', errors='replace'),
                   hdr['version'].decode('UTF-8', errors='replace'),
                   hdr['release'].decode('UTF-8', errors='replace')))
            continue
        entry = {
            'name': sstr(hdr['name']),
            'epoch': hdr['epoch'],
            'version': sstr(hdr['version']),
            'release': sstr(hdr['release']),
            'installtime': hdr['installtime'],
        }
        # epoch is sent as a string; a missing epoch becomes ""
        entry['epoch'] = "" if entry['epoch'] is None else "%s" % entry['epoch']
        if getArch:
            entry['arch'] = hdr['arch']
            # the arch on gpg-pubkeys is "None"...
            if entry['arch']:
                entry['arch'] = sstr(entry['arch'])
            packages.append(entry)
        elif getInfo:
            if hdr['arch']:
                entry['arch'] = sstr(hdr['arch'])
            if hdr['cookie']:
                entry['cookie'] = sstr(hdr['cookie'])
            packages.append(entry)
        else:
            packages.append(entry)
        if progressCallback is not None:
            progressCallback(idx, total)
        idx += 1
    _ts.ts.closeDB()
    packages.sort(
        key=lambda x: (x['name'], x['epoch'], x['version'], x['release']))
    return packages
# granted to use or replicate Red Hat trademarks that are incorporated # in this software or its documentation. # import os import time import dnf.exceptions import dnf.cli from up2date_client import up2dateLog from up2date_client import config from up2date_client import rpmUtils from up2date_client import rhnPackageInfo log = up2dateLog.initLog() # file used to keep track of the next time rhn_check # is allowed to update the package list on the server LAST_UPDATE_FILE = "/var/lib/up2date/dbtimestamp" # mark this module as acceptable __rhnexport__ = [ 'update', 'remove', 'refresh_list', 'fullUpdate', 'checkNeedUpdate', 'runTransaction', 'verify' ] def remove(package_list, cache_only=None): """We have been told that we should remove packages""" if cache_only:
def report(problem_dir):
    """Report an ABRT problem directory to the RHN server.

    problem_dir -- path to the crash directory created by ABRT.
    Returns 1 on success, -1 on any failure (invalid directory, missing
    crash items, server without 'abrt' capability, or server-side error
    creating the crash record).
    """
    problem_dir = os.path.normpath(os.path.abspath(problem_dir))
    basename = os.path.basename(problem_dir)
    log = up2dateLog.initLog()
    if not (os.path.exists(problem_dir) and os.path.isdir(problem_dir)):
        log.log_me("The specified path [%s] is not a valid directory." % problem_dir)
        return -1
    # Minimal set of files a crash directory must contain to be reportable;
    # kernel vmcore crashes carry a different set.
    crash_items = ['analyzer', 'cmdline', 'reason']
    if os.path.exists(os.path.join(problem_dir, 'vmcore')):
        crash_items = ['analyzer', 'vmcore-dmesg.txt']
    for item in crash_items:
        item_path = os.path.join(problem_dir, item)
        if not os.path.exists(item_path):
            log.log_me("Crash directory [%s] is incomplete or invalid" % problem_dir)
            return -1
    server = rhnserver.RhnServer()
    if not server.capabilities.hasCapability('abrt'):
        return -1
    systemid = up2dateAuth.getSystemId()
    # Package information (each item is a one-line file in the problem dir)
    pkg_data = {}
    for item in ['package', 'pkg_name', 'pkg_epoch', 'pkg_version',
                 'pkg_release', 'pkg_arch']:
        pkg_item_path = os.path.join(problem_dir, item)
        if os.path.exists(pkg_item_path):
            filecontent = _readline(pkg_item_path)
            if filecontent:
                pkg_data[item] = filecontent
    # Crash information
    crash_data = {'crash': basename, 'path': problem_dir}
    # Crash count
    crash_count = _readline(os.path.join(problem_dir, 'count'))
    if crash_count:
        crash_data['count'] = crash_count
    # Create record about the crash
    r = server.abrt.create_crash(systemid, crash_data, pkg_data)
    if (r < 0):
        # Error creating new crash report
        log.log_me("Error creating new crash report.")
        return -1
    # Upload every particular file in the problem directory to the server
    for i in os.listdir(problem_dir):
        path = os.path.join(problem_dir, i)
        if not os.path.isfile(path):
            continue
        filesize = os.stat(path).st_size
        crash_file_data = {'filename': os.path.basename(i),
                           'path': path,
                           'filesize': filesize,
                           'filecontent': base64.encodestring(bstr("")),
                           'content-encoding': 'base64'}
        if server.abrt.is_crashfile_upload_enabled(systemid) and \
                filesize <= server.abrt.get_crashfile_uploadlimit(systemid):
            # BUGFIX: crash artifacts can be arbitrary binary data -- they
            # must be read in binary mode ('rb').  Text mode ('r') corrupts
            # the payload and raises UnicodeDecodeError/TypeError on
            # Python 3 when the bytes are not valid text.  Using `with`
            # also guarantees the handle is closed.
            with open(path, 'rb') as f:
                crash_file_data['filecontent'] = base64.encodestring(f.read())
        server.abrt.upload_crash_file(systemid, basename, crash_file_data)
    return 1
def getServer(refreshCallback=None, serverOverride=None, timeout=None):
    """Build and return a RetryServer configured from up2date settings:
    proxy, proxy auth, preferred language, SSL CA certs and client
    capability headers."""
    log = up2dateLog.initLog()
    cfg = config.initUp2dateConfig()
    # Where do we keep the CA certificate for RHNS?
    # The servers we're talking to need to have their certs
    # signed by one of these CA.
    ca = cfg["sslCACert"]
    if not isinstance(ca, list):
        ca = [ca]
    rhns_ca_certs = ca or ["/usr/share/rhn/RHNS-CA-CERT"]
    proxyHost = config.getProxySetting() if cfg["enableProxy"] else None
    serverUrls = serverOverride if serverOverride else config.getServerlURL()
    serverList = ServerList(serverUrls)
    proxyUser = None
    proxyPassword = None
    if cfg["enableProxyAuth"]:
        proxyUser = cfg["proxyUser"] or None
        proxyPassword = cfg["proxyPassword"] or None
    # pick the preferred language from the usual environment variables
    lang = None
    for env in 'LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG':
        if env in os.environ and os.environ[env]:  # skip unset/empty vars
            lang = os.environ[env].split(':')[0].split('.')[0]
            break
    s = RetryServer(serverList.server(),
                    refreshCallback=refreshCallback,
                    proxy=proxyHost,
                    username=proxyUser,
                    password=proxyPassword,
                    timeout=timeout)
    s.addServerList(serverList)
    s.add_header("X-Up2date-Version", up2dateUtils.version())
    if lang:
        s.setlang(lang)
    # require RHNS-CA-CERT file to be able to authenticate the SSL connections
    uses_https = [True for url in s.serverList.serverList
                  if urlparse.urlparse(url)[0] == 'https']
    if uses_https:
        for rhns_ca_cert in rhns_ca_certs:
            if not os.access(rhns_ca_cert, os.R_OK):
                msg = "%s: %s" % (_("ERROR: can not find RHNS CA file"),
                                  rhns_ca_cert)
                log.log_me("%s" % msg)
                raise up2dateErrors.SSLCertificateFileNotFound(msg)
            # force the validation of the SSL cert
            s.add_trusted_cert(rhns_ca_cert)
    clientCaps.loadLocalCaps()
    # send up the capabality info
    for (headerName, value) in clientCaps.caps.headerFormat():
        s.add_header(headerName, value)
    return s
import os import subprocess import xml.sax import tempfile import shutil from base64 import encodestring sys.path.append("/usr/share/rhn/") from up2date_client import up2dateLog from up2date_client import rhnserver from up2date_client import up2dateAuth from up2date_client import up2dateErrors from rhn.i18n import sstr, bstr __rhnexport__ = [ 'xccdf_eval' ] log = up2dateLog.initLog() def xccdf_eval(args, cache_only=None): if cache_only: return (0, 'no-ops for caching', {}) results_dir = None if ('id' in args) and ('file_size' in args) and args['file_size'] > 0: results_dir = tempfile.mkdtemp() pwd = os.getcwd() os.chdir(results_dir) results_file = tempfile.NamedTemporaryFile(dir=results_dir) params, oscap_err = _process_params(args['params'], results_file.name, results_dir) oscap_err += _run_oscap(['xccdf', 'eval'] + params + [args['path']])
def getServer(refreshCallback=None, serverOverride=None, timeout=None):
    """Build and return a RetryServer configured from up2date settings:
    proxy, proxy auth, preferred language, SSL CA certs and client
    capability headers."""
    log = up2dateLog.initLog()
    cfg = config.initUp2dateConfig()
    # Where do we keep the CA certificate for RHNS?
    # The servers we're talking to need to have their certs
    # signed by one of these CA.
    ca = cfg["sslCACert"]
    if not isinstance(ca, list):
        ca = [ca]
    rhns_ca_certs = ca or ["/usr/share/rhn/RHNS-CA-CERT"]
    proxyHost = config.getProxySetting() if cfg["enableProxy"] else None
    serverUrls = serverOverride if serverOverride else config.getServerlURL()
    serverList = ServerList(serverUrls)
    proxyUser = None
    proxyPassword = None
    if cfg["enableProxyAuth"]:
        proxyUser = cfg["proxyUser"] or None
        proxyPassword = cfg["proxyPassword"] or None
    # pick the preferred language from the usual environment variables
    lang = None
    for env in 'LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG':
        if env in os.environ and os.environ[env]:  # skip unset/empty vars
            lang = os.environ[env].split(':')[0].split('.')[0]
            break
    s = RetryServer(serverList.server(),
                    refreshCallback=refreshCallback,
                    proxy=proxyHost,
                    username=proxyUser,
                    password=proxyPassword,
                    timeout=timeout)
    s.addServerList(serverList)
    s.add_header("X-Up2date-Version", up2dateUtils.version())
    if lang:
        s.setlang(lang)
    # require RHNS-CA-CERT file to be able to authenticate the SSL connections
    uses_https = [True for url in s.serverList.serverList
                  if urlparse.urlparse(url)[0] == 'https']
    if uses_https:
        for rhns_ca_cert in rhns_ca_certs:
            if not os.access(rhns_ca_cert, os.R_OK):
                msg = "%s: %s" % (_("ERROR: can not find RHNS CA file"),
                                  rhns_ca_cert)
                log.log_me("%s" % msg)
                raise up2dateErrors.SSLCertificateFileNotFound(msg)
            # force the validation of the SSL cert
            s.add_trusted_cert(rhns_ca_cert)
    clientCaps.loadLocalCaps()
    # send up the capabality info
    for (headerName, value) in clientCaps.caps.headerFormat():
        s.add_header(headerName, value)
    return s