def setUp(self):
    """Test fixture setup: load the XML-RPC server configuration,
    connect to the Oracle test database, then reset the SQL log id."""
    initCFG("server.xmlrpc")
    rhnSQL.initDB(
        backend="oracle",
        username=DB_SETTINGS["user"],
        password=DB_SETTINGS["password"],
        database=DB_SETTINGS["database"],
    )
    rhnSQL.clear_log_id()
def getParentsChilds(b_only_custom=False):
    """Build a mapping of parent channel label -> list of child channel
    labels.

    A channel with no parent -- or whose parent is not itself present in
    the mapping (e.g. the parent is not a custom channel when
    b_only_custom is set) -- becomes a top-level key with an empty list.

    :param b_only_custom: when True, only custom channels are considered
    :return: dict of {parent_label: [child_label, ...]}
    """
    initCFG('server.satellite')
    rhnSQL.initDB()
    query = """
        select c1.label, c2.label parent_channel, c1.id
        from rhnChannel c1 left outer join rhnChannel c2
          on c1.parent_channel = c2.id
        order by c2.label desc, c1.label asc
    """
    cursor = rhnSQL.prepare(query)
    cursor.execute()
    d_parents = {}
    while True:
        row = cursor.fetchone_dict()
        if not row:
            break
        # Skip non-custom channels when only custom ones were requested.
        if b_only_custom and not rhnChannel.isCustomChannel(row['id']):
            continue
        parent = row['parent_channel']
        if not parent or parent not in d_parents:
            # No parent, or the parent is not a key we track (not a
            # custom channel): treat this channel as a parent itself.
            d_parents[row['label']] = []
        else:
            d_parents[parent].append(row['label'])
    return d_parents
def getParentsChilds():
    """Build a mapping of parent channel label -> list of child channel
    labels, restricted to custom channels.

    :return: dict of {parent_label: [child_label, ...]}
    """
    initCFG('server')
    rhnSQL.initDB()
    sql = """
        select c1.label, c2.label parent_channel, c1.id
        from rhnChannel c1 left outer join rhnChannel c2
          on c1.parent_channel = c2.id
        order by c2.label desc, c1.label asc
    """
    h = rhnSQL.prepare(sql)
    h.execute()
    d_parents = {}
    while 1:
        row = h.fetchone_dict()
        if not row:
            break
        if rhnChannel.isCustomChannel(row['id']):
            parent_channel = row['parent_channel']
            if not parent_channel:
                d_parents[row['label']] = []
            elif parent_channel not in d_parents:
                # BUG FIX: previously this appended to
                # d_parents[parent_channel] unconditionally and raised
                # KeyError when the parent is not a custom channel.
                # Treat the child as a top-level entry instead (same
                # handling as the b_only_custom variant of this helper).
                d_parents[row['label']] = []
            else:
                d_parents[parent_channel].append(row['label'])
    return d_parents
def _load_proxy_settings(self, url):
    """Resolve proxy settings for this repo.

    Precedence:
      1. global http_proxy from /etc/rhn/rhn.conf (server.satellite),
      2. per-repo / per-channel / [main] sections in
         /etc/rhn/spacewalk-repo-sync/zypper.conf.

    The previously active config component is restored before returning.
    """
    saved_component = CFG.getComponent()
    # read the proxy configuration in /etc/rhn/rhn.conf
    initCFG('server.satellite')
    if CFG.http_proxy:
        # Global HTTP proxy settings win; get_proxy() also resolves
        # the credentials.
        self.proxy_url, self.proxy_user, self.proxy_pass = get_proxy(url)
        self.proxy_hostname = self.proxy_url
    elif os.path.isfile(REPOSYNC_ZYPPER_CONF):
        zypper_cfg = configparser.ConfigParser()
        zypper_cfg.read_file(open(REPOSYNC_ZYPPER_CONF))
        section_name = None
        if zypper_cfg.has_section(self.name):
            section_name = self.name
        # NOTE(review): `channel_label` is not defined in this scope --
        # presumably this should be self.channel_label; confirm upstream.
        elif zypper_cfg.has_section(channel_label):
            section_name = channel_label
        elif zypper_cfg.has_section('main'):
            section_name = 'main'
        if section_name:
            if zypper_cfg.has_option(section_name, option='proxy'):
                self.proxy_hostname = zypper_cfg.get(section_name, option='proxy')
                self.proxy_url = "http://%s" % self.proxy_hostname
            if zypper_cfg.has_option(section_name, 'proxy_username'):
                self.proxy_user = zypper_cfg.get(section_name, 'proxy_username')
            if zypper_cfg.has_option(section_name, 'proxy_password'):
                self.proxy_pass = zypper_cfg.get(section_name, 'proxy_password')
    # set config component back to original
    initCFG(saved_component)
def setUp(self):
    """Test fixture setup: load the server configuration and connect to
    the PostgreSQL test database described by DB_SETTINGS."""
    initCFG("server")
    rhnSQL.initDB(
        backend="postgresql",
        username=DB_SETTINGS["user"],
        password=DB_SETTINGS["password"],
        database=DB_SETTINGS["database"],
        host=DB_SETTINGS["host"],
    )
def __init__(self):
    """Load the CDN mapping files and collect the already-synced
    (null-org) channels from the database."""
    rhnSQL.initDB()
    initCFG('server.satellite')

    # Mapping files, loaded as JSON into the named attributes:
    for attr, path in (
        # Channel families mapping to channels
        ('families', constants.CHANNEL_FAMILY_MAPPING_PATH),
        # Channel metadata
        ('channel_metadata', constants.CHANNEL_DEFINITIONS_PATH),
        # Dist/Release channel mapping
        ('channel_dist_mapping', constants.CHANNEL_DIST_MAPPING_PATH),
        # Channel to repositories mapping
        ('content_source_mapping', constants.CONTENT_SOURCE_MAPPING_PATH),
    ):
        with open(path, 'r') as f:
            setattr(self, attr, json.load(f))

    # Map channels to their channel family
    self.channel_to_family = {
        channel: family
        for family in self.families
        for channel in self.families[family]['channels']
    }

    # Channels with a NULL org are the already-synced vendor channels.
    h = rhnSQL.prepare("""
        select label from rhnChannel
        where org_id is null
    """)
    h.execute()
    channels = h.fetchall_dict() or []
    self.synced_channels = [ch['label'] for ch in channels]
def __init__(self, retries=3, log_obj=None, force=False):
    """Download queue setup.

    :param retries: number of attempts per download
    :param log_obj: logger object used for progress/error reporting
    :param force: re-download even if the target already exists
    :raises ValueError: when REPOSYNC_DOWNLOAD_THREADS is not a
        positive integer
    """
    self.queue = Queue()
    initCFG('server.satellite')
    # Consistency fix: the other downloader variants in this codebase
    # validate that the configured thread count is a positive integer;
    # previously the raw (string) config value was stored unchecked.
    try:
        self.threads = int(CFG.REPOSYNC_DOWNLOAD_THREADS)
    except ValueError:
        raise ValueError("Number of threads expected, found: '%s'"
                         % CFG.REPOSYNC_DOWNLOAD_THREADS)
    if self.threads < 1:
        raise ValueError("Invalid number of threads: %d" % self.threads)
    self.retries = retries
    self.log_obj = log_obj
    self.force = force
def __init__(self, url, name, yumsrc_conf=YUMSRC_CONF):
    """Yum-backed content source for `url`, configured from
    `yumsrc_conf` (falls back to /dev/null when the file is missing)."""
    self.url = url
    self.name = name
    self.yumbase = yum.YumBase()
    # Use /dev/null when the configured yum conf does not exist.
    self.yumbase.preconf.fn = yumsrc_conf if os.path.exists(yumsrc_conf) else '/dev/null'
    self.configparser = ConfigParser()
    # read the proxy configuration in /etc/rhn/rhn.conf
    initCFG('server.satellite')
    self.proxy_addr = CFG.http_proxy
    self.proxy_user = CFG.http_proxy_username
    self.proxy_pass = CFG.http_proxy_password
    self._authenticate(url)
    known_repos = self.yumbase.repos.repos
    if name in known_repos:
        repo = known_repos[name]
    else:
        repo = yum.yumRepo.YumRepository(name)
        repo.populate(self.configparser, name, self.yumbase.conf)
    self.repo = repo
    self.sack = None
    self.setup_repo(repo)
    self.num_packages = 0
    self.num_excluded = 0
def __init__(self, url, name, org=1, channel_label="",
             ca_cert_file=None, client_cert_file=None, client_key_file=None):
    # pylint: disable=W0613
    """Debian repository content source for `url`."""
    self.url = url
    self.name = name
    # A falsy org maps to the literal string "NULL" (vendor content).
    self.org = org if org else "NULL"
    # read the proxy configuration in /etc/rhn/rhn.conf
    initCFG('server.satellite')
    self.proxy_addr = CFG.http_proxy
    self.proxy_user = CFG.http_proxy_username
    self.proxy_pass = CFG.http_proxy_password
    self.authtoken = None
    self.repo = DebRepo(url,
                        os.path.join(CACHE_DIR, self.org, name),
                        os.path.join(CFG.MOUNT_POINT, CFG.PREPENDED_DIR,
                                     self.org, 'stage'),
                        self.proxy_addr, self.proxy_user, self.proxy_pass)
    self.num_packages = 0
    self.num_excluded = 0
    # keep authtokens for mirroring
    (_scheme, _netloc, _path, query, _fragid) = urlparse.urlsplit(url)
    if query:
        self.authtoken = query
def headerParserHandler(self, req):
    """Apache header-parser hook: initialize config/logging from the
    RHNComponentType option, load the upload server handlers and
    dispatch to the configured SERVER class."""
    log_setreq(req)
    # init configuration options with proper component
    options = req.get_options()
    # If we are initializing out of a <Location> handler, there is
    # clearly nothing to do.
    if not options.has_key("RHNComponentType"):
        return apache.OK
    initCFG(options["RHNComponentType"])
    initLOG(CFG.LOG_FILE, CFG.DEBUG)
    if req.method == 'GET':
        # GET is only used as a ping.
        return apache.OK
    self.servers = rhnImport.load("upload_server/handlers",
                                  interface_signature='upload_class')
    if not options.has_key('SERVER'):
        log_error("SERVER not set in the apache config files!")
        return apache.HTTP_INTERNAL_SERVER_ERROR
    server_name = options['SERVER']
    if not self.servers.has_key(server_name):
        log_error("Unable to load server %s from available servers %s"
                  % (server_name, self.servers))
        return apache.HTTP_INTERNAL_SERVER_ERROR
    server_class = self.servers[server_name]
    self.server = server_class(req)
    return self._wrapper(req, "headerParserHandler")
def __init__(self, retries=3, log_obj=None, force=False):
    """Threaded downloader setup: reads thread count, timeout and
    minimal transfer rate from the server.satellite component and
    restores the previously active component afterwards.

    :raises ValueError: when any of the numeric config values is
        malformed, or the thread count is < 1
    """
    self.queues = {}
    saved_component = CFG.getComponent()
    initCFG("server.satellite")

    def _as_int(raw, message):
        # Config values arrive as strings; fail loudly on junk.
        try:
            return int(raw)
        except ValueError:
            raise ValueError(message % raw)

    self.threads = _as_int(CFG.REPOSYNC_DOWNLOAD_THREADS,
                           "Number of threads expected, found: '%s'")
    self.timeout = _as_int(CFG.REPOSYNC_TIMEOUT,
                           "Timeout in seconds expected, found: '%s'")
    self.minrate = _as_int(
        CFG.REPOSYNC_MINRATE,
        "Minimal transfer rate in bytes pre second expected, found: '%s'")
    if self.threads < 1:
        raise ValueError("Invalid number of threads: %d" % self.threads)
    initCFG(saved_component)
    self.retries = retries
    self.log_obj = log_obj
    self.force = force
    self.lock = Lock()
    self.exception = None
    # WORKAROUND - BZ #1439758 - ensure first item in queue is performed
    # alone to properly setup NSS
    self.first_in_queue_done = False
    self.first_in_queue_lock = Lock()
def __init__(self, url, name, insecure=False, interactive=True,
             yumsrc_conf=None, org="1", channel_label="", no_mirrors=False,
             ca_cert_file=None, client_cert_file=None, client_key_file=None):
    # pylint: disable=W0613
    """Debian repository content source for `url`."""
    self.url = url
    self.name = name
    # A falsy org maps to the literal string "NULL" (vendor content).
    self.org = org if org else "NULL"
    # read the proxy configuration in /etc/rhn/rhn.conf
    initCFG('server.satellite')
    self.proxy_addr, self.proxy_user, self.proxy_pass = get_proxy(self.url)
    self.authtoken = None
    self.repo = DebRepo(url,
                        os.path.join(CACHE_DIR, self.org, name),
                        os.path.join(CFG.MOUNT_POINT, CFG.PREPENDED_DIR,
                                     self.org, 'stage'),
                        self.proxy_addr, self.proxy_user, self.proxy_pass)
    self.num_packages = 0
    self.num_excluded = 0
    # keep authtokens for mirroring
    (_scheme, _netloc, _path, query, _fragid) = urlparse.urlsplit(url)
    if query:
        self.authtoken = query
def __init__(self, url, name):
    """Yum-backed content source: wipes the per-repo cache and reads
    proxy settings from /etc/rhn/rhn.conf."""
    self.url = url
    self.name = name
    self.yumbase = yum.YumBase()
    # Use /dev/null when the configured yum conf does not exist.
    self.yumbase.preconf.fn = YUMSRC_CONF if os.path.exists(YUMSRC_CONF) else '/dev/null'
    self.configparser = ConfigParser()
    self._clean_cache(CACHE_DIR + name)
    # read the proxy configuration in /etc/rhn/rhn.conf
    initCFG('server.satellite')
    self.proxy_addr = CFG.http_proxy
    self.proxy_user = CFG.http_proxy_username
    self.proxy_pass = CFG.http_proxy_password
    known_repos = self.yumbase.repos.repos
    if name in known_repos:
        repo = known_repos[name]
    else:
        repo = yum.yumRepo.YumRepository(name)
        repo.populate(self.configparser, name, self.yumbase.conf)
    self.repo = repo
    self.sack = None
    self.setup_repo(repo)
    self.num_packages = 0
    self.num_excluded = 0
def initDB(backend=None, host=None, port=None, username=None, password=None, database=None, sslmode=None, sslrootcert=None): """ Initialize the database. Either we get backend and all parameter which means the caller knows what they are doing, or we populate everything from the config files. """ if backend is None: if CFG is None or not CFG.is_initialized(): initCFG('server') backend = CFG.DB_BACKEND host = CFG.DB_HOST port = CFG.DB_PORT database = CFG.DB_NAME username = CFG.DB_USER password = CFG.DB_PASSWORD sslmode = None sslrootcert = None if CFG.DB_SSL_ENABLED: sslmode = 'verify-full' sslrootcert = CFG.DB_SSLROOTCERT if backend not in SUPPORTED_BACKENDS: raise rhnException("Unsupported database backend", backend) if port: port = int(port) # Hide the password add_to_seclist(password) try: __init__DB(backend, host, port, username, password, database, sslmode, sslrootcert) __init__DB2(backend, host, port, username, password, database, sslmode, sslrootcert) # except (rhnException, SQLError): # raise # pass on, we know those ones # except (KeyboardInterrupt, SystemExit): # raise except SQLConnectError, e: try: global __DB global __DB2 del __DB del __DB2 except NameError: pass raise e
def setUp(self):
    """Test fixture setup: load the server configuration and connect to
    the Oracle test database described by DB_SETTINGS."""
    initCFG("server")
    rhnSQL.initDB(
        backend="oracle",
        username=DB_SETTINGS["user"],
        password=DB_SETTINGS["password"],
        database=DB_SETTINGS["database"],
    )
def __init__(self):
    """Dispatcher runner: load the osa-dispatcher config component and
    reset all runtime state."""
    jabber_lib.Runner.__init__(self)
    initCFG("osa-dispatcher")
    self._tcp_server = None
    self._poll_interval = None
    self._next_poll_interval = None
    # Cache states
    self._state_ids = {}
def initDB(backend=None, host=None, port=None, username=None,
           password=None, database=None, sslmode=None, sslrootcert=None,
           initsecond=False):
    """ Initialize the database.

    Either we get backend and all parameter which means the caller
    knows what they are doing, or we populate everything from the
    config files.

    initsecond: If set to True it initialize a second DB connection.
                By default only one DB connection is needed.
    """
    if backend is None:
        # No explicit settings: pull everything from the server config.
        if CFG is None or not CFG.is_initialized():
            initCFG('server')
        backend = CFG.DB_BACKEND
        host = CFG.DB_HOST
        port = CFG.DB_PORT
        database = CFG.DB_NAME
        username = CFG.DB_USER
        password = CFG.DB_PASSWORD
        sslmode = None
        sslrootcert = None
        if CFG.DB_SSL_ENABLED:
            sslmode = 'verify-full'
            sslrootcert = CFG.DB_SSLROOTCERT

    if backend not in SUPPORTED_BACKENDS:
        raise rhnException("Unsupported database backend", backend)

    if port:
        port = int(port)

    # Hide the password from tracebacks/logs
    add_to_seclist(password)

    try:
        # Idiom fix: `if not initsecond` instead of `== False`.
        if not initsecond:
            __init__DB(backend, host, port, username, password, database,
                       sslmode, sslrootcert)
        else:
            __init__DB2(backend, host, port, username, password, database,
                        sslmode, sslrootcert)
    except SQLConnectError:
        e = sys.exc_info()[1]
        # Tear down whatever partial connection state exists before
        # re-raising with the original traceback.
        try:
            closeDB()
        except NameError:
            pass
        raise_with_tb(e, sys.exc_info()[2])
    # NOTE: a redundant trailing `except: raise` clause (a no-op) and
    # dead commented-out handlers were removed.
    return 0
def initDB(backend=None, host=None, port=None, username=None,
           password=None, database=None, sslmode=None, sslrootcert=None,
           initsecond=False):
    """ Initialize the database.

    Either we get backend and all parameter which means the caller
    knows what they are doing, or we populate everything from the
    config files.

    initsecond: If set to True it initialize a second DB connection.
                By default only one DB connection is needed.
    """
    if backend is None:
        # No explicit settings: pull everything from the server config.
        if CFG is None or not CFG.is_initialized():
            initCFG('server')
        backend = CFG.DB_BACKEND
        host = CFG.DB_HOST
        port = CFG.DB_PORT
        database = CFG.DB_NAME
        username = CFG.DB_USER
        password = CFG.DB_PASSWORD
        sslmode = None
        sslrootcert = None
        if CFG.DB_SSL_ENABLED:
            sslmode = 'verify-full'
            sslrootcert = CFG.DB_SSLROOTCERT

    if backend not in SUPPORTED_BACKENDS:
        raise rhnException("Unsupported database backend", backend)

    if port:
        port = int(port)

    # Hide the password from tracebacks/logs
    add_to_seclist(password)

    try:
        # Idiom fix: `if not initsecond` instead of `== False`.
        if not initsecond:
            __init__DB(backend, host, port, username, password, database,
                       sslmode, sslrootcert)
        else:
            __init__DB2(backend, host, port, username, password, database,
                        sslmode, sslrootcert)
    except SQLConnectError:
        e = sys.exc_info()[1]
        # Tear down whatever partial connection state exists before
        # re-raising.
        try:
            closeDB()
        except NameError:
            pass
        raise e
    # NOTE: a redundant trailing `except: raise` clause (a no-op) and
    # dead commented-out handlers were removed.
    return 0
def __init__(
    self,
    channel_label,
    repo_type,
    url=None,
    fail=False,
    quiet=False,
    filters=None,
    no_errata=False,
    sync_kickstart=False,
    latest=False,
):
    """Prepare a repo sync run for the given custom channel: init
    config/DB/logging, resolve the source URLs and load the repo
    plugin.  Exits the process when the channel is missing/not custom
    or has no associated URL."""
    self.regen = False
    self.fail = fail
    self.quiet = quiet
    self.filters = filters or []
    self.no_errata = no_errata
    self.sync_kickstart = sync_kickstart
    self.latest = latest

    initCFG("server")
    rhnSQL.initDB()

    # setup logging
    log_filename = channel_label + ".log"
    rhnLog.initLOG(default_log_location + log_filename)
    # os.fchown isn't in 2.4 :/
    os.system("chgrp apache " + default_log_location + log_filename)
    self.log_msg("\nSync started: %s" % (time.asctime(time.localtime())))
    self.log_msg(str(sys.argv))

    self.channel_label = channel_label
    self.channel = self.load_channel()
    if not self.channel or not rhnChannel.isCustomChannel(self.channel["id"]):
        self.print_msg("Channel does not exist or is not custom.")
        sys.exit(1)

    if url:
        self.urls = [(None, single_url, None) for single_url in url]
    else:
        # TODO:need to look at user security across orgs
        h = rhnSQL.prepare(
            """select s.id, s.source_url, s.label
                      from rhnContentSource s,
                           rhnChannelContentSource cs
                     where s.id = cs.source_id
                       and cs.channel_id = :channel_id"""
        )
        h.execute(channel_id=int(self.channel["id"]))
        source_data = h.fetchall_dict()
        if not source_data:
            self.error_msg("Channel has no URL associated")
            sys.exit(1)
        self.urls = [(row["id"], row["source_url"], row["label"])
                     for row in source_data]

    self.repo_plugin = self.load_plugin(repo_type)
def __init__(self, channel_label, repo_type, url=None, fail=False,
             quiet=False, filters=None, no_errata=False,
             sync_kickstart=False, latest=False, strict=0):
    """Prepare a repo sync run for the given channel: init
    config/DB/logging, resolve the source URLs (with their channel
    family) and load the repo plugin.  Exits the process when the
    channel is missing or has no associated URL."""
    self.regen = False
    self.fail = fail
    self.quiet = quiet
    self.filters = filters or []
    self.no_errata = no_errata
    self.sync_kickstart = sync_kickstart
    self.latest = latest

    initCFG('server.satellite')
    rhnSQL.initDB()

    # setup logging
    log_filename = channel_label + '.log'
    # BUG FIX: log_level was only assigned when CFG.DEBUG was set,
    # causing a NameError on the initLOG call otherwise.  Fall back to
    # initLOG's own default level in that case.
    if CFG.DEBUG is not None:
        rhnLog.initLOG(default_log_location + log_filename, CFG.DEBUG)
    else:
        rhnLog.initLOG(default_log_location + log_filename)
    # os.fchown isn't in 2.4 :/
    if isSUSE():
        os.system("chgrp www " + default_log_location + log_filename)
    else:
        os.system("chgrp apache " + default_log_location + log_filename)
    self.log_msg("\nSync started: %s" % (time.asctime(time.localtime())))
    self.log_msg(str(sys.argv))

    self.channel_label = channel_label
    self.channel = self.load_channel()
    if not self.channel:
        self.print_msg("Channel does not exist.")
        sys.exit(1)

    if not url:
        # TODO:need to look at user security across orgs
        h = rhnSQL.prepare("""select s.id, s.source_url, s.label, fm.channel_family_id
                              from rhnContentSource s,
                                   rhnChannelContentSource cs,
                                   rhnChannelFamilyMembers fm
                             where s.id = cs.source_id
                               and cs.channel_id = fm.channel_id
                               and cs.channel_id = :channel_id""")
        h.execute(channel_id=int(self.channel['id']))
        source_data = h.fetchall_dict()
        if source_data:
            self.urls = [(row['id'], row['source_url'], row['label'],
                          row['channel_family_id']) for row in source_data]
        else:
            self.error_msg("Channel has no URL associated")
            sys.exit(1)
    else:
        self.urls = [(None, u, None, None) for u in url]

    self.repo_plugin = self.load_plugin(repo_type)
    self.strict = strict
def getNotificationsTypeDisabled():
    """Return list of types which are disabled"""
    previous_component = CFG.getComponent()
    initCFG("java")
    raw = CFG.notifications_type_disabled
    disabled_types = raw.split(",") if raw else []
    # Restore whatever component was active before the lookup.
    initCFG(previous_component)
    return disabled_types
def package_details(packageid): """displays the details for that package id""" #db access import sys sys.path.append("/usr/share/rhn") try: import spacewalk.common.rhnConfig as rhnConfig import spacewalk.server.rhnSQL as rhnSQL except ImportError: try: import common.rhnConfig as rhnConfig import server.rhnSQL as rhnSQL except ImportError: print "Couldn't load the modules required to connect to the db" sys.exit(1) rhnConfig.initCFG() rhnSQL.initDB() query=""" select rp.id as "package_id", rpn.name||'-'||rpe.version||'-'||rpe.release||'.'||rpa.label as "package", rc.label as "channel_label", rc.id as "channel_id", coalesce((select name from rhnpackageprovider rpp where rpp.id = rpk.provider_id),'Unknown') as "provider" from rhnpackage rp inner join rhnpackagename rpn on rpn.id = rp.name_id inner join rhnpackageevr rpe on rpe.id = rp.evr_id inner join rhnpackagearch rpa on rpa.id = rp.package_arch_id left outer join rhnchannelpackage rcp on rcp.package_id = rp.id left outer join rhnchannel rc on rc.id = rcp.channel_id left outer join rhnpackagekeyassociation rpka on rpka.package_id = rp.id left outer join rhnpackagekey rpk on rpk.id = rpka.key_id where rp.id = :packageid order by 2, 3 """ cursor = rhnSQL.prepare(query) cursor.execute(packageid=packageid) rows = cursor.fetchall_dict() if not rows is None: c = 0 print "Package %d : %s" % (rows[0]['package_id'], rows[0]['package']) pkg_channels = [] pkg_provider = [] for row in rows: c += 1 if row.channel_id != None: pkg_channels[row['channel_id']] = row['channel_label'] pkg_provider[row['channel_id']] = row['provider'] else: pkg_channels[0] = "Not in a channel" pkg_provider[0] = row['provider'] print "\r%s of %s" % (str(c), str(len(rows))), print "Provided by channels : %s" % (', '.join(pkg_channels)) print "With providers (same order): %s" % (', '.join(pkg_provider)) else: print "no package found for the id %d" % (packageid)
def __init__(self, channel_label, repo_type, url=None, fail=False,
             quiet=False, filters=None, no_errata=False,
             sync_kickstart=False, latest=False):
    """Prepare a repo sync run for the given custom channel: init
    config/DB/logging, resolve the source URLs and load the repo
    plugin.  Exits the process when the channel is missing/not custom
    or has no associated URL."""
    self.regen = False
    self.fail = fail
    self.quiet = quiet
    self.filters = filters or []
    self.no_errata = no_errata
    self.sync_kickstart = sync_kickstart
    self.latest = latest

    initCFG('server')
    rhnSQL.initDB()

    # setup logging
    log_filename = channel_label + '.log'
    rhnLog.initLOG(default_log_location + log_filename)
    # os.fchown isn't in 2.4 :/
    os.system("chgrp apache " + default_log_location + log_filename)
    self.log_msg("\nSync started: %s" % (time.asctime(time.localtime())))
    self.log_msg(str(sys.argv))

    self.channel_label = channel_label
    self.channel = self.load_channel()
    if not self.channel or not rhnChannel.isCustomChannel(self.channel['id']):
        self.print_msg("Channel does not exist or is not custom.")
        sys.exit(1)

    if url:
        self.urls = [(None, single_url, None) for single_url in url]
    else:
        # TODO:need to look at user security across orgs
        h = rhnSQL.prepare("""select s.id, s.source_url, s.label
                              from rhnContentSource s,
                                   rhnChannelContentSource cs
                             where s.id = cs.source_id
                               and cs.channel_id = :channel_id""")
        h.execute(channel_id=int(self.channel['id']))
        source_data = h.fetchall_dict()
        if not source_data:
            self.error_msg("Channel has no URL associated")
            sys.exit(1)
        self.urls = [(row['id'], row['source_url'], row['label'])
                     for row in source_data]

    self.repo_plugin = self.load_plugin(repo_type)
def __init__(self, no_packages=False, no_errata=False, no_rpms=False,
             no_kickstarts=False):
    """Load the CDN mapping files and collect the already-synced
    (null-org) channels from the database.

    :raises CdnMappingsLoadError: when any mapping file cannot be read
    """
    self.no_packages = no_packages
    self.no_errata = no_errata
    self.no_rpms = no_rpms
    self.no_kickstarts = no_kickstarts
    rhnSQL.initDB()
    initCFG('server.satellite')

    # Mapping files, loaded as JSON into the named attributes:
    mapping_files = (
        # Channel families mapping to channels
        ('families', constants.CHANNEL_FAMILY_MAPPING_PATH),
        # Channel metadata
        ('channel_metadata', constants.CHANNEL_DEFINITIONS_PATH),
        # Dist/Release channel mapping
        ('channel_dist_mapping', constants.CHANNEL_DIST_MAPPING_PATH),
        # Channel to repositories mapping
        ('content_source_mapping', constants.CONTENT_SOURCE_MAPPING_PATH),
        # Channel to kickstart repositories mapping
        ('kickstart_source_mapping', constants.KICKSTART_SOURCE_MAPPING_PATH),
    )
    try:
        for attr, path in mapping_files:
            with open(path, 'r') as f:
                setattr(self, attr, json.load(f))
    except IOError:
        e = sys.exc_info()[1]
        # TODO: print only on bigger debug level
        print("ERROR: Problem with loading file: %s" % e)
        raise CdnMappingsLoadError()

    # Map channels to their channel family
    self.channel_to_family = {}
    for family in self.families:
        for channel in self.families[family]['channels']:
            self.channel_to_family[channel] = family

    # Channels with a NULL org are the already-synced vendor channels.
    h = rhnSQL.prepare("""
        select label from rhnChannel
        where org_id is null
    """)
    h.execute()
    channels = h.fetchall_dict() or []
    self.synced_channels = [ch['label'] for ch in channels]

    # Set SSL-keys for channel family
    self.family_keys = {}
def __init__(self, url, name, insecure=False, interactive=True,
             yumsrc_conf=None, org="1", channel_label="", no_mirrors=False,
             ca_cert_file=None, client_cert_file=None, client_key_file=None):
    # pylint: disable=W0613
    """Debian repository content source; verifies the repo GPG data
    unless `insecure` is set.  Restores the previously active config
    component on exit."""
    self.url = url
    self.name = name
    # A falsy org maps to the literal string "NULL" (vendor content).
    self.org = org if org else "NULL"
    saved_component = CFG.getComponent()
    # read the proxy configuration in /etc/rhn/rhn.conf
    initCFG('server.satellite')
    self.proxy_addr, self.proxy_user, self.proxy_pass = get_proxy(self.url)
    self.authtoken = None

    # Replace non-valid characters from reponame (only alphanumeric
    # chars allowed)
    self.reponame = "".join(c if c.isalnum() else "_" for c in self.name)
    self.channel_label = channel_label
    # SUSE vendor repositories belongs to org = NULL
    # The repository cache root will be
    # "/var/cache/rhn/reposync/REPOSITORY_LABEL/"
    root = os.path.join(CACHE_DIR, str(org or "NULL"), self.reponame)
    self.repo = DebRepo(url, root,
                        os.path.join(CFG.MOUNT_POINT, CFG.PREPENDED_DIR,
                                     self.org, 'stage'),
                        self.proxy_addr, self.proxy_user, self.proxy_pass,
                        gpg_verify=not (insecure))
    self.repo.verify()
    self.num_packages = 0
    self.num_excluded = 0
    # keep authtokens for mirroring
    (_scheme, _netloc, _path, query, _fragid) = urlparse.urlsplit(url)
    if query:
        self.authtoken = query
    initCFG(saved_component)
def __init__(self, url, name, yumsrc_conf=YUMSRC_CONF, org="1",
             channel_label="", no_mirrors=False):
    """Yum-backed content source for `url`; honors per-repo settings
    from yum configuration files for custom repos/channels."""
    self.url = url
    self.name = name
    self.yumbase = yum.YumBase()
    # Use /dev/null when the configured yum conf does not exist.
    self.yumbase.preconf.fn = yumsrc_conf if os.path.exists(yumsrc_conf) else '/dev/null'
    self.configparser = ConfigParser()
    # A falsy org maps to the literal string "NULL" (vendor content).
    self.org = org if org else "NULL"

    # read the proxy configuration in /etc/rhn/rhn.conf
    initCFG('server.satellite')
    self.proxy_addr = CFG.http_proxy
    self.proxy_user = CFG.http_proxy_username
    self.proxy_pass = CFG.http_proxy_password
    self._authenticate(url)

    # Check for settings in yum configuration files (for custom
    # repos/channels only)
    repos = self.yumbase.repos.repos if org else None
    if repos and name in repos:
        repo = repos[name]
    elif repos and channel_label in repos:
        repo = repos[channel_label]
        # Using the Repo object from the channel config: override its id
        # with the repo name so no channel directories are created in
        # the cache directory.
        repo.id = name
    else:
        # Not using values from config files
        repo = yum.yumRepo.YumRepository(name)
        repo.populate(self.configparser, name, self.yumbase.conf)
    self.repo = repo
    self.setup_repo(repo, no_mirrors)
    self.num_packages = 0
    self.num_excluded = 0

    # If self.url is a metalink it gets expanded into real urls in
    # self.repo.urls, with the metalink itself prepended ("for
    # repolist -v ... or anything else wants to know the baseurl").
    # Drop it -- it is not needed to download the repo content.
    self.repo.urls = [u for u in self.repo.urls if '?' not in u]
def _read_config():
    """Read the audit component's `enabled` and `server` settings,
    restoring the previously active config component afterwards.

    TODO: Changing the component twice on every request is not nice.
    """
    previous_component = CFG.getComponent()
    initCFG("audit")
    enabled = CFG.get("enabled")
    server_url = CFG.get("server", "")
    # XXX haven't tested what happens if it's not set back to the
    # original value
    initCFG(previous_component)
    return (enabled, server_url)
def __init__(self, url, name, yumsrc_conf=YUMSRC_CONF, org="1",
             channel_label=""):
    """Yum-backed content source for `url`; honors per-repo settings
    from yum configuration files for custom repos/channels."""
    self.url = url
    self.name = name
    self.yumbase = yum.YumBase()
    # Use /dev/null when the configured yum conf does not exist.
    self.yumbase.preconf.fn = yumsrc_conf if os.path.exists(yumsrc_conf) else '/dev/null'
    self.configparser = ConfigParser()
    # A falsy org maps to the literal string "NULL" (vendor content).
    self.org = org if org else "NULL"

    # read the proxy configuration in /etc/rhn/rhn.conf
    initCFG('server.satellite')
    self.proxy_addr = CFG.http_proxy
    self.proxy_user = CFG.http_proxy_username
    self.proxy_pass = CFG.http_proxy_password
    self._authenticate(url)

    # Check for settings in yum configuration files (for custom
    # repos/channels only)
    repos = self.yumbase.repos.repos if org else None
    if repos and name in repos:
        repo = repos[name]
    elif repos and channel_label in repos:
        repo = repos[channel_label]
        # Using the Repo object from the channel config: override its id
        # with the repo name so no channel directories are created in
        # the cache directory.
        repo.id = name
    else:
        # Not using values from config files
        repo = yum.yumRepo.YumRepository(name)
        repo.populate(self.configparser, name, self.yumbase.conf)
    self.repo = repo
    self.setup_repo(repo)
    self.num_packages = 0
    self.num_excluded = 0

    # If self.url is a metalink it gets expanded into real urls in
    # self.repo.urls, with the metalink itself prepended ("for
    # repolist -v ... or anything else wants to know the baseurl").
    # Drop it -- it is not needed to download the repo content.
    self.repo.urls = [u for u in self.repo.urls if '?' not in u]
def __init__(self, retries=3, log_obj=None, force=False):
    """Download queue setup; validates the configured thread count.

    :raises ValueError: when REPOSYNC_DOWNLOAD_THREADS is not a
        positive integer
    """
    self.queue = Queue()
    initCFG('server.satellite')
    try:
        thread_count = int(CFG.REPOSYNC_DOWNLOAD_THREADS)
    except ValueError:
        raise ValueError("Number of threads expected, found: '%s'"
                         % CFG.REPOSYNC_DOWNLOAD_THREADS)
    if thread_count < 1:
        raise ValueError("Invalid number of threads: %d" % thread_count)
    self.threads = thread_count
    self.retries = retries
    self.log_obj = log_obj
    self.force = force
    self.lock = Lock()
    self.exception = None
def get_rhn_credentials(self):
    """ If we're using the rhn authorization module, we get the
        taskomatic user's password by importing the spacewalk modules

    :raises ConfigError: when the spacewalk modules are not importable
    """
    sys.path.append('/usr/share/rhn')
    try:
        from spacewalk.common.rhnConfig import initCFG, CFG
    except ImportError:
        # BUG FIX: the two concatenated literals previously produced
        # "...but theconfiguration says I am." -- missing space restored.
        raise ConfigError("This is not a Spacewalk server, but the "
                          "configuration says I am.")
    initCFG()
    self._config['username'] = '******'
    self._config['password'] = CFG.SESSION_SECRET_1
def get_system_ID_by_name(systemname):
    """Look up the rhnServer id for the given system name.

    Calls resultExit(3, ...) when the name is unknown or matches more
    than one system.
    """
    initCFG('server.susemanager')
    rhnSQL.initDB()
    cursor = rhnSQL.prepare("""select id from rhnServer where name = :sname""")
    cursor.execute(sname=systemname)
    matches = cursor.fetchall_dict()
    if not matches:
        resultExit(3, "Unknown System: \"%s\"" % systemname)
    elif len(matches) > 1:
        resultExit(3, "System name \"%s\" not unique" % systemname)
    else:
        return matches[0]['id']
def __init__(self, url, name):
    """Debian repo content source: clears the per-repo cache and reads
    proxy settings from /etc/rhn/rhn.conf."""
    self.url = url
    self.name = name
    self._clean_cache(CACHE_DIR + name)
    # read the proxy configuration in /etc/rhn/rhn.conf
    initCFG('server.satellite')
    self.proxy_addr = CFG.http_proxy
    self.proxy_user = CFG.http_proxy_username
    self.proxy_pass = CFG.http_proxy_password
    self.repo = DebRepo(url, CACHE_DIR + name)
    self.num_packages = 0
    self.num_excluded = 0
def headerParserHandler(self, req):
    """Apache header-parser hook: init config/logging for the request's
    component, set up the DB connection (unless in outage mode) and
    record client capability / redirect headers."""
    log_setreq(req)
    # We need to init CFG and Logging
    options = req.get_options()
    # If we are initializing out of a <Location> handler there is
    # clearly nothing to do.
    if "RHNComponentType" not in options:
        return apache.OK
    initCFG(options["RHNComponentType"])
    initLOG(CFG.LOG_FILE, CFG.DEBUG)
    # parse the request, init database and figure out what can we call
    log_debug(2, req.the_request)
    # call method from inherited class
    ret = apacheSession.headerParserHandler(self, req)
    if ret != apache.OK:
        return ret
    if CFG.SEND_MESSAGE_TO_ALL:
        # If in outage mode, close the DB connections
        rhnSQL.closeDB()
    else:
        # make sure we have DB connection
        try:
            rhnSQL.initDB()
        except rhnSQL.SQLConnectError:
            rhnTB.Traceback(mail=1, req=req, severity="schema")
            return apache.HTTP_INTERNAL_SERVER_ERROR
    # Store client capabilities
    client_cap_header = 'X-RHN-Client-Capability'
    if client_cap_header in req.headers_in:
        raw_caps = req.headers_in[client_cap_header]
        stripped = (string.strip(p) for p in string.split(raw_caps, ","))
        rhnCapability.set_client_capabilities([p for p in stripped if p])
    # Enabling the input header flags associated with the
    # redirects/newer clients
    for flag in ('X-RHN-Redirect', 'X-RHN-Transport-Capability'):
        if flag in req.headers_in:
            rhnFlags.set(flag, str(req.headers_in[flag]))
    return apache.OK
def __init__(self, username=None, password=None, email=None, dbusername=None, dbpassword=None, dbhostname=None): #start_init = time.time() self.filesuploaded = False self.options = rhnConfig.initCFG( 'server' ) print self.options mytime = time.time() self.test_username = username or ("test_username_%.3f" % mytime) self.test_password = password or ("test_password_%.3f" % mytime) self.test_email = email or ("%s@test_domain.com" % self.test_username) self.dbusername = dbusername or 'rhnuser' self.dbpassword = dbpassword or 'rhnuser' self.dbhostname = dbhostname or 'webdev' self.channel_arch = 'unittestarch' self.roles = ['org_admin'] rhnFlags.set( 'outputTransportOptions', UserDictCase() ) self._init_db( self.dbusername, self.dbpassword, self.dbhostname ) self._init_org() self._init_user(self.roles) self._init_server() self._init_channels() self._init_up2date()
def package_details(packageid): """displays the details for that package id""" #db access import sys sys.path.append("/usr/share/rhn") try: import spacewalk.common.rhnConfig as rhnConfig import spacewalk.server.rhnSQL as rhnSQL except ImportError: try: import common.rhnConfig as rhnConfig import server.rhnSQL as rhnSQL except ImportError: print "Couldn't load the modules required to connect to the db" sys.exit(1) rhnConfig.initCFG() rhnSQL.initDB()
def __init__(self, retries=3, log_obj=None, force=False):
    """Threaded downloader setup (multiple-queues variant); validates
    the configured thread count.

    :raises ValueError: when REPOSYNC_DOWNLOAD_THREADS is not a
        positive integer
    """
    self.queues = {}
    initCFG('server.satellite')
    try:
        thread_count = int(CFG.REPOSYNC_DOWNLOAD_THREADS)
    except ValueError:
        raise ValueError("Number of threads expected, found: '%s'"
                         % CFG.REPOSYNC_DOWNLOAD_THREADS)
    if thread_count < 1:
        raise ValueError("Invalid number of threads: %d" % thread_count)
    self.threads = thread_count
    self.retries = retries
    self.log_obj = log_obj
    self.force = force
    self.lock = Lock()
    self.exception = None
    # WORKAROUND - BZ #1439758 - ensure first item in queue is performed
    # alone to properly setup NSS
    self.first_in_queue_done = False
    self.first_in_queue_lock = Lock()
def gen_idlist_from_keyid_by_packageid(packageid): """docstring for gen_idlist_from_keyid_by_packageid""" import sys sys.path.append("/usr/share/rhn") try: import spacewalk.common.rhnConfig as rhnConfig import spacewalk.server.rhnSQL as rhnSQL except ImportError: try: import common.rhnConfig as rhnConfig import server.rhnSQL as rhnSQL except ImportError: print "Couldn't load the libraries required to connect to the db" sys.exit(1) rhnConfig.initCFG() rhnSQL.initDB() query = """ select rpka.package_id as "id", rpn.name||'-'||rpe.version||'-'||rpe.release||'.'||rpa.label as package, rpka.key_id, rpk.key_id as signing_key, coalesce((select name from rhnpackageprovider rpp where rpp.id = rpk.provider_id),'Unknown') as provider, rpka.created, rpka.modified from rhnpackagekeyassociation rpka, rhnpackage rp, rhnpackagename rpn, rhnpackagekey rpk, rhnpackageevr rpe, rhnpackagearch rpa, (select key_id from rhnpackagekeyassociation where package_id = """ + str( packageid) + """ ) pkginfo where rpka.package_id = rp.id and rpka.key_id = rpk.id and rp.name_id = rpn.id and rp.evr_id = rpe.id and rp.package_arch_id = rpa.id and rpk.id = pkginfo.key_id """ cursor = rhnSQL.prepare(query) cursor.execute() rows = cursor.fetchall_dict() list_ids = [] if not rows is None: c = 0 for row in rows: c += 1 list_ids.append(row['id']) print "\r%s of %s" % (str(c), str(len(rows))), print "" else: print "no packages found" return list_ids
def headerParserHandler(self, req): log_setreq(req) # We need to init CFG and Logging options = req.get_options() # if we are initializing out of a <Location> handler don't # freak out if not options.has_key("RHNComponentType"): # clearly nothing to do return apache.OK initCFG(options["RHNComponentType"]) initLOG(CFG.LOG_FILE, CFG.DEBUG) """ parse the request, init database and figure out what can we call """ log_debug(2, req.the_request) # call method from inherited class ret = apacheSession.headerParserHandler(self, req) if ret != apache.OK: return ret # make sure we have DB connection if not CFG.SEND_MESSAGE_TO_ALL: try: rhnSQL.initDB() except rhnSQL.SQLConnectError: rhnTB.Traceback(mail=1, req=req, severity="schema") return apache.HTTP_INTERNAL_SERVER_ERROR else: # If in outage mode, close the DB connections rhnSQL.closeDB() # Store client capabilities client_cap_header = 'X-RHN-Client-Capability' if req.headers_in.has_key(client_cap_header): client_caps = req.headers_in[client_cap_header] client_caps = filter(None, map(string.strip, string.split(client_caps, ",")) ) rhnCapability.set_client_capabilities(client_caps) # Enabling the input header flags associated with the redirects/newer clients redirect_support_flags = ['X-RHN-Redirect', 'X-RHN-Transport-Capability'] for flag in redirect_support_flags: if req.headers_in.has_key(flag): rhnFlags.set(flag, str(req.headers_in[flag])) return apache.OK
def gen_idlist_from_keyid_by_packageid(packageid): """docstring for gen_idlist_from_keyid_by_packageid""" import sys sys.path.append("/usr/share/rhn") try: import spacewalk.common.rhnConfig as rhnConfig import spacewalk.server.rhnSQL as rhnSQL except ImportError: try: import common.rhnConfig as rhnConfig import server.rhnSQL as rhnSQL except ImportError: print "Couldn't load the libraries required to connect to the db" sys.exit(1) rhnConfig.initCFG() rhnSQL.initDB() query = """ select rpka.package_id as "id", rpn.name||'-'||rpe.version||'-'||rpe.release||'.'||rpa.label as package, rpka.key_id, rpk.key_id as signing_key, coalesce((select name from rhnpackageprovider rpp where rpp.id = rpk.provider_id),'Unknown') as provider, rpka.created, rpka.modified from rhnpackagekeyassociation rpka, rhnpackage rp, rhnpackagename rpn, rhnpackagekey rpk, rhnpackageevr rpe, rhnpackagearch rpa, (select key_id from rhnpackagekeyassociation where package_id = """+str(packageid)+""" ) pkginfo where rpka.package_id = rp.id and rpka.key_id = rpk.id and rp.name_id = rpn.id and rp.evr_id = rpe.id and rp.package_arch_id = rpa.id and rpk.id = pkginfo.key_id """ cursor = rhnSQL.prepare(query) cursor.execute() rows = cursor.fetchall_dict() list_ids = [] if not rows is None: c = 0 for row in rows: c += 1 list_ids.append(row['id']) print "\r%s of %s" % (str(c), str(len(rows))), print "" else: print "no packages found" return list_ids
def _get_proxy_from_rhn_conf():
    """Return a tuple of (url, user, pass) proxy information from rhn config

    Returns None instead of a tuple if there was no proxy url. user,
    pass can be None.
    """
    comp = CFG.getComponent()
    # Switch to the satellite component if the current one has no
    # http_proxy setting at all
    if not CFG.has_key("http_proxy"):
        initCFG("server.satellite")
    result = None
    if CFG.http_proxy:
        # CFG.http_proxy format is <hostname>[:<port>] in 1.7
        url = 'http://%s' % CFG.http_proxy
        result = (url, CFG.http_proxy_username, CFG.http_proxy_password)
    # restore the original config component
    initCFG(comp)
    # BUGFIX: this message was previously logged unconditionally, even
    # when a proxy URL *was* found; only log it on the failure path.
    if result is None:
        log_debug(2, "Could not read proxy URL from rhn config.")
    return result
def gen_idlist_from_paths(pathfile): """generates the list of package IDs from a file with all paths inside it.""" import sys sys.path.append("/usr/share/rhn") try: import spacewalk.common.rhnConfig as rhnConfig import spacewalk.server.rhnSQL as rhnSQL except ImportError: try: import common.rhnConfig as rhnConfig import server.rhnSQL as rhnSQL except ImportError: print "Couldn't load the libraries required to connect to the db" sys.exit(1) rhnConfig.initCFG() rhnSQL.initDB() query = """ select id from rhnpackage where path like :apath """ #read the file pkglistfile = open(options.file, "rb") pkgline = pkglistfile.readline() pkgpaths = [] while pkgline: pkgpaths.append(pkgline.rstrip("\n")) pkgline = pkglistfile.readline() pkglistfile.close() #init the db, init the list list_ids = [] cursor = rhnSQL.prepare(query) for apath in pkgpaths: cursor.execute(apath=apath) rows = cursor.fetchall_dict() if not rows is None: c = 0 for row in rows: c += 1 list_ids.append(row['id']) print "\r%s of %s" % (str(c), str(len(rows))), print "" else: print "no entry found for " return list_ids
def gen_idlist_from_paths(pathfile): """generates the list of package IDs from a file with all paths inside it.""" import sys sys.path.append("/usr/share/rhn") try: import spacewalk.common.rhnConfig as rhnConfig import spacewalk.server.rhnSQL as rhnSQL except ImportError: try: import common.rhnConfig as rhnConfig import server.rhnSQL as rhnSQL except ImportError: print "Couldn't load the libraries required to connect to the db" sys.exit(1) rhnConfig.initCFG() rhnSQL.initDB() query = """ select id from rhnpackage where path like :apath """ #read the file pkglistfile=open(options.file,"rb") pkgline=pkglistfile.readline() pkgpaths=[] while pkgline: pkgpaths.append(pkgline.rstrip("\n")) pkgline=pkglistfile.readline() pkglistfile.close() #init the db, init the list list_ids = [] cursor = rhnSQL.prepare(query) for apath in pkgpaths: cursor.execute(apath=apath) rows = cursor.fetchall_dict() if not rows is None: c = 0 for row in rows: c += 1 list_ids.append(row['id']) print "\r%s of %s" % (str(c), str(len(rows))), print "" else: print "no entry found for " return list_ids
def processCommandline(): initCFG('server.satellite') options = [ Option('--ca-cert', action='store', default=DEFAULT_TRUSTED_CERT, type="string", help='public CA certificate, default is %s' % DEFAULT_TRUSTED_CERT), Option('--label', action='store', default='RHN-ORG-TRUSTED-SSL-CERT', type="string", help='FOR TESTING ONLY - alternative database label for this CA certificate, default is "RHN-ORG-TRUSTED-SSL-CERT"'), Option('-v','--verbose', action='count', help='be verbose (accumulable: -vvv means "be *really* verbose").'), ] values, args = OptionParser(option_list=options).parse_args() # we take no extra commandline arguments that are not linked to an option if args: msg = ("ERROR: these arguments make no sense in this context (try " "--help): %s\n" % repr(args)) raise ValueError(msg) if not os.path.exists(values.ca_cert): sys.stderr.write("ERROR: can't find CA certificate at this location: " "%s\n" % values.ca_cert) sys.exit(10) try: db_backend = CFG.DB_BACKEND db_host = CFG.DB_HOST db_port = CFG.DB_PORT db_user = CFG.DB_user db_password = CFG.DB_PASSWORD db_name = CFG.DB_NAME rhnSQL.initDB(backend=db_backend, host=db_host, port=db_port, username=db_user, password=db_password, database=db_name) except: sys.stderr.write("""\ ERROR: there was a problem trying to initialize the database: %s\n""" % fetchTraceback()) sys.exit(11) if values.verbose: print 'Public CA SSL certificate: %s' % values.ca_cert return values
def headerParserHandler(self, req):
    # pylint: disable=W0201
    """Apache header-parser hook for the satellite exporter: initialize
    config/logging, short-circuit with a global outage message when
    SEND_MESSAGE_TO_ALL is set, otherwise init the DB, resolve the
    per-request server handler and delegate to it.

    Returns an apache status code (OK / HTTP_NOT_FOUND / XML-RPC reply).
    """
    log_setreq(req)
    self.start_time = time.time()
    # init configuration options with proper component
    options = req.get_options()
    # if we are initializing out of a <Location> handler don't
    # freak out
    if "RHNComponentType" not in options:
        # clearly nothing to do
        return apache.OK
    initCFG(options["RHNComponentType"])
    initLOG(CFG.LOG_FILE, CFG.DEBUG)
    # short-circuit everything if sending a system-wide message.
    if CFG.SEND_MESSAGE_TO_ALL:
        # Drop the database connection; best-effort, ignore failures
        # pylint: disable=W0702
        try:
            rhnSQL.closeDB()
        except:
            pass
        # Fetch global message being sent to clients if applicable.
        # BUGFIX: use a context manager so the file handle is closed
        # instead of leaked.
        with open(CFG.MESSAGE_TO_ALL) as msg_file:
            msg = msg_file.read()
        log_debug(3, "Sending message to all clients: %s" % msg)
        return self._send_xmlrpc(
            req,
            rhnFault(-1, _("IMPORTANT MESSAGE FOLLOWS:\n%s") % msg,
                     explain=0))
    rhnSQL.initDB()
    self.server = options['SERVER']
    self.server_classes = rhnImport.load("satellite_exporter/handlers")
    if self.server not in self.server_classes:
        # XXX do something interesting here
        log_error("Missing server", self.server)
        return apache.HTTP_NOT_FOUND
    return self._wrapper(req, self._headerParserHandler)
def __init__(self, url, name, org=1, channel_label="",
             ca_cert_file=None, client_cert_file=None, client_key_file=None):
    # pylint: disable=W0613
    """Prepare a Deb repository sync: remember the repo location and
    org, pick up the global HTTP proxy settings from /etc/rhn/rhn.conf,
    and build the DebRepo backing object.

    The cert/key parameters are accepted for interface compatibility
    but unused here.
    """
    self.url = url
    self.name = name
    # "NULL" is the literal used in content paths when no org is given
    self.org = org if org else "NULL"
    # read the proxy configuration in /etc/rhn/rhn.conf
    initCFG('server.satellite')
    self.proxy_addr = CFG.http_proxy
    self.proxy_user = CFG.http_proxy_username
    self.proxy_pass = CFG.http_proxy_password
    cache_path = os.path.join(CACHE_DIR, self.org, name)
    stage_path = os.path.join(CFG.MOUNT_POINT, CFG.PREPENDED_DIR,
                              self.org, 'stage')
    self.repo = DebRepo(url, cache_path, stage_path)
    self.num_packages = 0
    self.num_excluded = 0
def processCommandline():
    """Parse and validate the activation command line.

    Returns the parsed options.  Raises ValueError on stray positional
    arguments and exits with status 1 when the server is configured as
    disconnected but --disconnected was not passed.
    """
    option_list = [
        Option('--sanity-only', action='store_true',
               help="confirm certificate sanity. Does not activate"
                    + "the Red Hat Satellite locally or remotely."),
        Option('--ignore-expiration', action='store_true',
               help='execute regardless of the expiration'
                    + 'of the RHN Certificate (not recommended).'),
        Option('--ignore-version-mismatch', action='store_true',
               help='execute regardless of version '
                    + 'mismatch of existing and new certificate.'),
        Option('-v', '--verbose', action='count',
               help='be verbose '
                    + '(accumulable: -vvv means "be *really* verbose").'),
        Option('--dump-version', action='store',
               help="requested version of XML dump"),
        Option('--manifest', action='store',
               help='the RHSM manifest path/filename to activate for CDN'),
        Option('--cdn-deactivate', action='store_true',
               help='deactivate CDN-activated Satellite'),
        Option('--disconnected', action='store_true',
               help="activate locally, not subscribe to remote repository"),
    ]
    options, args = OptionParser(option_list=option_list).parse_args()
    # we take no extra commandline arguments that are not linked to an option
    if args:
        raise ValueError("ERROR: these arguments make no sense in this context (try --help): %s\n" % repr(args))
    initCFG('server.satellite')
    # No need to check further if deactivating
    if options.cdn_deactivate:
        return options
    # sanity checking never talks to the outside world
    if options.sanity_only:
        options.disconnected = 1
    # a disconnected server must be activated with --disconnected
    if CFG.DISCONNECTED and not options.disconnected:
        sys.stderr.write("""ERROR: Satellite server has been setup to run in disconnected mode.
Either correct server configuration in /etc/rhn/rhn.conf
or use --disconnected to activate it locally.
""")
        sys.exit(1)
    return options
def headerParserHandler(self, req):
    # pylint: disable=W0201
    """Apache header-parser hook for the satellite exporter: initialize
    config/logging, short-circuit with a global outage message when
    SEND_MESSAGE_TO_ALL is set, otherwise init the DB, resolve the
    per-request server handler and delegate to it.

    Returns an apache status code (OK / HTTP_NOT_FOUND / XML-RPC reply).
    """
    log_setreq(req)
    self.start_time = time.time()
    # init configuration options with proper component
    options = req.get_options()
    # if we are initializing out of a <Location> handler don't
    # freak out
    if "RHNComponentType" not in options:
        # clearly nothing to do
        return apache.OK
    initCFG(options["RHNComponentType"])
    initLOG(CFG.LOG_FILE, CFG.DEBUG)
    # short-circuit everything if sending a system-wide message.
    if CFG.SEND_MESSAGE_TO_ALL:
        # Drop the database connection; best-effort, ignore failures
        # pylint: disable=W0702
        try:
            rhnSQL.closeDB()
        except:
            pass
        # Fetch global message being sent to clients if applicable.
        # BUGFIX: use a context manager so the file handle is closed
        # instead of leaked.
        with open(CFG.MESSAGE_TO_ALL) as msg_file:
            msg = msg_file.read()
        log_debug(3, "Sending message to all clients: %s" % msg)
        return self._send_xmlrpc(
            req,
            rhnFault(-1, _("IMPORTANT MESSAGE FOLLOWS:\n%s") % msg,
                     explain=0))
    rhnSQL.initDB()
    self.server = options['SERVER']
    self.server_classes = rhnImport.load("satellite_exporter/handlers")
    if self.server not in self.server_classes:
        # XXX do something interesting here
        log_error("Missing server", self.server)
        return apache.HTTP_NOT_FOUND
    return self._wrapper(req, self._headerParserHandler)
def __call__(self, req):
    """Dispatch an apache request to the wrapped handler method.

    On the first call (self.__init set) the configuration and logging
    are initialized from the request's component type, and the shared
    handler factory is instantiated lazily.  Any exception from the
    handler is converted into HTTP 500 with a traceback report.
    """
    # NOTE: all imports done here due to required initialization of
    # of the configuration module before all others.
    # Initialization is dependent on RHNComponentType in the
    # req object.
    if self.__init:
        from apacheHandler import getComponentType
        # We cannot trust the config files to tell us if we are in the
        # broker or in the redirect because we try to always pass
        # upstream all requests
        componentType = getComponentType(req)
        initCFG(componentType)
        initLOG(CFG.LOG_FILE, CFG.DEBUG)
        log_debug(1, 'New request, component %s' % (componentType, ))
    # Instantiate the handlers (shared class-level singleton, built once)
    if HandlerWrap.svrHandlers is None:
        HandlerWrap.svrHandlers = self.get_handler_factory(req)()
    if self.__init:
        # Set the component type
        HandlerWrap.svrHandlers.set_component(componentType)
    try:
        log_setreq(req)
        # Look up the wrapped method by name on the handler singleton;
        # a missing attribute is routed through the except below
        if hasattr(HandlerWrap.svrHandlers, self.__name):
            f = getattr(HandlerWrap.svrHandlers, self.__name)
            ret = f(req)
        else:
            raise Exception("Class has no attribute %s" % self.__name)
    # pylint: disable=W0702
    except:
        Traceback(self.__name, req, extra="Unhandled exception type",
                  severity="unhandled")
        return apache.HTTP_INTERNAL_SERVER_ERROR
    else:
        return ret
def getChannelRepo():
    """Return a dict mapping each channel label to the list of its
    content-source (repo) URLs."""
    initCFG('server.satellite')
    rhnSQL.initDB()
    sql = """
        select s.source_url, c.label
          from rhnContentSource s,
               rhnChannelContentSource cs,
               rhnChannel c
         where s.id = cs.source_id and cs.channel_id=c.id
    """
    cursor = rhnSQL.prepare(sql)
    cursor.execute()
    items = {}
    while True:
        row = cursor.fetchone_dict()
        if not row:
            break
        # group source URLs under their channel label
        items.setdefault(row['label'], []).append(row['source_url'])
    return items