def Install(self, packages, states):
    """Try and fix everything that YUMng.VerifyPackages() found wrong for
    each Package Entry.  This can result in individual RPMs being
    installed (for the first time), deleted, downgraded or upgraded.

    packages is a list of Package Elements that has
        states[<Package Element>] == False

    The following effects occur:
    - states{} is conditionally updated for each package.
    - self.installed{} is rebuilt, possibly multiple times.
    - self.instance_status{} is conditionally updated for each instance
      of a package.
    - Each package will be added to self.modified[] if its states{}
      entry is set to True.
    """
    self.logger.debug('Running YUMng.Install()')

    # Work queues: instances that need a fresh install, an upgrade, or a
    # reinstall.  GPG keys get their own queue because they are imported
    # through rpm --import rather than a normal package transaction.
    install_pkgs = []
    gpg_keys = []
    upgrade_pkgs = []
    reinstall_pkgs = []

    def queuePkg(pkg, inst, queue):
        # gpg-pubkey pseudo-packages are diverted to the key queue no
        # matter which action was requested for them.
        if pkg.get('name') == 'gpg-pubkey':
            gpg_keys.append(inst)
        else:
            queue.append(inst)

    # Remove extra instances.
    # Can not reverify because we don't have a package entry.
    if self.extra_instances is not None and len(self.extra_instances) > 0:
        if (self.setup.get('remove') == 'all' or
                self.setup.get('remove') == 'packages'):
            self.RemovePackages(self.extra_instances)
        else:
            self.logger.info("The following extra package instances will be removed by the '-r' option:")
            for pkg in self.extra_instances:
                for inst in pkg:
                    self.logger.info(" %s %s" %
                                     ((pkg.get('name'), self.str_evra(inst))))

    # Figure out which instances of the packages actually need something
    # doing to them and place in the appropriate work 'queue'.
    for pkg in packages:
        insts = [pinst for pinst in pkg
                 if pinst.tag in ['Instance', 'Package']]
        if insts:
            for inst in insts:
                # Only instances that went through VerifyPackages() have
                # a status record; anything else cannot be acted on.
                if inst not in self.instance_status:
                    m = " Asked to install/update package never verified"
                    p = nevraString(build_yname(pkg.get('name'), inst))
                    self.logger.warning("%s: %s" % (m, p))
                    continue
                status = self.instance_status[inst]
                # The do* flags come from configuration and let the admin
                # disable each class of action independently.
                if not status.get('installed', False) and self.doInstall:
                    queuePkg(pkg, inst, install_pkgs)
                elif status.get('version_fail', False) and self.doUpgrade:
                    queuePkg(pkg, inst, upgrade_pkgs)
                elif status.get('verify_fail', False) and self.doReinst:
                    queuePkg(pkg, inst, reinstall_pkgs)
                else:
                    # Either there was no Install/Version/Verify
                    # task to be done or the user disabled the actions
                    # in the configuration.  XXX Logging for the latter?
                    pass
        else:
            # Entry carries no <Instance> children; treat the Package
            # element itself as the instance to install.
            msg = "YUMng: Package tag found where Instance expected: %s"
            self.logger.warning(msg % pkg.get('name'))
            queuePkg(pkg, pkg, install_pkgs)

    # Install GPG keys.
    # Alternatively specify the required keys using 'gpgkey' in the
    # repository definition in yum.conf.  YUM will install the keys
    # automatically.
    if len(gpg_keys) > 0:
        self.logger.info("Installing GPG keys.")
        for inst in gpg_keys:
            if inst.get('simplefile') is None:
                self.logger.error("GPG key has no simplefile attribute")
                continue
            key_file = os.path.join(self.instance_status[inst].get('pkg').get('uri'),
                                    inst.get('simplefile'))
            self._installGPGKey(inst, key_file)

        # Keys change what rpm considers valid, so refresh and re-verify
        # the gpg-pubkey package before continuing.
        self.RefreshPackages()
        pkg = self.instance_status[gpg_keys[0]].get('pkg')
        states[pkg] = self.VerifyPackage(pkg, [])

    # Install packages.
    try:
        # We want to reload all Yum configuration in case we've
        # deployed new .repo files we should consider
        self.yb = yum.YumBase()
        self.yb.doTsSetup()
        self.yb.doRpmDBSetup()
        self.yb.doConfigSetup()
    except Exception, e:
        self.logger.warning("YUMng: Error Refreshing Yum Repos: %s" % e)
use_yum = True try: import yum except ModuleNotFoundError: import dnf import libdnf use_yum = False try: import json except: import simplejson as json repositories = [] if use_yum: yb = yum.YumBase() for repo in yb.repos.sort(): repo_dict = dict() repo_dict["alias"] = repo.id repo_dict["name"] = repo.name repo_dict["type"] = "rpm-md" repo_dict["url"] = repo.baseurl or [] repo_dict["mirrorlist"] = repo.mirrorlist or "" repo_dict["enabled"] = repo.enabled repo_dict["gpgcheck"] = repo.gpgcheck repo_dict["gpgkey"] = repo.gpgkey repositories.append(repo_dict) else: db = dnf.Base() db.read_all_repos()
def main():
    """Interactive post-configuration for a WAPT server.

    Walks the administrator through: config backup, secret-key and admin
    password setup, PostgreSQL provisioning, kerberos/registration policy,
    Nginx reverse-proxy configuration (with optional SELinux tweaks on
    RedHat), service startup and an optional mongodb->postgres migration.
    Heavy side effects throughout: runs shell commands as root, rewrites
    /etc/nginx/nginx.conf and the waptserver config file.
    """
    global wapt_folder,NGINX_GID

    parser = OptionParser(usage=usage, version='waptserver.py ' + __version__)
    parser.add_option(
        '-c',
        '--config',
        dest='configfile',
        default=waptserver_config.DEFAULT_CONFIG_FILE,
        help='Config file full path (default: %default)')
    parser.add_option(
        "-k",
        "--use-kerberos",
        dest="use_kerberos",
        default=False,
        action='store_true',
        help="Use kerberos for host registration (default: False)")
    parser.add_option(
        "-s",
        "--force-https",
        dest="force_https",
        default=False,
        action='store_true',
        help="Use https only, http is 301 redirected to https (default: False). Requires a proper DNS name")

    (options, args) = parser.parse_args()

    if postconf.yesno("Do you want to launch post configuration tool ?") != postconf.DIALOG_OK:
        print "canceling wapt postconfiguration"
        sys.exit(1)

    # TODO : check if it a new install or an upgrade (upgrade from mongodb to postgresql)
    if type_redhat():
        # SELinux would otherwise block the reverse proxy from reaching
        # the backend; relabel the wapt content directories.
        if re.match('^SELinux status:.*enabled', run('sestatus')):
            postconf.msgbox('SELinux detected, tweaking httpd permissions.')
            run('setsebool -P httpd_can_network_connect 1')
            run('setsebool -P httpd_setrlimit on')
            for sepath in ('wapt','wapt-host','wapt-hostref'):
                run('semanage fcontext -a -t httpd_sys_content_t "/var/www/html/%s(/.*)?"' %sepath)
                run('restorecon -R -v /var/www/html/%s' %sepath)
            postconf.msgbox('SELinux correctly configured for Nginx reverse proxy')

    server_config = waptserver_config.load_config(options.configfile)

    # Keep a timestamped backup of the existing config before mutating it.
    if os.path.isfile(options.configfile):
        print('making a backup copy of the configuration file')
        datetime_now = datetime.datetime.now()
        shutil.copyfile(options.configfile,'%s.bck_%s'% (options.configfile,datetime_now.isoformat()) )

    wapt_folder = server_config['wapt_folder']

    # add secret key initialisation string (for session token)
    if not server_config['secret_key']:
        server_config['secret_key'] = ''.join(random.SystemRandom().choice(string.letters + string.digits) for _ in range(64))

    # add user db and password in ini file
    # Only provision PostgreSQL when the database is local.
    if server_config['db_host'] in (None,'','localhost','127.0.0.1','::1'):
        ensure_postgresql_db(db_name=server_config['db_name'],db_owner=server_config['db_name'],db_password=server_config['db_password'])
    #run('sudo -u wapt PYTHONHOME=/opt/wapt PYTHONPATH=/opt/wapt /opt/wapt/bin/python /opt/wapt/waptserver/waptserver_model.py init_db -c "%s"' % options.configfile)

    # Password setup/reset screen.  Loops until two matching entries of
    # at least 10 characters are provided; stored as a pbkdf2 hash.
    if not server_config['wapt_password'] or \
            postconf.yesno("Do you want to reset admin password ?",yes_label='skip',no_label='reset') != postconf.DIALOG_OK:
        wapt_password_ok = False
        while not wapt_password_ok:
            wapt_password = ''
            wapt_password_check = ''

            while wapt_password == '':
                (code,wapt_password) = postconf.passwordbox("Please enter the wapt server password (min. 10 characters): ", insecure=True,width=100)
                if code != postconf.DIALOG_OK:
                    exit(0)

            while wapt_password_check == '':
                (code,wapt_password_check) = postconf.passwordbox("Please enter the wapt server password again: ", insecure=True,width=100)
                if code != postconf.DIALOG_OK:
                    exit(0)

            if wapt_password != wapt_password_check:
                postconf.msgbox('Password mismatch !')
            elif len(wapt_password) < 10:
                postconf.msgbox('Password must be at least 10 characters long !')
            else:
                wapt_password_ok = True

        password = pbkdf2_sha256.hash(wapt_password.encode('utf8'))
        server_config['wapt_password'] = password

    if not server_config['server_uuid']:
        server_config['server_uuid'] = str(uuid.uuid1())

    if options.use_kerberos:
        server_config['use_kerberos'] = True
    else:
        server_config['use_kerberos'] = False

    # waptagent authentication method
    choices = [
        ("1","Allow unauthenticated registration, same behavior as wapt 1.3", True),
        ("2","Enable kerberos authentication required for machines registration. Registration will ask for password if kerberos not working", False),
        ("3","Disable Kerberos but registration require strong authentication", False),
    ]

    code, t = postconf.radiolist("WaptAgent Authentication type?", choices=choices,width=120)
    if code=='cancel':
        print("\n\npostconfiguration canceled\n\n")
        sys.exit(1)
    if t=="1":
        server_config['allow_unauthenticated_registration'] = True
    if t=="3":
        server_config['allow_unauthenticated_registration'] = False
        server_config['use_kerberos'] = False
    if t=="2":
        server_config['allow_unauthenticated_registration'] = False
        server_config['use_kerberos'] = True

    waptserver_config.write_config_file(cfgfile=options.configfile,server_config=server_config,non_default_values_only=True)

    # Config file holds secrets; restrict it to the wapt user.
    run("/bin/chmod 640 %s" % options.configfile)
    run("/bin/chown wapt %s" % options.configfile)

    repo = WaptLocalRepo(wapt_folder)
    repo.update_packages_index(force_all=True)

    final_msg = ['Postconfiguration completed.',]
    postconf.msgbox("Press ok to start waptserver")
    enable_waptserver()
    start_waptserver()

    # In this new version Apache is replaced with Nginx? Proceed to disable
    # Apache. After migration one can remove Apache install altogether.
    # Best-effort: absence of the Apache unit is not an error.
    try:
        run_verbose('systemctl stop %s' % APACHE_SVC)
    except:
        pass
    try:
        run_verbose('systemctl disable %s' % APACHE_SVC)
    except:
        pass

    # nginx configuration dialog
    reply = postconf.yesno("Do you want to configure nginx?")
    if reply == postconf.DIALOG_OK:
        try:
            fqdn = socket.getfqdn()
            if not fqdn:
                fqdn = 'wapt'
            if '.' not in fqdn:
                fqdn += '.lan'
            msg = 'FQDN for the WAPT server (eg. wapt.acme.com)'
            (code, reply) = postconf.inputbox(text=msg, width=len(msg)+4, init=fqdn)
            if code != postconf.DIALOG_OK:
                exit(1)
            else:
                fqdn = reply

            # Generate DH parameters for TLS if not already present;
            # readable by root and the nginx group only.
            dh_filename = '/etc/ssl/certs/dhparam.pem'
            if not os.path.exists(dh_filename):
                run_verbose('openssl dhparam -out %s 2048' % dh_filename)
            os.chown(dh_filename, 0, NGINX_GID) #pylint: disable=no-member
            os.chmod(dh_filename, 0o640) #pylint: disable=no-member

            # cleanup of nginx.conf file
            with open('/etc/nginx/nginx.conf','r') as read_conf:
                nginx_conf = nginxparser.load(read_conf)
            nginx_conf = nginx_set_worker_limit(nginx_conf)
            nginx_conf = nginx_clean_default_vhost(nginx_conf)
            with open("/etc/nginx/nginx.conf", "w") as nginx_conf_file:
                nginx_conf_file.write(nginxparser.dumps(nginx_conf))

            # Kerberos needs the spnego nginx module; bail out early with
            # a hint when it is missing.
            if options.use_kerberos:
                if type_debian():
                    if not check_if_deb_installed('libnginx-mod-http-auth-spnego'):
                        print('missing dependency libnginx-mod-http-auth-spnego, please install first before configuring kerberos')
                        sys.exit(1)
                elif type_redhat():
                    import yum
                    yb = yum.YumBase()
                    yb.conf.cache = os.geteuid() != 1 #pylint: disable=no-member
                    pkgs = yb.rpmdb.returnPackages()
                    found = False
                    for pkg in pkgs:
                        if pkg.name=='nginx-mod-http-auth-spnego':
                            found = True
                    if not found:
                        print('missing dependency nginx-mod-http-auth-spnego, please install first before configuring kerberos')
                        sys.exit(1)

            make_httpd_config(wapt_folder, '/opt/wapt/waptserver', fqdn, options.use_kerberos, options.force_https)
            final_msg.append('Please connect to https://' + fqdn + '/ to access the server.')
            postconf.msgbox("The Nginx config is done. We need to restart Nginx?")
            run_verbose('systemctl enable nginx')
            run_verbose('systemctl restart nginx')
            setup_firewall()
        except subprocess.CalledProcessError as cpe:
            final_msg += [
                'Error while trying to configure Nginx!',
                'errno = ' + str(cpe.returncode) + ', output: ' + cpe.output
            ]
        except Exception as e:
            import traceback
            final_msg += [
                'Error while trying to configure Nginx!',
                traceback.format_exc()
            ]

    if check_mongo2pgsql_upgrade_needed(options.configfile) and\
            postconf.yesno("It is necessary to migrate current database backend from mongodb to postgres. Press yes to start migration",no_label='cancel') == postconf.DIALOG_OK:
        upgrade2postgres(options.configfile)

    # Final summary dialog sized to fit the accumulated messages.
    width = 4 + max(10, len(max(final_msg, key=len)))
    height = 2 + max(20, len(final_msg))
    postconf.msgbox('\n'.join(final_msg), height=height, width=width)
def install(architecture, conflicts, depends, description, enhances, maintainer,
            manager, name, recommends, suggests, summary, version, module):
    """Build a throwaway meta-package and install it via apt, dnf or yum.

    The package is generated in a temporary directory (make_deb/make_rpm)
    and handed to the requested package manager.  If a package with the
    same name and version is already installed, nothing is done.

    Returns a 6-tuple ``(changed, conflicts, depends, enhances,
    recommends, suggests)`` where ``changed`` is True only when an
    install was actually performed.  An unrecognised ``manager`` is a
    no-op reporting ``changed=False``.
    """
    if manager == 'apt':
        with apt.Cache() as cache:
            is_installed, is_virtual, installed_pkg = apt_package_status(name, cache)
            if is_installed and version == installed_pkg.version:
                # package is present already
                return False, conflicts, depends, enhances, recommends, suggests
            # 'workdir' rather than 'dir' — avoid shadowing the builtin dir().
            with tempfile.TemporaryDirectory() as workdir:
                pkg_path = make_deb(
                    architecture, conflicts, workdir, depends, description,
                    enhances, maintainer, manager, name, recommends, suggests,
                    summary, version, module)
                cmd = "apt-get install -y '{pkg_path}'".format(pkg_path=pkg_path)
                module.run_command(cmd, check_rc=True, cwd=workdir,
                                   environ_update=dict_merge(ENV_VARS, APT_ENV_VARS))
            return True, conflicts, depends, enhances, recommends, suggests
    elif manager == 'dnf':
        with dnf.Base() as base:
            # First pass: system repo only, just to test whether the exact
            # name/version is already installed.
            base.read_all_repos()
            base.fill_sack(load_system_repo=True, load_available_repos=False)
            q = base.sack.query()
            installed_pkgs = q.installed().filter(name=name, version=version).run()
            if installed_pkgs:
                # package is present already
                return False, conflicts, depends, enhances, recommends, suggests
            # Second pass: reload with available repos so the transaction
            # can resolve dependencies of the generated rpm.
            base.reset(repos=True, sack=True)
            base.conf.substitutions.update_from_etc(base.conf.installroot)
            base.read_all_repos()
            base.fill_sack(load_system_repo=True, load_available_repos=True)
            with tempfile.TemporaryDirectory() as workdir:
                pkg_path = make_rpm(
                    architecture, conflicts, workdir, depends, description,
                    enhances, maintainer, manager, name, recommends, suggests,
                    summary, version, module)
                # Install using dnf CLI
                # cmd = "dnf install -y '{pkg_path}'".format(pkg_path=pkg_path)
                # module.run_command(cmd, check_rc=True, cwd=workdir, environ_update=ENV_VARS)
                # Install using dnf API
                pkgs = base.add_remote_rpms([pkg_path])
                for pkg in pkgs:
                    base.package_install(pkg)
                base.resolve()
                base.download_packages(base.transaction.install_set)
                base.do_transaction()
            return True, conflicts, depends, enhances, recommends, suggests
    elif manager == 'yum':
        yb = yum.YumBase()
        if yb.rpmdb.searchNevra(name=name, ver=version):
            # package is present already
            return False, conflicts, depends, enhances, recommends, suggests
        with tempfile.TemporaryDirectory() as workdir:
            pkg_path = make_rpm(
                architecture, conflicts, workdir, depends, description,
                enhances, maintainer, manager, name, recommends, suggests,
                summary, version, module)
            cmd = "yum install -y '{pkg_path}'".format(pkg_path=pkg_path)
            module.run_command(cmd, check_rc=True, cwd=workdir,
                               environ_update=ENV_VARS)
        return True, conflicts, depends, enhances, recommends, suggests
    # else manager not in [ 'apt', 'dnf', 'yum' ]
    return False, conflicts, depends, enhances, recommends, suggests
def __init__(self, context): PackageManager.__init__(self, context) self.yum = yum.YumBase() self.yum.doGenericSetup(cache=1)
def remove(name=None, pkgs=None, **kwargs): ''' Removes packages using python API for yum. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. .. versionadded:: 0.16.0 Returns a dict containing the changes. CLI Example:: salt '*' pkg.remove <package name> salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]' ''' pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs)[0] old = list_pkgs() targets = [x for x in pkg_params if x in old] if not targets: return {} yumbase = yum.YumBase() setattr(yumbase.conf, 'assumeyes', True) # same comments as in upgrade for remove. for target in targets: if __grains__.get('cpuarch', '') == 'x86_64': try: arch = re.search(r'(\.i\d86)$', target).group(1) except AttributeError: arch = None else: # Remove arch from pkgname target = target[:-len(arch)] arch = arch.lstrip('.') else: arch = None yumbase.remove(name=target, arch=arch) log.info('Resolving dependencies') yumbase.resolveDeps() log.info('Processing transaction') yumlogger = _YumLogger() yumbase.processTransaction(rpmDisplay=yumlogger) yumlogger.log_accumulated_errors() yumbase.closeRpmDB() __context__.pop('pkg.list_pkgs', None) new = list_pkgs() return __salt__['pkg_resource.find_changes'](old, new)
if ppid == 1: raise RuntimeError("orphaned") line = inpipe.readline() # only way to detect EOF in python if line == "": break try: command = json.loads(line) except ValueError, e: raise RuntimeError("bad json parse") if base is None: base = yum.YumBase() if command['action'] == "whatinstalled": query(base, command) elif command['action'] == "whatavailable": query(base, command) elif command['action'] == "versioncompare": versioncompare(command['versions']) elif command['action'] == "installonlypkgs": install_only_packages(base, command['package']) elif command['action'] == "close_rpmdb": base.closeRpmDB() base = None outpipe.write('nil nil nil\n') outpipe.flush() else:
def update_repodata(bucketName, key, operation):
    """Regenerate the yum repodata of an S3-hosted repository after one
    rpm (``key``) has been added or removed from ``bucketName``.

    operation: "add" folds the new rpm into the metadata; anything else
    is treated as a delete of the matching name/version/release.
    Metadata is rebuilt in a local temp dir and synced back to S3.
    """
    # Split the S3 key into the containing repo path and the rpm filename.
    if key.rfind("/") > -1:
        fileName = key[key.rfind("/") + 1:]
        repoPath = key[:key.rfind("/")]
    else:
        fileName = key
        repoPath = ""
    (name, version, release, epoch, arch) = splitFilename(fileName)
    logger.debug("fileName={0}".format(fileName))
    logger.debug("repoPath={0}".format(repoPath))
    tmpdir = tempfile.mkdtemp()
    s3base = urlparse.urlunsplit(("s3", bucketName, repoPath, "", ""))
    s3grabber = S3Grabber(s3base)

    # Set up temporary repo that will fetch repodata from s3
    yumbase = yum.YumBase()
    yumbase.preconf.disabled_plugins = '*'
    yumbase.conf.cachedir = os.path.join(tmpdir, 'cache')
    yumbase.repos.disableRepo('*')
    repo = yumbase.add_enable_repo('s3')
    # Private yum internals (_grab/_urls/_sack) are overridden so all
    # fetches go through the S3 grabber instead of the normal URL grabber.
    repo._grab = s3grabber
    repo._urls = [os.path.join(s3base, '')]
    # Ensure that missing base path doesn't cause trouble
    repo._sack = yum.sqlitesack.YumSqlitePackageSack(
        createrepo.readMetadata.CreaterepoPkgOld)

    # Create metadata generator
    mdconf = createrepo.MetaDataConfig()
    mdconf.directory = tmpdir
    mdconf.pkglist = yum.packageSack.MetaSack()
    mdgen = createrepo.MetaDataGenerator(mdconf, LoggerCallback())
    mdgen.tempdir = tmpdir
    mdgen._grabber = s3grabber

    new_packages = yum.packageSack.PackageSack()
    if operation == "add":
        # Combine existing package sack with new rpm file list
        newpkg = mdgen.read_in_package(os.path.join(s3base, fileName))
        newpkg._baseurl = ''  # don't leave s3 base urls in primary metadata
        new_packages.addPackage(newpkg)
    else:
        # Remove deleted package
        logger.debug("Delete package {0}".format(key))
        older_pkgs = yumbase.pkgSack.searchNevra(name=name)
        for i, older in enumerate(older_pkgs, 1):
            if older.version == version and older.release == release:
                yumbase.pkgSack.delPackage(older)

    # The rebuilt metadata is the union of what survived in the existing
    # sack plus any newly added package.
    mdconf.pkglist.addSack('existing', yumbase.pkgSack)
    mdconf.pkglist.addSack('new', new_packages)

    # Write out new metadata to tmpdir
    mdgen.doPkgMetadata()
    mdgen.doRepoMetadata()
    mdgen.doFinalMove()

    # Replace metadata on s3
    s3grabber.syncdir(os.path.join(tmpdir, 'repodata'), 'repodata')

    # NOTE(review): if any step above raises, tmpdir is leaked — a
    # try/finally would be safer; confirm before changing behavior.
    shutil.rmtree(tmpdir)
def pkgProcessTask(self, task):
    """Worker-process entry point: install or remove a pkgdb 'entry'.

    task is a dict with at least 'action' ('install' or 'remove') and
    'id' (an entries.id in the pkgdb sqlite database).  Each entry maps
    to one or more items: plain packages, "@group" package groups, or
    "#group" environment groups.  All yum operations are accumulated and
    executed as a single transaction; progress and errors go to syslog.
    """
    setproctitle("deskctld-pkg")
    syslog.openlog("deskctld-pkg", syslog.LOG_PID)
    signal.signal(signal.SIGTERM, self._signal_handler_pkg)
    signal.signal(signal.SIGINT, self._signal_handler_pkg)
    syslog.syslog('deskctld-pkg started')

    ## open the pkgdb sqlite database
    try:
        conn = sqlite3.connect("/etc/soton/pkgdb.sqlite")
        conn.row_factory = sqlite3.Row
        cursor = conn.cursor()
    except Exception as ex:
        syslog.syslog("Could not open the pkgdb: " + str(type(ex)) + " - " + str(ex))
        return

    if task['action'] in ['install', 'remove']:
        # try to load the package data from the pkgdb by ID
        cursor.execute("SELECT * FROM `entries` WHERE `id` = ?", (task['id'], ))
        entry = cursor.fetchone()

        if entry is None:
            syslog.syslog("Could not find requested entry ID " + task['id'])
            return

        try:
            yb = yum.YumBase()
            # cache=0: always consult the real repos, not stale metadata.
            yb.conf.cache = 0

            ## Now we need to get the individual packages or groups
            ## which make up this 'entry'.
            cursor.execute("SELECT * FROM `items` WHERE `entry` = ?", (task['id'], ))
            items = cursor.fetchall()

            # transaction flips to True as soon as any item produces
            # yum transaction members.
            transaction = False
            for item in items:
                # start of loop over each item

                # Check if we've been asked to install/remove a package group
                if item['name'].startswith("@"):
                    group = True
                    envgroup = False
                    item_name = item['name'][1:]
                    item_type = "group"

                # Check if we've been asked to install/remove an environment group
                elif item['name'].startswith("#"):
                    group = True
                    envgroup = True
                    item_name = item['name'][1:]
                    item_type = "group"

                # We must have been asked to process a normal package then
                else:
                    group = False
                    envgroup = False
                    item_name = item['name']
                    item_type = "package"

                ## support name.arch if not a package group
                if not group:
                    arch = None
                    if item_name.endswith(".i686"):
                        item_name = item_name[:-5]
                        arch = "i686"
                    elif item_name.endswith(".x86_64"):
                        item_name = item_name[:-7]
                        arch = "x86_64"
                    elif item_name.endswith(".noarch"):
                        item_name = item_name[:-7]
                        arch = "noarch"

                    # If we've been told to install
                    if task['action'] == 'install':
                        try:
                            res = yb.install(name=item_name,
                                             arch=arch,
                                             silence_warnings=True)
                        except Exception as ex:
                            syslog.syslog("Could not install package " + item_name + ": " + str(ex))
                            continue
                    elif task['action'] == 'remove':
                        try:
                            res = yb.remove(name=item_name,
                                            arch=arch,
                                            silence_warnings=True)
                        except Exception as ex:
                            syslog.syslog("Could not remove package " + item_name + ": " + str(ex))
                            continue
                else:
                    # this is a group action, not a package
                    if task['action'] == 'install':
                        try:
                            if envgroup:
                                res = yb.selectEnvironment(evgrpid=item_name)
                            else:
                                res = yb.selectGroup(grpid=item_name)
                        except Exception as ex:
                            syslog.syslog("Could not install group " + item_name + ": " + str(ex))
                            continue
                    elif task['action'] == 'remove':
                        try:
                            if envgroup:
                                res = yb.environmentRemove(evgrpid=item_name)
                            else:
                                res = yb.groupRemove(grpid=item_name)
                        except Exception as ex:
                            syslog.syslog("Could not remove group " + item_name + ": " + str(ex))
                            continue

                if len(res) > 0:
                    transaction = True
                else:
                    # this means that yum returned no transaction results (it doesnt, sadly, return
                    # an exception with any useful information. So we just know that yum couldn't
                    # do anything...cos we got nothing in the 'res' list. Oh well.
                    syslog.syslog("Could not " + task['action'] + " " + item_type + " " + item_name)

                ## end of loop over each item

            # did we find any actions to undertake?
            if transaction:
                syslog.syslog("running transaction check")
                yb.buildTransaction()
                syslog.syslog("processing transaction")
                yb.processTransaction()
                syslog.syslog("transaction complete")
            else:
                syslog.syslog("no transaction tasks to complete")

            yb.closeRpmDB()
            yb.close()

        except Exception as ex:
            # Catch-all so a failed transaction still releases the rpmdb.
            syslog.syslog("Error during yum transaction: " + str(type(ex)) + " " + str(ex))
            traceback.print_exc()
            yb.closeRpmDB()
            yb.close()

    # close sqlite3 before we quit
    conn.close()
    # syslog.syslog("exiting")
def install(name=None, refresh=False, repo='', skip_verify=False, pkgs=None,
            sources=None, **kwargs):
    '''
    Install the passed package(s), add refresh=True to clean the yum database
    before package is installed.

    name
        The name of the package to be installed. Note that this parameter is
        ignored if either "pkgs" or "sources" is passed. Additionally, please
        note that this option can only be used to install packages from a
        software repository. To install a package file manually, use the
        "sources" option.

        CLI Example::

            salt '*' pkg.install <package name>

    refresh
        Whether or not to clean the yum database before executing.

    repo
        Specify a package repository to install from.
        (e.g., ``yum --enablerepo=somerepo``)

    skip_verify
        Skip the GPG verification check. (e.g., ``--nogpgcheck``)


    Multiple Package Installation Options:

    pkgs
        A list of packages to install from a software repository. Must be
        passed as a python list.

        CLI Example::

            salt '*' pkg.install pkgs='["foo","bar"]'

    sources
        A list of RPM packages to install. Must be passed as a list of dicts,
        with the keys being package names, and the values being the source URI
        or local path to the package.

        CLI Example::

            salt '*' pkg.install sources='[{"foo": "salt://foo.rpm"},{"bar": "salt://bar.rpm"}]'


    Returns a dict containing the new package names and versions::

        {'<package>': {'old': '<old-version>',
                       'new': '<new-version>'}}
    '''
    # Catch both boolean input from state and string input from CLI
    if refresh is True or refresh == 'True':
        refresh_db()

    pkg_params, pkg_type = __salt__['pkg_resource.parse_targets'](name,
                                                                  pkgs,
                                                                  sources)

    if pkg_params is None or len(pkg_params) == 0:
        return {}

    old = list_pkgs()

    yb = yum.YumBase()
    setattr(yb.conf, 'assumeyes', True)
    setattr(yb.conf, 'gpgcheck', not skip_verify)

    if repo:
        log.info('Enabling repo \'{0}\''.format(repo))
        yb.repos.enableRepo(repo)

    for target in pkg_params:
        try:
            # 'file' targets are local rpm paths/URIs; everything else is
            # resolved against the enabled repositories.
            if pkg_type == 'file':
                log.info(
                    'Selecting "{0}" for local installation'.format(target))
                a = yb.installLocal(target)
                # if yum didn't install anything, maybe its a downgrade?
                log.debug('Added {0} transactions'.format(len(a)))
                if len(a) == 0 and target not in old.keys():
                    log.info('Upgrade failed, trying local downgrade')
                    yb.downgradeLocal(target)
            else:
                log.info('Selecting "{0}" for installation'.format(target))
                # Changed to pattern to allow specific package versions
                a = yb.install(pattern=target)
                # if yum didn't install anything, maybe its a downgrade?
                log.debug('Added {0} transactions'.format(len(a)))
                if len(a) == 0 and target not in old.keys():
                    log.info('Upgrade failed, trying downgrade')
                    yb.downgrade(pattern=target)
        except Exception:
            # One bad target should not abort the rest of the list.
            log.exception('Package "{0}" failed to install'.format(target))

    # Resolve Deps before attempting install. This needs to be improved by
    # also tracking any deps that may get upgraded/installed during this
    # process. For now only the version of the package(s) you request be
    # installed is tracked.
    log.info('Resolving dependencies')
    yb.resolveDeps()
    log.info('Processing transaction')
    yumlogger = _YumErrorLogger()
    yb.processTransaction(rpmDisplay=yumlogger)
    yumlogger.log_accumulated_errors()
    yb.closeRpmDB()

    new = list_pkgs()
    return __salt__['pkg_resource.find_changes'](old, new)
def main(args):
    """Compare two installed file trees.

    args: [prog, sourcetree, targettree].  Reports (to stdout) files
    present in the target tree but missing from the source, unified
    diffs of ASCII files that differ, and files present only in the
    source grouped by the yum package that provides them, sorted by the
    total size each package contributes.
    """
    try:
        sourcedir, targetdir = args[1], args[2]
    except IndexError:
        print("invalid argument count")
        print("usage: python {0} sourcetree targettree".format(args[0]))
        sys.exit(2)

    # Normalise away trailing slashes so path replacement below is exact.
    if sourcedir.endswith("/"):
        sourcedir = sourcedir[:-1]
    if targetdir.endswith("/"):
        targetdir = targetdir[:-1]

    # parse sourcedir and targetdir
    # Each tree maps the path relative to its root -> absolute path.
    sourcetree, targettree = {}, {}
    for tree, d in [[sourcetree, sourcedir], [targettree, targetdir]]:
        for root, _dnames, fnames in os.walk(d):
            for fname in fnames:
                fpath = os.path.join(root, fname)
                rpath = fpath.replace(d, "", 1)
                tree[rpath] = fpath

    # set up magic (libmagic file-type detection)
    m = magic.open(magic.MAGIC_NONE)
    m.load()

    # get files missing in source
    sys.stderr.write("getting files missing in source\n")
    for rpath in sorted(targettree.keys()):
        fpath = targettree[rpath]
        targetfile = fpath
        try:
            sourcefile = sourcetree[rpath]
        except KeyError:
            sys.stdout.write('Missing: %s\n' % rpath)
            continue

        # skip broken links
        if os.path.islink(targetfile) and not os.path.exists(targetfile):
            continue

        # check stat
        #sourcemode = os.stat(sourcefile).st_mode
        #targetmode = os.stat(targetfile).st_mode
        #if sourcemode != targetmode:
        #    sys.stdout.write('Stat differ: %s\n' % rpath)

        # diff only text files
        ftype = m.file(fpath)
        if ftype not in ["ASCII text"]:
            continue

        with open(targetfile, "r") as fobj:
            target = fobj.readlines()
        with open(sourcefile) as fobj:
            source = fobj.readlines()

        # do the file diff
        for line in difflib.unified_diff(source, target,
                                         fromfile=sourcefile,
                                         tofile=targetfile):
            sys.stdout.write(line)

    # set up yum
    # XXX HACK
    # we don't want yum's stuff in the output
    # so we redirect stdout to /dev/null for a while...
    stdout = os.dup(1)
    null = open("/dev/null", "w")
    os.dup2(null.fileno(), 1)

    # here yum prints out some stuff we really don't care about
    yb = yum.YumBase()
    yb.doSackSetup()

    # give the stdout back
    os.dup2(stdout, 1)
    null.close()

    # get excessive files in source
    sys.stderr.write("getting excessive files in source\n")
    sizedict, pkgdict = {}, {}
    for rpath, fpath in sourcetree.items():
        # if file in target, skip it
        if rpath in targettree:
            continue

        # get file size
        try:
            sizeinbytes = os.path.getsize(fpath)
        except OSError:
            sizeinbytes = 0

        # set link size to 0
        islink = os.path.islink(fpath)
        if islink:
            sizeinbytes = 0

        # Attribute the file to every package that provides it.
        pkglist = yb.whatProvides(rpath, None, None)
        pkglist = set(map(lambda pkgobj: pkgobj.name, pkglist))
        for pkg in pkglist:
            sizedict[pkg] = sizedict.get(pkg, 0) + sizeinbytes
            pkgdict[pkg] = pkgdict.get(pkg, []) + \
                [(rpath, sizeinbytes, islink)]

    # sort by size
    for pkg, _size in sorted(sizedict.items(),
                             key=operator.itemgetter(1), reverse=True):
        for item in sorted(pkgdict[pkg]):
            sys.stdout.write("%s\t%s\n" % (pkg, item))
def ajax_software(category):
    """Render the software list for one pkgdb category.

    For every entry in the category, determines a status code for the UI:
    0 unknown, 1 installed, 2 not installed, 3 being installed,
    4 being removed (3/4 come from the deskctld daemon).

    Fix: ``pkgs`` is now initialised before the entries check — previously
    an empty or missing category raised NameError at render_template().
    """
    ## Connect to the desktop management service
    deskctld = deskctld_connect()

    ## Get the pkgdb database
    db = open_pkgdb()
    cur = db.cursor()
    cur.execute("SELECT * FROM `categories` WHERE `id` = ?", (category, ))
    category_obj = cur.fetchone()

    if not category_obj:
        abort(404)

    # Get all the entries for this category
    cur.execute(
        "SELECT * FROM `entries` WHERE `category` = ? ORDER BY `name`",
        (category, ))
    entries = cur.fetchall()

    # Always defined so the template render below cannot hit a NameError
    # when the category has no entries.
    pkgs = []
    if entries is not None:
        if len(entries) > 0:
            # Prepare yum for querying
            yb = yum.YumBase()
            yb.setCacheDir()

            # Prepare groups: collect the ids of installed package groups
            # and environment groups for quick membership tests.
            (installedGroups, availableGroups, installedEnvGroups,
             availableEnvGroups) = yb.doGroupLists(return_evgrps=True)
            groups = []
            for group in installedGroups:
                groups.append(group.groupid)
            envgroups = []
            for group in installedEnvGroups:
                envgroups.append(group.environmentid)

            # Now we need to check if the entry is installed or not on this
            # system to determine what button to show.
            for entry in entries:
                ## Get the items we need to install
                cur.execute("SELECT * FROM `items` WHERE `entry` = ?",
                            (entry['id'], ))
                items = cur.fetchall()

                status = 0  # entry status unknown
                if len(items) > 0:
                    try:
                        # An entry counts as installed only if every one of
                        # its items (@group, #envgroup or package) is.
                        installed = True
                        for item in items:
                            if item['name'].startswith("@"):
                                grpName = item['name'][1:]
                                if grpName not in groups:
                                    installed = False
                            elif item['name'].startswith("#"):
                                grpName = item['name'][1:]
                                if grpName not in envgroups:
                                    installed = False
                            else:
                                if not yb.rpmdb.searchNevra(name=item['name']):
                                    installed = False

                        if installed:
                            status = 1  # entry is installed
                        else:
                            status = 2  # entry is not installed
                    except Exception as ex:
                        status = 0  # entry status unknown

                ## check if this entry is currently being installed/removed by deskctld
                try:
                    deskctld_status = deskctld.pkgEntryStatus(entry['id'])
                    if deskctld_status == 1:
                        status = 3  # entry is being installed
                    elif deskctld_status == 2:
                        status = 4  # entry is being removed
                except Exception as ex:
                    # deskctld unavailable: fall back on the yum-derived status.
                    pass

                pkgs.append({
                    'id': entry['id'],
                    'status': status,
                    'name': entry['name'],
                    'desc': entry['desc'],
                    'icon': entry['icon']
                })

    return render_template('ajax_software.html', pkgs=pkgs,
                           can_user_remove_software=can_user_remove_software())
def main():
    """Depcheck driver: compare the dependency sets (requires/conflicts/
    obsoletes) of "new" packages against matching "old" packages and
    print which repositories would be needed to satisfy them.

    Python 2 script (print statements).  Output goes to stdout; repo
    selection mirrors yum CLI options (--repoid/--enablerepo/...).
    """
    parser = OptionParser(version="Depcheck version %s" % version)
    parser.add_option("--releasever", default=None,
                      help="set value of $releasever in yum config and repo files")
    parser.add_option("--show-duplicates", action="store_true", dest="show_dupes",
                      help="show all versions of packages")
    parser.add_option("--show-dupes", action="store_true",
                      help=SUPPRESS_HELP)
    parser.add_option("--repoid", action="append",
                      help="specify repoids to query, can be specified multiple times (default is all enabled)")
    parser.add_option("--enablerepo", action="append", dest="enablerepos",
                      help="specify additional repoids to query, can be specified multiple times")
    parser.add_option("--disablerepo", action="append", dest="disablerepos",
                      help="specify repoids to disable, can be specified multiple times")
    parser.add_option("--repofrompath", action="append",
                      help="specify repoid & paths of additional repositories - unique repoid and complete path required, can be specified multiple times. Example. --repofrompath=myrepo,/path/to/repo")
    parser.add_option("--old-packages", action="append",
                      help="packages to use to compare against, instead of installed")
    parser.add_option("--ignore-arch", action="store_true",
                      help="ignore arch when searching for old packages")
    parser.add_option("--skip-new", action="store_true",
                      help="skip packages without a matching old package")
    parser.add_option("-C", "--cache", action="store_true",
                      help="run from cache only")
    parser.add_option("-c", "--config", dest="conffile", default=None,
                      help="config file location")

    (opts, args) = parser.parse_args()
    yb = yum.YumBase()
    yb.preconf.releasever = opts.releasever
    if opts.conffile is not None:
        yb.preconf.fn = opts.conffile

    # setup the fake repos
    for repo in opts.repofrompath or []:
        tmp = tuple(repo.split(','))
        if len(tmp) != 2:
            yb.logger.error("Error: Bad repofrompath argument: %s" % repo)
            continue
        repoid, repopath = tmp
        # Absolute paths become file:// urls; anything else is assumed to
        # already be a url.
        if repopath[0] == '/':
            baseurl = 'file://' + repopath
        else:
            baseurl = repopath
        yb.add_enable_repo(repoid, baseurls=[baseurl],
                           basecachedir=yb.conf.cachedir)
        yb.logger.info("Added %s repo from %s" % (repoid, repopath))

    # Fall back to cache-only mode when no writable cache dir exists.
    if opts.cache:
        yb.conf.cache = 1
    elif not yb.setCacheDir():
        yb.conf.cache = 1

    if opts.show_dupes:
        yb.conf.showdupesfromrepos = True

    # --repoid is an allow-list: disable everything not named, warn about
    # names that match no configured repo.
    if opts.repoid:
        found_repos = set()
        for repo in yb.repos.findRepos('*'):
            if repo.id not in opts.repoid:
                repo.disable()
            else:
                found_repos.add(repo.id)
                repo.enable()
        for not_found in set(opts.repoid).difference(found_repos):
            yb.logger.error('Repoid %s was not found.' % not_found)

    if opts.disablerepos:
        for repo_match in opts.disablerepos:
            for repo in yb.repos.findRepos(repo_match):
                repo.disable()

    if opts.enablerepos:
        for repo_match in opts.enablerepos:
            for repo in yb.repos.findRepos(repo_match):
                repo.enable()

    npkgs = _get_npkgs(yb, args)
    # Old packages are indexed both by (name, arch) and by bare name so
    # --ignore-arch lookups work.
    opkgs = {}
    for pkg in sorted(_get_opkgs(yb, npkgs, opts.old_packages)):
        opkgs[(pkg.name, pkg.arch)] = pkg
        opkgs[pkg.name] = pkg

    for pkg in sorted(npkgs):
        opkg = None
        oreqs = {}
        oobss = {}
        ocons = {}
        if opts.ignore_arch:
            if pkg.name in opkgs:
                opkg = opkgs[pkg.name]
        elif (pkg.name, pkg.arch) in opkgs:
            opkg = opkgs[(pkg.name, pkg.arch)]
        if opkg is None and opts.skip_new:
            continue

        print "New-Package:", pkg, pkg.ui_from_repo
        if opkg is not None:
            print "Old-Package:", opkg, opkg.ui_from_repo
            # Old dependency sets, used below to split "new" from
            # "modified" requirements.
            oreqs = _get_oreqs(pkg, opkg.requires)
            ocons = _get_oreqs(pkg, opkg.conflicts)
            oobss = _get_oreqs(pkg, opkg.obsoletes)

        # Requires
        used_repos_reqs = []
        nreqs, creqs = _get_reqs(pkg, pkg.requires, oreqs)
        if nreqs:
            print "New-Requires:"
            _print_reqs(yb, pkg, nreqs, used_repos_reqs)
        if creqs:
            print "Modified-Requires:"
            _print_reqs(yb, pkg, creqs, used_repos_reqs)
        _print_sum("Dep-Requires-Repos:", used_repos_reqs)

        # Conflicts
        used_repos_cons = []
        nreqs, creqs = _get_reqs(pkg, pkg.conflicts, ocons)
        if nreqs:
            print "New-Conflicts:"
            _print_reqs(yb, pkg, nreqs, used_repos_cons)
        if creqs:
            print "Mod-Conflicts:"
            _print_reqs(yb, pkg, creqs, used_repos_cons)
        _print_sum("Dep-Conflicts-Repos:", used_repos_cons)

        # Obsoletes
        used_repos_obss = []
        nreqs, creqs = _get_reqs(pkg, pkg.obsoletes, oobss)
        if nreqs:
            print "New-Obsoletes:"
            _print_reqs(yb, pkg, nreqs, used_repos_obss)
        if creqs:
            print "Mod-Obsoletes:"
            _print_reqs(yb, pkg, creqs, used_repos_obss)
        _print_sum("Dep-Obsoletes-Repos:", used_repos_obss)

        _print_sum("Dep-Repos:",
                   used_repos_reqs + used_repos_cons + used_repos_obss,
                   end='')
def main():  # pylint: disable=missing-docstring,too-many-branches
    """Dry-run a yum install/update of the requested packages and fail with a
    helpful message if repositories are unreachable, packages are missing, or
    dependency resolution fails.  Never commits a transaction
    (exits with changed=False)."""
    module = AnsibleModule(
        argument_spec=dict(packages=dict(type='list', default=[])),
        supports_check_mode=True)

    # NOTE(rhcarvalho): sosiouxme added _unmute, but I couldn't find a case yet
    # for when it is actually necessary. Leaving it commented out for now,
    # though this comment and the commented out code related to _unmute should
    # be deleted later if not proven necessary.
    # sys.stdout = os.devnull # mute yum so it doesn't break our output

    # def _unmute(): # pylint: disable=missing-docstring
    #     sys.stdout = sys.__stdout__

    def bail(error):  # pylint: disable=missing-docstring
        # _unmute()
        module.fail_json(msg=error)

    yb = yum.YumBase()  # pylint: disable=invalid-name

    # determine if the existing yum configuration is valid
    try:
        yb.repos.populateSack(mdtype='metadata', cacheonly=1)
    # for error of type:
    # 1. can't reach the repo URL(s)
    except yum.Errors.NoMoreMirrorsRepoError as e:  # pylint: disable=invalid-name
        bail('Error getting data from at least one yum repository: %s' % e)
    # 2. invalid repo definition
    except yum.Errors.RepoError as e:  # pylint: disable=invalid-name
        bail('Error with yum repository configuration: %s' % e)
    # 3. other/unknown
    # * just report the problem verbatim
    except:  # pylint: disable=bare-except; # noqa
        bail('Unexpected error with yum repository: %s' % sys.exc_info()[1])

    packages = module.params['packages']
    no_such_pkg = []
    for pkg in packages:
        try:
            # Stage the install in the transaction; nothing is committed.
            yb.install(name=pkg)
        except yum.Errors.InstallError as e:  # pylint: disable=invalid-name
            no_such_pkg.append(pkg)
        except:  # pylint: disable=bare-except; # noqa
            bail('Unexpected error with yum install/update: %s' %
                 sys.exc_info()[1])
    if not packages:
        # no packages requested means test a yum update of everything
        yb.update()
    elif no_such_pkg:
        # wanted specific packages to install but some aren't available
        user_msg = 'Cannot install all of the necessary packages. Unavailable:\n'
        for pkg in no_such_pkg:
            user_msg += ' %s\n' % pkg
        user_msg += 'You may need to enable one or more yum repositories to make this content available.'
        bail(user_msg)
    try:
        txn_result, txn_msgs = yb.buildTransaction()
    except:  # pylint: disable=bare-except; # noqa
        bail('Unexpected error during dependency resolution for yum update: \n %s'
             % sys.exc_info()[1])

    # find out if there are any errors with the update/install
    if txn_result == 0:  # 'normal exit' meaning there's nothing to install/update
        pass
    elif txn_result == 1:  # error with transaction
        user_msg = 'Could not perform a yum update.\n'
        if len(txn_msgs) > 0:
            user_msg += 'Errors from dependency resolution:\n'
            for msg in txn_msgs:
                user_msg += ' %s\n' % msg
            user_msg += 'You should resolve these issues before proceeding with an install.\n'
            user_msg += 'You may need to remove or downgrade packages or enable/disable yum repositories.'
        bail(user_msg)
        # TODO: it would be nice depending on the problem:
        # 1. dependency for update not found
        # * construct the dependency tree
        # * find the installed package(s) that required the missing dep
        # * determine if any of these packages matter to openshift
        # * build helpful error output
        # 2. conflicts among packages in available content
        # * analyze dependency tree and build helpful error output
        # 3. other/unknown
        # * report the problem verbatim
        # * add to this list as we come across problems we can clearly diagnose
    elif txn_result == 2:  # everything resolved fine
        pass
    else:
        bail('Unknown error(s) from dependency resolution. Exit Code: %d:\n%s'
             % (txn_result, txn_msgs))

    # _unmute()
    module.exit_json(changed=False)
#! /usr/bin/python -tt import sys import yum __provides_of_requires_exact__ = False yb1 = yum.YumBase() yb1.conf.cache = True yb2 = yum.YumBase() yb2.conf.cache = True if len(sys.argv) > 1 and sys.argv[1].lower() == 'full': print("Doing full test") __provides_of_requires_exact__ = True assert hasattr(yb1.rpmdb, '__cache_rpmdb__') yb1.rpmdb.__cache_rpmdb__ = False yb2.setCacheDir() # Version ver1 = yb1.rpmdb.simpleVersion(main_only=True)[0] ver2 = yb2.rpmdb.simpleVersion(main_only=True)[0] if ver1 != ver2: print("Error: Version mismatch:", ver1, ver2, file=sys.stderr) # Conflicts cpkgs1 = yb1.rpmdb.returnConflictPackages() cpkgs2 = yb2.rpmdb.returnConflictPackages() if len(cpkgs1) != len(cpkgs2): print("Error: Conflict len mismatch:",
def index_yum_pkgs(self):
    """
    index_yum_pkgs

    Index the packages from yum into this format:

       {base_package_name: {'name': base_package_name,
                            'summary': base_package_summary,
                            'description': base_package_summary,
                            'devel_owner': owner,
                            'icon': icon_name,
                            'pkg': pkg,
                            'upstream_url': url,
                            'src_pkg': src_pkg,
                            'sub_pkgs': [{'name': sub_pkg_name,
                                          'summary': sub_pkg_summary,
                                          'description': sub_pkg_description,
                                          'icon': icon_name,
                                          'pkg': pkg}, ...]},
        ...
       }
    """
    import yum
    yb = yum.YumBase()
    self.yum_base = yb

    # Make sure the on-disk cache directories exist before yum uses them.
    if not os.path.exists(self.yum_cache_path):
        os.mkdir(self.yum_cache_path)
    if not os.path.exists(self.icons_path):
        os.mkdir(self.icons_path)

    yb.doConfigSetup(self.yum_conf, root=os.getcwd(), init_plugins=False)
    # Only the rawhide binary and source repos are indexed.
    for r in yb.repos.findRepos('*'):
        if r.id in ['rawhide-x86_64', 'rawhide-source']:
            r.enable()
        else:
            r.disable()

    yb._getRepos(doSetup=True)
    yb._getSacks(['x86_64', 'noarch', 'src'])
    yb.doRepoSetup()
    yb.doSackFilelistPopulate()

    # Doesn't work right now due to a bug in yum.
    # https://bugzilla.redhat.com/show_bug.cgi?id=750593
    #yb.disablePlugins()

    yb.conf.cache = 1

    self.icon_cache = IconCache(yb, ['gnome-icon-theme', 'oxygen-icon-theme'],
                                self.icons_path, self.cache_path)

    pkgs = yb.pkgSack.returnPackages()
    base_pkgs = {}
    seen_pkg_names = []

    # get the tagger data
    self.tagger_cache = None
    if self.tagger_url:
        print "Caching tagger data"
        response = urllib2.urlopen(self.tagger_url)
        html = response.read()
        tagger_data = json.loads(html)
        self.tagger_cache = {}
        # Flatten the per-package tag dicts into one name -> info mapping.
        for pkg_tag_info in tagger_data['packages']:
            for pkg_name in pkg_tag_info.keys():
                self.tagger_cache[pkg_name] = pkg_tag_info[pkg_name]

    pkg_count = 0
    for pkg in pkgs:
        # precache the icon themes for later extraction and matching
        if pkg.ui_from_repo != 'rawhide-source':
            self.icon_cache.check_pkg(pkg)
        if not pkg.base_package_name in base_pkgs:
            # we haven't seen this base package yet so add it
            base_pkgs[pkg.base_package_name] = {
                'name': pkg.base_package_name,
                'summary': '',
                'description': '',
                'devel_owner': '',
                'pkg': None,
                'src_pkg': None,
                'icon': self.default_icon,
                'upstream_url': None,
                'sub_pkgs': []
            }
        base_pkg = base_pkgs[pkg.base_package_name]

        if pkg.ui_from_repo == 'rawhide-source':
            # Source package: record it and fill in any fields the binary
            # packages have not supplied yet.
            pkg_count += 1
            print "%d: pre-processing package '%s':" % (pkg_count, pkg['name'])
            base_pkg['src_pkg'] = pkg
            base_pkg['upstream_url'] = pkg.URL
            if not base_pkg['devel_owner']:
                base_pkg['devel_owner'] = self.find_devel_owner(pkg.name)
            if not base_pkg['summary']:
                base_pkg['summary'] = pkg.summary
            if not base_pkg['description']:
                base_pkg['description'] = pkg.description
            continue

        # avoid duplicates
        if pkg.name in seen_pkg_names:
            continue
        seen_pkg_names.append(pkg.name)

        if pkg.base_package_name == pkg.name:
            # this is the main package
            if not base_pkg['src_pkg']:
                pkg_count += 1
                print "%d: pre-processing package '%s':" % (pkg_count, pkg['name'])
                base_pkg['summary'] = pkg.summary
                base_pkg['description'] = pkg.description
                base_pkg['pkg'] = pkg
                base_pkg['devel_owner'] = self.find_devel_owner(pkg.name)
        else:
            # this is a sub package
            pkg_count += 1
            print "%d: pre-processing package '%s':" % (pkg_count, pkg['name'])
            subpkgs = base_pkg['sub_pkgs']
            subpkgs.append({
                'name': pkg.name,
                'summary': pkg.summary,
                'description': pkg.description,
                'icon': self.default_icon,
                'pkg': pkg
            })

    return base_pkgs
def install(name=None, refresh=False, skip_verify=False, pkgs=None,
            sources=None, **kwargs):
    '''
    Install the passed package(s), add refresh=True to clean the yum database
    before package is installed.

    name
        The name of the package to be installed. Note that this parameter is
        ignored if either "pkgs" or "sources" is passed. Additionally, please
        note that this option can only be used to install packages from a
        software repository. To install a package file manually, use the
        "sources" option.

        32-bit packages can be installed on 64-bit systems by appending the
        architecture designation (``.i686``, ``.i586``, etc.) to the end of the
        package name.

        CLI Example::
            salt '*' pkg.install <package name>

    refresh
        Whether or not to update the yum database before executing.

    skip_verify
        Skip the GPG verification check. (e.g., ``--nogpgcheck``)

    version
        Install a specific version of the package, e.g. 1.2.3-4.el6. Ignored
        if "pkgs" or "sources" is passed.


    Repository Options:

    fromrepo
        Specify a package repository (or repositories) from which to install.
        (e.g., ``yum --disablerepo='*' --enablerepo='somerepo'``)

    enablerepo
        Specify a disabled package repository (or repositories) to enable.
        (e.g., ``yum --enablerepo='somerepo'``)

    disablerepo
        Specify an enabled package repository (or repositories) to disable.
        (e.g., ``yum --disablerepo='somerepo'``)


    Multiple Package Installation Options:

    pkgs
        A list of packages to install from a software repository. Must be
        passed as a python list. A specific version number can be specified
        by using a single-element dict representing the package and its
        version.

        CLI Examples::
            salt '*' pkg.install pkgs='["foo", "bar"]'
            salt '*' pkg.install pkgs='["foo", {"bar": "1.2.3-4.el6"}]'

    sources
        A list of RPM packages to install. Must be passed as a list of dicts,
        with the keys being package names, and the values being the source URI
        or local path to the package.

        CLI Example::
            salt '*' pkg.install sources='[{"foo": "salt://foo.rpm"}, {"bar": "salt://bar.rpm"}]'


    Returns a dict containing the new package names and versions::

        {'<package>': {'old': '<old-version>',
                       'new': '<new-version>'}}
    '''
    if salt.utils.is_true(refresh):
        refresh_db()

    # Normalize name/pkgs/sources into a {pkgname: version} mapping plus a
    # type flag ('repository' or 'file').
    pkg_params, pkg_type = __salt__['pkg_resource.parse_targets'](name, pkgs,
                                                                  sources,
                                                                  **kwargs)
    if pkg_params is None or len(pkg_params) == 0:
        return {}

    # Snapshot the installed set so we can diff it after the transaction.
    old = list_pkgs()

    yumbase = yum.YumBase()
    setattr(yumbase.conf, 'assumeyes', True)
    setattr(yumbase.conf, 'gpgcheck', not skip_verify)

    version = kwargs.get('version')
    if version:
        if pkgs is None and sources is None:
            # Allow "version" to work for single package target
            pkg_params = {name: version}
        else:
            log.warning('"version" parameter will be ignored for multiple '
                        'package targets')

    error = _set_repo_options(yumbase, **kwargs)
    if error:
        log.error(error)
        return {}

    try:
        for pkgname in pkg_params:
            if pkg_type == 'file':
                log.info(
                    'Selecting "{0}" for local installation'.format(pkgname)
                )
                installed = yumbase.installLocal(pkgname)
                # if yum didn't install anything, maybe its a downgrade?
                log.debug('Added {0} transactions'.format(len(installed)))
                if len(installed) == 0 and pkgname not in old.keys():
                    log.info('Upgrade failed, trying local downgrade')
                    yumbase.downgradeLocal(pkgname)
            else:
                version = pkg_params[pkgname]
                if version is not None:
                    if __grains__.get('cpuarch', '') == 'x86_64':
                        # A trailing ".i686"/".i586" etc. selects a 32-bit
                        # package on a 64-bit host; split it off so the
                        # "name-version.arch" target string can be built.
                        try:
                            arch = re.search(r'(\.i\d86)$', pkgname).group(1)
                        except AttributeError:
                            arch = ''
                        else:
                            # Remove arch from pkgname
                            pkgname = pkgname[:-len(arch)]
                    else:
                        arch = ''
                    target = '{0}-{1}{2}'.format(pkgname, version, arch)
                else:
                    target = pkgname
                log.info('Selecting "{0}" for installation'.format(target))
                # Changed to pattern to allow specific package versions
                installed = yumbase.install(pattern=target)
                # if yum didn't install anything, maybe its a downgrade?
                log.debug('Added {0} transactions'.format(len(installed)))
                if len(installed) == 0 and target not in old.keys():
                    log.info('Upgrade failed, trying downgrade')
                    yumbase.downgrade(pattern=target)

        # Resolve Deps before attempting install. This needs to be improved by
        # also tracking any deps that may get upgraded/installed during this
        # process. For now only the version of the package(s) you request be
        # installed is tracked.
        log.info('Resolving dependencies')
        yumbase.resolveDeps()
        log.info('Processing transaction')
        yumlogger = _YumLogger()
        yumbase.processTransaction(rpmDisplay=yumlogger)
        yumlogger.log_accumulated_errors()
        yumbase.closeRpmDB()
    except Exception as e:
        log.error('Install failed: {0}'.format(e))

    # Invalidate the cached package list before computing the diff.
    __context__.pop('pkg.list_pkgs', None)
    new = list_pkgs()
    return __salt__['pkg_resource.find_changes'](old, new)
def main():
    """ Run the BYOB setup script """
    import os
    import sys
    import logging
    import subprocess

    # debugging
    logging.basicConfig(level=logging.DEBUG, handlers=[logging.StreamHandler()])
    logger = logging.getLogger(__name__)

    #urllib vomit
    if sys.version_info[0] > 2:
        from urllib.request import urlopen
        # for mainstream linux kernel we need to get opencv from the repo, else it must be compiled from source;
        # this, to prevent a segfault at runtime for resources loading cv2 package in python3
        if os.name != "nt":
            try:
                import apt
                aptcache = apt.Cache()
                if not aptcache['python3-opencv'].is_installed:
                    logger.error(
                        'Install python3-opencv before continuing:\n\n sudo apt install python3-opencv\n'
                    )
                    sys.exit()
            except:
                #assuming then we're rhel based
                try:
                    import yum
                    yumapp = yum.YumBase()
                    rpmdb = yumapp.doPackageLists(patterns="python3-opencv")
                    if not rpmdb.installed:
                        logger.error(
                            'Install python3-opencv before continuing:\n\n sudo yum install python3-opencv\n'
                        )
                        sys.exit()
                except:
                    logger.error(
                        'Unable to determine if python3-opencv is installed; continuing anyway.\n If you get a cv2 import error, install python3-opencv'
                    )
    else:
        from urllib import urlopen

    # find pip
    try:
        pip_path = subprocess.check_output(
            'where pip' if os.name == 'nt' else 'which pip',
            shell=True).strip().rstrip()
    except Exception as e:
        logger.debug("Error in pip package installer: {}".format(str(e)))

    # install pip if missing
    try:
        import pip
    except:
        # intrct: removing this check -- the import pip failure above should make this check unnecessary, and
        # it actually becomes a problem if someone runs python 2.7.x and 3.x on the same host.
        #if not bool('pip_path' in locals() and os.path.exists(pip_path)) and
        if os.name != "nt":
            try:
                # NOTE: intrct -- I think this is a bad practice (instituting execution of arbitrary remote code we don't control).
                if os.getuid() != 0:
                    # intrct: added this exit because otherwise this just runs in an infinite loop; I still think it's a bad idea.
                    logger.error(
                        "pip is not installed or a module, so setup must run elevated (sudo)"
                    )
                    sys.exit()
                # intrct: added check for version for proper callout materials, and
                # running as subprocess rather than internal due to potential early exits in remote code.
                if sys.version_info[0] > 2:
                    subprocess.check_call(
                        """{} -c 'from urllib.request import urlopen; exec(urlopen("https://bootstrap.pypa.io/get-pip.py").read())'"""
                        .format(sys.executable),
                        shell=True)
                else:
                    subprocess.check_call(
                        """{} -c 'from urllib import urlopen; exec(urlopen("https://bootstrap.pypa.io/get-pip.py").read())'"""
                        .format(sys.executable),
                        shell=True)
            except Exception as e:
                logger.debug("Error installing pip: {}".format(str(e)))
        # restart
        # intrct: I would like to point out a limitation that this only installs packages for the linked "python"
        # and not the called version of python in the original runtime.
        os.execv(sys.executable, ['python'] + [os.path.abspath(sys.argv[0])] +
                 sys.argv[1:])

    # find requirements
    # Walks upward/sideways from the parent dir looking for byob's requirements.txt.
    for tree in os.walk('..'):
        if 'byob' not in tree[0]:
            continue
        elif 'requirements.txt' in tree[2]:
            requirements = os.path.join(tree[0], 'requirements.txt')
            break

    # install requirements
    # NOTE(review): writing through locals() relies on CPython frame-locals
    # behavior (the extra key survives between locals() calls) — a plain local
    # variable would be the portable choice.
    try:
        print("Installing requirements.txt")
        if os.name != "nt":
            locals()['pip_install_1'] = subprocess.Popen(
                'sudo --prompt=" Please enter sudo password (to install python dependencies): " {} -m pip install -r {}'
                .format(sys.executable, requirements), 0, None,
                subprocess.PIPE, subprocess.PIPE, subprocess.PIPE, shell=True)
        else:
            locals()['pip_install_1'] = subprocess.Popen(
                '{} -m pip install -r {}'.format(sys.executable, requirements),
                0, None, subprocess.PIPE, subprocess.PIPE, subprocess.PIPE,
                shell=True)
        for line in locals()['pip_install_1'].stdout:
            print(line.decode())
            sys.stdout.flush()
    except Exception as e:
        logger.error("Error installing requirements: {}".format(e))
def getExternalRPMS(self):
    """Resolve the full RPM set needed by the 'everything' appliance and
    split the os/updates roll RPMs into (required, optional).

    Returns a tuple (rocks, nonrocks) of RPM objects from the distOS
    distribution: those whose base name was selected by dependency
    resolution, and the rest.
    """
    import rocks.roll
    import rocks.gen

    # The distAll distribution includes all of the installed rolls
    # on the system and is used to generate a kickstart files for
    # the everything appliance. This gives us a list of RPMs that
    # we know we need from the source os/updates CDs.
    print 'making rocks-dist-all'
    cwd = os.getcwd()
    os.environ['RPMHOME'] = os.getcwd()
    distAll = rocks.roll.Distribution(self.getDistArch(), 'rocks-dist-all')
    distAll.generate()

    #
    # copy the 'everything' node and graph file into the distro
    #
    shutil.copy(os.path.join('nodes', 'everything.xml'),
                os.path.join(distAll.getPath(), 'build', 'nodes'))
    shutil.copy(
        os.path.join('graphs', 'default', 'os.xml'),
        os.path.join(distAll.getPath(), 'build', 'graphs', 'default'))
    basedir = os.path.join(distAll.getPath(), 'build')
    xml = self.command(
        'list.node.xml',
        ['everything', 'basedir=%s' % basedir, 'eval=n'])
    os.chdir(cwd)

    #
    # make sure the XML string is ASCII and not unicode,
    # otherwise, the parser will fail
    #
    xmlinput = xml.encode('ascii', 'ignore')
    generator = rocks.gen.Generator_linux()
    generator.setArch(self.arch)
    generator.setOS('linux')
    generator.parse(xmlinput)

    # Collect package lines, skipping comments ('#') and section markers ('%').
    rpms = []
    for line in generator.generate('packages'):
        if len(line) and line[0] not in ['#', '%']:
            rpms.append(line)

    # The distOS distribution includes just the source os/update
    # CDs (already in Roll form). The distAll distribution is
    # still used for the comps file and the anaconda source
    # code. We need this since anaconda and comps are missing
    # from the foreign rolls (os/update CDs).
    print 'making rocks-dist-os'
    del os.environ['RPMHOME']
    distOS = rocks.roll.Distribution(self.getDistArch(), 'rocks-dist-os')
    distOS.generate('rolls="%s"' % self.config.getRollRolls())

    #
    # make sure a comps.xml file is present
    #
    comps = os.path.join(distOS.getPath(), 'RedHat', 'base', 'comps.xml')
    if not os.path.exists(comps):
        print '\n\tCould not find a comps.xml file.'
        print '\tCopy a comps.xml file into the CentOS roll\n'
        sys.exit(-1)

    #
    # use yum to resolve dependencies
    #
    # Pick the python version matching the Rocks major release so yum's
    # site-packages can be imported.
    if rocks.version.split('.')[0] == '5':
        pyver = '2.4'
    elif rocks.version.split('.')[0] == '6':
        pyver = '2.6'
    else:
        pyver = '2.7'
    sys.path.append('/usr/lib/python%s/site-packages' % pyver)
    sys.path.append('/usr/lib64/python%s/site-packages' % pyver)
    sys.path.append('/usr/lib/python%s/lib-dynload' % pyver)
    sys.path.append('/usr/lib64/python%s/lib-dynload' % pyver)
    import yum
    a = yum.YumBase()
    a.doConfigSetup(fn='%s' % os.path.join(cwd, 'yum.conf'),
                    init_plugins=False)
    a.conf.cache = 0
    a.doTsSetup()
    a.doRepoSetup()
    a.doRpmDBSetup()
    a.doSackSetup()
    a.doGroupSetup()

    # Expand '@group' entries into their member packages.
    selected = []
    for rpm in rpms + ['@base', '@core']:
        if rpm[0] == '@':
            group = a.comps.return_group(rpm[1:].encode('utf-8'))
            try:
                for r in group.mandatory_packages.keys() + \
                        group.optional_packages.keys() + \
                        group.default_packages.keys():
                    if r not in selected:
                        selected.append(r)
            except:
                # NOTE(review): this bare string is a no-op expression —
                # it looks like it was meant to be printed/logged.
                "comps returned no group for %s" % rpm
        elif rpm not in selected:
            selected.append(rpm)

    pkgs = []
    avail = a.pkgSack.returnNewestByNameArch()
    for p in avail:
        if p.name in selected:
            pkgs.append(p)

    # Iterate dependency closure until no new packages are added.
    done = 0
    while not done:
        done = 1
        results = a.findDeps(pkgs)
        for pkg in results.keys():
            for req in results[pkg].keys():
                reqlist = results[pkg][req]
                for r in reqlist:
                    if r.name not in selected:
                        selected.append(r.name)
                        pkgs.append(r)
                        done = 0

    # Now build a list of rocks (required) and non-rocks (optional)
    # rpms and return both of these list. When the ISOs are created
    # all the required packages are first.
    # NOTE(review): 'rocks' here shadows the imported rocks package; the
    # module is no longer used past this point, so it is harmless.
    rocks = []
    nonrocks = []
    for rpm in distOS.getRPMS():
        if rpm.getBaseName() in selected:
            rocks.append(rpm)
        else:
            nonrocks.append(rpm)

    return (rocks, nonrocks)
def get_base():
    """Return the process-wide yum.YumBase singleton.

    The instance is created lazily on first call; the exit handler is
    registered exactly once, alongside that creation.
    """
    global base
    if base is not None:
        return base
    base = yum.YumBase()
    setup_exit_handler()
    return base
def __init__(self, url, name, yumsrc_conf=YUMSRC_CONF, org="1",
             channel_label="", no_mirrors=False, ca_cert_file=None,
             client_cert_file=None, client_key_file=None):
    """Set up a yum repository source for syncing.

    url           -- repository URL; a query string, if present, is kept as
                     an auth token for mirror requests
    name          -- repo name (also used as the cache id)
    yumsrc_conf   -- yum config file to use; falls back to /dev/null if missing
    org           -- organization id; falsy means "NULL" (no org)
    channel_label -- channel whose config section/repo may be reused
    Proxy settings come from /etc/rhn/rhn.conf first, then yum.conf.
    """
    self.url = url
    self.name = name
    self.yumbase = yum.YumBase()
    self.yumbase.preconf.fn = yumsrc_conf
    if not os.path.exists(yumsrc_conf):
        self.yumbase.preconf.fn = '/dev/null'
    self.configparser = ConfigParser()
    if org:
        self.org = org
    else:
        self.org = "NULL"
    self.proxy_addr = None
    self.proxy_user = None
    self.proxy_pass = None
    self.authtoken = None

    # read the proxy configuration
    # /etc/rhn/rhn.conf has more priority than yum.conf
    initCFG('server.satellite')

    # keep authtokens for mirroring
    (_scheme, _netloc, _path, query, _fragid) = urlparse.urlsplit(url)
    if query:
        self.authtoken = query

    if CFG.http_proxy:
        self.proxy_addr = CFG.http_proxy
        self.proxy_user = CFG.http_proxy_username
        self.proxy_pass = CFG.http_proxy_password
    else:
        # Fall back to yum.conf: prefer a section named after this repo,
        # then the channel, then [main].
        yb_cfg = self.yumbase.conf.cfg
        section_name = None
        if yb_cfg.has_section(self.name):
            section_name = self.name
        elif yb_cfg.has_section(channel_label):
            section_name = channel_label
        elif yb_cfg.has_section('main'):
            section_name = 'main'
        if section_name:
            if yb_cfg.has_option(section_name, option='proxy'):
                self.proxy_addr = yb_cfg.get(section_name, option='proxy')
            if yb_cfg.has_option(section_name, 'proxy_username'):
                self.proxy_user = yb_cfg.get(section_name, 'proxy_username')
            if yb_cfg.has_option(section_name, 'proxy_password'):
                self.proxy_pass = yb_cfg.get(section_name, 'proxy_password')

    self._authenticate(url)

    # Check for settings in yum configuration files (for custom repos/channels only)
    if org:
        repos = self.yumbase.repos.repos
    else:
        repos = None
    if repos and name in repos:
        repo = repos[name]
    elif repos and channel_label in repos:
        repo = repos[channel_label]
        # In case we are using Repo object based on channel config, override it's id to name of the repo
        # To not create channel directories in cache directory
        repo.id = name
    else:
        # Not using values from config files
        repo = yum.yumRepo.YumRepository(name)
        repo.populate(self.configparser, name, self.yumbase.conf)
    self.repo = repo

    self.setup_repo(repo, no_mirrors, ca_cert_file, client_cert_file, client_key_file)
    self.num_packages = 0
    self.num_excluded = 0
    self.groupsfile = None
def __init__(self, archive_filename, roll_name):
    """Record the roll archive location and name, and create the
    yum.YumBase session used to work with it.

    archive_filename -- path to the roll archive
                        # presumably an ISO or directory — TODO confirm with callers
    roll_name        -- name of the roll
    """
    self.archive_filename = archive_filename
    self.roll_name = roll_name
    import yum
    self.yb = yum.YumBase()
def __verify__(cls):
    """Return True if yum is usable here: a generic cache-only setup of a
    fresh YumBase succeeds.  Any failure (missing yum, broken config, bad
    cache) means this backend is unavailable and False is returned.
    """
    try:
        yum.YumBase().doGenericSetup(cache=1)
        return True
    # Was a bare "except:", which would also swallow SystemExit and
    # KeyboardInterrupt; narrow it to ordinary errors.
    except Exception:
        return False
def configure_yum_packages(self):
    """Configure package resources.

    For each entry in self.config['packages'], compare the installed state
    against the declared action ('create' installs, 'remove' erases).
    Packages already in the desired state count toward nsync; the rest are
    installed/erased in a single yum transaction via YumBaseCli and count
    toward osync.  Timing and counters are stored in self.stats['pkg'].
    """
    print("- Configuring Packages")
    runtime_start = time.time()
    nsync = 0
    osync = 0
    fail = 0
    packages = self.config['packages']

    # Read-only YumBase for installed-state queries.
    yb = yum.YumBase()
    yb.preconf.debuglevel = 0
    yb.preconf.errorlevel = 0
    yb.doTsSetup()
    yb.doRpmDBSetup()

    # CLI-flavored instance used to actually run the transaction.
    ybc = cli.YumBaseCli()
    ybc.preconf.debuglevel = 0
    ybc.preconf.errorlevel = 0
    ybc.conf.assumeyes = True
    ybc.doTsSetup()
    ybc.doRpmDBSetup()

    create_pkg_list = []
    remove_pkg_list = []

    for package in packages:
        action = packages[package]['action']
        # In the near future, will use install_name vs package
        # as it includes a more specific package name: "package-version"
        # install_name = packages[package]['install_name']

        # Query the rpmdb once per package (was queried twice).
        installed = yb.isPackageInstalled(package)
        if installed:
            if action == 'create':
                nsync += 1
            if action == 'remove':
                remove_pkg_list.append(package)
        else:
            if action == 'create':
                create_pkg_list.append(package)
            if action == 'remove':
                nsync += 1

    # Don't waste time with YUM if there is nothing to do.
    doTransaction = False

    if create_pkg_list:
        print(" Packages out of sync: %s" % create_pkg_list)
        ybc.installPkgs(create_pkg_list)
        osync += len(create_pkg_list)
        doTransaction = True
    if remove_pkg_list:
        print(" Packages out of sync: %s" % remove_pkg_list)
        ybc.erasePkgs(remove_pkg_list)
        osync += len(remove_pkg_list)
        doTransaction = True

    if doTransaction:
        ybc.buildTransaction()
        ybc.doTransaction()

    runtime_end = time.time()
    runtime = (runtime_end - runtime_start)
    self.stats['pkg'] = {
        'runtime': runtime,
        'nsync': nsync,
        'osync': osync,
        'fail': fail
    }
def __init__(self):
    """Create the yum.YumBase object this instance uses to query updates."""
    self.yum_obj = yum.YumBase()
def install_debuginfos():
    """Install debuginfo packages

    Builds a list of shell commands (run via sudo yum / debuginfo-install,
    because installing needs root) and executes them with get_output().
    When USE_DEBUGINFO_INSTALL is False, the yum API is only used read-only
    to look up installed versions so exact debuginfo NVRs can be requested.
    """
    # Always wanted regardless of the path taken below; entries without a
    # 'version' key are installed by bare name.
    install_pkgs = [{'name': 'gdb'}, {'name': 'python-magic'}]
    cmds = []
    if USE_DEBUGINFO_INSTALL:
        cmds.append("sudo debuginfo-install -y " \
                    "--exclude ompi-debuginfo,gcc-debuginfo," \
                    "gcc-base-debuginfo " \
                    "daos-server cart libpmemobj python openmpi3")
    else:
        import yum
        yum_base = yum.YumBase()
        yum_base.conf.assumeyes = True
        yum_base.setCacheDir(force=True, reuse=True)
        yum_base.repos.enableRepo('*debug*')
        # Packages whose debuginfo lives under a different package name.
        debuginfo_map = {'glibc': 'glibc-debuginfo-common',
                         'libpmem': 'pmdk-debuginfo'}
        # We're not using the yum API to install packages
        # See the comments below.
        #kwarg = {'name': 'gdb'}
        #yum_base.install(**kwarg)
        for pkg in ['python', 'glibc', 'daos', 'systemd', 'ndctl', 'libpmem',
                    'mercury', 'cart', 'libfabric', 'argobots']:
            try:
                debug_pkg = debuginfo_map[pkg]
            except KeyError:
                debug_pkg = pkg + "-debuginfo"
            try:
                # Match the debuginfo version to the installed package.
                pkg_data = yum_base.rpmdb.returnNewestByName(name=pkg)[0]
            except yum.Errors.PackageSackError as expn:
                if expn.__str__().rstrip() == "No Package Matching " + pkg:
                    print("Package {} not installed, "
                          "skipping debuginfo".format(pkg))
                    continue
                else:
                    raise
            # This is how you actually use the API to add a package
            # But since we need sudo to do it, we need to call out to yum
            #kwarg = {'name': debug_pkg,
            #         'version': pkg_data['version'],
            #         'release': pkg_data['release']}
            #yum_base.install(**kwarg)
            install_pkgs.append({
                'name': debug_pkg,
                'version': pkg_data['version'],
                'release': pkg_data['release'],
                'epoch': pkg_data['epoch']
            })

    # This is how you normally finish up a yum transaction, but
    # again, we need to employ sudo
    #yum_base.resolveDeps()
    #yum_base.buildTransaction()
    #yum_base.processTransaction(rpmDisplay=yum.rpmtrans.NoOutputCallBack())

    cmd = "sudo yum -y --enablerepo=\\*debug\\* install"
    for pkg in install_pkgs:
        try:
            # Pin name-version-release when we looked the version up above.
            cmd += " {}-{}-{}".format(pkg['name'], pkg['version'],
                                      pkg['release'])
        except KeyError:
            # Entry has no version info (e.g. gdb) — install by name only.
            cmd += " {}".format(pkg['name'])
    cmds.append(cmd)
    print(get_output(';'.join(cmds)))
def main():
    """Verify that a local (file://) yum repository is internally consistent:
    load repomd.xml and check each listed metadata file's checksum.
    Results are OR-ed into a bitmask retval (BAD_REPOMD, BAD_METADATA, ...)
    and optionally reported to a testopia run.

    NOTE(review): this chunk appears truncated — the package/comps checks
    implied by the options above are not visible here, and only the
    repomd-failure path returns explicitly.
    """
    parser = OptionParser()
    parser.usage = """
    verifytree - verify that a local yum repository is consistent

    verifytree /path/to/repo"""
    parser.add_option("-a","--checkall",action="store_true",default=False,
                      help="Check all packages in the repo")
    parser.add_option("--nocomps", "--nogroups",action="store_true",
                      default=False,
                      help="Do not read and check comps")
    parser.add_option("--noplugins",action="store_true",default=False,
                      help="Do not load any plugins")
    parser.add_option("-t","--testopia",action="store",type="int",
                      help="Report results to the given testopia run number")
    parser.add_option("-r","--treeinfo", action="store_true", default=False,
                      help="check the checksums of listed files in a .treeinfo file, if available")
    opts, args = parser.parse_args()
    if not args:
        print "Must provide a file url to the repo"
        sys.exit(1)
    # FIXME: check that "args" is a valid dir before proceeding
    # (exists, isdir, contains .treeinfo, etc)

    url = args[0]
    if url[0] == '/':
        url = 'file://' + url

    s = urlparse.urlsplit(url)[0]
    h,d = urlparse.urlsplit(url)[1:3]
    if s != 'file':
        print "Must be a file:// url or you will not like this"
        sys.exit(1)
    # Build a repo id from host+path with '/' flattened to '_'.
    repoid = '%s/%s' % (h, d)
    repoid = repoid.replace('/', '_')
    # Bad things happen if we're missing a trailing slash here
    if url[-1] != '/':
        url += '/'

    basedir = url.replace('file://', '')  # for a normal path thing

    my = yum.YumBase()
    if opts.noplugins:
        my.preconf.init_plugins = False
    my.conf.cachedir = getCacheDir()
    my.repos.disableRepo('*')
    newrepo = yum.yumRepo.YumRepository(repoid)
    newrepo.name = repoid
    newrepo.baseurl = [url]
    newrepo.basecachedir = my.conf.cachedir
    newrepo.metadata_expire = 0
    newrepo.timestamp_check = False
    newrepo.enablegroups = 1
    # we want *all* metadata
    newrepo.mdpolicy = 'group:all'

    # add our new repo
    my.repos.add(newrepo)
    # enable that repo
    my.repos.enableRepo(repoid)
    # setup the repo dirs/etc
    my.doRepoSetup(thisrepo=repoid)

    # Initialize results and reporting
    retval = 0
    if opts.testopia:
        run_id = testopia_create_run(opts.testopia)
        report = lambda case,result: testopia_report(run_id,case,result)
    else:
        # No testopia run requested: reporting is a no-op.
        report = lambda case,result: None

    # Check the metadata
    print "Checking repodata:"
    try:
        md_types = newrepo.repoXML.fileTypes()
        print " verifying repomd.xml with yum"
    except yum.Errors.RepoError:
        # Without repomd.xml nothing else can be checked.
        print " failed to load repomd.xml."
        report('REPODATA','FAILED')
        report('CORE_PACKAGES','BLOCKED')
        report('COMPS','BLOCKED')
        return retval | BAD_REPOMD
    for md_type in md_types:
        try:
            print " verifying %s checksum" % md_type
            newrepo.retrieveMD(md_type)
        except Errors.RepoError, e:
            print " %s metadata missing or does not match checksum" % md_type
            retval = retval | BAD_METADATA
def __init__(self):
    """Create the yum.YumBase handle, then run the base-class setup."""
    # yb must exist before the parent initializer runs.
    self.yb = yum.YumBase()
    super(YumValidation, self).__init__()
def main(vardir, factfile, defurl, deffixes=(), chsevs=(), rebootpkgs=()):
    """Download OVAL definitions, run `oscap oval eval`, and write a YAML
    facts file summarizing matched CVE/RHSA references by severity.

    vardir     -- working directory for definitions/results/report files
    factfile   -- output YAML facts file (chmod 0600)
    defurl     -- URL of the OVAL definitions XML
    deffixes   -- {oval_definition_id: replacement text} patches applied to
                  the downloaded definitions before evaluation
    chsevs     -- {ref_id_prefix: severity} severity overrides
    rebootpkgs -- package names whose post-boot update marks the host as
                  needing a reboot

    Exits 0 on most failures (file-age monitoring catches staleness);
    exits 1 only when the facts file cannot be written.
    """
    tries = 1
    success = False
    local_defs = os.path.join(vardir, 'oval-definitions.xml')
    # Up to 3 download attempts, one minute apart.
    while tries < 4:
        logger.info('Downloading %s (try %s)' % (defurl, tries))
        try:
            r = requests.get(defurl, stream=True)
            with open(local_defs, 'w') as fh:
                for chunk in r.iter_content(chunk_size=8092):
                    if chunk:
                        fh.write(chunk)
                fh.close()
            success = True
            break
        except Exception as ex:
            logger.info('Error downloading: %s' % ex)
            logger.info('Sleeping for 1 minute')
            tries += 1
            time.sleep(60)

    if not success:
        logger.info('Was not able to download %s, giving up' % defurl)
        # We exit with code 0 and will let nagios file age monitoring to alert
        # when an oscap report hasn't run in a bit
        sys.exit(0)

    if len(deffixes):
        try:
            root = etree.parse(local_defs).getroot()
            for (ovalid, fix) in deffixes.items():
                matchelt = root.find('.//*[@id="%s"]' % ovalid)
                if matchelt is not None:
                    child = matchelt.getchildren()[0]
                    child.text = fix
                    logger.info('Fixed definition %s=%s' % (ovalid, fix))
                else:
                    logger.info('Did not find anything matching %s' % ovalid)
                    # This will probably give us false-negatives for results, so
                    # exit now and let nagios alert us.
                    sys.exit(0)
            fh = open(local_defs, 'w')
            fh.write(etree.tostring(root, pretty_print=True))
            fh.close()
        except Exception as ex:
            logger.info('Error mangling %s' % local_defs)
            logger.info('Exception: %s' % ex)
            sys.exit(0)

    # Next we run oscap oval eval
    resfile = os.path.join(vardir, 'oval-results.xml')
    repfile = os.path.join(vardir, 'oval-report.html')
    args = ['oscap', 'oval', 'eval',
            '--results', resfile,
            '--report', repfile,
            local_defs]
    logger.info('Running: %s' % ' '.join(args))
    (output, error) = subprocess.Popen(
        args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
    error = error.strip()
    if error:
        logger.info('Error running oscap eval: %s' % error)
        # We exit with code 0 and will let nagios file age monitoring to alert
        # when an oscap report hasn't run in a bit
        sys.exit(0)

    logger.info('Parsing %s' % resfile)
    from pprint import pprint
    try:
        doc = etree.parse(resfile).getroot()
        res = doc.find('res:results', ns)
        defs = doc.find('def:oval_definitions', ns)
        oval = {
            'rhsa': {},
            'cve' : {},
            'severity': { 'cve': {} , 'rhsa': {} },
        }
        # Walk every definition that evaluated true and collect its
        # CVE/RHSA references, bucketed by severity.
        for sddelt in res.findall('res:system/res:definitions/res:definition[@result="true"]', ns):
            defid = sddelt.get('definition_id')
            defelt = defs.find('def:definitions/def:definition[@id="%s"]' % defid, ns)
            metaelt = defelt.find('def:metadata', ns)
            title = metaelt.find('def:title', ns).text
            if metaelt.find('def:advisory/def:severity', ns) != None:
                severity = metaelt.find('def:advisory/def:severity', ns).text.lower()
            else:
                severity = 'unknown'
            for refelt in metaelt.findall('def:reference', ns):
                refid = refelt.get('ref_id')
                refurl = refelt.get('ref_url')
                # Apply any configured severity overrides (prefix match).
                for chid, new_severity in chsevs.items():
                    if refid.find(chid) == 0:
                        logger.info('Changed severity on %s: %s => %s'
                                    % (refid, severity, new_severity))
                        severity = new_severity.lower()
                        break
                source = refelt.get('source').lower()
                if refid not in oval[source]:
                    logger.info('Found %s Reference : %s' % (title, refid))
                    oval[source][refid] = refurl
                    # Count RHSA CVE references
                    if refid.startswith('CVE'):
                        if severity not in oval['severity']['cve']:
                            oval['severity']['cve'][severity] = {
                                'count': 0,
                                'titles': [],
                            }
                        oval['severity']['cve'][severity]['count'] += 1
                        oval['severity']['cve'][severity]['titles'].append(refid)
                    elif refid.startswith('RHSA'):
                        if severity not in oval['severity']['rhsa']:
                            oval['severity']['rhsa'][severity] = {
                                'count': 0,
                                'titles': [],
                            }
                        oval['severity']['rhsa'][severity]['count'] += 1
                        oval['severity']['rhsa'][severity]['titles'].append(refid)
    except Exception as ex:
        logger.info('Was not able to parse %s' % resfile)
        logger.info('Error returned: %s' % ex)
        # We exit with code 0 and will let nagios file age monitoring to alert
        # when an oscap report hasn't run in a bit
        sys.exit(0)

    facts = {
        'openscap': {
            'oval': oval,
        }
    }

    if rebootpkgs:
        # Some magic taken from yum-utils needs-restarting
        # I miss you Seth.
        import yum
        sys.path.insert(0,'/usr/share/yum-cli')
        import utils
        my = yum.YumBase()
        my.preconf.init_plugins = False
        if hasattr(my, 'setCacheDir'):
            my.conf.cache = True
        boot_time = utils.get_boot_time()
        stale_pkgs = []
        # A core package installed after boot means a reboot is pending.
        for pkg in my.rpmdb.searchNames(rebootpkgs):
            if float(pkg.installtime) > float(boot_time):
                logger.info('Core package %s updated, system needs reboot.' % pkg)
                stale_pkgs.append(str(pkg))
        if len(stale_pkgs):
            facts['openscap']['oval']['needs_reboot'] = True
            facts['openscap']['oval']['reboot_pkgs'] = stale_pkgs

    try:
        logger.info('Writing %s' % factfile)
        fout = open(factfile, 'w')
        yaml.safe_dump(facts, fout, default_flow_style=False, explicit_start=True)
        fout.close()
        # set perms on that file to 0600 just in case it's not already
        os.chmod(factfile, 0o600)
    except Exception as ex:
        # The only critical error
        logger.critical('Was not able to write to %s' % factfile)
        sys.exit(1)
def __init__(self, logger, setup, config):
    """Initialize the YUMng client tool.

    Creates the YumBase handle (debug level derived from setup's
    debug/verbose flags), runs the PkgTool base init, records Path entries
    of type 'ignore', marks yum config paths as important, and pre-loads
    available-update and installed-package maps
    ({name: {arch: (epoch, version, release)}}).

    Raises Bcfg2.Client.Tools.toolInstantiationError if yum setup fails.
    """
    self.yb = yum.YumBase()

    if setup['debug']:
        debuglevel = 3
    elif setup['verbose']:
        debuglevel = 2
    else:
        debuglevel = 1

    try:
        self.yb.preconf.debuglevel = debuglevel
    except AttributeError:
        # Older yum without preconf: pass the debug level directly.
        self.yb._getConfig(self.yb.conf.config_file_path,
                           debuglevel=debuglevel)

    Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
    self.ignores = [entry.get('name') for struct in config \
                    for entry in struct \
                    if entry.tag == 'Path' and \
                    entry.get('type') == 'ignore']
    self.instance_status = {}
    self.extra_instances = []
    self.modlists = {}
    self._loadConfig()
    # Changes to yum configuration should trigger package re-verification.
    self.__important__ = self.__important__ + \
                         [entry.get('name') for struct in config \
                          for entry in struct \
                          if entry.tag == 'Path' and \
                          (entry.get('name').startswith('/etc/yum.d') \
                           or entry.get('name').startswith('/etc/yum.repos.d')) \
                          or entry.get('name') == '/etc/yum.conf']
    self.yum_avail = dict()
    self.yum_installed = dict()

    try:
        self.yb.doConfigSetup()
        self.yb.doTsSetup()
        self.yb.doRpmDBSetup()
    except yum.Errors.RepoError:
        e = sys.exc_info()[1]
        self.logger.error("YUMng Repository error: %s" % e)
        raise Bcfg2.Client.Tools.toolInstantiationError
    except Exception:
        e = sys.exc_info()[1]
        self.logger.error("YUMng error: %s" % e)
        raise Bcfg2.Client.Tools.toolInstantiationError

    yup = self.yb.doPackageLists(pkgnarrow='updates')
    # Newer yum exposes rpmdb.pkglist; fall back to getPkgList() otherwise.
    if hasattr(self.yb.rpmdb, 'pkglist'):
        yinst = self.yb.rpmdb.pkglist
    else:
        yinst = self.yb.rpmdb.getPkgList()
    for dest, source in [(self.yum_avail, yup.updates),
                         (self.yum_installed, yinst)]:
        for pkg in source:
            if dest is self.yum_avail:
                # Updates are package objects.
                pname = pkg.name
                data = [(pkg.arch, (pkg.epoch, pkg.version, pkg.release))]
            else:
                # Installed list entries are (name, arch, epoch, ver, rel) tuples.
                pname = pkg[0]
                data = [(pkg[1], (pkg[2], pkg[3], pkg[4]))]
            if pname in dest:
                dest[pname].update(data)
            else:
                dest[pname] = dict(data)