def load(self): """(Re)loads the portage settings and sets the variables.""" kwargs = {} for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")): kwargs[k] = os.environ.get(envvar, None) self.trees = portage.create_trees(trees=self.trees, **kwargs) self.settings = self.trees["/"]["vartree"].settings for myroot in self.trees: if myroot != "/": self.settings = self.trees[myroot]["vartree"].settings break self.settings.unlock() root = self.settings["ROOT"] self.porttree = self.trees[root]["porttree"] self.vartree = self.trees[root]["vartree"] self.virtuals = self.trees[root]["virtuals"] self.global_settings = portage.config(clone=self.settings) self._cpv = None portage.settings = None # we use our own one ...
def __init__(self):
    self.settings = portage.config(clone=portage.settings)
    self.vartree = portage.db[portage.root]['vartree']
    self.vardb = self.vartree.dbapi
    self.portdb = portage.portdb
    self.metadata_keys = [k for k in portage.auxdbkeys if not k.startswith("UNUSED_")]
    self.use = self.settings["USE"]
def __init__(self):
    self._publish = Gate().grants('gentoo', 'repositories')
    self._fill_overlays()
    tree_config = config()
    tree_config['PORTDIR_OVERLAY'] = ' '.join(self._get_active_paths())
    tree_config['PORTDIR'] = main_tree_dir()
    self._dbapi = portdbapi(main_tree_dir(), tree_config)
def _NewPortageConfig(local_config):
    ret = portage.config(local_config=local_config,
                         eprefix=EPREFIX if EPREFIX else None,
                         config_root=os.environ.get('PORTAGE_CONFIGROOT', None),
                         target_root=os.environ.get('ROOT', None))
    ret.lock()
    return ret
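# Hypothetical call site for _NewPortageConfig() above -- a minimal sketch, not
# part of the original source.  It only assumes that EPREFIX is defined in the
# enclosing module (as the function itself already requires); the 'ARCH' key is
# just an illustrative read on the locked config mapping.
cfg = _NewPortageConfig(local_config=True)
arch = cfg.get('ARCH', '')  # mapping-style read; the instance was lock()ed above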
def look_for_keywords(self):
    # work out ebuild's category
    split = self.ebuild.split(os.sep)
    split.pop()
    split.pop()
    cat = split.pop()
    # work out ebuild's name
    ebuild = os.path.basename(self.ebuild)
    ebuild = os.path.splitext(ebuild)[0]
    self.ebuild = '{0}/{1}'.format(cat, ebuild)
    # perform look up
    mysettings = portage.config(local_config=False)
    dbapi = portage.portdbapi(mysettings=mysettings)
    dbapi.porttrees = [dbapi.porttree_root]
    keywords = dbapi.aux_get(self.ebuild, ['KEYWORDS'], dbapi.porttree_root)[0]
    if len(keywords) == 0:
        sys.exit(ERROR_MESSAGES['keywords'].format(ebuild))
    for arch in keywords.split():
        # keep keywords in ~arch only
        if '~' in arch:
            # skip "exotic" arches such as ~amd64-macos and such
            if '-' in arch:
                continue
            arch = arch.strip('~')
            self.arches.append(arch)
            self.cc.append(arch + '@gentoo.org')
def __init__(self, repo_conf): """ This is the repository constructor. Parameter: repo_conf (repository.config): the configuration of this repository """ self._packages = {} # map cpv => package self._atoms = {} # map atom => list of matched cpvs self._cps = {} # map cp => constraint encoding SLOT conflicts self._portdb = portage.portdb # reference to the portage database self._vardb = portage.db['/'][ 'vartree'].dbapi # reference to the installed package database self._config = portage.config( clone=portage.settings ) # reference to the portage utility that get core information for a specify cpv self._installed_packages = frozenset(self._vardb.cpv_all( )) # I do that because cpv_exists is a disk access... # computation of the global fixed product, i.e., the features set by the profile and that cannot be changed by the user self._fixed_use = self._config.useforce.difference( self._config.usemask) self._fixed_iuse = self._config.useforce.union(self._config.usemask) self._fixed_product = self._compute_fixed_product( self._fixed_iuse, self._fixed_use) # setup the repository w.r.t. the given configuration self._setup_get_atom(repo_conf._mask) self._setup_get_fixed_product(repo_conf._use, repo_conf._installed) self._setup_filter_constraint(repo_conf._installed) self._dummy_count = 0 # for dummy packages
def postbuild_success(self, build_info): """ Analyze compilation logs """ statusdict = dict(NO_WALL=list(), NO_WEXTRA=list()) target = os.path.join( '/usr/targets', os.getenv('CURRENT_TARGET', os.path.basename(self.cfg['build']['workdir'])), 'root' ) myconfig = config(target_root=target, config_root=target) for item in myconfig.packages: if item.startswith('*'): continue cp, v, r = pkgsplit(item[1:]) noflags = self.validateLogfile(cp, myconfig, os.path.basename(self.cfg['build']['workdir'])) if noflags: s = '%s-%s%s' % (cp, v, '-%s' % r if r != 'r0' else '') for k in noflags: statusdict[k].append(s) report = ['#. Log file analysis report\n\n'] if statusdict['NO_WALL']: report.append( "Packages which do not have '-Wall' compiler flag enabled:\n\n" + ''.join('\t* %s\n' % s for s in statusdict['NO_WALL']) ) if statusdict['NO_WEXTRA']: report.append( "Packages which do not have '-Wextra' compiler flag enabled:\n\n" + ''.join('\t* %s\n' % s for s in statusdict['NO_WEXTRA']) ) if len(report) == 1: report.append('Congratulations: everything is perfect!') build_info['analyzer'] = ''.join(report)
def get_portage_environ(var):
    """Returns environment variable from portage if possible, else None"""
    try:
        temp = portage.config(clone=portage.settings).environ()[var]
    except:
        temp = None
    return temp
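# Minimal usage sketch for get_portage_environ() above (not from the original
# source).  'DISTDIR' is a standard Portage variable; the fallback path is
# purely illustrative.
distdir = get_portage_environ('DISTDIR')
if distdir is None:
    distdir = '/var/cache/distfiles'  # illustrative fallback only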
def postbuild_success(self, build_info): """ Encryption of rootfs.tgz """ workdir = self.cfg['build']['workdir'] paths = [os.path.join(workdir, 'root/etc/portage/gpg')] target_root = os.path.join(workdir, 'root') profile_paths = config(config_root=target_root, target_root=target_root).profiles paths.extend(profile_paths) def find_pubring(): for path in reversed(paths): path = os.path.join(path, 'pubring.gpg') if os.path.isfile(path): yield path try: keysfile = next(find_pubring()) except StopIteration: self.info('No encryption on this target') return self.redirect_logging() self.gpg = gnupg.GPG(keyring=keysfile) self.gpg_allkeyids = [i['keyid'] for i in self.gpg.list_keys()] if not self.gpg_allkeyids: self.clean_up() raise XUtilsError( 'No gpg keys, externalkeyring=%r, see GnuPG log %r.' % (keysfile, self.cfg['gpg']['logfile']) ) self.process_file('debuginfo', build_info) self.process_file('root', build_info) self.clean_up()
def _init_configs(self, profiles):
    paths = set()  # unique paths
    for profile in profiles:
        paths.add(os.path.realpath(profile))
    self.configs = set()  # unique configs
    for path in paths:
        self.configs.add(portage.config(config_profile_path=path))
def fetch_deltas(self, root_url, output_dir=None):
    if output_dir is None:
        output_dir = os.path.join(portage.settings['DISTDIR'], 'patches')
    mysettings = portage.config(clone=portage.settings)
    mysettings['DISTDIR'] = output_dir
    urls = []
    for record in self.dbrecords:
        urls.append(posixpath.join(root_url, record.delta.fname))
    if 'distpatch' in mysettings.features:
        mysettings.features.remove('distpatch')
    if not fetch(urls, mysettings):
        raise PatchException('Failed to fetch deltas: %s' % urls)
def _get_dbs(self):
    """
    Return a tuple containing (vardb, portdb)
    """
    emerge_config = load_emerge_config()
    emerge_settings, emerge_trees, _mtimedb = emerge_config
    settings = portage.config(clone=emerge_settings)
    portdb = emerge_trees[settings["ROOT"]]["porttree"].dbapi
    if not portdb.frozen:
        portdb.freeze()
    vardb = emerge_trees[settings["ROOT"]]["vartree"].dbapi
    return vardb, portdb
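# Illustrative (assumed) consumer of the (vardb, portdb) pair returned above.
# cpv_all() and cp_list() are standard portage dbapi methods, but 'analyzer'
# -- an instance of the enclosing class -- is hypothetical.
vardb, portdb = analyzer._get_dbs()
installed = set(vardb.cpv_all())                    # every installed package version
tree_versions = portdb.cp_list('sys-apps/portage')  # versions visible in the ebuild repos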
def fetch(self, myfile=None):
    mysettings = portage.config(clone=portage.settings)
    mysettings['O'] = os.path.dirname(dbapi.findname(self.cpv))
    available_files = self.src_uri_map
    if myfile is None:
        files = available_files
    else:
        if myfile not in available_files:
            raise EbuildException('Invalid distfile: %s' % myfile)
        files = OrderedDict()
        files[myfile] = available_files[myfile]
    if 'distpatch' in mysettings.features:
        mysettings.features.remove('distpatch')
    if not fetch(files, mysettings, allow_missing_digests=False):
        raise EbuildException('Failed to fetch distfiles for %s' % self.cpv)
def init_portage_settings(session, config_id, zobcs_settings_dict):
    # check config setup
    check_make_conf(session, config_id, zobcs_settings_dict)
    log_msg = "Check configs done"
    add_logs(session, log_msg, "info", config_id)

    # Get default config from the configs table and default_config=1
    host_config = zobcs_settings_dict['hostname'] + "/" + zobcs_settings_dict['zobcs_config']
    default_config_root = "/var/cache/zobcs/" + zobcs_settings_dict['zobcs_gitreponame'] + "/" + host_config + "/"

    # Set config_root (PORTAGE_CONFIGROOT) to default_config_root
    mysettings = portage.config(config_root=default_config_root)
    log_msg = "Setting default config to: %s" % (host_config,)
    add_logs(session, log_msg, "info", config_id)
    return mysettings
def portage_configuration(self):
    '''System's Portage configuration.

    Basically, a dictionary of `emerge -info` output.

    .. note:: This property is cached after the first invocation
       until the object is garbage collected.
    '''
    if not hasattr(self, '_portage_configuration'):
        self._portage_configuration = portage.config()
    return self._portage_configuration
def get_cmake_deps(ebuild, repo):
    cpv = qatom(ebuild)
    settings = portage.config(config_root='/')
    tmpdir = os.path.join(settings.get('PORTAGE_TMPDIR'), 'portage')
    tmp_path = os.path.join(tmpdir, cpv[0], cpv[1] + '-' + cpv[2])
    repo_path = portage.db['/']["vartree"].settings.repositories.treemap.get(repo)
    ebuild_path = os.path.join(repo_path, cpv[0], cpv[1], cpv[1] + '-' + cpv[2] + '.ebuild')
    subprocess.call(['ebuild', ebuild_path, 'clean', 'prepare'], stdout=subprocess.DEVNULL)
    deps = cdc.getDeps(os.path.join(tmp_path))
    subprocess.call(['ebuild', ebuild_path, 'clean'])
    return deps
def check_double_inclusion(repo_path, profile, out):
    profiles_dir = os.path.join(repo_path, "profiles")
    profile_path = os.path.join(profiles_dir, profile[1])
    c = portage.config(config_profile_path=profile_path)
    if len(c.profiles) != len(set(c.profiles)):
        previous = set()
        duplicates = set()
        for x in c.profiles:
            if x in previous:
                duplicates.add(x)
            previous.add(x)
        out.write("%s\t%s\n" % (profile[1],
            "\t".join(x[len(profiles_dir) + 1:] for x in sorted(duplicates))))
def get_settings(conf=None):
    if not isinstance(conf, dict) and conf:
        raise TypeError("conf must be dict() or None")
    if not conf:
        conf = {}

    # TODO: maybe we should improve it a bit ;)
    mysettings = portage.config(config_incrementals=portage.const.INCREMENTALS,
                                local_config=False)

    if conf["MAIN_ARCH"] == "auto":
        conf["MAIN_ARCH"] = "%s" % mysettings["ACCEPT_KEYWORDS"].split(" ")[0].lstrip("~")
    if conf["TARGET_ARCH"] == "auto":
        conf["TARGET_ARCH"] = "~%s" % mysettings["ACCEPT_KEYWORDS"].split(" ")[0].lstrip("~")

    # TODO: exclude overlay categories from check
    if conf["CATEGORIES"]:
        _mycats = []
        for _cat in conf["CATEGORIES"].split(","):
            _cat = _cat.strip()
            _mycats.append(_cat)
            if _cat not in mysettings.categories:
                raise ValueError("invalid category for -C switch '%s'" % _cat)
        mysettings.categories = _mycats

    # maybe thats not necessary because we override porttrees below..
    _portage_settings("PORTDIR_OVERLAY", "", mysettings)
    trees = portage.create_trees()
    trees["/"]["porttree"].settings = mysettings
    portdb = trees["/"]["porttree"]
    portdb.dbapi.settings = mysettings
    portdb.dbapi.porttrees = [portage.portdb.porttree_root]
    # does it make sense to remove _all_ useless stuff or just leave it as it is?
    #portdb.dbapi._aux_cache_keys.clear()
    #portdb.dbapi._aux_cache_keys.update(["EAPI", "KEYWORDS", "SLOT"])

    conf["PORTDIR"] = portage.settings["PORTDIR"]
    conf["portdb"] = portdb
    return conf
def parse_config(self, root="/", config_root=None, clone=None, store=True): root = os.path.realpath(root) + '/' if not config_root: config_root = root else: config_root = os.path.realpath(config_root) + '/' if not os.path.isdir(root): raise XPortageError("Can't find directory %s" % root) try: config = portage.config(clone=clone, mycpv=None, config_profile_path=None, config_incrementals=INCREMENTALS, config_root = config_root, target_root = root, local_config=True) except Exception, e: raise XPortageError('Portage error: %s' % e)
def check_make_conf_guest(session, zobcs_settings_dict, config_id):
    git_repo = "/var/cache/zobcs/" + zobcs_settings_dict['zobcs_gitreponame'] + "/"
    git_pull(session, git_repo, config_id)
    make_conf_file = "/etc/portage/make.conf"
    # Check if we can open the file and close it
    # Check if we have some error in the file (portage.util.getconfig)
    # Check if we have an environment error with the config (settings.validate)
    try:
        make_conf_checksum_tree = portage.checksum.sha256hash(make_conf_file)[0]
        portage.util.getconfig(make_conf_file, tolerant=0, allow_sourcing=True, expand=True)
        mysettings = portage.config(config_root="/")
        mysettings.validate()
    # With errors we return False
    except Exception as e:
        return False
    ConfigsMetaDataInfo = get_configmetadata_info(session, config_id)
    print('make_conf_checksum_tree', make_conf_checksum_tree)
    print('make_conf_checksum_db', ConfigsMetaDataInfo.Checksum)
    if make_conf_checksum_tree != ConfigsMetaDataInfo.Checksum:
        return False
    return True
def create_portdb(portdir=None, cachedir=None, config_root=None, target_root=None,
                  profile=None, **kwargs):
    if cachedir is not None:
        os.environ["PORTAGE_DEPCACHEDIR"] = cachedir
    if config_root is None:
        config_root = os.environ.get("PORTAGE_CONFIGROOT", "/")
    if target_root is None:
        target_root = os.environ.get("ROOT", "/")
    if profile is None:
        profile = ""

    portage = import_portage()
    try:
        from portage import const as portage_const
    except ImportError:
        import portage_const

    # Disable overlays because we only generate metadata for the main repo.
    os.environ["PORTDIR_OVERLAY"] = ""
    conf = portage.config(config_profile_path=profile,
                          config_incrementals=portage_const.INCREMENTALS,
                          target_root=target_root,
                          config_root=config_root)
    if portdir is None:
        portdir = conf["PORTDIR"]

    # The canonical path is the key for portdb.auxdb.
    portdir = os.path.realpath(portdir)
    conf["PORTDIR"] = portdir
    conf.backup_changes("PORTDIR")

    portdb = portage.portdbapi(portdir, mysettings=conf)
    return portdb
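# Hypothetical invocation of create_portdb() above; the cache directory path is
# made up for illustration.
portdb = create_portdb(cachedir='/var/tmp/metadata-cache')
for cpv in portdb.cp_list('sys-apps/portage'):
    print(cpv)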
def check_make_conf(session, config_id, zobcs_settings_dict): log_msg = "Checking configs for changes and errors" add_logs(session, log_msg, "info", config_id) git_repo = "/var/cache/zobcs/" + zobcs_settings_dict['zobcs_gitreponame'] + "/" git_pull(session, git_repo, config_id) configsDict = {} for ConfigInfo in get_config_all_info(session): attDict={} # Set the config dir SetupInfo = get_setup_info(session, ConfigInfo.ConfigId) check_config_dir = git_repo + ConfigInfo.Hostname +"/" + SetupInfo.Setup + "/" make_conf_file = check_config_dir + "etc/portage/make.conf" ConfigsMetaDataInfo = get_configmetadata_info(session, ConfigInfo.ConfigId) # Check if we can take a checksum on it. # Check if we have some error in the file. (portage.util.getconfig) # Check if we envorment error with the config. (settings.validate) try: make_conf_checksum_tree = portage.checksum.sha256hash(make_conf_file)[0] portage.util.getconfig(make_conf_file, tolerant=0, allow_sourcing=True, expand=True) mysettings = portage.config(config_root = check_config_dir) mysettings.validate() # With errors we update the db on the config and disable the config except ParseError as e: ConfigsMetaDataInfo.ConfigErrorText = str(e) ConfigsMetaDataInfo.Active = False log_msg = "%s FAIL!" % (ConfigInfo.Hostname,) add_logs(session, log_msg, "info", config_id) session.commit() else: ConfigsMetaDataInfo.Active = True log_msg = "%s PASS" % (ConfigInfo.Hostname,) add_logs(session, log_msg, "info", config_id) session.commit() if make_conf_checksum_tree != ConfigsMetaDataInfo.Checksum: ConfigsMetaDataInfo.MakeConfText = get_file_text(make_conf_file) ConfigsMetaDataInfo.Checksum = make_conf_checksum_tree session.commit() log_msg = "Checking configs for changes and errors ... Done" add_logs(session, log_msg, "info", config_id)
def add_buildquery_main(config_id):
    conn = 0
    config_setup = get_config(conn, config_id)
    log_msg = "Adding build jobs for: %s" % (config_setup,)
    add_zobcs_logs(conn, log_msg, "info", config_id)
    check_make_conf()
    log_msg = "Check configs done"
    add_zobcs_logs(conn, log_msg, "info", config_id)

    # Get default config from the configs table and default_config=1
    default_config_root = "/var/cache/zobcs/" + zobcs_settings_dict['zobcs_gitreponame'] + "/" + config_setup + "/"
    # Set config_root (PORTAGE_CONFIGROOT) to default_config_root
    mysettings = portage.config(config_root=default_config_root)
    myportdb = portage.portdbapi(mysettings=mysettings)
    init_package = zobcs_package(mysettings, myportdb)
    log_msg = "Setting default config to: %s" % (config_setup,)
    add_zobcs_logs(conn, log_msg, "info", config_id)

    # Use all except 2 cores when multiprocessing
    pool_cores = multiprocessing.cpu_count()
    if pool_cores >= 3:
        use_pool_cores = pool_cores - 2
    else:
        use_pool_cores = 1
    pool = multiprocessing.Pool(processes=use_pool_cores)

    repo_trees_list = myportdb.porttrees
    for repo_dir in repo_trees_list:
        repo = myportdb.getRepositoryName(repo_dir)
        repo_dir_list = []
        repo_dir_list.append(repo_dir)
        # Get the package list from the repo
        package_list_tree = myportdb.cp_all(trees=repo_dir_list)
        for cp in sorted(package_list_tree):
            pool.apply_async(add_cpv_query_pool, (mysettings, myportdb, config_id, cp, repo,))
    pool.close()
    pool.join()
    log_msg = "Adding build jobs for: %s ... Done." % (config_setup,)
    add_zobcs_logs(conn, log_msg, "info", config_id)
    return True
def __automask(myroot): cfg = config(config_root=unicode(myroot), target_root=unicode(myroot)) for directory in cfg.profiles: mask_pkgs = Packages() unmask_pkgs = Packages() pf = PackagesFile(directory) for package in pf.list_pkgs().list(): if package.version: if package.operator == '=' and not package.removal: mask_pkgs += Package(package.name, version=package.version, operator='>') if package.operator == '=' and package.removal: lowest_version = str() for pkg in pf.list_pkgs().lookup(package.name): lowest_version = package.version if vercmp( package.version, pkg.version ) <= 0 else pkg.version if package.version == lowest_version: continue else: unmask_pkgs += Package(package.name, version=lowest_version, operator='<=') pm = PackageMaskFile(directory) pm.update(mask_pkgs) pu = PackageUnmaskFile(directory) pu.update(unmask_pkgs)
def config_get_use(ebuild_id, config_id): CS = get_object_or_404(Configs, ConfigId = config_id) ConfigSetupName = CS.Config ConfigSetupHostName = CS.HostName # Change config/setup # my_new_setup = "/var/cache/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + host_config + "/" my_new_setup = "/" mysettings_setup = portage.config(config_root = my_new_setup) myportdb_setup = portage.portdbapi(mysettings=mysettings_setup) # get cpv EM = EbuildsMetadata.objects.get(EbuildId = ebuild_id) C = EM.EbuildId.PackageId.CategoryId.Category P = EM.EbuildId.PackageId.Package V = EM.EbuildId.Version build_cpv = C + "/" + P + "-" + V # Get the iuse and use flags for that config/setup and cpv init_useflags = zobcs_use_flags(mysettings_setup, myportdb_setup, build_cpv) iuse_flags_list, final_use_list = init_useflags.get_flags() iuse_flags_list2 = [] for iuse_line in iuse_flags_list: iuse_flags_list2.append( init_useflags.reduce_flag(iuse_line)) final_use, use_expand_hidden, usemasked, useforced = init_useflags.get_all_cpv_use() # Dict the needed info attDict = {} attDict['useflags'] = final_use_list attDict['iuse'] = iuse_flags_list2 attDict['usemasked'] = usemasked attDict['use_expand_hidden'] = use_expand_hidden # Clean some cache myportdb_setup.close_caches() portage.portdbapi.portdbapi_instances.remove(myportdb_setup) return attDict
def testDoebuild(self): """ Invoke portage.doebuild() with the fd_pipes parameter, and check that the expected output appears in the pipe. This functionality is not used by portage internally, but it is supported for API consumers (see bug #475812). """ output_fd = 200 ebuild_body = ["S=${WORKDIR}"] for phase_func in ( "pkg_info", "pkg_nofetch", "pkg_pretend", "pkg_setup", "src_unpack", "src_prepare", "src_configure", "src_compile", "src_test", "src_install", ): ebuild_body.append(("%s() { echo ${EBUILD_PHASE}" " 1>&%s; }") % (phase_func, output_fd)) ebuild_body.append("") ebuild_body = "\n".join(ebuild_body) ebuilds = { "app-misct/foo-1": { "EAPI": "5", "MISC_CONTENT": ebuild_body, } } # Override things that may be unavailable, or may have portability # issues when running tests in exotic environments. # prepstrip - bug #447810 (bash read builtin EINTR problem) true_symlinks = ("find", "prepstrip", "sed", "scanelf") true_binary = portage.process.find_binary("true") self.assertEqual(true_binary is None, False, "true command not found") dev_null = open(os.devnull, "wb") playground = ResolverPlayground(ebuilds=ebuilds) try: QueryCommand._db = playground.trees root_config = playground.trees[playground.eroot]["root_config"] portdb = root_config.trees["porttree"].dbapi settings = portage.config(clone=playground.settings) if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ: settings["__PORTAGE_TEST_HARDLINK_LOCKS"] = os.environ[ "__PORTAGE_TEST_HARDLINK_LOCKS"] settings.backup_changes("__PORTAGE_TEST_HARDLINK_LOCKS") settings.features.add("noauto") settings.features.add("test") settings["PORTAGE_PYTHON"] = portage._python_interpreter settings["PORTAGE_QUIET"] = "1" settings["PYTHONDONTWRITEBYTECODE"] = os.environ.get( "PYTHONDONTWRITEBYTECODE", "") fake_bin = os.path.join(settings["EPREFIX"], "bin") portage.util.ensure_dirs(fake_bin) for x in true_symlinks: os.symlink(true_binary, os.path.join(fake_bin, x)) settings["__PORTAGE_TEST_PATH_OVERRIDE"] = fake_bin settings.backup_changes("__PORTAGE_TEST_PATH_OVERRIDE") cpv = "app-misct/foo-1" metadata = dict( zip(Package.metadata_keys, portdb.aux_get(cpv, Package.metadata_keys))) pkg = Package( built=False, cpv=cpv, installed=False, metadata=metadata, root_config=root_config, type_name="ebuild", ) settings.setcpv(pkg) ebuildpath = portdb.findname(cpv) self.assertNotEqual(ebuildpath, None) for phase in ( "info", "nofetch", "pretend", "setup", "unpack", "prepare", "configure", "compile", "test", "install", "qmerge", "clean", "merge", ): pr, pw = os.pipe() producer = DoebuildProcess( doebuild_pargs=(ebuildpath, phase), doebuild_kwargs={ "settings": settings, "mydbapi": portdb, "tree": "porttree", "vartree": root_config.trees["vartree"], "fd_pipes": { 1: dev_null.fileno(), 2: dev_null.fileno(), output_fd: pw, }, "prev_mtimes": {}, }, ) consumer = PipeReader(input_files={"producer": pr}) task_scheduler = TaskScheduler(iter([producer, consumer]), max_jobs=2) try: task_scheduler.start() finally: # PipeReader closes pr os.close(pw) task_scheduler.wait() output = portage._unicode_decode( consumer.getvalue()).rstrip("\n") if task_scheduler.returncode != os.EX_OK: portage.writemsg(output, noiselevel=-1) self.assertEqual(task_scheduler.returncode, os.EX_OK) if phase not in ("clean", "merge", "qmerge"): self.assertEqual(phase, output) finally: dev_null.close() playground.cleanup() QueryCommand._db = None
def __init__(self, packages):
    """Query all relevant data for version data formatting"""
    self.vartree = port.db[port.root]['vartree'].dbapi
    self.mysettings = port.config(local_config=False)
    self.versions = self.__getVersions(packages)
    self.masks = list(map(lambda x: self.__getMaskStatus(x), packages))
import popen2 import shutil import wx from wx.lib.dialogs import MultipleChoiceDialog from portage import config, portdb, db, pkgsplit, catpkgsplit, settings sys.path.insert(0, "/usr/lib/gentoolkit/pym") import gentoolkit import options import __version__ modulePath = "/usr/lib/python%s/site-packages/abeni" % sys.version[0:3] try: env = config(clone=settings).environ() except: print "ERROR: Can't read portage configuration from /etc/make.conf" sys.exit(1) defaults = ["SRC_URI", "HOMEPAGE", "DEPEND", "RDEPEND", "DESCRIPTION", \ "S", "IUSE", "SLOT", "KEYWORDS", "LICENSE"] distdir = env['DISTDIR'] portdir = env['PORTDIR'] portdir_overlay = env['PORTDIR_OVERLAY'].split(" ")[0] if portdir_overlay[-1] == "/": portdir_overlay = portdir_overlay[:-1] portage_tmpdir = env['PORTAGE_TMPDIR']
DEP_SLOT = 2 dep_cache = {} # very simply, we extract the dependencies for each package for pkg in pkgs_to_reorder: try: deps, slot = varapi.aux_get(pkg, ["DEPEND", "SLOT"]) except ValueError: sys.stderr.write("Error getting dependency information off " + pkg + "\n") continue try: realdeps = portage.dep_check(deps, fakedbapi) except TypeError: # we're probably running >=portage-2.0.50 pkgsettings = portage.config(clone=portage.settings) realdeps = portage.dep_check(deps, fakedbapi, pkgsettings) vardeps = [] # match() finds the versions of all those that are installed for dep in realdeps[1]: vardeps = vardeps + varapi.match(dep) dep_cache[pkg] = (0, vardeps, slot) # then we just naively append to a sorted list of deps using this rule. # if a dependency is going to be merged, we add it to the list like # with the dep then the pkg itself. # eg: dev-python/pyqt deps on dev-python/sip, so we the list will look like # [dev-python/sip, dev-python/pyqt] for pkg, depinfo in dep_cache.items(): dep_to_add = []
#!/usr/bin/env python

import os
import sys

import portage

if len(sys.argv) != 2 or not portage.isvalidatom(sys.argv[1]):
    sys.stderr.write("usage: %s <atom>\n" % os.path.basename(sys.argv[0]))
    sys.exit(1)

input_atom = portage.dep.Atom(sys.argv[1])

settings = portage.config(config_profile_path="", local_config=False)
settings["ACCEPT_KEYWORDS"] = "**"
settings.backup_changes("ACCEPT_KEYWORDS")
settings.lock()

porttree = portage.portagetree(settings=settings)
portdb = porttree.dbapi
trees = {"/": {"porttree": porttree}}
dep_keys = ("DEPEND", "RDEPEND", "PDEPEND")

for cp in portdb.cp_all():
    for cpv in portdb.cp_list(cp):
        metadata = dict(zip(dep_keys, portdb.aux_get(cpv, dep_keys)))
        dep_str = " ".join(metadata[k] for k in dep_keys)
        success, atoms = portage.dep_check(dep_str, None, settings,
            use="all", trees=trees, myroot=settings["ROOT"])
        if not success:
def __init__(self, _unused_param=DeprecationWarning, mysettings=None): """ @param _unused_param: deprecated, use mysettings['PORTDIR'] instead @type _unused_param: None @param mysettings: an immutable config instance @type mysettings: portage.config """ from portage import config if mysettings: self.settings = mysettings else: from portage import settings self.settings = config(clone=settings) if _unused_param is not DeprecationWarning: warnings.warn("The first parameter of the " + \ "portage.dbapi.porttree.portdbapi" + \ " constructor is unused since portage-2.1.8. " + \ "mysettings['PORTDIR'] is used instead.", DeprecationWarning, stacklevel=2) self.repositories = self.settings.repositories self.treemap = self.repositories.treemap # This is strictly for use in aux_get() doebuild calls when metadata # is generated by the depend phase. It's safest to use a clone for # this purpose because doebuild makes many changes to the config # instance that is passed in. self.doebuild_settings = config(clone=self.settings) self.depcachedir = os.path.realpath(self.settings.depcachedir) if os.environ.get("SANDBOX_ON") == "1": # Make api consumers exempt from sandbox violations # when doing metadata cache updates. sandbox_write = os.environ.get("SANDBOX_WRITE", "").split(":") if self.depcachedir not in sandbox_write: sandbox_write.append(self.depcachedir) os.environ["SANDBOX_WRITE"] = \ ":".join(filter(None, sandbox_write)) self.porttrees = list(self.settings.repositories.repoLocationList()) # This is used as sanity check for aux_get(). If there is no # root eclass dir, we assume that PORTDIR is invalid or # missing. This check allows aux_get() to detect a missing # repository and return early by raising a KeyError. self._have_root_eclass_dir = os.path.isdir( os.path.join(self.settings.repositories.mainRepoLocation(), "eclass")) #if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening) self.xcache = {} self.frozen = 0 #Keep a list of repo names, sorted by priority (highest priority first). self._ordered_repo_name_list = tuple(reversed(self.repositories.prepos_order)) self.auxdbmodule = self.settings.load_best_module("portdbapi.auxdbmodule") self.auxdb = {} self._pregen_auxdb = {} # If the current user doesn't have depcachedir write permission, # then the depcachedir cache is kept here read-only access. self._ro_auxdb = {} self._init_cache_dirs() try: depcachedir_st = os.stat(self.depcachedir) depcachedir_w_ok = os.access(self.depcachedir, os.W_OK) except OSError: depcachedir_st = None depcachedir_w_ok = False cache_kwargs = {} depcachedir_unshared = False if portage.data.secpass < 1 and \ depcachedir_w_ok and \ depcachedir_st is not None and \ os.getuid() == depcachedir_st.st_uid and \ os.getgid() == depcachedir_st.st_gid: # If this user owns depcachedir and is not in the # portage group, then don't bother to set permissions # on cache entries. This makes it possible to run # egencache without any need to be a member of the # portage group. depcachedir_unshared = True else: cache_kwargs.update({ 'gid' : portage_gid, 'perms' : 0o664 }) # If secpass < 1, we don't want to write to the cache # since then we won't be able to apply group permissions # to the cache entries/directories. 
if (secpass < 1 and not depcachedir_unshared) or not depcachedir_w_ok: for x in self.porttrees: self.auxdb[x] = volatile.database( self.depcachedir, x, self._known_keys, **cache_kwargs) try: self._ro_auxdb[x] = self.auxdbmodule(self.depcachedir, x, self._known_keys, readonly=True, **cache_kwargs) except CacheError: pass else: for x in self.porttrees: if x in self.auxdb: continue # location, label, auxdbkeys self.auxdb[x] = self.auxdbmodule( self.depcachedir, x, self._known_keys, **cache_kwargs) if "metadata-transfer" not in self.settings.features: for x in self.porttrees: if x in self._pregen_auxdb: continue cache = self._create_pregen_cache(x) if cache is not None: self._pregen_auxdb[x] = cache # Selectively cache metadata in order to optimize dep matching. self._aux_cache_keys = set( ["BDEPEND", "DEPEND", "EAPI", "INHERITED", "IUSE", "KEYWORDS", "LICENSE", "PDEPEND", "PROPERTIES", "RDEPEND", "repository", "RESTRICT", "SLOT", "DEFINED_PHASES", "REQUIRED_USE"]) self._aux_cache = {} self._better_cache = None self._broken_ebuilds = set()
import sys sys.path.insert(0, "/usr/lib/portage/pym") from portage import config from wxPython.wx import * from wxPython.grid import * import popen2, gadfly, os, string, options env = config().environ() portdir_overlay = env['PORTDIR_OVERLAY'].split(":")[0] portdir = env['PORTDIR'] if portdir_overlay[-1] == "/": portdir_overlay = portdir_overlay[:-1] #--------------------------------------------------------------------------- class CustomDataTable(wxPyGridTableBase): def __init__(self): wxPyGridTableBase.__init__(self) loc = os.path.expanduser('~/.abeni/bugz') if not os.path.exists("%s/EBUILDS.grl" % loc): self.createDB() else: self.ConnectDB() self.colLabels = [ 'Package', 'Bugz Nbr', 'Bugzilla Status', 'Bugzilla Rsltn', 'Mine', 'In Portage', ' Abeni Status '
import os
import sys

import portage
from portage.dbapi.porttree import portdbapi

portdir = os.path.realpath(sys.argv[1])

env = {}
env["PORTAGE_REPOSITORIES"] = """
[gentoo_prefix]
location = %s
""" % portdir

settings = portage.config(env=env)
portdb = portdbapi(mysettings=settings)
portdb.porttrees = [portdir]

for i in range(2, len(sys.argv)):
    for ebuild in portdb.cp_list(sys.argv[i]):
        for distfile in portdb.getFetchMap(ebuild):
            print(distfile)
def __init__(self, _unused_param=DeprecationWarning, mysettings=None): """ @param _unused_param: deprecated, use mysettings['PORTDIR'] instead @type _unused_param: None @param mysettings: an immutable config instance @type mysettings: portage.config """ from portage import config if mysettings: self.settings = mysettings else: from portage import settings self.settings = config(clone=settings) if _unused_param is not DeprecationWarning: warnings.warn("The first parameter of the " + \ "portage.dbapi.porttree.portdbapi" + \ " constructor is unused since portage-2.1.8. " + \ "mysettings['PORTDIR'] is used instead.", DeprecationWarning, stacklevel=2) self.repositories = self.settings.repositories self.treemap = self.repositories.treemap # This is strictly for use in aux_get() doebuild calls when metadata # is generated by the depend phase. It's safest to use a clone for # this purpose because doebuild makes many changes to the config # instance that is passed in. self.doebuild_settings = config(clone=self.settings) self.depcachedir = os.path.realpath(self.settings.depcachedir) if os.environ.get("SANDBOX_ON") == "1": # Make api consumers exempt from sandbox violations # when doing metadata cache updates. sandbox_write = os.environ.get("SANDBOX_WRITE", "").split(":") if self.depcachedir not in sandbox_write: sandbox_write.append(self.depcachedir) os.environ["SANDBOX_WRITE"] = \ ":".join(filter(None, sandbox_write)) self.porttrees = list(self.settings.repositories.repoLocationList()) # This is used as sanity check for aux_get(). If there is no # root eclass dir, we assume that PORTDIR is invalid or # missing. This check allows aux_get() to detect a missing # portage tree and return early by raising a KeyError. self._have_root_eclass_dir = os.path.isdir( os.path.join(self.settings.repositories.mainRepoLocation(), "eclass")) #if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening) self.xcache = {} self.frozen = 0 #Keep a list of repo names, sorted by priority (highest priority first). self._ordered_repo_name_list = tuple(reversed(self.repositories.prepos_order)) self.auxdbmodule = self.settings.load_best_module("portdbapi.auxdbmodule") self.auxdb = {} self._pregen_auxdb = {} # If the current user doesn't have depcachedir write permission, # then the depcachedir cache is kept here read-only access. self._ro_auxdb = {} self._init_cache_dirs() try: depcachedir_st = os.stat(self.depcachedir) depcachedir_w_ok = os.access(self.depcachedir, os.W_OK) except OSError: depcachedir_st = None depcachedir_w_ok = False cache_kwargs = {} depcachedir_unshared = False if portage.data.secpass < 1 and \ depcachedir_w_ok and \ depcachedir_st is not None and \ os.getuid() == depcachedir_st.st_uid and \ os.getgid() == depcachedir_st.st_gid: # If this user owns depcachedir and is not in the # portage group, then don't bother to set permissions # on cache entries. This makes it possible to run # egencache without any need to be a member of the # portage group. depcachedir_unshared = True else: cache_kwargs.update(portage._native_kwargs({ 'gid' : portage_gid, 'perms' : 0o664 })) # If secpass < 1, we don't want to write to the cache # since then we won't be able to apply group permissions # to the cache entries/directories. 
if (secpass < 1 and not depcachedir_unshared) or not depcachedir_w_ok: for x in self.porttrees: self.auxdb[x] = volatile.database( self.depcachedir, x, self._known_keys, **cache_kwargs) try: self._ro_auxdb[x] = self.auxdbmodule(self.depcachedir, x, self._known_keys, readonly=True, **cache_kwargs) except CacheError: pass else: for x in self.porttrees: if x in self.auxdb: continue # location, label, auxdbkeys self.auxdb[x] = self.auxdbmodule( self.depcachedir, x, self._known_keys, **cache_kwargs) if "metadata-transfer" not in self.settings.features: for x in self.porttrees: if x in self._pregen_auxdb: continue cache = self._create_pregen_cache(x) if cache is not None: self._pregen_auxdb[x] = cache # Selectively cache metadata in order to optimize dep matching. self._aux_cache_keys = set( ["DEPEND", "EAPI", "HDEPEND", "INHERITED", "IUSE", "KEYWORDS", "LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND", "repository", "RESTRICT", "SLOT", "DEFINED_PHASES", "REQUIRED_USE"]) self._aux_cache = {} self._broken_ebuilds = set()
def git_repo_sync_main(session): zobcs_settings_dict = read_config_settings() _hostname = zobcs_settings_dict['hostname'] _config = zobcs_settings_dict['zobcs_config'] config_id = get_config_id(session, _config, _hostname) host_config = _hostname +"/" + _config default_config_root = "/var/cache/zobcs/" + zobcs_settings_dict['zobcs_gitreponame'] + "/" + host_config + "/" mysettings = portage.config(config_root = default_config_root) myportdb = portage.portdbapi(mysettings=mysettings) GuestBusy = True log_msg = "Waiting for Guest to be idel" add_logs(session, log_msg, "info", config_id) guestid_list = [] for config in get_config_all_info(session): if not config.Host: guestid_list.append(config.ConfigId) while GuestBusy: Status_list = [] for guest_id in guestid_list: ConfigMetadata = get_configmetadata_info(session, guest_id) Status_list.append(ConfigMetadata.Status) if not 'Runing' in Status_list: GuestBusy = False else: time.sleep(30) try: os.remove(mysettings['PORTDIR'] + "/profiles/config/parent") os.rmdir(mysettings['PORTDIR'] + "/profiles/config") except: pass repo_cp_dict = {} for repo_dir in git_repos_list(myportdb): reponame = myportdb.getRepositoryName(repo_dir) attr = {} repo = git.Repo(repo_dir) info_list, repouptodate = git_fetch(repo) if not repouptodate: cp_list = [] for diff_line in repo.git.diff('HEAD^').splitlines(): if re.search("^diff --git.*/Manifest", diff_line): diff_line2 = re.split(' b/', re.sub('diff --git', '', diff_line)) diff_line3 = re.sub(' a/', '', diff_line2[0]) if diff_line3 == diff_line2[1] or "Manifest" in diff_line3: cp = re.sub('/Manifest', '', diff_line3) cp_list.append(cp) else: cp = re.sub('/Manifest', '', diff_line2[1]) cp_list.append(cp) attr['cp_list'] = cp_list repo_cp_dict[reponame] = attr git_merge(repo, info_list[0]) else: log_msg = "Repo %s is up to date" % (reponame) add_logs(session, log_msg, "info", config_id) # Need to add a config dir so we can use profiles/base for reading the tree. # We may allready have the dir on local repo when we sync. try: os.mkdir(mysettings['PORTDIR'] + "/profiles/config", 0o777) with open(mysettings['PORTDIR'] + "/profiles/config/parent", "w") as f: f.write("../base\n") f.close() except: pass log_msg = "Repo sync ... Done." add_logs(session, log_msg, "info", config_id) return repo_cp_dict
def main(): '''Get atoms from a stabilisation bug. This tool requires a Bugzilla API key to operate, read from the envvar APIKEY. Generate one at https://bugs.gentoo.org/userprefs.cgi?tab=apikey If the variable TESTFILE is defined, the batch_stabilize-compatible output will be written to that file. ''' # logging.basicConfig(level=logging.DEBUG) parser = argparse.ArgumentParser(description=main.__doc__) group = parser.add_mutually_exclusive_group(required=True) group.add_argument('--all-bugs', action='store_true', help='process all bugs for the active architecture') group.add_argument('-b', '--bug', type=int, help='bug to process') group.add_argument('-s', '--security', action='store_true', help='fetch only security bugs') buggroup = parser.add_mutually_exclusive_group() buggroup.add_argument('--keywordreq', action='store_true', help='work on keywording bugs') buggroup.add_argument('--stablereq', action='store_true', help='work on stabilisation bugs') parser.add_argument('-a', '--arch', type=str, help='target architecture (defaults to current)') parser.add_argument('-n', '--no-depends', action='store_true', help='exclude bugs that depend on other bugs') parser.add_argument( '--no-sanity-check', action='store_true', help='include bugs that are not marked as sanity checked') args = parser.parse_args() if args.all_bugs is True and args.keywordreq is False and args.stablereq is False: print( '--all-bugs must be called with one of --keywordreq or --stablereq' ) return 2 if 'APIKEY' in os.environ: session.params.update({'Bugzilla_api_key': os.environ['APIKEY']}) else: print('FATAL ERROR: Gentoo Bugzilla API key not defined.') print( 'Generate one at https://bugs.gentoo.org/userprefs.cgi?tab=apikey and export in envvar APIKEY.' ) return 2 if 'TESTFILE' in os.environ: global file file = open(os.environ['TESTFILE'], 'w') arch = args.arch if not arch: # This is usually frowned upon, but portage is heavy, so only import it if necessary import portage arch = portage.config().get('ARCH') arch_email = arch + '@gentoo.org' if args.bug: params = {'id': args.bug} else: params = { 'resolution': '---', 'email1': arch_email, 'emailassigned_to1': 1, 'emailcc1': 1, 'emailtype1': 'equals', } if args.no_sanity_check is not True: params['f1'] = 'flagtypes.name' params['o1'] = 'equals' params['v1'] = 'sanity-check+' if args.keywordreq is True: params['component'] = ['Keywording'] elif args.stablereq is True: params['component'] = ['Stabilization', 'Vulnerabilities'] elif args.security is True: params['component'] = ['Vulnerabilities'] bugs = get_bugs(params) depends_bugs = [] for bug in bugs: depends_bugs += bug['depends_on'] # otherwise, id == '' which will query every single bug ever filed if len(depends_bugs) >= 1: params = {'id': depends_bugs} depends_bugs = get_bugs(params) depends_bugs_dict = {} for bug in depends_bugs: depends_bugs_dict[bug['id']] = bug all_attachments = xmlrpc.client.ServerProxy( 'https://bugs.gentoo.org/xmlrpc.cgi').Bug.attachments( {'ids': [x['id'] for x in bugs]})['bugs'] return_value = 1 for bug in bugs: if arch_email not in bug['cc']: error('# {} is not in CC for bug #{}, skipping...'.format( arch, bug['id'])) error() continue atoms = '' if bug['cf_stabilisation_atoms']: atoms += bug['cf_stabilisation_atoms'] for attachment in all_attachments[str(bug['id'])]: if not attachment: continue if attachment['is_obsolete'] == 1: continue for flag in attachment['flags']: if flag['name'] == 'stabilization-list' and flag[ 'status'] == '+': if atoms[-1] is not "\n": atoms += "\n" atoms += 
str(attachment['data']) if not atoms: error('# No atoms found in bug #{}, skipping...'.format(bug['id'])) error() continue if bug['depends_on']: unresolved_depends = False for depends_bug in bug['depends_on']: current_bug = depends_bugs_dict[depends_bug] if current_bug['status'] == 'RESOLVED': continue if current_bug['component'] == 'Stabilization' or current_bug[ 'component'] == 'Keywording': sanity_checked = False for flag in current_bug['flags']: if flag['name'] == 'sanity-check' and flag[ 'status'] == '+': sanity_checked = True break if arch_email not in current_bug['cc'] and sanity_checked: continue unresolved_depends = True break if unresolved_depends is True and args.no_depends is True: error( '# bug #{} depends on other unresolved bugs, skipping...'. format(bug['id'])) error() continue eprint('# bug #{}'.format(bug['id'])) atoms_to_print = set() for line in atoms.splitlines(): if not line: continue atom, _, arches = line.partition(' ') if not atom.startswith('='): atom = '=' + atom if not arches or arch in arches.split( ' ') or '~' + arch in arches.split(' '): atoms_to_print.add(atom) return_value = 0 eprint("\n".join(sorted(atoms_to_print))) eprint('') if 'TESTFILE' in os.environ: file.close() return return_value
import os
import sys

sys.path.insert(0, "/usr/lib/portage/pym")
import portage

REPO_NAME = open('profiles/repo_name').read().strip()
REMIND = True

# This is awful, but portage really doesn't provide a much better way to get
# the data
REPO = portage.config().repositories.prepos[REPO_NAME]
CACHE = next(REPO.iter_pregenerated_caches(''))

DUE = {'amd64': {}, 'x86': {}}
for line in open("support/stabilisation.%s" % ("rem" if REMIND else "org")):
    if not line.strip():
        continue
    words = line.split()
    if REMIND:
        DUE[words[7]][words[8][:-2]] = words[1]
    else:
        DUE[words[2]][words[1]] = words[4][1:]

MASKED = []
for line in open('profiles/package.mask'):
import gentoolkit __version__="0.0.10" if os.getuid() != 0: print red("You must be root to run bumper.") sys.exit(1) if len(sys.argv) < 3: print red("\nI need a category/package to bump up and a destination version.\n") print green("Example:") print "bumper games-arcade/pacman-1.0 2.0\n" sys.exit(1) try: env = config(clone=settings).environ() except: print "ERROR: Can't read portage configuration from /etc/make.conf" sys.exit(1) try: #In case people have multiple PORTDIR_OVERLAY directories, use first one. # See http://bugs.gentoo.org/show_bug.cgi?id=10803 PORTDIR_OVERLAY = env['PORTDIR_OVERLAY'].split(" ")[0] except: print red("ERROR: You must define PORTDIR_OVERLAY in your /etc/make.conf") print green("You can simply uncomment this line:") print green("#PORTDIR_OVERLAY='/usr/local/portage'") print green("Then: mkdir -p /usr/local/portage") sys.exit(1)
import sys
import string
import re
import os

sys.path.insert(0, "/usr/lib/portage/pym")
import portage

settings = portage.config().environ()
portdir = settings['PORTDIR']
overlays = settings['PORTDIR_OVERLAY'].split(" ")

# error codes
ERR_FILE_NOT_FOUND = 1
ERR_SYNTAX = 2

# class for code blocks like variables, functions
# or generic statements including leading comments
class element:
    def __init__(self, pos, etype="unknown", content=""):
        self.pos = pos
        self.content = content
        self.type = etype

    def getPos(self):
        return self.pos

    def getContent(self):
        return self.content
def deprecated_profile_check(settings=None): config_root = None eprefix = None deprecated_profile_file = None if settings is not None: config_root = settings["PORTAGE_CONFIGROOT"] eprefix = settings["EPREFIX"] for x in reversed(settings.profiles): deprecated_profile_file = os.path.join(x, "deprecated") if os.access(deprecated_profile_file, os.R_OK): break else: deprecated_profile_file = None if deprecated_profile_file is None: deprecated_profile_file = os.path.join(config_root or "/", DEPRECATED_PROFILE_FILE) if not os.access(deprecated_profile_file, os.R_OK): deprecated_profile_file = os.path.join(config_root or "/", 'etc', 'make.profile', 'deprecated') if not os.access(deprecated_profile_file, os.R_OK): return with io.open(_unicode_encode(deprecated_profile_file, encoding=_encodings['fs'], errors='strict'), mode='r', encoding=_encodings['content'], errors='replace') as f: dcontent = f.readlines() writemsg(colorize( "BAD", _("\n!!! Your current profile is " "deprecated and not supported anymore.")) + "\n", noiselevel=-1) writemsg( colorize("BAD", _("!!! Use eselect profile to update your " "profile.")) + "\n", noiselevel=-1) if not dcontent: writemsg(colorize( "BAD", _("!!! Please refer to the " "Gentoo Upgrading Guide.")) + "\n", noiselevel=-1) return True newprofile = dcontent[0].rstrip("\n") writemsg(colorize( "BAD", _("!!! Please upgrade to the " "following profile if possible:")) + "\n\n", noiselevel=-1) writemsg(8 * " " + colorize("GOOD", newprofile) + "\n\n", noiselevel=-1) if len(dcontent) > 1: writemsg(_("To upgrade do the following steps:\n"), noiselevel=-1) for myline in dcontent[1:]: writemsg(myline, noiselevel=-1) writemsg("\n\n", noiselevel=-1) else: writemsg(_("You may use the following command to upgrade:\n\n"), noiselevel=-1) writemsg(8 * " " + colorize("INFORM", 'eselect profile set ' + newprofile) + "\n\n", noiselevel=-1) if settings is not None: main_repo_loc = settings.repositories.mainRepoLocation() new_profile_path = os.path.join(main_repo_loc, "profiles", newprofile.rstrip("\n")) if os.path.isdir(new_profile_path): new_config = portage.config(config_root=config_root, config_profile_path=new_profile_path, eprefix=eprefix) if not new_config.profiles: writemsg("\n %s %s\n" % (colorize("WARN", "*"), _("You must update portage before you " "can migrate to the above profile.")), noiselevel=-1) writemsg(" %s %s\n\n" % (colorize("WARN", "*"), _("In order to update portage, " "run 'emerge --oneshot sys-apps/portage'.")), noiselevel=-1) return True
def deprecated_profile_check(settings=None): config_root = None eprefix = None deprecated_profile_file = None if settings is not None: config_root = settings["PORTAGE_CONFIGROOT"] eprefix = settings["EPREFIX"] for x in reversed(settings.profiles): deprecated_profile_file = os.path.join(x, "deprecated") if os.access(deprecated_profile_file, os.R_OK): break else: deprecated_profile_file = None if deprecated_profile_file is None: deprecated_profile_file = os.path.join(config_root or "/", DEPRECATED_PROFILE_FILE) if not os.access(deprecated_profile_file, os.R_OK): deprecated_profile_file = os.path.join(config_root or "/", 'etc', 'make.profile', 'deprecated') if not os.access(deprecated_profile_file, os.R_OK): return with io.open(_unicode_encode(deprecated_profile_file, encoding=_encodings['fs'], errors='strict'), mode='r', encoding=_encodings['content'], errors='replace') as f: dcontent = f.readlines() writemsg(colorize("BAD", _("\n!!! Your current profile is " "deprecated and not supported anymore.")) + "\n", noiselevel=-1) writemsg(colorize("BAD", _("!!! Use eselect profile to update your " "profile.")) + "\n", noiselevel=-1) if not dcontent: writemsg(colorize("BAD", _("!!! Please refer to the " "Gentoo Upgrading Guide.")) + "\n", noiselevel=-1) return True newprofile = dcontent[0].rstrip("\n") writemsg(colorize("BAD", _("!!! Please upgrade to the " "following profile if possible:")) + "\n\n", noiselevel=-1) writemsg(8*" " + colorize("GOOD", newprofile) + "\n\n", noiselevel=-1) if len(dcontent) > 1: writemsg(_("To upgrade do the following steps:\n"), noiselevel=-1) for myline in dcontent[1:]: writemsg(myline, noiselevel=-1) writemsg("\n\n", noiselevel=-1) else: writemsg(_("You may use the following command to upgrade:\n\n"), noiselevel=-1) writemsg(8*" " + colorize("INFORM", 'eselect profile set ' + newprofile) + "\n\n", noiselevel=-1) if settings is not None: main_repo_loc = settings.repositories.mainRepoLocation() new_profile_path = os.path.join(main_repo_loc, "profiles", newprofile.rstrip("\n")) if os.path.isdir(new_profile_path): new_config = portage.config(config_root=config_root, config_profile_path=new_profile_path, eprefix=eprefix) if not new_config.profiles: writemsg("\n %s %s\n" % (colorize("WARN", "*"), _("You must update portage before you " "can migrate to the above profile.")), noiselevel=-1) writemsg(" %s %s\n\n" % (colorize("WARN", "*"), _("In order to update portage, " "run 'emerge --oneshot %s'." % (portage.const.PORTAGE_PACKAGE_ATOM,))), noiselevel=-1) return True
def unmerge(root_config, myopts, unmerge_action, unmerge_files, ldpath_mtimes, autoclean=0, clean_world=1, clean_delay=1, ordered=0, raise_on_error=0, scheduler=None, writemsg_level=portage.util.writemsg_level): """ Returns os.EX_OK if no errors occur, 1 if an error occurs, and 130 if interrupted due to a 'no' answer for --ask. """ if clean_world: clean_world = myopts.get('--deselect') != 'n' rval, pkgmap = _unmerge_display(root_config, myopts, unmerge_action, unmerge_files, clean_delay=clean_delay, ordered=ordered, writemsg_level=writemsg_level) if rval != os.EX_OK: return rval enter_invalid = '--ask-enter-invalid' in myopts vartree = root_config.trees["vartree"] sets = root_config.sets settings = root_config.settings mysettings = portage.config(clone=settings) xterm_titles = "notitles" not in settings.features if "--pretend" in myopts: #we're done... return return os.EX_OK if "--ask" in myopts: uq = UserQuery(myopts) if uq.query("Would you like to unmerge these packages?", enter_invalid) == "No": # enter pretend mode for correct formatting of results myopts["--pretend"] = True print() print("Quitting.") print() return 128 + signal.SIGINT if not vartree.dbapi.writable: writemsg_level("!!! %s\n" % _("Read-only file system: %s") % vartree.dbapi._dbroot, level=logging.ERROR, noiselevel=-1) return 1 #the real unmerging begins, after a short delay unless we're raging.... if not unmerge_action == "rage-clean" and clean_delay and not autoclean: countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging") all_selected = set() all_selected.update(*[x["selected"] for x in pkgmap]) # Set counter variables curval = 1 maxval = len(all_selected) for x in range(len(pkgmap)): for y in pkgmap[x]["selected"]: emergelog(xterm_titles, "=== Unmerging... (" + y + ")") message = ">>> Unmerging ({0} of {1}) {2}...\n".format( colorize("MERGE_LIST_PROGRESS", str(curval)), colorize("MERGE_LIST_PROGRESS", str(maxval)), y) writemsg_level(message, noiselevel=-1) curval += 1 mysplit = y.split("/") #unmerge... retval = portage.unmerge(mysplit[0], mysplit[1], settings=mysettings, vartree=vartree, ldpath_mtimes=ldpath_mtimes, scheduler=scheduler) if retval != os.EX_OK: emergelog(xterm_titles, " !!! unmerge FAILURE: " + y) if raise_on_error: raise UninstallFailure(retval) sys.exit(retval) else: if clean_world and hasattr(sets["selected"], "cleanPackage")\ and hasattr(sets["selected"], "lock"): sets["selected"].lock() if hasattr(sets["selected"], "load"): sets["selected"].load() sets["selected"].cleanPackage(vartree.dbapi, y) sets["selected"].unlock() emergelog(xterm_titles, " >>> unmerge success: " + y) if clean_world and hasattr(sets["selected"], "remove")\ and hasattr(sets["selected"], "lock"): sets["selected"].lock() # load is called inside remove() for s in root_config.setconfig.active: sets["selected"].remove(SETPREFIX + s) sets["selected"].unlock() return os.EX_OK
def unmerge(root_config, myopts, unmerge_action, unmerge_files, ldpath_mtimes, autoclean=0, clean_world=1, clean_delay=1, ordered=0, raise_on_error=0, scheduler=None, writemsg_level=portage.util.writemsg_level): if clean_world: clean_world = myopts.get('--deselect') != 'n' quiet = "--quiet" in myopts enter_invalid = '--ask-enter-invalid' in myopts settings = root_config.settings sets = root_config.sets vartree = root_config.trees["vartree"] candidate_catpkgs=[] global_unmerge=0 xterm_titles = "notitles" not in settings.features out = portage.output.EOutput() pkg_cache = {} db_keys = list(vartree.dbapi._aux_cache_keys) def _pkg(cpv): pkg = pkg_cache.get(cpv) if pkg is None: pkg = Package(built=True, cpv=cpv, installed=True, metadata=zip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)), operation="uninstall", root_config=root_config, type_name="installed") pkg_cache[cpv] = pkg return pkg vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH) try: # At least the parent needs to exist for the lock file. portage.util.ensure_dirs(vdb_path) except portage.exception.PortageException: pass vdb_lock = None try: if os.access(vdb_path, os.W_OK): vdb_lock = portage.locks.lockdir(vdb_path) realsyslist = sets["system"].getAtoms() syslist = [] for x in realsyslist: mycp = portage.dep_getkey(x) if mycp in settings.getvirtuals(): providers = [] for provider in settings.getvirtuals()[mycp]: if vartree.dbapi.match(provider): providers.append(provider) if len(providers) == 1: syslist.extend(providers) else: syslist.append(mycp) mysettings = portage.config(clone=settings) if not unmerge_files: if unmerge_action == "unmerge": print() print(bold("emerge unmerge") + " can only be used with specific package names") print() return 0 else: global_unmerge = 1 localtree = vartree # process all arguments and add all # valid db entries to candidate_catpkgs if global_unmerge: if not unmerge_files: candidate_catpkgs.extend(vartree.dbapi.cp_all()) else: #we've got command-line arguments if not unmerge_files: print("\nNo packages to unmerge have been provided.\n") return 0 for x in unmerge_files: arg_parts = x.split('/') if x[0] not in [".","/"] and \ arg_parts[-1][-7:] != ".ebuild": #possible cat/pkg or dep; treat as such candidate_catpkgs.append(x) elif unmerge_action in ["prune","clean"]: print("\n!!! Prune and clean do not accept individual" + \ " ebuilds as arguments;\n skipping.\n") continue else: # it appears that the user is specifying an installed # ebuild and we're in "unmerge" mode, so it's ok. if not os.path.exists(x): print("\n!!! The path '"+x+"' doesn't exist.\n") return 0 absx = os.path.abspath(x) sp_absx = absx.split("/") if sp_absx[-1][-7:] == ".ebuild": del sp_absx[-1] absx = "/".join(sp_absx) sp_absx_len = len(sp_absx) vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH) vdb_len = len(vdb_path) sp_vdb = vdb_path.split("/") sp_vdb_len = len(sp_vdb) if not os.path.exists(absx+"/CONTENTS"): print("!!! Not a valid db dir: "+str(absx)) return 0 if sp_absx_len <= sp_vdb_len: # The Path is shorter... so it can't be inside the vdb. 
print(sp_absx) print(absx) print("\n!!!",x,"cannot be inside "+ \ vdb_path+"; aborting.\n") return 0 for idx in range(0,sp_vdb_len): if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]: print(sp_absx) print(absx) print("\n!!!", x, "is not inside "+\ vdb_path+"; aborting.\n") return 0 print("="+"/".join(sp_absx[sp_vdb_len:])) candidate_catpkgs.append( "="+"/".join(sp_absx[sp_vdb_len:])) newline="" if (not "--quiet" in myopts): newline="\n" if settings["ROOT"] != "/": writemsg_level(darkgreen(newline+ \ ">>> Using system located in ROOT tree %s\n" % \ settings["ROOT"])) if (("--pretend" in myopts) or ("--ask" in myopts)) and \ not ("--quiet" in myopts): writemsg_level(darkgreen(newline+\ ">>> These are the packages that would be unmerged:\n")) # Preservation of order is required for --depclean and --prune so # that dependencies are respected. Use all_selected to eliminate # duplicate packages since the same package may be selected by # multiple atoms. pkgmap = [] all_selected = set() for x in candidate_catpkgs: # cycle through all our candidate deps and determine # what will and will not get unmerged try: mymatch = vartree.dbapi.match(x) except portage.exception.AmbiguousPackageName as errpkgs: print("\n\n!!! The short ebuild name \"" + \ x + "\" is ambiguous. Please specify") print("!!! one of the following fully-qualified " + \ "ebuild names instead:\n") for i in errpkgs[0]: print(" " + green(i)) print() sys.exit(1) if not mymatch and x[0] not in "<>=~": mymatch = localtree.dep_match(x) if not mymatch: portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \ (x, unmerge_action), noiselevel=-1) continue pkgmap.append( {"protected": set(), "selected": set(), "omitted": set()}) mykey = len(pkgmap) - 1 if unmerge_action=="unmerge": for y in mymatch: if y not in all_selected: pkgmap[mykey]["selected"].add(y) all_selected.add(y) elif unmerge_action == "prune": if len(mymatch) == 1: continue best_version = mymatch[0] best_slot = vartree.getslot(best_version) best_counter = vartree.dbapi.cpv_counter(best_version) for mypkg in mymatch[1:]: myslot = vartree.getslot(mypkg) mycounter = vartree.dbapi.cpv_counter(mypkg) if (myslot == best_slot and mycounter > best_counter) or \ mypkg == portage.best([mypkg, best_version]): if myslot == best_slot: if mycounter < best_counter: # On slot collision, keep the one with the # highest counter since it is the most # recently installed. 
continue best_version = mypkg best_slot = myslot best_counter = mycounter pkgmap[mykey]["protected"].add(best_version) pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \ if mypkg != best_version and mypkg not in all_selected) all_selected.update(pkgmap[mykey]["selected"]) else: # unmerge_action == "clean" slotmap={} for mypkg in mymatch: if unmerge_action == "clean": myslot = localtree.getslot(mypkg) else: # since we're pruning, we don't care about slots # and put all the pkgs in together myslot = 0 if myslot not in slotmap: slotmap[myslot] = {} slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg for mypkg in vartree.dbapi.cp_list( portage.cpv_getkey(mymatch[0])): myslot = vartree.getslot(mypkg) if myslot not in slotmap: slotmap[myslot] = {} slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg for myslot in slotmap: counterkeys = list(slotmap[myslot]) if not counterkeys: continue counterkeys.sort() pkgmap[mykey]["protected"].add( slotmap[myslot][counterkeys[-1]]) del counterkeys[-1] for counter in counterkeys[:]: mypkg = slotmap[myslot][counter] if mypkg not in mymatch: counterkeys.remove(counter) pkgmap[mykey]["protected"].add( slotmap[myslot][counter]) #be pretty and get them in order of merge: for ckey in counterkeys: mypkg = slotmap[myslot][ckey] if mypkg not in all_selected: pkgmap[mykey]["selected"].add(mypkg) all_selected.add(mypkg) # ok, now the last-merged package # is protected, and the rest are selected numselected = len(all_selected) if global_unmerge and not numselected: portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n") return 0 if not numselected: portage.writemsg_stdout( "\n>>> No packages selected for removal by " + \ unmerge_action + "\n") return 0 finally: if vdb_lock: vartree.dbapi.flush_cache() portage.locks.unlockdir(vdb_lock) from portage._sets.base import EditablePackageSet # generate a list of package sets that are directly or indirectly listed in "selected", # as there is no persistent list of "installed" sets installed_sets = ["selected"] stop = False pos = 0 while not stop: stop = True pos = len(installed_sets) for s in installed_sets[pos - 1:]: if s not in sets: continue candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)] if candidates: stop = False installed_sets += candidates installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active] del stop, pos # we don't want to unmerge packages that are still listed in user-editable package sets # listed in "world" as they would be remerged on the next update of "world" or the # relevant package sets. unknown_sets = set() for cp in range(len(pkgmap)): for cpv in pkgmap[cp]["selected"].copy(): try: pkg = _pkg(cpv) except KeyError: # It could have been uninstalled # by a concurrent process. continue if unmerge_action != "clean" and \ root_config.root == "/" and \ portage.match_from_list( portage.const.PORTAGE_PACKAGE_ATOM, [pkg]): msg = ("Not unmerging package %s since there is no valid " + \ "reason for portage to unmerge itself.") % (pkg.cpv,) for line in textwrap.wrap(msg, 75): out.eerror(line) # adjust pkgmap so the display output is correct pkgmap[cp]["selected"].remove(cpv) all_selected.remove(cpv) pkgmap[cp]["protected"].add(cpv) continue parents = [] for s in installed_sets: # skip sets that the user requested to unmerge, and skip world # user-selected set, since the package will be removed from # that set later on. 
if s in root_config.setconfig.active or s == "selected": continue if s not in sets: if s in unknown_sets: continue unknown_sets.add(s) out = portage.output.EOutput() out.eerror(("Unknown set '@%s' in %s%s") % \ (s, root_config.root, portage.const.WORLD_SETS_FILE)) continue # only check instances of EditablePackageSet as other classes are generally used for # special purposes and can be ignored here (and are usually generated dynamically, so the # user can't do much about them anyway) if isinstance(sets[s], EditablePackageSet): # This is derived from a snippet of code in the # depgraph._iter_atoms_for_pkg() method. for atom in sets[s].iterAtomsForPackage(pkg): inst_matches = vartree.dbapi.match(atom) inst_matches.reverse() # descending order higher_slot = None for inst_cpv in inst_matches: try: inst_pkg = _pkg(inst_cpv) except KeyError: # It could have been uninstalled # by a concurrent process. continue if inst_pkg.cp != atom.cp: continue if pkg >= inst_pkg: # This is descending order, and we're not # interested in any versions <= pkg given. break if pkg.slot_atom != inst_pkg.slot_atom: higher_slot = inst_pkg break if higher_slot is None: parents.append(s) break if parents: print(colorize("WARN", "Package %s is going to be unmerged," % cpv)) print(colorize("WARN", "but still listed in the following package sets:")) print(" %s\n" % ", ".join(parents)) del installed_sets numselected = len(all_selected) if not numselected: writemsg_level( "\n>>> No packages selected for removal by " + \ unmerge_action + "\n") return 0 # Unmerge order only matters in some cases if not ordered: unordered = {} for d in pkgmap: selected = d["selected"] if not selected: continue cp = portage.cpv_getkey(next(iter(selected))) cp_dict = unordered.get(cp) if cp_dict is None: cp_dict = {} unordered[cp] = cp_dict for k in d: cp_dict[k] = set() for k, v in d.items(): cp_dict[k].update(v) pkgmap = [unordered[cp] for cp in sorted(unordered)] for x in range(len(pkgmap)): selected = pkgmap[x]["selected"] if not selected: continue for mytype, mylist in pkgmap[x].items(): if mytype == "selected": continue mylist.difference_update(all_selected) cp = portage.cpv_getkey(next(iter(selected))) for y in localtree.dep_match(cp): if y not in pkgmap[x]["omitted"] and \ y not in pkgmap[x]["selected"] and \ y not in pkgmap[x]["protected"] and \ y not in all_selected: pkgmap[x]["omitted"].add(y) if global_unmerge and not pkgmap[x]["selected"]: #avoid cluttering the preview printout with stuff that isn't getting unmerged continue if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist: writemsg_level(colorize("BAD","\a\n\n!!! " + \ "'%s' is part of your system profile.\n" % cp), level=logging.WARNING, noiselevel=-1) writemsg_level(colorize("WARN","\a!!! 
Unmerging it may " + \ "be damaging to your system.\n\n"), level=logging.WARNING, noiselevel=-1) if clean_delay and "--pretend" not in myopts and "--ask" not in myopts: countdown(int(settings["EMERGE_WARNING_DELAY"]), colorize("UNMERGE_WARN", "Press Ctrl-C to Stop")) if not quiet: writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1) else: writemsg_level(bold(cp) + ": ", noiselevel=-1) for mytype in ["selected","protected","omitted"]: if not quiet: writemsg_level((mytype + ": ").rjust(14), noiselevel=-1) if pkgmap[x][mytype]: sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]] sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp)) for pn, ver, rev in sorted_pkgs: if rev == "r0": myversion = ver else: myversion = ver + "-" + rev if mytype == "selected": writemsg_level( colorize("UNMERGE_WARN", myversion + " "), noiselevel=-1) else: writemsg_level( colorize("GOOD", myversion + " "), noiselevel=-1) else: writemsg_level("none ", noiselevel=-1) if not quiet: writemsg_level("\n", noiselevel=-1) if quiet: writemsg_level("\n", noiselevel=-1) writemsg_level("\nAll selected packages: %s\n" % " ".join(all_selected), noiselevel=-1) writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \ " packages are slated for removal.\n") writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \ " and " + colorize("GOOD", "'omitted'") + \ " packages will not be removed.\n\n") if "--pretend" in myopts: #we're done... return return 0 if "--ask" in myopts: if userquery("Would you like to unmerge these packages?", enter_invalid) == "No": # enter pretend mode for correct formatting of results myopts["--pretend"] = True print() print("Quitting.") print() return 0 #the real unmerging begins, after a short delay.... if clean_delay and not autoclean: countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging") for x in range(len(pkgmap)): for y in pkgmap[x]["selected"]: writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1) emergelog(xterm_titles, "=== Unmerging... ("+y+")") mysplit = y.split("/") #unmerge... retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"], mysettings, unmerge_action not in ["clean","prune"], vartree=vartree, ldpath_mtimes=ldpath_mtimes, scheduler=scheduler) if retval != os.EX_OK: emergelog(xterm_titles, " !!! unmerge FAILURE: "+y) if raise_on_error: raise UninstallFailure(retval) sys.exit(retval) else: if clean_world and hasattr(sets["selected"], "cleanPackage")\ and hasattr(sets["selected"], "lock"): sets["selected"].lock() if hasattr(sets["selected"], "load"): sets["selected"].load() sets["selected"].cleanPackage(vartree.dbapi, y) sets["selected"].unlock() emergelog(xterm_titles, " >>> unmerge success: "+y) if clean_world and hasattr(sets["selected"], "remove")\ and hasattr(sets["selected"], "lock"): sets["selected"].lock() # load is called inside remove() for s in root_config.setconfig.active: sets["selected"].remove(SETPREFIX + s) sets["selected"].unlock() return 1
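A hedged sketch of the per-slot bookkeeping used by the prune/clean branches above: group the installed versions of one package by SLOT and keep only the entry with the highest installation counter, i.e. the most recently merged one. The package name is just an example.

import portage

vartree = portage.db[portage.root]["vartree"]
vardb = vartree.dbapi

keep, drop = set(), set()
slotmap = {}
# counter -> cpv, grouped by slot
for cpv in vardb.cp_list("sys-kernel/gentoo-sources"):  # example package
    slotmap.setdefault(vartree.getslot(cpv), {})[vardb.cpv_counter(cpv)] = cpv

for counters in slotmap.values():
    ordered = sorted(counters)
    keep.add(counters[ordered[-1]])                  # newest in this slot is protected
    drop.update(counters[c] for c in ordered[:-1])   # older ones would be selected

print("protected: " + " ".join(sorted(keep)))
print("selected: " + " ".join(sorted(drop)))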
#!/usr/bin/env python
# Checking installed packages using linux-info.eclass
# for necessary kernel options

import portage

vartree = portage.db[portage.root]['vartree']
all_cpvs = vartree.dbapi.cpv_all()
settings = portage.config()

for cpv in all_cpvs:
    inherit = vartree.dbapi.aux_get(cpv, ['INHERITED'])[0]
    if 'linux-info' in inherit:
        pv = portage.catsplit(cpv)[1]
        cpvpath = vartree.dbapi.getpath(cpv) + '/' + pv + '.ebuild'
        print('Checking: ' + cpv)
        portage.doebuild(cpvpath, 'clean', settings=settings,
                         tree='vartree', vartree=vartree)
        portage.doebuild(cpvpath, 'setup', settings=settings,
                         tree='vartree', vartree=vartree)
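A related, hedged sketch: the same aux_get() metadata interface is also exposed by the ebuild repository database (portage.portdb), for example to inspect which eclasses a not-yet-installed version would inherit. "app-shells/bash" is only an example package.

import portage

portdb = portage.portdb
for cpv in portdb.cp_list("app-shells/bash"):  # example package
    eapi, inherited = portdb.aux_get(cpv, ["EAPI", "INHERITED"])
    print("{}: EAPI={} inherits: {}".format(cpv, eapi, inherited))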
__email__ = "*****@*****.**"
__productname__ = "herdstat"
__description__ = "display herd statistics"
__version__ = "0.1"

import os
import sys
from xml.sax import make_parser, SAXException

sys.path.insert(0, "/usr/lib/portage/pym")
import portage
from output import *
from portage_herds import Herds_XML
#from portage_metadata import Metadata_XML

settings = portage.config(clone=portage.settings)
herds_url = "http://www.gentoo.org/cgi-bin/viewcvs.cgi/misc/herds.xml?rev=HEAD;cvsroot=gentoo;content-type=text/plain"


def print_usage():
    print "usage: " + __productname__ + " [options] herd(s)\n"
    print "    -v,--verbose    Display verbose output"
    print "    -p,--package    Show statistics regarding packages for specified herd."
    print "                    Note that this option may take a while."
    print "    -h,--help       Display this help message"
    print '\n' + __productname__ + " will check the HERDS environment variable for the"
    print "location of a local herds.xml. If it is not set, or the file does"
    print "not exist, " + __productname__ + " will try to retrieve it from gentoo.org"


def get_herds_xml(filename):
    """open herds.xml and return file object"""
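The get_herds_xml() body is truncated above. As a purely hypothetical sketch (the helper name and fallback behaviour are my own, not the original implementation), the HERDS lookup described in print_usage() could be handled along these lines:

def get_herds_xml_sketch(filename=None):
    """Hypothetical helper: return an open local herds.xml, or None."""
    import os
    local = filename or os.environ.get("HERDS")
    if local and os.path.isfile(local):
        return open(local)
    # caller would fall back to downloading herds_url
    return None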
# replace the custom SRC_URI with the proper upstream one
upstream_line = 'SRC_URI="https://github.com/lxc/lxd/archive/lxd-{}.tar.gz"\n'.format(
    upstream_version)

foundline = False
with open(ebuildpath, "rt") as ebuildfile:
    for line in ebuildfile.readlines():
        if line == upstream_line:
            foundline = True
            break

if not foundline:
    with open(ebuildpath, "a") as ebuildfile:
        ebuildfile.write(upstream_line)

# Run through the unpack stage and stop
conf = portage.config()
portage.doebuild(ebuildpath, "clean", settings=conf, tree="porttree")
portage.doebuild(ebuildpath, "manifest", settings=conf, tree="porttree")
print('completed manifest')
portage.doebuild(ebuildpath, "unpack", settings=conf, tree="porttree")
print('completed unpack')

# unpack is racy on the first download
time.sleep(2)

# Shuffle workdir so it looks like a go hierarchy
# EGO_PN_PARENT="github.com/lxc"
# EGO_PN="${EGO_PN_PARENT}/lxd"
workdir = "/var/tmp/portage/app-emulation/lxd-{}/work".format(version)
port_s = path.join(workdir, "lxd-{}".format(upstream_version))
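The block above assumes ebuildpath is already known from earlier in the script. A hedged sketch of one way to resolve it from a cpv is portdbapi.findname(); the version string below is only a placeholder.

import portage

cpv = "app-emulation/lxd-4.0.0"  # placeholder cpv
ebuildpath = portage.portdb.findname(cpv)
if ebuildpath is None:
    raise SystemExit("no ebuild found for " + cpv)
print(ebuildpath)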
def __init__(self, _unused_param=None, mysettings=None): """ @param _unused_param: deprecated, use mysettings['PORTDIR'] instead @type _unused_param: None @param mysettings: an immutable config instance @type mysettings: portage.config """ portdbapi.portdbapi_instances.append(self) from portage import config if mysettings: self.settings = mysettings else: from portage import settings self.settings = config(clone=settings) porttree_root = self.settings['PORTDIR'] # always show this warning after this parameter # is unused in stable portage if _unused_param is not None and _unused_param != porttree_root: warnings.warn("The first parameter of the " + \ "portage.dbapi.porttree.portdbapi" + \ " constructor is now unused. Use " + \ "mysettings['PORTDIR'] instead.", DeprecationWarning, stacklevel=2) # This is strictly for use in aux_get() doebuild calls when metadata # is generated by the depend phase. It's safest to use a clone for # this purpose because doebuild makes many changes to the config # instance that is passed in. self.doebuild_settings = config(clone=self.settings) self.depcachedir = os.path.realpath(self.settings.depcachedir) if os.environ.get("SANDBOX_ON") == "1": # Make api consumers exempt from sandbox violations # when doing metadata cache updates. sandbox_write = os.environ.get("SANDBOX_WRITE", "").split(":") if self.depcachedir not in sandbox_write: sandbox_write.append(self.depcachedir) os.environ["SANDBOX_WRITE"] = \ ":".join(filter(None, sandbox_write)) porttrees = [os.path.realpath(porttree_root)] porttrees.extend(os.path.realpath(x) for x in \ shlex_split(self.settings.get('PORTDIR_OVERLAY', ''))) treemap = {} repository_map = {} self.treemap = treemap self._repository_map = repository_map identically_named_paths = {} for path in porttrees: if path in repository_map: continue repo_name_path = os.path.join(path, REPO_NAME_LOC) try: repo_name = codecs.open( _unicode_encode(repo_name_path, encoding=_encodings['fs'], errors='strict'), mode='r', encoding=_encodings['repo.content'], errors='replace').readline().strip() except EnvironmentError: # warn about missing repo_name at some other time, since we # don't want to see a warning every time the portage module is # imported. pass else: identically_named_path = treemap.get(repo_name) if identically_named_path is not None: # The earlier one is discarded. del repository_map[identically_named_path] identically_named_paths[identically_named_path] = repo_name if identically_named_path == porttrees[0]: # Found another repo with the same name as # $PORTDIR, so update porttrees[0] to match. porttrees[0] = path treemap[repo_name] = path repository_map[path] = repo_name # Ensure that each repo_name is unique. Later paths override # earlier ones that correspond to the same name. porttrees = [x for x in porttrees if x not in identically_named_paths] ignored_map = {} for path, repo_name in identically_named_paths.items(): ignored_map.setdefault(repo_name, []).append(path) self._ignored_repos = tuple((repo_name, tuple(paths)) \ for repo_name, paths in ignored_map.items()) self.porttrees = porttrees porttree_root = porttrees[0] self.porttree_root = porttree_root self.eclassdb = eclass_cache.cache(porttree_root) # This is used as sanity check for aux_get(). If there is no # root eclass dir, we assume that PORTDIR is invalid or # missing. This check allows aux_get() to detect a missing # portage tree and return early by raising a KeyError. 
self._have_root_eclass_dir = os.path.isdir( os.path.join(self.porttree_root, "eclass")) self.metadbmodule = self.settings.load_best_module("portdbapi.metadbmodule") #if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening) self.xcache = {} self.frozen = 0 self._repo_info = {} eclass_dbs = {porttree_root : self.eclassdb} local_repo_configs = self.settings._local_repo_configs default_loc_repo_config = None repo_aliases = {} if local_repo_configs is not None: default_loc_repo_config = local_repo_configs.get('DEFAULT') for repo_name, loc_repo_conf in local_repo_configs.items(): if loc_repo_conf.aliases is not None: for alias in loc_repo_conf.aliases: overridden_alias = repo_aliases.get(alias) if overridden_alias is not None: writemsg_level(_("!!! Alias '%s' " \ "created for '%s' overrides " \ "'%s' alias in " \ "'%s'\n") % (alias, repo_name, overridden_alias, self.settings._local_repo_conf_path), level=logging.WARNING, noiselevel=-1) repo_aliases[alias] = repo_name for path in self.porttrees: if path in self._repo_info: continue repo_name = self._repository_map.get(path) loc_repo_conf = None if local_repo_configs is not None: if repo_name is not None: loc_repo_conf = local_repo_configs.get(repo_name) if loc_repo_conf is None: loc_repo_conf = default_loc_repo_config layout_filename = os.path.join(path, "metadata/layout.conf") layout_file = KeyValuePairFileLoader(layout_filename, None, None) layout_data, layout_errors = layout_file.load() porttrees = [] masters = None if loc_repo_conf is not None and \ loc_repo_conf.masters is not None: masters = loc_repo_conf.masters else: masters = layout_data.get('masters', '').split() for master_name in masters: master_name = repo_aliases.get(master_name, master_name) master_path = self.treemap.get(master_name) if master_path is None: writemsg_level(_("Unavailable repository '%s' " \ "referenced by masters entry in '%s'\n") % \ (master_name, layout_filename), level=logging.ERROR, noiselevel=-1) else: porttrees.append(master_path) if not porttrees and path != porttree_root: # Make PORTDIR the default master, but only if our # heuristics suggest that it's necessary. 
profiles_desc = os.path.join(path, 'profiles', 'profiles.desc') eclass_dir = os.path.join(path, 'eclass') if not os.path.isfile(profiles_desc) or \ not os.path.isdir(eclass_dir): porttrees.append(porttree_root) porttrees.append(path) if loc_repo_conf is not None and \ loc_repo_conf.eclass_overrides is not None: for other_name in loc_repo_conf.eclass_overrides: other_path = self.treemap.get(other_name) if other_path is None: writemsg_level(_("Unavailable repository '%s' " \ "referenced by eclass-overrides entry in " \ "'%s'\n") % (other_name, self.settings._local_repo_conf_path), level=logging.ERROR, noiselevel=-1) continue porttrees.append(other_path) eclass_db = None for porttree in porttrees: tree_db = eclass_dbs.get(porttree) if tree_db is None: tree_db = eclass_cache.cache(porttree) eclass_dbs[porttree] = tree_db if eclass_db is None: eclass_db = tree_db.copy() else: eclass_db.append(tree_db) self._repo_info[path] = _repo_info(repo_name, path, eclass_db) self.auxdbmodule = self.settings.load_best_module("portdbapi.auxdbmodule") self.auxdb = {} self._pregen_auxdb = {} self._init_cache_dirs() depcachedir_w_ok = os.access(self.depcachedir, os.W_OK) cache_kwargs = { 'gid' : portage_gid, 'perms' : 0o664 } if secpass < 1: # portage_gid is irrelevant, so just obey umask cache_kwargs['gid'] = -1 cache_kwargs['perms'] = -1 # XXX: REMOVE THIS ONCE UNUSED_0 IS YANKED FROM auxdbkeys # ~harring filtered_auxdbkeys = [x for x in auxdbkeys if not x.startswith("UNUSED_0")] filtered_auxdbkeys.sort() from portage.cache import metadata_overlay, volatile if not depcachedir_w_ok: for x in self.porttrees: db_ro = self.auxdbmodule(self.depcachedir, x, filtered_auxdbkeys, gid=portage_gid, readonly=True) self.auxdb[x] = metadata_overlay.database( self.depcachedir, x, filtered_auxdbkeys, gid=portage_gid, db_rw=volatile.database, db_ro=db_ro) else: for x in self.porttrees: if x in self.auxdb: continue # location, label, auxdbkeys self.auxdb[x] = self.auxdbmodule( self.depcachedir, x, filtered_auxdbkeys, **cache_kwargs) if self.auxdbmodule is metadata_overlay.database: self.auxdb[x].db_ro.ec = self._repo_info[x].eclass_db if "metadata-transfer" not in self.settings.features: for x in self.porttrees: if x in self._pregen_auxdb: continue if os.path.isdir(os.path.join(x, "metadata", "cache")): self._pregen_auxdb[x] = self.metadbmodule( x, "metadata/cache", filtered_auxdbkeys, readonly=True) try: self._pregen_auxdb[x].ec = self._repo_info[x].eclass_db except AttributeError: pass # Selectively cache metadata in order to optimize dep matching. self._aux_cache_keys = set( ["DEPEND", "EAPI", "INHERITED", "IUSE", "KEYWORDS", "LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND", "repository", "RESTRICT", "SLOT", "DEFINED_PHASES"]) self._aux_cache = {} self._broken_ebuilds = set()
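A short, hedged usage sketch for the constructor above: build a config without local overrides, pass it in as mysettings, and query metadata through the resulting dbapi. The atom "sys-apps/portage" is only an example.

import portage

mysettings = portage.config(local_config=False)
dbapi = portage.portdbapi(mysettings=mysettings)

for cpv in dbapi.xmatch("match-all", "sys-apps/portage"):  # example atom
    keywords = dbapi.aux_get(cpv, ["KEYWORDS"])[0]
    print("{}: {}".format(cpv, keywords))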
config_files = ['bashrc', 'categories', 'color.map', 'mirrors', 'modules',
    'package.keywords', 'package.license', 'package.mask',
    'package.properties', 'package.unmask', 'package.use', 'repos.conf',
    'profile', 'sets']

test_path = '/etc/testportage/'

# stderr and exit are used below but were not imported in this fragment
from sys import exit, stderr

try:
    # >=portage 2.1 modules
    import portage
    import portage.const
except ImportError, e:
    print >>stderr, "Portage Import Error: ", e
    exit('Could not find portage module.\n'
         'Are you sure this is a Gentoo system?')

print >>stderr, ("Config: portage version = " + portage.VERSION)

config_path = "/" + portage.const.USER_CONFIG_PATH + "/"
PORTDIR = portage.config(clone=portage.settings).environ()['PORTDIR']

# house cleaning: no longer needed imports
del portage


def set_test_path():
    global config_path, test_path
    config_path = test_path
    print "CONFIG: new config_path = " + config_path


def get_config_path():
    return config_path
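A hedged aside on the same theme: instead of rewriting config_path by hand, portage can be pointed at an alternate configuration prefix via PORTAGE_CONFIGROOT, the root under which etc/portage is searched. A sketch, assuming a prepared tree at the hypothetical /tmp/testroot/etc/portage:

import os

os.environ["PORTAGE_CONFIGROOT"] = "/tmp/testroot"  # hypothetical test prefix
import portage

test_settings = portage.config(
    config_root=os.environ["PORTAGE_CONFIGROOT"])
print(test_settings["PORTAGE_CONFIGROOT"])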