def __init__(self, main=False):
	"""
	@param main: If True then use global_event_loop(), otherwise use
		a local EventLoop instance (default is False, for safe use in
		a non-main thread)
	@type main: bool
	"""
	self._terminated = threading.Event()
	self._terminated_tasks = False
	self._max_jobs = 1
	self._max_load = None
	self._jobs = 0
	self._scheduling = False
	self._background = False
	loop = global_event_loop() if main else EventLoop(main=False)
	self._event_loop = loop
	# Wrap the event loop in the legacy glib-like scheduler facade.
	# register/io_add_watch and unregister/source_remove deliberately
	# alias the same underlying loop methods.
	self.sched_iface = self._sched_iface_class(
		IO_ERR=loop.IO_ERR,
		IO_HUP=loop.IO_HUP,
		IO_IN=loop.IO_IN,
		IO_NVAL=loop.IO_NVAL,
		IO_OUT=loop.IO_OUT,
		IO_PRI=loop.IO_PRI,
		child_watch_add=loop.child_watch_add,
		idle_add=loop.idle_add,
		io_add_watch=loop.io_add_watch,
		iteration=loop.iteration,
		output=self._task_output,
		register=loop.io_add_watch,
		source_remove=loop.source_remove,
		timeout_add=loop.timeout_add,
		unregister=loop.source_remove)
def _event_loop(self):
	"""
	Select the event loop to use: the shared global loop for internal
	portage callers, or a private loop for external API consumers.
	"""
	if not portage._internal_caller:
		# External API consumers get a local EventLoop, since we
		# don't want to assume that it's safe to override the
		# global SIGCHLD handler on their behalf.
		return EventLoop(main=False)
	# For internal portage usage, the global_event_loop is safe.
	return global_event_loop()
def spawn_nofetch(portdb, ebuild_path, settings=None, fd_pipes=None):
	"""
	Create a NofetchPrivateTmpdir instance, and execute it
	synchronously. This function must not be called from asynchronous
	code, since it will trigger event loop recursion which is
	incompatible with asyncio.

	@param portdb: portdbapi instance used to acquire config/metadata
	@param ebuild_path: path of the ebuild whose pkg_nofetch may run
	@param settings: optional config instance (cloned internally)
	@param fd_pipes: optional fd mapping for the spawned phase
	@return: the exit code of the nofetch task
	"""
	# Use a proper conditional expression instead of the fragile
	# "cond and a or b" idiom, which silently falls through to the
	# second alternative whenever the first one is falsy.
	event_loop = (global_event_loop()
		if portage._internal_caller else EventLoop(main=False))
	nofetch = SpawnNofetchWithoutBuilddir(
		background=False,
		portdb=portdb,
		ebuild_path=ebuild_path,
		scheduler=SchedulerInterface(event_loop),
		fd_pipes=fd_pipes,
		settings=settings)

	nofetch.start()
	return nofetch.wait()
def __init__(self, main=False, event_loop=None):
	"""
	@param main: If True then use global_event_loop(), otherwise use
		a local EventLoop instance (default is False, for safe use in
		a non-main thread)
	@type main: bool
	"""
	self._terminated = threading.Event()
	self._terminated_tasks = False
	self._max_jobs = 1
	self._max_load = None
	self._scheduling = False
	self._background = False
	# An explicitly supplied loop takes precedence; otherwise choose
	# between the shared global loop and a private one.
	if event_loop is not None:
		loop = event_loop
	else:
		loop = global_event_loop() if main else EventLoop(main=False)
	self._event_loop = loop
	self._sched_iface = SchedulerInterface(
		loop, is_background=self._is_background)
def __init__(self, _unused_param=None, mysettings=None):
	"""
	@param _unused_param: deprecated, use mysettings['PORTDIR'] instead
	@type _unused_param: None
	@param mysettings: an immutable config instance
	@type mysettings: portage.config
	"""
	# Track live instances at class level (consumed elsewhere,
	# e.g. for close/cleanup of all open databases).
	portdbapi.portdbapi_instances.append(self)
	from portage import config
	if mysettings:
		self.settings = mysettings
	else:
		# Clone the global settings so later mutations here cannot
		# leak back into the shared global config instance.
		from portage import settings
		self.settings = config(clone=settings)

	if _unused_param is not None:
		warnings.warn("The first parameter of the " + \
			"portage.dbapi.porttree.portdbapi" + \
			" constructor is unused since portage-2.1.8. " + \
			"mysettings['PORTDIR'] is used instead.",
			DeprecationWarning, stacklevel=2)

	self.repositories = self.settings.repositories
	self.treemap = self.repositories.treemap

	# This is strictly for use in aux_get() doebuild calls when metadata
	# is generated by the depend phase.  It's safest to use a clone for
	# this purpose because doebuild makes many changes to the config
	# instance that is passed in.
	self.doebuild_settings = config(clone=self.settings)
	self._event_loop = EventLoop(main=False)
	self.depcachedir = os.path.realpath(self.settings.depcachedir)

	if os.environ.get("SANDBOX_ON") == "1":
		# Make api consumers exempt from sandbox violations
		# when doing metadata cache updates.
		sandbox_write = os.environ.get("SANDBOX_WRITE", "").split(":")
		if self.depcachedir not in sandbox_write:
			sandbox_write.append(self.depcachedir)
			os.environ["SANDBOX_WRITE"] = \
				":".join(filter(None, sandbox_write))

	self.porttrees = list(self.settings.repositories.repoLocationList())

	# This is used as sanity check for aux_get(). If there is no
	# root eclass dir, we assume that PORTDIR is invalid or
	# missing. This check allows aux_get() to detect a missing
	# portage tree and return early by raising a KeyError.
	self._have_root_eclass_dir = os.path.isdir(
		os.path.join(self.settings.repositories.mainRepoLocation(), "eclass"))

	# If the portdbapi is "frozen", then we assume that we can cache
	# everything (that no updates to it are happening).
	self.xcache = {}
	self.frozen = 0

	# Keep a list of repo names, sorted by priority (highest priority first).
	self._ordered_repo_name_list = tuple(
		reversed(self.repositories.prepos_order))

	self.auxdbmodule = self.settings.load_best_module(
		"portdbapi.auxdbmodule")
	self.auxdb = {}
	self._pregen_auxdb = {}
	# If the current user doesn't have depcachedir write permission,
	# then the depcachedir cache is kept here read-only access.
	self._ro_auxdb = {}
	self._init_cache_dirs()
	try:
		depcachedir_st = os.stat(self.depcachedir)
		depcachedir_w_ok = os.access(self.depcachedir, os.W_OK)
	except OSError:
		# depcachedir is absent or unreadable; treat it as unwritable.
		depcachedir_st = None
		depcachedir_w_ok = False

	cache_kwargs = {}

	depcachedir_unshared = False
	if portage.data.secpass < 1 and \
		depcachedir_w_ok and \
		depcachedir_st is not None and \
		os.getuid() == depcachedir_st.st_uid and \
		os.getgid() == depcachedir_st.st_gid:
		# If this user owns depcachedir and is not in the
		# portage group, then don't bother to set permissions
		# on cache entries. This makes it possible to run
		# egencache without any need to be a member of the
		# portage group.
		depcachedir_unshared = True
	else:
		cache_kwargs.update({'gid': portage_gid, 'perms': 0o664})

	# If secpass < 1, we don't want to write to the cache
	# since then we won't be able to apply group permissions
	# to the cache entries/directories.
	if (secpass < 1 and not depcachedir_unshared) or not depcachedir_w_ok:
		for x in self.porttrees:
			# Write path unavailable: keep a volatile (in-memory)
			# cache, plus a best-effort read-only view of any
			# existing on-disk cache.
			self.auxdb[x] = volatile.database(
				self.depcachedir, x, self._known_keys, **cache_kwargs)
			try:
				self._ro_auxdb[x] = self.auxdbmodule(self.depcachedir, x,
					self._known_keys, readonly=True, **cache_kwargs)
			except CacheError:
				pass
	else:
		for x in self.porttrees:
			if x in self.auxdb:
				continue
			# location, label, auxdbkeys
			self.auxdb[x] = self.auxdbmodule(
				self.depcachedir, x, self._known_keys, **cache_kwargs)
	if "metadata-transfer" not in self.settings.features:
		for x in self.porttrees:
			if x in self._pregen_auxdb:
				continue
			cache = self._create_pregen_cache(x)
			if cache is not None:
				self._pregen_auxdb[x] = cache
	# Selectively cache metadata in order to optimize dep matching.
	self._aux_cache_keys = set(
		["DEPEND", "EAPI", "HDEPEND",
		"INHERITED", "IUSE", "KEYWORDS", "LICENSE",
		"PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND", "repository",
		"RESTRICT", "SLOT", "DEFINED_PHASES", "REQUIRED_USE"])

	self._aux_cache = {}
	self._broken_ebuilds = set()
def spawn_nofetch(portdb, ebuild_path, settings=None, fd_pipes=None):
	"""
	This spawns pkg_nofetch if appropriate. The settings parameter
	is useful only if setcpv has already been called in order
	to cache metadata. It will be cloned internally, in order to
	prevent any changes from interfering with the calling code.
	If settings is None then a suitable config instance will be
	acquired from the given portdbapi instance. Do not use the
	settings parameter unless setcpv has been called on the given
	instance, since otherwise it's possible to trigger issues like
	bug #408817 due to fragile assumptions involving the config
	state inside doebuild_environment().

	A private PORTAGE_BUILDDIR will be created and cleaned up, in
	order to avoid any interference with any other processes.
	If PORTAGE_TMPDIR is writable, that will be used, otherwise
	the default directory for the tempfile module will be used.

	We only call the pkg_nofetch phase if either RESTRICT=fetch
	is set or the package has explicitly overridden the default
	pkg_nofetch implementation. This allows specialized messages
	to be displayed for problematic packages even though they do
	not set RESTRICT=fetch (bug #336499).

	This function does nothing if the PORTAGE_PARALLEL_FETCHONLY
	variable is set in the config instance.
	"""
	# Always operate on a clone, so the caller's config is untouched.
	if settings is None:
		settings = config(clone=portdb.settings)
	else:
		settings = config(clone=settings)

	if 'PORTAGE_PARALLEL_FETCHONLY' in settings:
		return os.EX_OK

	# We must create our private PORTAGE_TMPDIR before calling
	# doebuild_environment(), since lots of variables such
	# as PORTAGE_BUILDDIR refer to paths inside PORTAGE_TMPDIR.
	portage_tmpdir = settings.get('PORTAGE_TMPDIR')
	if not portage_tmpdir or not os.access(portage_tmpdir, os.W_OK):
		portage_tmpdir = None
	private_tmpdir = tempfile.mkdtemp(dir=portage_tmpdir)
	settings['PORTAGE_TMPDIR'] = private_tmpdir
	settings.backup_changes('PORTAGE_TMPDIR')
	# private temp dir was just created, so it's not locked yet
	settings.pop('PORTAGE_BUILDDIR_LOCKED', None)

	try:
		doebuild_environment(ebuild_path, 'nofetch',
			settings=settings, db=portdb)
		restrict = settings['PORTAGE_RESTRICT'].split()
		defined_phases = settings['DEFINED_PHASES'].split()
		if not defined_phases:
			# When DEFINED_PHASES is undefined, assume all
			# phases are defined.
			defined_phases = EBUILD_PHASES

		if 'fetch' not in restrict and \
			'nofetch' not in defined_phases:
			return os.EX_OK

		prepare_build_dirs(settings=settings)
		# Use a conditional expression instead of the fragile
		# "cond and a or b" idiom, which silently picks the second
		# alternative whenever the first one is falsy.
		event_loop = (global_event_loop()
			if portage._internal_caller else EventLoop(main=False))
		ebuild_phase = EbuildPhase(background=False,
			phase='nofetch',
			scheduler=SchedulerInterface(event_loop),
			fd_pipes=fd_pipes, settings=settings)
		ebuild_phase.start()
		ebuild_phase.wait()
		elog_process(settings.mycpv, settings)
	finally:
		# Best-effort removal of the private temp dir, even on error.
		shutil.rmtree(private_tmpdir)

	return ebuild_phase.returncode
def _sync(self, selected_repos, return_messages, emaint_opts=None):
	"""
	Synchronize the selected repositories and run post-sync processing.

	@param selected_repos: repositories to sync (empty means nothing to do)
	@param return_messages: if True, return accumulated messages as the
		second element of the result tuple; otherwise return None there
	@param emaint_opts: optional mapping of emaint option names to values,
		merged into self.emerge_config.opts as "--<name>" flags
	@return: tuple of (success, messages-or-None)
	"""
	msgs = []
	if not selected_repos:
		if return_messages:
			msgs.append("Nothing to sync... returning")
			return (True, msgs)
		return (True, None)

	if emaint_opts is not None:
		# Translate emaint-style option names (underscores) into
		# emerge-style "--long-option" keys; None values are skipped.
		for k, v in emaint_opts.items():
			if v is not None:
				k = "--" + k.replace("_", "-")
				self.emerge_config.opts[k] = v

	# Portage needs to ensure a sane umask for the files it creates.
	os.umask(0o22)

	sync_manager = SyncManager(
		self.emerge_config.target_config.settings, emergelog)

	# Parallel syncing is only enabled when the parallel-fetch
	# feature is active; otherwise fall back to a single job.
	max_jobs = (self.emerge_config.opts.get('--jobs', 1)
		if 'parallel-fetch' in self.emerge_config.
		target_config.settings.features else 1)
	sync_scheduler = SyncScheduler(emerge_config=self.emerge_config,
		selected_repos=selected_repos, sync_manager=sync_manager,
		max_jobs=max_jobs,
		event_loop=global_event_loop() if portage._internal_caller else
			EventLoop(main=False))

	sync_scheduler.start()
	sync_scheduler.wait()
	retvals = sync_scheduler.retvals
	msgs.extend(sync_scheduler.msgs)
	returncode = True

	if retvals:
		msgs.extend(self.rmessage(retvals, 'sync'))
		# Any single failed repo marks the whole sync as failed.
		for repo, retval in retvals:
			if retval != os.EX_OK:
				returncode = False
				break
	else:
		msgs.extend(self.rmessage([('None', os.EX_OK)], 'sync'))

	# run the post_sync_hook one last time for
	# run only at sync completion hooks
	if sync_scheduler.global_hooks_enabled:
		rcode = sync_manager.perform_post_sync_hook('')
		if rcode:
			msgs.extend(self.rmessage([('None', rcode)], 'post-sync'))
			if rcode != os.EX_OK:
				returncode = False

	# Reload the whole config.
	portage._sync_mode = False
	self._reload_config()
	self._do_pkg_moves()
	msgs.extend(self._check_updates())
	display_news_notification(self.emerge_config.target_config,
		self.emerge_config.opts)

	if return_messages:
		return (returncode, msgs)
	return (returncode, None)