def _wait(self):

    AsyncScheduler._wait(self)

    portdb = self._portdb
    dead_nodes = {}

    self._termination_check()
    if self._terminated_tasks:
        portdb.flush_cache()
        self.returncode = self._cancelled_returncode
        return self.returncode

    if self._global_cleanse:
        # Every cp was visited, so all existing cache keys are candidates
        # for cleansing.
        for mytree in portdb.porttrees:
            try:
                dead_nodes[mytree] = set(portdb.auxdb[mytree])
            except CacheError as e:
                portage.writemsg("Error listing cache entries for " + \
                    "'%s': %s, continuing...\n" % (mytree, e),
                    noiselevel=-1)
                del e
                dead_nodes = None
                break
    else:
        # Only cache keys belonging to the processed cp set are candidates.
        cp_set = self._cp_set
        cpv_getkey = portage.cpv_getkey
        for mytree in portdb.porttrees:
            try:
                dead_nodes[mytree] = set(cpv for cpv in \
                    portdb.auxdb[mytree] \
                    if cpv_getkey(cpv) in cp_set)
            except CacheError as e:
                portage.writemsg("Error listing cache entries for " + \
                    "'%s': %s, continuing...\n" % (mytree, e),
                    noiselevel=-1)
                del e
                dead_nodes = None
                break

    if dead_nodes:
        # Keep entries whose ebuilds still exist in some tree.
        for y in self._valid_pkgs:
            for mytree in portdb.porttrees:
                if portdb.findname2(y, mytree=mytree)[0]:
                    dead_nodes[mytree].discard(y)

        # Remove the remaining stale entries from the auxdb cache.
        for mytree, nodes in dead_nodes.items():
            auxdb = portdb.auxdb[mytree]
            for y in nodes:
                try:
                    del auxdb[y]
                except (KeyError, CacheError):
                    pass

    portdb.flush_cache()

    return self.returncode
def _task_exit(self, task):

    if task.returncode != os.EX_OK:
        if not self._terminated_tasks:
            portage.writemsg(
                "Error processing %s%s%s, continuing...\n" %
                (task.cp, _repo_separator, task.repo_config.name),
                noiselevel=-1)

    AsyncScheduler._task_exit(self, task)
def __init__(self, **kwargs):
    '''
    @param emerge_config: an emerge_config instance
    @param selected_repos: list of RepoConfig instances
    @param sync_manager: a SyncManager instance
    '''
    self._emerge_config = kwargs.pop('emerge_config')
    self._selected_repos = kwargs.pop('selected_repos')
    self._sync_manager = kwargs.pop('sync_manager')
    AsyncScheduler.__init__(self, **kwargs)
    self._init_graph()
    self.retvals = []
    self.msgs = []
def __init__(self, portdb, cp_iter=None,
    gpg_cmd=None, gpg_vars=None, force_sign_key=None, **kwargs):

    AsyncScheduler.__init__(self, **kwargs)

    self._portdb = portdb

    if cp_iter is None:
        cp_iter = self._iter_every_cp()
    self._cp_iter = cp_iter
    self._gpg_cmd = gpg_cmd
    self._gpg_vars = gpg_vars
    self._force_sign_key = force_sign_key
    self._task_iter = self._iter_tasks()
def _can_add_job(self):
    '''
    Returns False if there are no leaf nodes available.
    '''
    if not AsyncScheduler._can_add_job(self):
        return False
    return bool(self._leaf_nodes) and not self._terminated.is_set()
def _task_exit(self, metadata_process):

    if metadata_process.returncode != os.EX_OK:
        self._valid_pkgs.discard(metadata_process.cpv)
        if not self._terminated_tasks:
            portage.writemsg("Error processing %s, continuing...\n" % \
                (metadata_process.cpv,), noiselevel=-1)

    if self._consumer is not None:
        # On failure, still notify the consumer (in this case the
        # metadata argument is None).
        self._consumer(metadata_process.cpv,
            metadata_process.repo_path,
            metadata_process.metadata,
            metadata_process.ebuild_hash,
            metadata_process.eapi_supported)

    AsyncScheduler._task_exit(self, metadata_process)
def __init__(self, portdb, cp_iter=None, consumer=None,
    write_auxdb=True, **kwargs):
    AsyncScheduler.__init__(self, **kwargs)
    self._portdb = portdb
    self._write_auxdb = write_auxdb
    self._global_cleanse = False
    if cp_iter is None:
        cp_iter = self._iter_every_cp()
        # We can globally cleanse stale cache only if we
        # iterate over every single cp.
        self._global_cleanse = True
    self._cp_iter = cp_iter
    self._consumer = consumer

    self._valid_pkgs = set()
    self._cp_set = set()
    self._process_iter = self._iter_metadata_processes()
    self._running_tasks = set()
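# Usage sketch (not part of the original sources): assuming MetadataRegen
# exposes the AsynchronousTask-style start()/wait()/returncode interface it
# inherits via AsyncScheduler, a caller might drive a full metadata
# regeneration roughly like this. The portdb handle and the max_jobs /
# max_load values are hypothetical placeholders.
def _example_regen_all(portdb):
    # Regenerate metadata for every cp in the configured repositories and
    # return the scheduler's exit status.
    regen = MetadataRegen(portdb, max_jobs=2, max_load=1.0)
    regen.start()
    regen.wait()
    return regen.returncode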