def _upgrade_respawn(self, entropy_client, inst_repo): """ Respawn the upgrade activity if required. """ # It might be an Entropy bug and Entropy was proritized in the # install queue, ignoring the rest of available packages. # So, respawning myself again using execvp() should be a much # better idea. with inst_repo.shared(): outcome = entropy_client.calculate_updates() if outcome['update']: entropy_client.output("%s." % (purple( _("There are more updates to install, " "reloading Entropy")), ), header=teal(" @@ ")) # then spawn a new process entropy_client.shutdown() # hack to tell the resurrected equo to block on # locking acquisition os.environ['__EQUO_LOCKS_BLOCKING__'] = "1" # we will acquire them again in blocking mode, cross # fingers lock = EntropyResourcesLock(output=entropy_client) lock.release() os.execvp("equo", sys.argv)
def _upgrade_respawn(self, entropy_client, inst_repo):
    """
    Respawn the upgrade activity if required.

    When updates are still pending after an upgrade run, release the
    Entropy Resources Lock and re-exec "equo" with the same argv so a
    fresh process installs the remaining packages. Does not return in
    that case.
    """
    # Entropy itself may have been prioritized in the install queue,
    # leaving other packages pending; re-executing ourselves is the
    # safest way to pick them up.
    with inst_repo.shared():
        outcome = entropy_client.calculate_updates()
        if not outcome['update']:
            return

        message = purple(
            _("There are more updates to install, "
              "reloading Entropy"))
        entropy_client.output(
            "%s." % (message,), header=teal(" @@ "))

        # Shut everything down before replacing the process image.
        entropy_client.shutdown()
        # Hint for the resurrected equo: block on lock acquisition
        # instead of bailing out if the lock is busy.
        os.environ['__EQUO_LOCKS_BLOCKING__'] = "1"
        # Drop the resources lock; the new process re-acquires it
        # (in blocking mode, see the env var above).
        resources_lock = EntropyResourcesLock(output=entropy_client)
        resources_lock.release()
        os.execvp("equo", sys.argv)
def _call_shared(self, func):
    """
    Execute the given function at func after acquiring Entropy
    Resources Lock in shared mode, for given repository at repo.
    The signature of func is: int func(entropy_client).

    @param func: callable receiving the Entropy Client instance;
        its int return value becomes this method's return value.
    @return: func's exit status, or 1 if the Entropy class cannot
        be loaded due to insufficient privileges.
    """
    client_class = None
    client = None
    acquired = False
    lock = None
    try:
        try:
            client_class = self._entropy_class()
        except PermissionDenied as err:
            print_error(err.value)
            return 1
        # Shared (read) lock: multiple readers may hold it at once.
        lock = EntropyResourcesLock(output=client_class)
        lock.acquire_shared()
        acquired = True
        client = client_class()
        return func(client)
    finally:
        # Shutdown before releasing the lock, so cleanup still runs
        # under lock protection.
        if client is not None:
            client.shutdown()
        if acquired:
            lock.release()
def acquire(self):
    """
    Overridden from BaseBinaryResourceLock.

    Acquire the Entropy Resources Lock in exclusive mode. In blocking
    mode this waits indefinitely; otherwise a single non-blocking
    attempt is made and failure raises NotAcquired.
    """
    lock = EntropyResourcesLock(output=Server)
    if self._blocking:
        lock.acquire_exclusive()
        acquired = True
    else:
        # Non-blocking attempt; returns False if somebody else
        # holds the lock.
        acquired = lock.wait_exclusive()
    if not acquired:
        # NOTE(review): the lock object is EntropyResourcesLock but the
        # exception is raised from EntropyResourceLock (no "s") —
        # presumably the enclosing wrapper class; confirm it is not a typo.
        raise EntropyResourceLock.NotAcquired("unable to acquire lock")
def acquire(self):
    """
    Overridden from BaseBinaryResourceLock.

    Take the Entropy Resources Lock exclusively, either blocking
    until available or failing fast with NotAcquired.
    """
    resources_lock = EntropyResourcesLock(output=Server)
    if not self._blocking:
        got_it = resources_lock.wait_exclusive()
    else:
        resources_lock.acquire_exclusive()
        got_it = True
    if not got_it:
        raise EntropyResourceLock.NotAcquired(
            "unable to acquire lock")
def _call_exclusive(self, func, repo):
    """
    Execute the given function at func after acquiring Entropy
    Resources Lock, for given repository at repo.
    The signature of func is: int func(entropy_server).

    @param func: callable receiving the Entropy Server instance.
    @param repo: default repository identifier passed to the server.
    @return: func's exit status; 1 on permission or lock failure.
    """
    server = None
    server_class = None
    acquired = False
    lock = None
    # make possible to avoid dealing with the resources lock.
    # This is useful if the lock is already acquired by some
    # parent or controller process.
    skip_lock = os.getenv("EIT_NO_RESOURCES_LOCK") is not None
    try:
        try:
            server_class = self._entropy_class()
        except PermissionDenied as err:
            print_error(err.value)
            return 1
        if not skip_lock:
            lock = EntropyResourcesLock(output=server_class)
            # Single non-blocking attempt; bail out if another
            # Entropy instance holds the lock.
            acquired = lock.wait_exclusive()
            if not acquired:
                server_class.output(darkgreen(
                    _("Another Entropy is currently running.")),
                    level="error", importance=1)
                return 1
        server = server_class(default_repository=repo)
        # make sure that repositories are closed now
        # to reset their internal states, which could have
        # become stale.
        # We cannot do this inside the API because we don't
        # know the lifecycle of EntropyRepository objects there.
        server.close_repositories()
        ServerRepositoryStatus().reset()
        return func(server)
    finally:
        if server is not None:
            server.shutdown()
        # Only release if we actually acquired (skip_lock path
        # leaves acquired False and lock None).
        if acquired:
            lock.release()
def _call_shared(self, func, repo):
    """
    Execute the given function at func after acquiring Entropy
    Resources Lock in shared mode, for given repository at repo.
    The signature of func is: int func(entropy_server).

    @param func: callable receiving the Entropy Server instance.
    @param repo: default repository identifier passed to the server.
    @return: func's exit status; 1 if the server class cannot be
        loaded due to insufficient privileges.
    """
    server = None
    server_class = None
    acquired = False
    lock = None
    # make possible to avoid dealing with the resources lock.
    # This is useful if the lock is already acquired by some
    # parent or controller process.
    skip_lock = os.getenv("EIT_NO_RESOURCES_LOCK") is not None
    try:
        try:
            server_class = self._entropy_class()
        except PermissionDenied as err:
            print_error(err.value)
            return 1
        if not skip_lock:
            # Shared (read) lock: acquire_shared() blocks until the
            # lock is obtained, so acquisition cannot fail here.
            # The original code carried an unreachable
            # "if not acquired: ... return 1" branch right after
            # "acquired = True" (a leftover from the wait_exclusive()
            # variant, see _call_exclusive); it has been removed.
            lock = EntropyResourcesLock(output=server_class)
            lock.acquire_shared()
            acquired = True
        server = server_class(default_repository=repo)
        # make sure that repositories are closed now
        # to reset their internal states, which could have
        # become stale.
        # We cannot do this inside the API because we don't
        # know the lifecycle of EntropyRepository objects there.
        server.close_repositories()
        ServerRepositoryStatus().reset()
        return func(server)
    finally:
        if server is not None:
            server.shutdown()
        if acquired:
            lock.release()
def _call_exclusive(self, func, repo):
    """
    Execute the given function at func after acquiring Entropy
    Resources Lock, for given repository at repo.
    The signature of func is: int func(entropy_server).

    @param func: callable receiving the Entropy Server instance.
    @param repo: default repository identifier passed to the server.
    @return: func's exit status; 1 on permission or lock failure.
    """
    server = None
    server_class = None
    acquired = False
    lock = None
    try:
        try:
            server_class = self._entropy_class()
        except PermissionDenied as err:
            print_error(err.value)
            return 1
        # Single non-blocking attempt at the exclusive lock; give up
        # with an error if another Entropy instance holds it.
        lock = EntropyResourcesLock(output=server_class)
        acquired = lock.wait_exclusive()
        if not acquired:
            server_class.output(darkgreen(
                _("Another Entropy is currently running.")),
                level="error", importance=1)
            return 1
        server = server_class(default_repository=repo)
        # make sure that repositories are closed now
        # to reset their internal states, which could have
        # become stale.
        # We cannot do this inside the API because we don't
        # know the lifecycle of EntropyRepository objects there.
        server.close_repositories()
        ServerRepositoryStatus().reset()
        return func(server)
    finally:
        # Shutdown first, then release the lock, so cleanup runs
        # under lock protection.
        if server is not None:
            server.shutdown()
        if acquired:
            lock.release()
def destroy(self, _from_shutdown = False):
    """
    Destroy this Singleton instance, closing repositories, removing
    SystemSettings plugins added during instance initialization.
    This method should be always called when instance is not used
    anymore.

    @keyword _from_shutdown: internal flag; when True, SystemSettings
        plugin removal is skipped because the whole process is about
        to terminate anyway.
    @type _from_shutdown: bool
    """
    self.__instance_destroyed = True
    # Detach the resources-lock post-acquire hook registered at init.
    if self.__post_acquire_hook_idx is not None:
        EntropyResourcesLock.remove_post_acquire_hook(
            self.__post_acquire_hook_idx)
        self.__post_acquire_hook_idx = None
    # hasattr() guards: destroy() may run on a partially initialized
    # instance, so each attribute is checked before use.
    if hasattr(self, '_installed_repository'):
        inst_repo = self.installed_repository()
        if inst_repo is not None:
            inst_repo.close(_token = InstalledPackagesRepository.NAME)
    if hasattr(self, '_real_logger_lock'):
        with self._real_logger_lock:
            if self._real_logger is not None:
                self._real_logger.close()
    if not _from_shutdown:
        if hasattr(self, '_real_settings') and \
                hasattr(self._real_settings, 'remove_plugin'):
            # shutdown() will terminate the whole process
            # so there is no need to remove plugins from
            # SystemSettings, it wouldn't make any diff.
            if self._real_settings is not None:
                try:
                    self._real_settings.remove_plugin(
                        ClientSystemSettingsPlugin.ID)
                except KeyError:
                    pass
    self.close_repositories(mask_clear = False)
def destroy(self, _from_shutdown=False):
    """
    Tear down this Singleton instance: close repositories, detach
    the Entropy Resources Lock post-acquire hook and remove the
    SystemSettings plugin registered during initialization. Always
    call this once the instance is no longer used.
    """
    self.__instance_destroyed = True

    if self.__post_acquire_hook_idx is not None:
        EntropyResourcesLock.remove_post_acquire_hook(
            self.__post_acquire_hook_idx)
        self.__post_acquire_hook_idx = None

    # Each attribute is guarded: destroy() can run on a partially
    # initialized instance.
    if hasattr(self, '_installed_repository'):
        installed = self.installed_repository()
        if installed is not None:
            installed.close(_token=InstalledPackagesRepository.NAME)

    if hasattr(self, '_real_logger_lock'):
        with self._real_logger_lock:
            if self._real_logger is not None:
                self._real_logger.close()

    if not _from_shutdown:
        # shutdown() terminates the whole process, so removing the
        # plugin from SystemSettings would make no difference there.
        has_settings = hasattr(self, '_real_settings') and \
            hasattr(self._real_settings, 'remove_plugin')
        if has_settings and self._real_settings is not None:
            try:
                self._real_settings.remove_plugin(
                    ClientSystemSettingsPlugin.ID)
            except KeyError:
                pass

    self.close_repositories(mask_clear=False)
def _call_exclusive(self, func):
    """
    Execute the given function at func after acquiring Entropy
    Resources Lock, for given repository at repo.
    The signature of func is: int func(entropy_client).

    @param func: callable receiving the Entropy Client instance.
    @return: func's exit status; 1 on permission or lock failure.
    """
    client_class = None
    client = None
    acquired = False
    lock = None
    try:
        try:
            client_class = self._entropy_class()
        except PermissionDenied as err:
            print_error(err.value)
            return 1
        # __EQUO_LOCKS_BLOCKING__ is set by _upgrade_respawn() before
        # re-exec: when present, wait for the lock instead of failing.
        blocking = os.getenv("__EQUO_LOCKS_BLOCKING__")
        if blocking:
            client_class.output(darkgreen(
                _("Acquiring Entropy Resources "
                  "Lock, please wait...")), back=True)
        lock = EntropyResourcesLock(output=client_class)
        if blocking:
            lock.acquire_exclusive()
            acquired = True
        else:
            acquired = lock.wait_exclusive()
        if not acquired:
            client_class.output(darkgreen(
                _("Another Entropy is currently running.")),
                level="error", importance=1)
            return 1
        client = client_class()
        return func(client)
    finally:
        if client is not None:
            client.shutdown()
        if acquired:
            lock.release()
def _call_exclusive(self, func):
    """
    Run func(entropy_client) while holding the Entropy Resources
    Lock in exclusive mode, returning its integer exit status, or 1
    when the Entropy class cannot be loaded or the lock cannot be
    taken.
    """
    klass = None
    instance = None
    got_lock = False
    res_lock = None
    try:
        try:
            klass = self._entropy_class()
        except PermissionDenied as exc:
            print_error(exc.value)
            return 1

        # Set by the respawn logic before re-exec: wait for the lock
        # rather than failing fast.
        must_block = os.getenv("__EQUO_LOCKS_BLOCKING__")
        if must_block:
            klass.output(darkgreen(
                _("Acquiring Entropy Resources "
                  "Lock, please wait...")), back=True)

        res_lock = EntropyResourcesLock(output=klass)
        if not must_block:
            got_lock = res_lock.wait_exclusive()
        else:
            res_lock.acquire_exclusive()
            got_lock = True

        if not got_lock:
            klass.output(
                darkgreen(_("Another Entropy is currently running.")),
                level="error",
                importance=1
            )
            return 1

        instance = klass()
        return func(instance)
    finally:
        if instance is not None:
            instance.shutdown()
        if got_lock:
            res_lock.release()
def init_singleton(self, indexing = True, installed_repo = None,
                   xcache = True, user_xcache = False,
                   repo_validation = True, url_fetcher = None,
                   multiple_url_fetcher = None, **kwargs):
    """
    Entropy Client Singleton interface. Your hitchhikers' guide to
    the Galaxy.

    @keyword indexing: enable metadata indexing (default is True)
    @type indexing: bool
    @keyword installed_repo: open installed packages repository?
        (default is True). Accepted values: True = open,
        False = open but consider it not available,
        -1 = do not even try to open
    @type installed_repo: bool or int
    @keyword xcache: enable on-disk cache (default is True)
    @type xcache: bool
    @keyword user_xcache: enable on-disk cache even for users not in
        the entropy group (default is False). Dangerous, could lead
        to cache inconsistencies.
    @type user_xcache: bool
    @keyword repo_validation: validate all the available repositories
        and automatically exclude the faulty ones
    @type repo_validation: bool
    @keyword url_fetcher: override default entropy.fetchers.UrlFetcher
        class usage. Provide your own implementation of UrlFetcher
        using this argument.
    @type url_fetcher: class or None
    @keyword multiple_url_fetcher: override default
        entropy.fetchers.MultipleUrlFetcher class usage. Provide your
        own implementation of MultipleUrlFetcher using this argument.
    @type multiple_url_fetcher: class or None
    """
    self.__post_acquire_hook_idx = None
    self.__instance_destroyed = False
    self._repo_error_messages_cache = set()
    self._repodb_cache = {}
    self._repodb_cache_mutex = threading.RLock()
    self._memory_db_instances = {}
    self._real_installed_repository = None
    self._real_installed_repository_lock = threading.RLock()
    self._treeupdates_repos = set()
    self._can_run_sys_set_hooks = False
    const_debug_write(__name__, "debug enabled")
    self.safe_mode = 0
    self._indexing = indexing
    self._repo_validation = repo_validation
    # "_real_*" attributes are lazily initialized behind their locks.
    self._real_cacher = None
    self._real_cacher_lock = threading.Lock()
    # setup package settings (masking and other stuff)
    self._real_settings = None
    self._real_settings_lock = threading.Lock()
    self._real_settings_client_plg = None
    self._real_settings_client_plg_lock = threading.Lock()
    self._real_logger = None
    self._real_logger_lock = threading.Lock()
    self._real_enabled_repos = None
    self._real_enabled_repos_lock = threading.RLock()
    # class init
    LoadersMixin.__init__(self)
    self._multiple_url_fetcher = multiple_url_fetcher
    self._url_fetcher = url_fetcher
    if url_fetcher is None:
        self._url_fetcher = UrlFetcher
    if multiple_url_fetcher is None:
        self._multiple_url_fetcher = MultipleUrlFetcher
    self._do_open_installed_repo = True
    self._installed_repo_enable = True
    if installed_repo in (True, None, 1):
        self._installed_repo_enable = True
    elif installed_repo in (False, 0):
        self._installed_repo_enable = False
    elif installed_repo == -1:
        self._installed_repo_enable = False
        self._do_open_installed_repo = False
    self.xcache = xcache
    # ETP_NOCACHE environment variable force-disables the cache.
    shell_xcache = os.getenv("ETP_NOCACHE")
    if shell_xcache:
        self.xcache = False
    # now if we are on live, we should disable it
    # are we running on a livecd? (/proc/cmdline has "cdroot")
    if entropy.tools.islive():
        self.xcache = False
    elif (not entropy.tools.is_user_in_entropy_group()) and not user_xcache:
        self.xcache = False
    # Add Entropy Resources Lock post-acquire hook that cleans
    # repository caches.
    hook_ref = EntropyResourcesLock.add_post_acquire_hook(
        self._resources_post_hook)
    self.__post_acquire_hook_idx = hook_ref
    # enable System Settings hooks
    self._can_run_sys_set_hooks = True
    const_debug_write(__name__, "singleton loaded")
def _permissions_setup(self):
    """
    Check execution privileges and spawn the Rigo UI.

    Validates, in order: entropy group membership, RigoDaemon
    availability, API compatibility, Entropy Resources Lock state and
    RigoDaemon activity; on any failure an error dialog is shown and
    the GTK main loop is quit. On success, command-line arguments are
    parsed and the UI widgets are set up.
    """
    if not entropy.tools.is_user_in_entropy_group():
        # otherwise the lock handling would potentially
        # fail.
        self._show_ok_dialog(
            None,
            escape_markup(_("Not authorized")),
            escape_markup(_("You are not authorized to run Rigo")))
        entropy.tools.kill_threads()
        Gtk.main_quit()
        return

    if not self._service.service_available():
        self._show_ok_dialog(
            None,
            escape_markup(_("Rigo")),
            escape_markup(_("RigoDaemon service is not available")))
        entropy.tools.kill_threads()
        Gtk.main_quit()
        return

    supported_apis = self._service.supported_apis()
    daemon_api = self._service.api()
    if daemon_api not in supported_apis:
        self._show_ok_dialog(
            None,
            escape_markup(_("Rigo")),
            escape_markup(
                _("API mismatch, please update Rigo and RigoDaemon")))
        entropy.tools.kill_threads()
        Gtk.main_quit()
        return

    lock = EntropyResourcesLock(output=self._entropy)
    # always execute this from the MainThread, since the lock uses TLS
    acquired = lock.try_acquire_shared()
    is_exclusive = False
    if not acquired:
        # check whether RigoDaemon is running in excluive mode
        # and ignore non-atomicity here (failing with error
        # is acceptable)
        if not self._service.exclusive():
            self._show_ok_dialog(
                None,
                escape_markup(_("Rigo")),
                escape_markup(_("Another Application Manager is active")))
            entropy.tools.kill_threads()
            Gtk.main_quit()
            return
        is_exclusive = True
        # otherwise we can go ahead and handle our state later

    # check RigoDaemon, don't worry about races between Rigo Clients
    # it is fine to have multiple Rigo Clients connected. Mutual
    # exclusion is handled via Entropy Resources Lock (which is a file
    # based rwsem).
    activity = self._service.activity()
    if activity != DaemonActivityStates.AVAILABLE:
        msg = ""
        show_dialog = True

        if activity == DaemonActivityStates.NOT_AVAILABLE:
            msg = _("Background Service is currently not available")
        elif activity == DaemonActivityStates.UPDATING_REPOSITORIES:
            # Daemon is already busy: attach to the running activity
            # in a background thread instead of showing an error.
            show_dialog = False
            task = ParallelTask(
                self._service._update_repositories,
                [], False, master=False)
            task.daemon = True
            task.name = "UpdateRepositoriesUnlocked"
            task.start()
        elif activity == DaemonActivityStates.MANAGING_APPLICATIONS:
            show_dialog = False
            task = ParallelTask(
                self._service._application_request,
                None, None, master=False)
            task.daemon = True
            task.name = "ApplicationRequestUnlocked"
            task.start()
        elif activity == DaemonActivityStates.UPGRADING_SYSTEM:
            show_dialog = False
            task = ParallelTask(
                self._service._upgrade_system,
                False, master=False)
            task.daemon = True
            task.name = "UpgradeSystemUnlocked"
            task.start()
        elif activity == DaemonActivityStates.INTERNAL_ROUTINES:
            msg = _("Background Service is currently busy")
        else:
            msg = _("Background Service is incompatible with Rigo")

        if show_dialog:
            self._show_ok_dialog(
                None,
                escape_markup(_("Rigo")),
                escape_markup(msg))
            entropy.tools.kill_threads()
            Gtk.main_quit()
            return
    elif is_exclusive:
        msg = _("Background Service is currently unavailable")
        # no lock acquired, cannot continue the initialization
        self._show_ok_dialog(
            None,
            escape_markup(_("Rigo")),
            escape_markup(msg))
        entropy.tools.kill_threads()
        Gtk.main_quit()
        return

    parser = argparse.ArgumentParser(
        description=_("Rigo Application Browser"))
    # NOTE(review): type=file is the Python 2 builtin; under Python 3
    # this raises NameError — confirm the target runtime.
    parser.add_argument(
        "package", nargs='?', type=file, metavar="<path>",
        help="package path")
    parser.add_argument(
        "--install", metavar="<dep string>",
        help="install given dependency")
    parser.add_argument(
        "--remove", metavar="<dep string>",
        help="remove given dependency")
    parser.add_argument(
        "--upgrade", help="upgrade the system",
        action="store_true", default=False)
    parser.add_argument(
        "--dumper", help="enable the main thread dumper (debug)",
        action="store_true", default=False)
    parser.add_argument(
        "--debug", help="enable Entropy Library debug mode",
        action="store_true", default=False)
    try:
        self._nsargs = parser.parse_args(sys.argv[1:])
    except IOError as err:
        # argparse's type=file raises IOError for unreadable paths.
        self._show_ok_dialog(
            None,
            escape_markup(_("Rigo")),
            escape_markup("%s" % (err,)))
        entropy.tools.kill_threads()
        Gtk.main_quit()
        return

    self._thread_dumper()
    self._pref_view_c.setup()
    self._group_view_c.setup()
    self._config_view_c.setup()
    self._repo_view_c.setup()
    self._notice_view_c.setup()
    self._app_view_c.setup()
    self._avc.setup()
    self._nc.setup()
    self._work_view_c.setup()
    self._service.setup(acquired)
    self._easter_eggs()
    self._window.show()
    managing = self._start_managing()
    if not managing:
        self._change_view_state(RigoViewStates.GROUPS_VIEW_STATE)
    self._service.hello()
def _permissions_setup(self):
    """
    Check execution privileges and spawn the Rigo UI.

    Runs a chain of preconditions (entropy group membership, daemon
    availability, API compatibility, resources lock, daemon activity);
    any failure pops an error dialog and quits the GTK main loop.
    On success, CLI arguments are parsed and the UI is brought up.
    """
    if not entropy.tools.is_user_in_entropy_group():
        # otherwise the lock handling would potentially
        # fail.
        self._show_ok_dialog(
            None,
            escape_markup(_("Not authorized")),
            escape_markup(_("You are not authorized to run Rigo")))
        entropy.tools.kill_threads()
        Gtk.main_quit()
        return

    if not self._service.service_available():
        self._show_ok_dialog(
            None,
            escape_markup(_("Rigo")),
            escape_markup(_("RigoDaemon service is not available")))
        entropy.tools.kill_threads()
        Gtk.main_quit()
        return

    supported_apis = self._service.supported_apis()
    daemon_api = self._service.api()
    if daemon_api not in supported_apis:
        self._show_ok_dialog(
            None,
            escape_markup(_("Rigo")),
            escape_markup(
                _("API mismatch, please update Rigo and RigoDaemon")))
        entropy.tools.kill_threads()
        Gtk.main_quit()
        return

    lock = EntropyResourcesLock(output=self._entropy)
    # always execute this from the MainThread, since the lock uses TLS
    acquired = lock.try_acquire_shared()
    is_exclusive = False
    if not acquired:
        # check whether RigoDaemon is running in excluive mode
        # and ignore non-atomicity here (failing with error
        # is acceptable)
        if not self._service.exclusive():
            self._show_ok_dialog(
                None,
                escape_markup(_("Rigo")),
                escape_markup(_("Another Application Manager is active")))
            entropy.tools.kill_threads()
            Gtk.main_quit()
            return
        is_exclusive = True
        # otherwise we can go ahead and handle our state later

    # check RigoDaemon, don't worry about races between Rigo Clients
    # it is fine to have multiple Rigo Clients connected. Mutual
    # exclusion is handled via Entropy Resources Lock (which is a file
    # based rwsem).
    activity = self._service.activity()
    if activity != DaemonActivityStates.AVAILABLE:
        msg = ""
        show_dialog = True

        if activity == DaemonActivityStates.NOT_AVAILABLE:
            msg = _("Background Service is currently not available")
        elif activity == DaemonActivityStates.UPDATING_REPOSITORIES:
            # Daemon already busy: attach to the running activity in
            # a background thread instead of erroring out.
            show_dialog = False
            task = ParallelTask(self._service._update_repositories,
                                [], False, master=False)
            task.daemon = True
            task.name = "UpdateRepositoriesUnlocked"
            task.start()
        elif activity == DaemonActivityStates.MANAGING_APPLICATIONS:
            show_dialog = False
            task = ParallelTask(self._service._application_request,
                                None, None, master=False)
            task.daemon = True
            task.name = "ApplicationRequestUnlocked"
            task.start()
        elif activity == DaemonActivityStates.UPGRADING_SYSTEM:
            show_dialog = False
            task = ParallelTask(self._service._upgrade_system,
                                False, master=False)
            task.daemon = True
            task.name = "UpgradeSystemUnlocked"
            task.start()
        elif activity == DaemonActivityStates.INTERNAL_ROUTINES:
            msg = _("Background Service is currently busy")
        else:
            msg = _("Background Service is incompatible with Rigo")

        if show_dialog:
            self._show_ok_dialog(None,
                                 escape_markup(_("Rigo")),
                                 escape_markup(msg))
            entropy.tools.kill_threads()
            Gtk.main_quit()
            return
    elif is_exclusive:
        msg = _("Background Service is currently unavailable")
        # no lock acquired, cannot continue the initialization
        self._show_ok_dialog(None,
                             escape_markup(_("Rigo")),
                             escape_markup(msg))
        entropy.tools.kill_threads()
        Gtk.main_quit()
        return

    parser = argparse.ArgumentParser(
        description=_("Rigo Application Browser"))
    # NOTE(review): type=file is the Python 2 builtin; under Python 3
    # this raises NameError — confirm the target runtime.
    parser.add_argument("package", nargs='?', type=file,
                        metavar="<path>", help="package path")
    parser.add_argument("--install", metavar="<dep string>",
                        help="install given dependency")
    parser.add_argument("--remove", metavar="<dep string>",
                        help="remove given dependency")
    parser.add_argument("--upgrade", help="upgrade the system",
                        action="store_true", default=False)
    parser.add_argument("--dumper",
                        help="enable the main thread dumper (debug)",
                        action="store_true", default=False)
    parser.add_argument("--debug",
                        help="enable Entropy Library debug mode",
                        action="store_true", default=False)
    try:
        self._nsargs = parser.parse_args(sys.argv[1:])
    except IOError as err:
        # argparse's type=file raises IOError for unreadable paths.
        self._show_ok_dialog(None,
                             escape_markup(_("Rigo")),
                             escape_markup("%s" % (err, )))
        entropy.tools.kill_threads()
        Gtk.main_quit()
        return

    self._thread_dumper()
    self._pref_view_c.setup()
    self._group_view_c.setup()
    self._config_view_c.setup()
    self._repo_view_c.setup()
    self._notice_view_c.setup()
    self._app_view_c.setup()
    self._avc.setup()
    self._nc.setup()
    self._work_view_c.setup()
    self._service.setup(acquired)
    self._easter_eggs()
    self._window.show()
    managing = self._start_managing()
    if not managing:
        self._change_view_state(RigoViewStates.GROUPS_VIEW_STATE)
    self._service.hello()
def release(self):
    """
    Overridden from BaseBinaryResourceLock.

    Release the Entropy Resources Lock held by this process.
    """
    EntropyResourcesLock(output=Server).release()
def init_singleton(self, indexing=True, installed_repo=None,
                   xcache=True, user_xcache=False,
                   repo_validation=True, url_fetcher=None,
                   multiple_url_fetcher=None, **kwargs):
    """
    Entropy Client Singleton interface. Your hitchhikers' guide to
    the Galaxy.

    @keyword indexing: enable metadata indexing (default is True)
    @type indexing: bool
    @keyword installed_repo: open installed packages repository?
        (default is True). Accepted values: True = open,
        False = open but consider it not available,
        -1 = do not even try to open
    @type installed_repo: bool or int
    @keyword xcache: enable on-disk cache (default is True)
    @type xcache: bool
    @keyword user_xcache: enable on-disk cache even for users not in
        the entropy group (default is False). Dangerous, could lead
        to cache inconsistencies.
    @type user_xcache: bool
    @keyword repo_validation: validate all the available repositories
        and automatically exclude the faulty ones
    @type repo_validation: bool
    @keyword url_fetcher: override default entropy.fetchers.UrlFetcher
        class usage. Provide your own implementation of UrlFetcher
        using this argument.
    @type url_fetcher: class or None
    @keyword multiple_url_fetcher: override default
        entropy.fetchers.MultipleUrlFetcher class usage. Provide your
        own implementation of MultipleUrlFetcher using this argument.
    @type multiple_url_fetcher: class or None
    """
    self.__post_acquire_hook_idx = None
    self.__instance_destroyed = False
    self._repo_error_messages_cache = set()
    self._repodb_cache = {}
    self._repodb_cache_mutex = threading.RLock()
    self._memory_db_instances = {}
    self._real_installed_repository = None
    self._real_installed_repository_lock = threading.RLock()
    self._treeupdates_repos = set()
    self._can_run_sys_set_hooks = False
    const_debug_write(__name__, "debug enabled")
    self.safe_mode = 0
    self._indexing = indexing
    self._repo_validation = repo_validation
    # "_real_*" attributes are lazily initialized behind their locks.
    self._real_cacher = None
    self._real_cacher_lock = threading.RLock()
    # setup package settings (masking and other stuff)
    self._real_settings = None
    self._real_settings_lock = threading.RLock()
    self._real_settings_client_plg = None
    self._real_settings_client_plg_lock = threading.RLock()
    self._real_logger = None
    self._real_logger_lock = threading.RLock()
    self._real_enabled_repos = None
    self._real_enabled_repos_lock = threading.RLock()
    self._multiple_url_fetcher = multiple_url_fetcher
    self._url_fetcher = url_fetcher
    if url_fetcher is None:
        self._url_fetcher = UrlFetcher
    if multiple_url_fetcher is None:
        self._multiple_url_fetcher = MultipleUrlFetcher
    self._do_open_installed_repo = True
    self._installed_repo_enable = True
    if installed_repo in (True, None, 1):
        self._installed_repo_enable = True
    elif installed_repo in (False, 0):
        self._installed_repo_enable = False
    elif installed_repo == -1:
        self._installed_repo_enable = False
        self._do_open_installed_repo = False
    self.xcache = xcache
    # ETP_NOCACHE environment variable force-disables the cache.
    shell_xcache = os.getenv("ETP_NOCACHE")
    if shell_xcache:
        self.xcache = False
    # now if we are on live, we should disable it
    # are we running on a livecd? (/proc/cmdline has "cdroot")
    if entropy.tools.islive():
        self.xcache = False
    elif (not entropy.tools.is_user_in_entropy_group()
          ) and not user_xcache:
        self.xcache = False
    # Add Entropy Resources Lock post-acquire hook that cleans
    # repository caches.
    hook_ref = EntropyResourcesLock.add_post_acquire_hook(
        self._resources_post_hook)
    self.__post_acquire_hook_idx = hook_ref
    # enable System Settings hooks
    self._can_run_sys_set_hooks = True
    const_debug_write(__name__, "singleton loaded")