def __init__(self, on_date=None):
    """Resolve injected collaborators and load initial state.

    :param on_date: optional date the status should be evaluated on
    """
    # Pull everything from the dependency-injection registry.
    self.cp_provider = inj.require(inj.CP_PROVIDER)
    self.product_dir = inj.require(inj.PROD_DIR)
    self.entitlement_dir = inj.require(inj.ENT_DIR)
    self.identity = inj.require(inj.IDENTITY)
    self.on_date = on_date
    # Populate state immediately on construction.
    self.load()
def __init__(self, report=None):
    """Resolve dependencies for entitlement certificate updates.

    :param report: ignored; a fresh EntCertUpdateReport is always created
        (kept for interface compatibility)
    """
    self.cp_provider = inj.require(inj.CP_PROVIDER)
    self.uep = self.cp_provider.get_consumer_auth_cp()
    self.ent_dir = inj.require(inj.ENT_DIR)
    # Consistency fix: use the inj.require(...) form like every other
    # lookup in this function (was a bare require(IDENTITY)).
    self.identity = inj.require(inj.IDENTITY)
    self.report = EntCertUpdateReport()
    self.content_access_cache = inj.require(inj.CONTENT_ACCESS_CACHE)
def get_filtered_pools_list(self, active_on, incompatible, overlapping, uninstalled, text,
                            filter_string, future=None, after_date=None):
    """
    Used for CLI --available filtering
    cuts down on api calls

    Fetches pools from the server (compatible-only, or all) and then applies
    the in-memory filters via _filter_pools.

    :param active_on: date pools must be active on (None for "now")
    :param incompatible: when True, ask the server only for compatible pools
    :param overlapping: filter out pools overlapping current entitlements
    :param uninstalled: filter out pools for products not installed
    :param text: free-text product-name filter
    :param filter_string: server-side filter expression
    """
    self.all_pools = {}
    self.compatible_pools = {}
    # A sorter is only needed for overlap filtering; a date-specific
    # ComplianceManager when a date was given, otherwise the injected one.
    if active_on and overlapping:
        self.sorter = ComplianceManager(active_on)
    elif not active_on and overlapping:
        self.sorter = require(CERT_SORTER)
    if incompatible:
        # Server returns only pools compatible with this consumer.
        for pool in list_pools(require(CP_PROVIDER).get_consumer_auth_cp(),
                               self.identity.uuid, active_on=active_on,
                               filter_string=filter_string, future=future,
                               after_date=after_date):
            self.compatible_pools[pool['id']] = pool
    else:
        # --all has been used
        for pool in list_pools(require(CP_PROVIDER).get_consumer_auth_cp(),
                               self.identity.uuid, list_all=True, active_on=active_on,
                               filter_string=filter_string, future=future,
                               after_date=after_date):
            self.all_pools[pool['id']] = pool
    return self._filter_pools(incompatible, overlapping, uninstalled, False, text)
def __init__(self, cache_only=False, apply_overrides=True):
    """Set up repo-update state and, when registered, fetch content overrides.

    :param cache_only: when True, read overrides from the local cache
        instead of asking the server
    :param apply_overrides: whether fetched overrides should be applied
    """
    self.identity = inj.require(inj.IDENTITY)
    # These should probably move closer their use
    self.ent_dir = inj.require(inj.ENT_DIR)
    self.prod_dir = inj.require(inj.PROD_DIR)
    self.ent_source = ent_cert.EntitlementDirEntitlementSource()
    self.cp_provider = inj.require(inj.CP_PROVIDER)
    self.uep = self.cp_provider.get_consumer_auth_cp()
    self.apply_overrides = apply_overrides
    # Fix: removed the dead store "self.manage_repos = 1" — it was
    # unconditionally overwritten by manage_repos_enabled() below.
    self.manage_repos = manage_repos_enabled()
    self.release = None
    self.overrides = {}
    self.override_supported = False
    try:
        self.override_supported = bool(self.identity.is_valid() and self.uep and
                                       self.uep.supports_resource('content_overrides'))
    except socket.error as e:
        # swallow the error to fix bz 1298327 (redundant trailing
        # "pass" removed)
        log.exception(e)
    self.written_overrides = WrittenOverrideCache()
    # FIXME: empty report at the moment, should be changed to include
    # info about updated repos
    self.report = RepoActionReport()
    self.report.name = "Repo updates"
    # If we are not registered, skip trying to refresh the
    # data from the server
    if not self.identity.is_valid():
        return
    # NOTE: if anything in the RepoActionInvoker init blocks, and it
    # could, yum could still block. The closest thing to an
    # event loop we have is the while True: sleep() in lock.py:Lock.acquire()
    # Only attempt to update the overrides if they are supported
    # by the server.
    if self.override_supported:
        self.written_overrides._read_cache()
        try:
            override_cache = inj.require(inj.OVERRIDE_STATUS_CACHE)
        except KeyError:
            override_cache = OverrideStatusCache()
        if cache_only:
            status = override_cache._read_cache()
        else:
            status = override_cache.load_status(self.uep, self.identity.uuid)
        for item in status or []:
            # Don't iterate through the list
            if item['contentLabel'] not in self.overrides:
                self.overrides[item['contentLabel']] = {}
            self.overrides[item['contentLabel']][item['name']] = item['value']
def __init__(self, facts):
    """Build the system-facts dialog: signals, model, and view columns.

    :param facts: facts collaborator used to populate/update the view
    """
    super(SystemFactsDialog, self).__init__()
    # self.consumer = consumer
    self.identity = inj.require(inj.IDENTITY)
    self.cp_provider = inj.require(inj.CP_PROVIDER)
    self.facts = facts
    self.connect_signals({
        "on_system_facts_dialog_delete_event": self._hide_callback,
        "on_close_button_clicked": self._hide_callback,
        "on_facts_update_button_clicked": self._update_facts_callback,
    })
    # Two-column (fact, value) tree model backing the view.
    self.facts_store = ga_Gtk.TreeStore(str, str)
    self.facts_view.set_model(self.facts_store)
    # View columns, in display order.
    for title, index in ((_("Fact"), 0), (_("Value"), 1)):
        self._add_column(title, index)
    # Row activation toggles expand/collapse of fact groups.
    self.facts_view.connect("row_activated",
                            widgets.expand_collapse_on_row_activated_callback)
def __init__(self):
    """Resolve the injected collaborators used for release lookups."""
    self._expansion = None
    self.release_status_cache = inj.require(inj.RELEASE_STATUS_CACHE)
    self.identity = inj.require(inj.IDENTITY)
    self.cp_provider = inj.require(inj.CP_PROVIDER)
def warnOrGiveUsageMessage(conduit):
    # XXX: Importing inline as you must be root to read the config file
    """either output a warning, or a usage message"""
    # TODO: refactor so there are not two checks for this
    if os.getuid() != 0:
        return
    if ClassicCheck().is_registered_with_classic():
        return
    msg = ""
    try:
        identity = inj.require(inj.IDENTITY)
        ent_dir = inj.require(inj.ENT_DIR)
        has_valid_ents = len(ent_dir.list_valid()) != 0
        # Don't warn people to register if we see entitlements, but no identity:
        if not identity.is_valid() and not has_valid_ents:
            msg = not_registered_warning
        elif not has_valid_ents:
            msg = no_subs_warning
        # Containers get their own wording.
        if config.in_container() and not has_valid_ents:
            msg = no_subs_container_warning
    finally:
        if msg:
            conduit.info(2, msg)
def perform(self):
    """Run the installed-products update check and record its status in the report."""
    installed_mgr = inj.require(inj.INSTALLED_PRODUCTS_MANAGER)
    identity = inj.require(inj.IDENTITY)
    self.report._status = installed_mgr.update_check(self.uep, identity.uuid)
    return self.report
def __init__(self):
    """Resolve collaborators and populate the pool-type map."""
    self.identity = inj.require(inj.IDENTITY)
    self.cp_provider = inj.require(inj.CP_PROVIDER)
    self.ent_dir = inj.require(inj.ENT_DIR)
    self.pool_cache = inj.require(inj.POOL_STATUS_CACHE)
    # Map is filled in by update(); must exist before the call.
    self.pooltype_map = {}
    self.update()
def __init__(self, uep=None):
    """Load cached product status when the consumer identity is valid.

    :param uep: optional server connection; falls back to the
        consumer-auth connection from the injected provider
    """
    uep = uep or inj.require(inj.CP_PROVIDER).get_consumer_auth_cp()
    self.identity = inj.require(inj.IDENTITY)
    if not self.identity.is_valid():
        return
    self.prod_status_cache = inj.require(inj.PROD_STATUS_CACHE)
    self.prod_status = self.prod_status_cache.load_status(uep, self.identity.uuid)
def list_pools(uep, consumer_uuid, list_all=False, active_on=None, filter_string=None, future=None, after_date=None): """ Wrapper around the UEP call to fetch pools, which forces a facts update if anything has changed before making the request. This ensures the rule checks server side will have the most up to date info about the consumer possible. """ # client tells service 'look for facts again' # if service finds new facts: # -emit a signal? # - or just update properties # - and set a 'been_synced' property to False # client waits for facts check to finish # if no changes or been_synced=True, continue # if changes or unsynced: # subman updates candlepin with the latest version of services GetFacts() [blocking] # when finished, subman emit's 'factsSyncFinished' # - then service flops 'been_synced' property # -or- subman calls 'here_are_the_latest_facts_to_the_server()' on service # then service flops 'been_synced' property # subman gets signal that props changed, and that been_synced is now true # since it's been synced, then subman continues require(FACTS).update_check(uep, consumer_uuid) profile_mgr = cache.ProfileManager() profile_mgr.update_check(uep, consumer_uuid) owner = uep.getOwner(consumer_uuid) ownerid = owner['key'] return uep.getPoolsList(consumer=consumer_uuid, listAll=list_all, active_on=active_on, owner=ownerid, filter_string=filter_string, future=future, after_date=after_date)
def get_installed_product_status(product_directory, entitlement_directory, uep):
    """
    Returns the Installed products and their subscription states

    Each entry is a tuple: (name, product id, version, architectures,
    status, status reasons, begin date, end date).
    """
    sorter = require(CERT_SORTER)
    calculator = require(PRODUCT_DATE_RANGE_CALCULATOR, uep)
    product_status = []
    for installed_product, product_cert in sorter.installed_products.items():
        for product in product_cert.products:
            begin = end = ""
            prod_status_range = calculator.calculate(product.id)
            if prod_status_range:
                # Format the date in user's local time as the date
                # range is returned in GMT.
                begin = format_date(prod_status_range.begin())
                end = format_date(prod_status_range.end())
            product_status.append((
                product.name,
                installed_product,
                product.version,
                ",".join(product.architectures),
                sorter.get_status(product.id),
                sorter.reasons.get_product_reasons(product),
                begin,
                end,
            ))
    return product_status
def __init__(self, uep):
    """
    Initialization of Unregister instance

    :param uep: connection used to talk to the entitlement server
    """
    self.uep = uep
    self.identity = inj.require(inj.IDENTITY)
    self.cp_provider = inj.require(inj.CP_PROVIDER)
def setUp(self):
    """Wire a mocked consumer connection, a stub ent dir, and a pool cache."""
    super(TestPoolTypeCache, self).setUp()
    self.cp_provider = inj.require(inj.CP_PROVIDER)
    self.cp_provider.consumer_auth_cp = Mock()
    self.cp = self.cp_provider.consumer_auth_cp
    stub_cert = StubEntitlementCertificate(StubProduct('pid1'),
                                           pool=StubPool('someid'))
    self.ent_dir = StubEntitlementDirectory(certificates=[stub_cert])
    self.pool_cache = inj.require(inj.POOL_STATUS_CACHE)
    # Never touch disk from tests.
    self.pool_cache.write_cache = Mock()
def __init__(self, cache_only=False, apply_overrides=True):
    """Set up repo-update state; fetch overrides and release for registered systems.

    :param cache_only: when True, read overrides from the local cache
        instead of asking the server
    :param apply_overrides: whether fetched overrides should be applied
    """
    self.identity = inj.require(inj.IDENTITY)
    # These should probably move closer their use
    self.ent_dir = inj.require(inj.ENT_DIR)
    self.prod_dir = inj.require(inj.PROD_DIR)
    self.ent_source = ent_cert.EntitlementDirEntitlementSource()
    self.cp_provider = inj.require(inj.CP_PROVIDER)
    self.uep = self.cp_provider.get_consumer_auth_cp()
    self.manage_repos = 1
    self.apply_overrides = apply_overrides
    if CFG.has_option('rhsm', 'manage_repos'):
        self.manage_repos = int(CFG.get('rhsm', 'manage_repos'))
    self.release = None
    self.overrides = {}
    self.override_supported = bool(self.identity.is_valid() and self.uep and
                                   self.uep.supports_resource('content_overrides'))
    self.written_overrides = WrittenOverrideCache()
    # FIXME: empty report at the moment, should be changed to include
    # info about updated repos
    self.report = RepoActionReport()
    self.report.name = "Repo updates"
    # If we are not registered, skip trying to refresh the
    # data from the server
    if not self.identity.is_valid():
        return
    # Only attempt to update the overrides if they are supported
    # by the server.
    if self.override_supported:
        self.written_overrides._read_cache()
        try:
            override_cache = inj.require(inj.OVERRIDE_STATUS_CACHE)
        except KeyError:
            override_cache = OverrideStatusCache()
        if cache_only:
            status = override_cache._read_cache()
        else:
            status = override_cache.load_status(self.uep, self.identity.uuid)
        for item in status or []:
            # Don't iterate through the list
            if item['contentLabel'] not in self.overrides:
                self.overrides[item['contentLabel']] = {}
            self.overrides[item['contentLabel']][item['name']] = item['value']
    message = "Release API is not supported by the server. Using default."
    try:
        result = self.uep.getRelease(self.identity.uuid)
        self.release = result['releaseVer']
    # Fix: "except RemoteServerException, e" is Python-2-only syntax
    # (a SyntaxError on Python 3); the bound exception was unused anyway.
    except RemoteServerException:
        log.debug(message)
def update_product_manager(self):
    """Push the installed-product list to the server for registered systems."""
    if not self.is_registered():
        return
    cp_provider = inj.require(inj.CP_PROVIDER)
    consumer_identity = inj.require(inj.IDENTITY)
    try:
        uep = cp_provider.get_consumer_auth_cp()
        self.installed_mgr.update_check(uep, consumer_identity.uuid)
    except RestlibException:
        # Invalid consumer certificate
        pass
def clean_all_data(backup=True):
    """Remove all local consumer identity, entitlement, and cache data.

    :param backup: when True, snapshot the consumer cert dir to '<dir>.old'
        before deleting anything
    """
    consumer_dir = cfg.get('rhsm', 'consumerCertDir')
    if backup:
        # Normalize any trailing slash so the backup path is '<dir>.old'.
        if consumer_dir[-1] == "/":
            consumer_dir_backup = consumer_dir[0:-1] + ".old"
        else:
            consumer_dir_backup = consumer_dir + ".old"
        # Delete backup dir if it exists:
        shutil.rmtree(consumer_dir_backup, ignore_errors=True)
        # Copy current consumer dir:
        log.debug("Backing up %s to %s.", consumer_dir, consumer_dir_backup)
        shutil.copytree(consumer_dir, consumer_dir_backup)
    # FIXME FIXME
    # Delete current consumer certs:
    for path in [ConsumerIdentity.keypath(), ConsumerIdentity.certpath()]:
        if (os.path.exists(path)):
            log.debug("Removing identity cert: %s" % path)
            os.remove(path)
    # Refresh the in-memory identity now that its files are gone.
    require(IDENTITY).reload()
    # Delete all entitlement certs rather than the directory itself:
    ent_cert_dir = cfg.get('rhsm', 'entitlementCertDir')
    if os.path.exists(ent_cert_dir):
        for f in glob.glob("%s/*.pem" % ent_cert_dir):
            certpath = os.path.join(ent_cert_dir, f)
            log.debug("Removing entitlement cert: %s" % f)
            os.remove(certpath)
    else:
        log.warn("Entitlement cert directory does not exist: %s" % ent_cert_dir)
    # Subclasses of cache.CacheManager have a @classmethod delete_cache
    # for deleting persistent caches
    cache.ProfileManager.delete_cache()
    cache.InstalledProductsManager.delete_cache()
    Facts.delete_cache()
    # WrittenOverridesCache is also a subclass of cache.CacheManager, but
    # it is deleted in RepoActionInvoker.delete_repo_file() below.
    # StatusCache subclasses have a per instance cache variable
    # and delete_cache is an instance method, so we need to call
    # the delete_cache on the instances created in injectioninit.
    require(ENTITLEMENT_STATUS_CACHE).delete_cache()
    require(PROD_STATUS_CACHE).delete_cache()
    require(OVERRIDE_STATUS_CACHE).delete_cache()
    require(RELEASE_STATUS_CACHE).delete_cache()
    RepoActionInvoker.delete_repo_file()
    log.info("Cleaned local data")
def __init__(self):
    """Resolve collaborators, refresh state, and build the cert lib."""
    self.identity = require(IDENTITY)
    self.cp_provider = require(CP_PROVIDER)
    # Refresh before wiring up directories and the cert lib.
    self.update()
    self.product_dir = inj.require(inj.PROD_DIR)
    self.entitlement_dir = inj.require(inj.ENT_DIR)
    consumer_cp = self.cp_provider.get_consumer_auth_cp()
    self.certlib = CertLib(uep=consumer_cp)
    self.cs = require(CERT_SORTER)
def __init__(self):
    """Collect entitlements from every currently-valid entitlement cert."""
    ent_dir = inj.require(inj.ENT_DIR)
    prod_dir = inj.require(inj.PROD_DIR)
    self.product_tags = prod_dir.get_provided_tags()
    # populate from ent certs
    self._entitlements = [
        EntitlementCertEntitlement.from_ent_cert(cert)
        for cert in ent_dir.list_valid()
    ]
def __init__(self):
    """Resolve collaborators, refresh state, and build the invokers."""
    self.identity = require(IDENTITY)
    self.cp_provider = require(CP_PROVIDER)
    # Refresh before wiring up directories and invokers.
    self.update()
    self.product_dir = inj.require(inj.PROD_DIR)
    self.entitlement_dir = inj.require(inj.ENT_DIR)
    self.certlib = EntCertActionInvoker()
    self.overrides = Overrides()
    self.cs = require(CERT_SORTER)
def get_sys_purpose_store():
    """
    :return: Returns a singleton instance of the syspurpose store if it was
        imported. Otherwise None.
    """
    global store
    # Lazily construct the singleton; stays None if SyncedStore was never
    # importable.
    if store is None and SyncedStore is not None:
        uep = inj.require(inj.CP_PROVIDER).get_consumer_auth_cp()
        uuid = inj.require(inj.IDENTITY).uuid
        store = SyncedStore(uep, consumer_uuid=uuid)
    return store
def _filter_pools(self, incompatible, overlapping, uninstalled, subscribed, text):
    """
    Return a list of pool hashes, filtered according to the given options.

    This method does not actually hit the server, filtering is done in
    memory.
    """
    log.debug("Filtering %d total pools" % len(self.all_pools))
    if not incompatible:
        pools = self.all_pools.values()
    else:
        pools = self.compatible_pools.values()
        # NOTE(review): self.incompatible_pools is not assigned anywhere in
        # this view of the code — confirm it is populated elsewhere before
        # this branch can run, or this raises AttributeError.
        log.debug("\tRemoved %d incompatible pools" % len(self.incompatible_pools))
    pool_filter = PoolFilter(require(PROD_DIR), require(ENT_DIR), self.sorter)
    # Filter out products that are not installed if necessary:
    if uninstalled:
        prev_length = len(pools)
        pools = pool_filter.filter_out_uninstalled(pools)
        log.debug("\tRemoved %d pools for not installed products" % (prev_length - len(pools)))
    if overlapping:
        prev_length = len(pools)
        pools = pool_filter.filter_out_overlapping(pools)
        log.debug("\tRemoved %d pools overlapping existing entitlements" % (prev_length - len(pools)))
    # Filter by product name if necessary:
    if text:
        prev_length = len(pools)
        pools = pool_filter.filter_product_name(pools, text)
        log.debug("\tRemoved %d pools not matching the search string" % (prev_length - len(pools)))
    if subscribed:
        prev_length = len(pools)
        pools = pool_filter.filter_subscribed_pools(pools, self.subscribed_pool_ids, self.compatible_pools)
        log.debug("\tRemoved %d pools that we're already subscribed to" % (prev_length - len(pools)))
    log.debug("\t%d pools to display, %d filtered out" % (len(pools), len(self.all_pools) - len(pools)))
    return pools
def __init__(self, ent_dir=None, prod_dir=None):
    """Initialize fact state and directory collaborators.

    :param ent_dir: entitlement directory; injected when not supplied
    :param prod_dir: product directory; injected when not supplied
    """
    self.facts = {}
    self.entitlement_dir = ent_dir or inj.require(inj.ENT_DIR)
    self.product_dir = prod_dir or inj.require(inj.PROD_DIR)
    # see bz #627962: these values churn constantly (esp. laptop cpu
    # scaling), so we still report them but ignore them as an indicator
    # that facts need updating.
    self.graylist = ['cpu.cpu_mhz', 'lscpu.cpu_mhz']
    # plugin manager so we can add custom facts via plugin
    self.plugin_manager = require(PLUGIN_MANAGER)
def __init__(self, product_dir=None, product_db=None):
    """Initialize the product manager.

    :param product_dir: product cert directory; injected when not supplied
    :param product_db: product database; a fresh one is created and read
        from disk when not supplied
    """
    self.pdir = product_dir or inj.require(inj.PROD_DIR)
    if product_db:
        self.db = product_db
    else:
        self.db = ProductDatabase()
        self.db.read()
    self.meta_data_errors = []
    self.plugin_manager = require(PLUGIN_MANAGER)
def get_available_entitlements(facts, get_all=False, active_on=None, overlapping=False,
                               uninstalled=False, text=None):
    """
    Returns a list of entitlement pools from the server.

    Facts will be updated if appropriate before making the request, to ensure
    the rules on the server will pass if appropriate.

    The 'all' setting can be used to return all pools, even if the rules do
    not pass. (i.e. show pools that are incompatible for your hardware)
    """
    # Keys each returned dict is reduced to.
    columns = ['id', 'quantity', 'consumed', 'endDate', 'productName',
               'providedProducts', 'productId', 'attributes', 'pool_type',
               'service_level', 'service_type', 'suggested', 'contractNumber']
    pool_stash = PoolStash(Facts(require(ENT_DIR), require(PROD_DIR)))
    # get_all inverts to the stash's 'incompatible' flag.
    dlist = pool_stash.get_filtered_pools_list(active_on, not get_all, overlapping, uninstalled, text)
    # Enrich each raw pool dict with derived display fields.
    for pool in dlist:
        pool_wrapper = PoolWrapper(pool)
        pool['providedProducts'] = pool_wrapper.get_provided_products()
        if allows_multi_entitlement(pool):
            pool['multi-entitlement'] = "Yes"
        else:
            pool['multi-entitlement'] = "No"
        support_attrs = pool_wrapper.get_product_attributes("support_level", "support_type")
        pool['service_level'] = support_attrs['support_level']
        pool['service_type'] = support_attrs['support_type']
        pool['suggested'] = pool_wrapper.get_suggested_quantity()
        pool['pool_type'] = pool_wrapper.get_pool_type()
        if pool['suggested'] is None:
            pool['suggested'] = ""
    # no default, so default is None if key not found
    data = [_sub_dict(pool, columns) for pool in dlist]
    for d in data:
        # Negative quantity means unlimited; otherwise show what's left.
        if int(d['quantity']) < 0:
            d['quantity'] = _('Unlimited')
        else:
            d['quantity'] = str(int(d['quantity']) - int(d['consumed']))
        d['endDate'] = format_date(isodate.parse_date(d['endDate']))
        del d['consumed']
    return data
def pre_check_status(force_signal):
    """Return an early status signal when one is determinable, else None.

    :param force_signal: signal forced from the command line, if any
    """
    if force_signal is not None:
        debug("forcing status signal from cli arg")
        return force_signal
    if ClassicCheck().is_registered_with_classic():
        debug("System is already registered to another entitlement system")
        return RHN_CLASSIC
    identity = require(IDENTITY)
    sorter = require(CERT_SORTER)
    # No identity and no entitlements at all -> registration is required.
    if not (identity.is_valid() or sorter.has_entitlements()):
        debug("The system is not currently registered.")
        return RHSM_REGISTRATION_REQUIRED
    return None
def update(conduit, cache_only):
    """ update entitlement certificates """
    if os.getuid() != 0:
        conduit.info(3, 'Not root, Subscription Management repositories not updated')
        return
    conduit.info(3, 'Updating Subscription Management repositories.')
    # XXX: Importing inline as you must be root to read the config file
    from subscription_manager.identity import ConsumerIdentity
    cert_file = ConsumerIdentity.certpath()
    key_file = ConsumerIdentity.keypath()
    if not inj.require(inj.IDENTITY).is_valid():
        conduit.info(3, "Unable to read consumer identity")
        return
    try:
        uep = connection.UEPConnection(cert_file=cert_file, key_file=key_file)
    # FIXME: catchall exception
    except Exception:
        # log
        conduit.info(2, "Unable to connect to Subscription Management Service")
        return
    RepoActionInvoker(uep=uep, cache_only=cache_only).update()
def _update(self, cache_only):
    """ update entitlement certificates

    :param cache_only: when True, skip refreshing entitlement certs from the
        server and only regenerate repos from cached data
    """
    logger.info(_('Updating Subscription Management repositories.'))
    # XXX: Importing inline as you must be root to read the config file
    from subscription_manager.identity import ConsumerIdentity
    cert_file = str(ConsumerIdentity.certpath())
    key_file = str(ConsumerIdentity.keypath())
    identity = inj.require(inj.IDENTITY)
    # In containers we have no identity, but we may have entitlements inherited
    # from the host, which need to generate a redhat.repo.
    if identity.is_valid():
        # Connectivity check only; the connection object is not kept.
        try:
            connection.UEPConnection(cert_file=cert_file, key_file=key_file)
        # FIXME: catchall exception
        except Exception:
            # log
            logger.info(_("Unable to connect to Subscription Management Service"))
            return
    else:
        logger.info(_("Unable to read consumer identity"))
    if config.in_container():
        logger.info(_("Subscription Manager is operating in container mode."))
    if not cache_only:
        cert_action_invoker = EntCertActionInvoker()
        cert_action_invoker.update()
    repo_action_invoker = RepoActionInvoker(cache_only=cache_only)
    repo_action_invoker.update()
def __init__(self):
    """Build the network/proxy configuration dialog and wire its signals."""
    # Get widgets we'll need to access
    super(NetworkConfigDialog, self).__init__()
    # Remember the process-wide socket timeout so it can be restored later.
    self.org_timeout = socket.getdefaulttimeout()
    self.progress_bar = None
    self.cfg = rhsm.config.initConfig()
    self.cp_provider = inj.require(inj.CP_PROVIDER)
    # Need to load values before connecting signals because when the dialog
    # starts up it seems to trigger the signals which overwrites the config
    # with the blank values.
    self.set_initial_values()
    self.enableProxyButton.connect("toggled", self.enable_action)
    self.enableProxyAuthButton.connect("toggled", self.enable_action)
    self.enableProxyButton.connect("toggled", self.clear_connection_label)
    self.enableProxyAuthButton.connect("toggled", self.clear_connection_label)
    self.enableProxyButton.connect("toggled", self.enable_test_button)
    # Any edit invalidates the last "test connection" result label.
    self.proxyEntry.connect("changed", self.clear_connection_label)
    self.proxyUserEntry.connect("changed", self.clear_connection_label)
    self.proxyPasswordEntry.connect("changed", self.clear_connection_label)
    # presumably normalizes the proxy string on focus loss — confirm in
    # clean_proxy_entry
    self.proxyEntry.connect("focus-out-event", self.clean_proxy_entry)
    self.cancelButton.connect("clicked", self.on_cancel_clicked)
    self.saveButton.connect("clicked", self.on_save_clicked)
    self.testConnectionButton.connect("clicked", self.on_test_connection_clicked)
    self.networkConfigDialog.connect("delete-event", self.deleted)
def get_compliance_status(self):
    """Load this consumer's compliance status as of self.on_date."""
    status_cache = inj.require(inj.ENTITLEMENT_STATUS_CACHE)
    uep = self.cp_provider.get_consumer_auth_cp()
    return status_cache.load_status(uep, self.identity.uuid, self.on_date)
def __init__(self):
    """Initialize empty fact state and grab the plugin manager."""
    self.facts = {}
    # see bz #627962: these values churn constantly (esp. laptop cpu
    # scaling), so we still report them but ignore them as an indicator
    # that facts need updating.
    self.graylist = ['cpu.cpu_mhz', 'lscpu.cpu_mhz']
    # plugin manager so we can add custom facts via plugin
    self.plugin_manager = require(PLUGIN_MANAGER)
def update_subscriptions(self, update_dbus=True):
    """
    Pulls the entitlement certificates and updates the subscription model.

    :param update_dbus: when True, also notify the DBus interface
    """
    self.pooltype_cache.update()
    # Group entitlement certs by stacking group for display.
    sorter = EntitlementCertStackingGroupSorter(
        self.entitlement_dir.list())
    # Rebuild the store from scratch before repopulating.
    self.store.clear()
    # FIXME: mapped list store inits are weird
    for group in sorter.groups:
        self._add_group(group)
    self.top_view.expand_all()
    self._stripe_rows(None, self.store)
    if update_dbus:
        inj.require(inj.DBUS_IFACE).update()
    # Nothing is selected yet, so unsubscribe must be disabled.
    self.unsubscribe_button.set_property('sensitive', False)
    # 841396: Select first item in My Subscriptions table by default
    selection = self.top_view.get_selection()
    selection.select_path(0)
def clean_up(self, subscribed_channels):
    """Remove a stale 68.pem product cert when 71.pem is also present.

    Hack to address BZ 853233.

    :param subscribed_channels: unused here; kept for interface compatibility
    """
    product_dir = inj.require(inj.PROD_DIR)
    cert_68 = os.path.join(product_dir.path, "68.pem")
    cert_71 = os.path.join(product_dir.path, "71.pem")
    if os.path.isfile(cert_68) and os.path.isfile(cert_71):
        try:
            os.remove(cert_68)
            self.db.delete("68")
            self.db.write()
            log.info("Removed 68.pem due to existence of 71.pem")
        # Fix: "except OSError, e" is Python-2-only syntax and a
        # SyntaxError on Python 3.
        except OSError as e:
            log.info(e)
def perform(self):
    """Update the identity certificate, skipping invalid identities."""
    identity = inj.require(inj.IDENTITY)
    if identity.is_valid():
        return self._update_cert(identity)
    # we could in theory try to update the id in the
    # case of it being bogus/corrupted, ala #844069,
    # but that seems unneeded
    # FIXME: more details
    self.report._status = 0
    return self.report
def get_consumer_uuid(self):
    """
    Method for getting UUID of consumer
    :return: string representing UUID ("" when no consumer exists)
    """
    uuid = inj.require(inj.IDENTITY).uuid
    return "" if uuid is None else uuid
def update_facts(self):
    """Sends the current system facts to the UEP server."""
    identity = inj.require(inj.IDENTITY)
    try:
        self.facts.update_check(self.cp_provider.get_consumer_auth_cp(),
                                identity.uuid, force=True)
        if self.update_callback:
            self.update_callback()
    # Fix: "except Exception, e" is Python-2-only syntax and a
    # SyntaxError on Python 3.
    except Exception as e:
        log.error("Could not update system facts \nError: %s" % e)
        handle_gui_exception(e, linkify(str(e)), self.system_facts_dialog)
def list_pools(uep, consumer_uuid, list_all=False, active_on=None, filter_string=None): """ Wrapper around the UEP call to fetch pools, which forces a facts update if anything has changed before making the request. This ensures the rule checks server side will have the most up to date info about the consumer possible. """ # client tells service 'look for facts again' # if service finds new facts: # -emit a signal? # - or just update properties # - and set a 'been_synced' property to False # client waits for facts check to finish # if no changes or been_synced=True, continue # if changes or unsynced: # subman updates candlepin with the latest version of services GetFacts() [blocking] # when finished, subman emit's 'factsSyncFinished' # - then service flops 'been_synced' property # -or- subman calls 'here_are_the_latest_facts_to_the_server()' on service # then service flops 'been_synced' property # subman gets signal that props changed, and that been_synced is now true # since it's been synced, then subman continues require(FACTS).update_check(uep, consumer_uuid) profile_mgr = cache.ProfileManager() profile_mgr.update_check(uep, consumer_uuid) owner = uep.getOwner(consumer_uuid) ownerid = owner['key'] return uep.getPoolsList(consumer=consumer_uuid, listAll=list_all, active_on=active_on, owner=ownerid, filter_string=filter_string)
def _set_enable_for_yum_repositories(setting, *repo_ids):
    """Enable or disable matching yum repositories.

    Uses server-side content overrides when the system is registered and the
    server supports them; otherwise edits the repo file directly.

    :param setting: value to store in each repo's 'enabled' flag
    :param repo_ids: repo id patterns (fnmatch globs)
    :return: number of repositories changed
    """
    invoker = RepoActionInvoker()
    repos = invoker.get_repos()
    repos_to_change = []
    for pattern in repo_ids:
        matches = set([repo for repo in repos if fnmatch.fnmatch(repo.id, pattern)])
        repos_to_change.extend(matches)
    if len(repos_to_change) == 0:
        return 0
    identity = inj.require(inj.IDENTITY)
    cp_provider = inj.require(inj.CP_PROVIDER)
    if identity.is_valid() and cp_provider.get_consumer_auth_cp().supports_resource('content_overrides'):
        overrides = [{'contentLabel': repo.id, 'name': 'enabled', 'value': setting}
                     for repo in repos_to_change]
        cp = cp_provider.get_consumer_auth_cp()
        results = cp.setContentOverrides(identity.uuid, overrides)
        # The cache should be primed at this point by the invoker.get_repos().
        # Fix: the earlier duplicate require() before the branch was a dead
        # store; also renamed from 'cache' which shadowed the cache module.
        override_cache = inj.require(inj.OVERRIDE_STATUS_CACHE)
        # Update the cache with the returned JSON
        override_cache.server_status = results
        override_cache.write_cache()
        invoker.update()
    else:
        # Unregistered (or overrides unsupported): edit the repo file locally.
        for repo in repos_to_change:
            repo['enabled'] = setting
        repo_file = RepoFile()
        repo_file.read()
        for repo in repos_to_change:
            repo_file.update(repo)
        repo_file.write()
    return len(repos_to_change)
def register(self, credentials, org, environment):
    """Register the system via the subscription-manager CLI and return the
    resulting identity.

    :param credentials: object carrying the username/password to register with
    :param org: owner/org key to register under, if any
    :param environment: environment name to register into, if any
    :return: the reloaded, valid identity object (exits on failure)
    """
    # For registering the machine, use the CLI tool to reuse the
    # username/password (because the GUI will prompt for them again)
    # Prepended a \n so translation can proceed without hitch
    print("")
    print(_("Attempting to register system to destination server..."))
    cmd = ['subscription-manager', 'register']
    # Candlepin doesn't want user credentials with activation keys
    # Auto-attach and environments are also forbidden
    if self.options.activation_keys:
        for key in self.options.activation_keys:
            cmd.append('--activationkey=' + key)
    else:
        # NOTE(review): these credential lines arrived redacted ('******')
        # in the source; reconstructed from the surrounding CLI-argument
        # pattern — confirm attribute names on the credentials object.
        cmd.append('--username=' + credentials.username)
        cmd.append('--password=' + credentials.password)
        if environment:
            cmd.append('--environment=' + environment)
        if self.options.auto:
            cmd.append('--auto-attach')
    if self.options.destination_url:
        cmd.append('--serverurl=' + self.options.destination_url)
    if org:
        cmd.append('--org=' + org)
    if self.options.five_to_six:
        if self.consumer_exists(self.consumer_id):
            cmd.append('--consumerid=' + self.consumer_id)
    if self.options.service_level:
        servicelevel = self.select_service_level(
            org, self.options.service_level)
        cmd.append('--servicelevel=' + servicelevel)
    subprocess.call(cmd)
    identity = inj.require(inj.IDENTITY)
    identity.reload()
    if not identity.is_valid():
        system_exit(
            2,
            _("\nUnable to register.\nFor further assistance, please contact Red Hat Global Support Services."))
    print(
        _("System '{identity_name}' successfully registered.\n").format(
            identity_name=identity.name))
    return identity
def sync(self):
    """
    Actually do the sync between client and server.
    Saves the merged changes between client and server in the SyspurposeCache.
    :return: The synced values
    """
    # No-op when the server cannot handle syspurpose at all.
    if not self.uep.has_capability('syspurpose'):
        log.debug('Server does not support syspurpose, not syncing')
        return
    consumer_identity = inj.require(inj.IDENTITY)
    consumer = self.uep.getConsumer(consumer_identity.uuid)
    server_sp = {}
    sp_cache = SyspurposeCache()
    # Translate from the remote values to the local, filtering out items not known
    for attr in ATTRIBUTES:
        server_sp[attr] = consumer.get(LOCAL_TO_REMOTE[attr])
    try:
        filesystem_sp = read_syspurpose(raise_on_error=True)
    except (os.error, ValueError):
        # Local file unreadable: fall back to server values only.
        self.report._exceptions.append(
            'Cannot read local syspurpose, trying to update from server only'
        )
        result = server_sp
        log.debug(
            'Unable to read local system purpose at \'%s\'\nUsing the server values.'
            % USER_SYSPURPOSE)
    else:
        # Three-way merge: local file vs. last-synced cache vs. server,
        # recording each change in the report.
        cached_values = sp_cache.read_cache_only()
        result = three_way_merge(local=filesystem_sp,
                                 base=cached_values,
                                 remote=server_sp,
                                 on_change=self.report.record_change)
    # Persist the merged result locally (cache + syspurpose file) and
    # push it back to the server.
    sp_cache.syspurpose = result
    sp_cache.write_cache()
    write_syspurpose(result)
    self.uep.updateConsumer(consumer_identity.uuid,
                            role=result[ROLE],
                            addons=result[ADDONS],
                            service_level=result[SERVICE_LEVEL],
                            usage=result[USAGE])
    self.report._status = 'Successfully synced system purpose'
    log.debug('Updated syspurpose located at \'%s\'' % USER_SYSPURPOSE)
    return result
def _filter_pools(self, incompatible, overlapping, uninstalled, subscribed, text):
    """
    Return a list of pool hashes, filtered according to the given options.

    This method does not actually hit the server, filtering is done in
    memory.
    """
    log.debug("Filtering %d total pools" % len(self.all_pools))
    if not incompatible:
        pools = self.all_pools.values()
    else:
        pools = self.compatible_pools.values()
        # NOTE(review): self.incompatible_pools is not assigned anywhere in
        # this view of the code — confirm it is populated elsewhere before
        # this branch can run, or this raises AttributeError.
        log.debug("\tRemoved %d incompatible pools" % len(self.incompatible_pools))
    sorter = require(CERT_SORTER)
    pool_filter = PoolFilter(self.backend.product_dir,
                             self.backend.entitlement_dir, sorter)
    # Filter out products that are not installed if necessary:
    if uninstalled:
        prev_length = len(pools)
        pools = pool_filter.filter_out_uninstalled(pools)
        log.debug("\tRemoved %d pools for not installed products" % (prev_length - len(pools)))
    if overlapping:
        prev_length = len(pools)
        pools = pool_filter.filter_out_overlapping(pools)
        log.debug("\tRemoved %d pools overlapping existing entitlements" % (prev_length - len(pools)))
    # Filter by product name if necessary:
    if text:
        prev_length = len(pools)
        pools = pool_filter.filter_product_name(pools, text)
        log.debug("\tRemoved %d pools not matching the search string" % (prev_length - len(pools)))
    if subscribed:
        prev_length = len(pools)
        pools = pool_filter.filter_subscribed_pools(
            pools, self.subscribed_pool_ids, self.compatible_pools)
        log.debug("\tRemoved %d pools that we're already subscribed to" % (prev_length - len(pools)))
    log.debug("\t%d pools to display, %d filtered out" % (len(pools), len(self.all_pools) - len(pools)))
    return pools
def __init__(self):
    """
    Build the network/proxy configuration dialog: load the current proxy
    settings from rhsm config and wire up the widget signal handlers.
    """
    # Get widgets we'll need to access
    super(NetworkConfigDialog, self).__init__()
    # Remember the process-wide default socket timeout so connection tests
    # can restore it afterwards — TODO confirm against the test handlers.
    self.org_timeout = socket.getdefaulttimeout()
    self.progress_bar = None
    self.cfg = rhsm.config.get_config_parser()
    self.cp_provider = inj.require(inj.CP_PROVIDER)
    # Need to load values before connecting signals because when the dialog
    # starts up it seems to trigger the signals which overwrites the config
    # with the blank values.
    self.set_initial_values()
    # Toggling any proxy checkbox updates related widget sensitivity ...
    self.enableProxyButton.connect("toggled", self.enable_action)
    self.enableProxyAuthButton.connect("toggled", self.enable_action)
    self.enableProxyBypassButton.connect("toggled", self.enable_action)
    # ... clears any previous connection-test result label ...
    self.enableProxyButton.connect("toggled", self.clear_connection_label)
    self.enableProxyAuthButton.connect("toggled", self.clear_connection_label)
    self.enableProxyBypassButton.connect("toggled", self.clear_connection_label)
    # ... and re-evaluates whether the "test connection" button applies.
    self.enableProxyButton.connect("toggled", self.enable_or_disable_test_button)
    self.enableProxyAuthButton.connect("toggled", self.enable_or_disable_test_button)
    self.enableProxyBypassButton.connect(
        "toggled", self.enable_or_disable_test_button)
    # Editing any entry likewise resets the label and test-button state.
    self.proxyEntry.connect("changed", self.clear_connection_label)
    self.proxyUserEntry.connect("changed", self.clear_connection_label)
    self.proxyPasswordEntry.connect("changed", self.clear_connection_label)
    self.noProxyEntry.connect("changed", self.clear_connection_label)
    self.proxyEntry.connect("changed", self.enable_or_disable_test_button)
    self.proxyUserEntry.connect("changed", self.enable_or_disable_test_button)
    self.proxyPasswordEntry.connect("changed", self.enable_or_disable_test_button)
    self.noProxyEntry.connect("changed", self.enable_or_disable_test_button)
    # Normalize the proxy entry when focus leaves it (see clean_proxy_entry).
    self.proxyEntry.connect("focus-out-event", self.clean_proxy_entry)
    self.cancelButton.connect("clicked", self.on_cancel_clicked)
    self.saveButton.connect("clicked", self.on_save_clicked)
    self.testConnectionButton.connect("clicked", self.on_test_connection_clicked)
    self.networkConfigDialog.connect("delete-event",
                                     self.deleted)
def __init__(self, bus, keep_alive, force_signal, loop):
    """
    Export this object on the given D-Bus bus as
    com.redhat.SubscriptionManager at object path /EntitlementStatus.

    :param bus: D-Bus bus to claim the service name on
    :param keep_alive: flag stored for later use by the service logic
    :param force_signal: when not None, this signal value is forced
    :param loop: main loop; a timeout callback may terminate it
    """
    name = dbus.service.BusName("com.redhat.SubscriptionManager", bus)
    dbus.service.Object.__init__(self, name, "/EntitlementStatus")
    self.has_run = False
    # this will get set after first invocation
    self.rhsm_icon_cache = require(RHSM_ICON_CACHE)
    self.keep_alive = keep_alive
    self.force_signal = force_signal
    self.loop = loop
    # Read process timeout from configuration file
    self.read_rhsm_config()
    # the callback function will try to terminate main loop after timeout is reached
    ga_GObject.timeout_add_seconds(self.TIMEOUT, timeout_cb, loop, self)
def __init__(self, product_dir=None):
    """
    Build a map of installed product id -> basic product info
    (productId, productName, version, arch) from the product cert dir.

    :param product_dir: product certificate directory; when falsy, the
        injected PROD_DIR is used instead
    """
    product_dir = product_dir or inj.require(inj.PROD_DIR)
    self.installed = {}
    for cert in product_dir.list():
        # The first product on the cert is the one we index by.
        product = cert.products[0]
        self.installed[product.id] = {
            'productId': product.id,
            'productName': product.name,
            'version': product.version,
            'arch': ','.join(product.architectures),
        }
def warnOrGiveUsageMessage(conduit):
    # XXX: Importing inline as you must be root to read the config file
    """Output either a not-registered or a no-subscriptions warning via the conduit."""
    # TODO: refactor so there are not two checks for this
    if os.getuid() != 0:
        return
    if not ClassicCheck().is_registered_with_classic():
        return
    message = ""
    try:
        identity = inj.require(inj.IDENTITY)
        ent_dir = inj.require(inj.ENT_DIR)
        # Don't warn people to register if we see entitlements, but no identity:
        if not identity.is_valid() and len(ent_dir.list_valid()) == 0:
            message = not_registered_warning
        elif len(ent_dir.list_valid()) == 0:
            message = no_subs_warning
    finally:
        # Emit whatever message was selected even if an exception is on
        # its way out of the try block.
        if message:
            conduit.info(2, message)
def _warn_expired():
    """
    Display warning for expired entitlements
    """
    ent_dir = inj.require(inj.ENT_DIR)
    # Collect a de-duplicated set of " - <name>" lines, one per product
    # found on any expired entitlement cert.
    product_lines = {' - %s' % product.name
                     for cert in ent_dir.list_expired()
                     for product in cert.products}
    if product_lines:
        logger.info(expired_warning % '\n'.join(sorted(product_lines)))
def refresh(self, active_on):
    """
    Refresh the list of pools from the server, active on the given date.

    Performs two server queries: one for pools compatible with this
    consumer, and one for all pools; the difference is recorded as
    incompatible pools.

    :param active_on: date pools must be active on; when falsy, the
        injected cert sorter is reused instead of a new ComplianceManager
    """
    if active_on:
        self.sorter = ComplianceManager(active_on)
    else:
        self.sorter = require(CERT_SORTER)
    self.all_pools = {}
    self.compatible_pools = {}
    log.debug("Refreshing pools from server...")
    # First query: pools that pass server-side rules for this consumer.
    for pool in list_pools(require(CP_PROVIDER).get_consumer_auth_cp(),
                           self.identity.uuid, active_on=active_on):
        self.compatible_pools[pool['id']] = pool
        self.all_pools[pool['id']] = pool
    # Filter the list of all pools, removing those we know are compatible.
    # Sadly this currently requires a second query to the server.
    self.incompatible_pools = {}
    for pool in list_pools(require(CP_PROVIDER).get_consumer_auth_cp(),
                           self.identity.uuid, list_all=True, active_on=active_on):
        if not pool['id'] in self.compatible_pools:
            self.incompatible_pools[pool['id']] = pool
            self.all_pools[pool['id']] = pool
    self.subscribed_pool_ids = self._get_subscribed_pool_ids()
    # In the gui, cache all pool types so when we attach new ones
    # we can avoid more api calls
    require(POOLTYPE_CACHE).update_from_pools(self.all_pools)
    log.debug("found %s pools:" % len(self.all_pools))
    log.debug("   %s compatible" % len(self.compatible_pools))
    log.debug("   %s incompatible" % len(self.incompatible_pools))
    log.debug("   %s already subscribed" % len(self.subscribed_pool_ids))
def get_status(self, on_date=None):
    """
    Summarize system compliance status.

    :param on_date: optional date the cert sorter should evaluate against
    :return: dict with 'status' (overall status string), 'reasons'
        (name -> message map) and 'valid' (bool); an unregistered system
        yields 'Unknown'/empty/False
    """
    sorter = inj.require(inj.CERT_SORTER, on_date)
    if not self.identity.is_valid():
        # Without a valid identity there is nothing meaningful to report.
        return {'status': 'Unknown', 'reasons': {}, 'valid': False}
    return {
        'status': sorter.get_system_status(),
        'reasons': sorter.reasons.get_name_message_map(),
        'valid': sorter.is_valid(),
    }
def get_syspurpose_valid_fields(uep=None, identity=None):
    """
    Try to get valid syspurpose fields provided by candlepin server
    :param uep: connection of candlepin server
    :param identity: current identity of registered system
    :return: dictionary with valid fields (empty when the server response
        carries no 'systemPurposeAttributes' key)
    """
    cache = inj.require(inj.SYSPURPOSE_VALID_FIELDS_CACHE)
    syspurpose_valid_fields = cache.read_data(uep, identity)
    # Only the attributes section of the cached payload is of interest.
    return syspurpose_valid_fields.get('systemPurposeAttributes', {})
def timeout(self):
    """
    Compute timeout of cache.
    Computation of timeout is based on SRT (smoothed response time) of
    connection to candlepin server. This algorithm is inspired by the
    retransmission timeout used by TCP connections (see: RFC 793).
    """
    connection = inj.require(inj.CP_PROVIDER).get_consumer_auth_cp().conn
    srt = connection.smoothed_rt if connection.smoothed_rt is not None else 0.0
    # Clamp BETA * SRT into the [LBOUND, UBOUND] interval.
    return min(self.UBOUND, max(self.LBOUND, self.BETA * srt))
def __init__(self, backend, facts=None, parent=None, callbacks=None):
    """
    Callbacks will be executed when registration status changes.

    :param backend: backend object the registration screens operate on
    :param facts: system facts passed along to registration
    :param parent: window this dialog is made transient for
    :param callbacks: list of callables run on status changes
    """
    widgets.GladeWidget.__init__(self, "registration.glade")
    self.backend = backend
    self.identity = require(IDENTITY)
    self.facts = facts
    self.parent = parent
    self.callbacks = callbacks or []
    # NOTE(review): 'async' is a reserved keyword in Python 3.7+; this
    # attribute must be renamed before this module can run under py3.
    self.async = AsyncBackend(self.backend)
    dic = {"on_register_cancel_button_clicked": self.cancel,
           "on_register_button_clicked": self._on_register_button_clicked,
           "hide": self.cancel,
           "on_register_dialog_delete_event": self._delete_event,
           }
    self.glade.signal_autoconnect(dic)
    self.window = self.register_dialog
    self.register_dialog.set_transient_for(self.parent)
    # Screens are instantiated in wizard order; only screens that need a
    # GUI get a notebook page (and thus an index).
    screen_classes = [ChooseServerScreen, ActivationKeyScreen,
                      CredentialsScreen, OrganizationScreen,
                      EnvironmentScreen, PerformRegisterScreen,
                      SelectSLAScreen, ConfirmSubscriptionsScreen,
                      PerformSubscribeScreen, RefreshSubscriptionsScreen]
    self._screens = []
    for screen_class in screen_classes:
        screen = screen_class(self, self.backend)
        self._screens.append(screen)
        if screen.needs_gui:
            screen.index = self.register_notebook.append_page(
                screen.container)
    self._current_screen = CHOOSE_SERVER_PAGE
    # values that will be set by the screens
    self.username = None
    self.consumername = None
    self.activation_keys = None
    self.owner_key = None
    self.environment = None
    self.current_sla = None
    self.dry_run_result = None
    self.skip_auto_bind = False
    # XXX needed by firstboot
    self.password = None
def __init__(self, backend, tab_icon, parent, ent_dir, prod_dir):
    """
    Build the Installed Products tab: product tree columns, sorting and
    button signal hookups.

    :param backend: shared backend object
    :param tab_icon: icon widget for this tab
    :param parent: main window providing the button click handlers
    :param ent_dir: entitlement certificate directory
    :param prod_dir: product certificate directory
        (NOTE(review): not referenced in this constructor body)
    """
    # The row striping in this TreeView is handled automatically
    # because we have the rules_hint set to True in the Glade file.
    super(InstalledProductsTab, self).__init__()
    self.tab_icon = tab_icon
    self.identity = inj.require(inj.IDENTITY)
    self.entitlement_dir = ent_dir
    self.backend = backend
    # Product column: an icon followed by the product name.
    text_renderer = ga_Gtk.CellRendererText()
    image_renderer = ga_Gtk.CellRendererPixbuf()
    column = ga_Gtk.TreeViewColumn(_('Product'))
    column.set_expand(True)
    column.pack_start(image_renderer, False)
    column.pack_start(text_renderer, False)
    column.add_attribute(image_renderer, 'pixbuf', self.store['image'])
    column.add_attribute(text_renderer, 'text', self.store['product'])
    self.top_view.append_column(column)
    # Track (column, kind, store-key) triples so set_sorts can hook up
    # sorting for every column uniformly.
    cols = []
    cols.append((column, 'text', 'product'))
    column = self.add_text_column(_('Version'), 'version')
    cols.append((column, 'text', 'version'))
    column = self.add_text_column(_('Status'), 'status')
    cols.append((column, 'text', 'status'))
    column = self.add_date_column(_('Start Date'), 'start_date')
    cols.append((column, 'date', 'start_date'))
    column = self.add_date_column(_('End Date'), 'expiration_date')
    cols.append((column, 'date', 'expiration_date'))
    self.set_sorts(self.store, cols)
    # Golden-ticket (simple content access) owners never need to update
    # certificates manually, so hide the button for them.
    if is_owner_using_golden_ticket():
        self.update_certificates_button.set_property("visible", False)
    self.connect_signals({
        "on_update_certificates_button_clicked":
        parent._update_certificates_button_clicked,
        "on_register_button_clicked": parent._register_item_clicked,
    })
    self._entries = []
def get_supported_resources(uep=None, identity=None):
    """
    Return the list of resources supported by the candlepin server,
    preferring the local cache file over a server round trip. Prefer this
    function to calling connection.get_supported_resources directly.

    :param uep: connection of candlepin server
    :param identity: current identity of registered system
    :return: list of supported resources (empty for unregistered systems)
    """
    if identity is None:
        identity = inj.require(inj.IDENTITY)
    # When identity is not known, then system is not registered
    if identity.uuid is None:
        return []
    supported_resources = []
    cache = inj.require(inj.SUPPORTED_RESOURCES_CACHE)
    # Try to read supported resources from cache file first.
    cached = cache.read_cache_only()
    if cached is not None and identity.uuid in cached:
        supported_resources = cached[identity.uuid]
    # When valid data are not in cache, fall back to the candlepin server
    # and refresh the cache with whatever it reports.
    if len(supported_resources) == 0:
        if uep is None:
            uep = inj.require(inj.CP_PROVIDER).get_consumer_auth_cp()
        supported_resources = uep.get_supported_resources()
        cache.supported_resources = {identity.uuid: supported_resources}
        cache.write_cache(debug=False)
    return supported_resources
def check_status(force_signal):
    """
    When pre_check_status does not return anything, then status is
    retrieved from candlepin server using the cert sorter.

    :param force_signal: When force_signal is not None, then this signal
        will be returned
    :return: Some signal
    """
    forced = pre_check_status(force_signal)
    if forced is not None:
        return forced
    # No forced/pre-computed result: ask the cert sorter for the icon status.
    return require(CERT_SORTER).get_status_for_icon()
def test_mem_cache_pre_cached(self):
    """
    When YumReleaseverSource already holds an in-memory expansion,
    get_expansion() must return that cached value (and leave it intact)
    rather than the value the release status cache would provide.
    """
    inj.provide(inj.RELEASE_STATUS_CACHE, Mock())
    release_mock = inj.require(inj.RELEASE_STATUS_CACHE)
    release = "MockServer"
    mock_release = {'releaseVer': release}
    release_mock.read_status = Mock(return_value=mock_release)
    release_source = YumReleaseverSource()
    # Pre-seed the memoized expansion; the mocked cache should be bypassed.
    cached_release = "CachedMockServer"
    release_source._expansion = cached_release
    exp = release_source.get_expansion()
    self.assertEqual(exp, cached_release)
    # The memoized value must be unchanged by the call.
    self.assertEqual(release_source._expansion, cached_release)
def test_sync_no_syspurpose_file(self, mock_read_sp, mock_cache, mock_merge, mock_write):
    """
    When the local syspurpose file cannot be read (read_syspurpose raises
    OSError), sync() must skip both the cache read and the three-way merge
    and fall back to the server-side values, while still writing the result
    to the cache, to disk and back to the server.
    :return:
    """
    self._inject_mock_valid_consumer()
    # We want the cache instance not the class from which it is created
    mock_cache = mock_cache.return_value
    mock_cache.read_cache_only.return_value = self.base
    self.stub_cp_provider.consumer_auth_cp._capabilities.append('syspurpose')
    # We shouldn't expect that there are other values than those that are for syspurpose
    # although the real return value would include many more attributes
    self.stub_cp_provider.consumer_auth_cp.registered_consumer_info = self.remote_sp
    # Simulate an unreadable local syspurpose file.
    mock_read_sp.side_effect = OSError
    # With no readable local file, the remote values win outright.
    expected = {
        "role": self.remote_sp["role"],
        "addons": self.remote_sp["addOns"],
        "service_level_agreement": self.remote_sp["serviceLevel"],
        "usage": self.remote_sp["usage"],
    }
    mock_merge.return_value = expected
    with mock.patch.object(self.stub_cp_provider.consumer_auth_cp, 'updateConsumer') as update:
        result = self.command.sync()
        # Neither the cached base nor the merge is consulted in this path.
        mock_cache.read_cache_only.assert_not_called()
        mock_cache.write_cache.assert_called_once()
        mock_merge.assert_not_called()
        # The return value of sync should be the server-side values
        self.assert_equal_dict(result, expected)
        # The value of the syspurpose attribute is written to the cache on write_cache.
        # So if these two are the same then the cache will have been updated with the new result.
        self.assert_equal_dict(mock_cache.syspurpose, expected)
        mock_write.assert_called_once_with(expected)
        ident = inj.require(inj.IDENTITY)
        update.assert_called_once_with(ident.uuid, role=result[ROLE],
                                       addons=result[ADDONS],
                                       service_level=result[SERVICE_LEVEL],
                                       usage=result[USAGE])
def clean_all_data(backup=True):
    """
    Remove all local consumer data: identity certs, entitlement certs and
    the on-disk/in-memory caches.

    :param backup: when True, copy the consumer cert directory aside to
        '<consumerCertDir>.old' (replacing any previous backup) before
        deleting anything
    """
    consumer_dir = cfg.get('rhsm', 'consumerCertDir')
    if backup:
        # Normalize a trailing slash so the backup name is always '<dir>.old'.
        if consumer_dir[-1] == "/":
            consumer_dir_backup = consumer_dir[0:-1] + ".old"
        else:
            consumer_dir_backup = consumer_dir + ".old"
        # Delete backup dir if it exists:
        shutil.rmtree(consumer_dir_backup, ignore_errors=True)
        # Copy current consumer dir:
        log.debug("Backing up %s to %s.", consumer_dir, consumer_dir_backup)
        shutil.copytree(consumer_dir, consumer_dir_backup)
    # FIXME FIXME
    # Delete current consumer certs:
    for path in [ConsumerIdentity.keypath(), ConsumerIdentity.certpath()]:
        if (os.path.exists(path)):
            log.debug("Removing identity cert: %s" % path)
            os.remove(path)
    # Drop the now-stale in-memory identity.
    require(IDENTITY).reload()
    # Delete all entitlement certs rather than the directory itself:
    ent_cert_dir = cfg.get('rhsm', 'entitlementCertDir')
    if os.path.exists(ent_cert_dir):
        for f in glob.glob("%s/*.pem" % ent_cert_dir):
            certpath = os.path.join(ent_cert_dir, f)
            log.debug("Removing entitlement cert: %s" % f)
            os.remove(certpath)
    else:
        log.warn("Entitlement cert directory does not exist: %s" % ent_cert_dir)
    cache.ProfileManager.delete_cache()
    cache.InstalledProductsManager.delete_cache()
    Facts.delete_cache()
    # Must also delete in-memory cache
    require(ENTITLEMENT_STATUS_CACHE).delete_cache()
    require(PROD_STATUS_CACHE).delete_cache()
    require(OVERRIDE_STATUS_CACHE).delete_cache()
    RepoActionInvoker.delete_repo_file()
    log.info("Cleaned local data")
def initialize(self):
    """
    Spoke initialization: build the registration widget, embed it into
    this spoke's box and wire its signals to the navigation buttons.
    """
    NormalSpoke.initialize(self)
    self._done = False
    init_dep_injection()
    facts = inj.require(inj.FACTS)
    backend = managergui.Backend()
    self.info = registergui.RegisterInfo()
    # Track register-status changes so the spoke status line stays current.
    self.info.connect('notify::register-status',
                      self._on_register_status_change)
    self._status = self.info.get_property('register-status')
    self.register_widget = registergui.RegisterWidget(
        backend, facts, reg_info=self.info, parent_window=self.main_window)
    self.register_box = self.builder.get_object("register_box")
    self.button_box = self.builder.get_object('navigation_button_box')
    self.proceed_button = self.builder.get_object('proceed_button')
    self.back_button = self.builder.get_object('back_button')
    self.register_box.pack_start(self.register_widget.register_widget,
                                 True, True, 0)
    # Hook up the nav buttons in the gui
    # TODO: add a 'start over'?
    self.proceed_button.connect('clicked', self._on_register_button_clicked)
    self.back_button.connect('clicked', self._on_back_button_clicked)
    # initial-setup will likely
    self.register_widget.connect('finished', self._on_finished)
    self.register_widget.connect('register-finished',
                                 self._on_register_finished)
    self.register_widget.connect('register-error', self._on_register_error)
    self.register_widget.connect('register-message',
                                 self._on_register_message)
    # update the 'next/register button on page change'
    self.register_widget.connect('notify::register-button-label',
                                 self._on_register_button_label_change)
    self.register_widget.connect('notify::screen-ready',
                                 self._on_register_screen_ready_change)
    self.register_box.show_all()
    self.register_widget.initialize()
    # Nothing to go back to on the first screen.
    self.back_button.set_sensitive(False)
def __init__(self):
    """
    Initialize empty pool bookkeeping for the current consumer identity;
    the pool dicts are populated later (e.g. by refresh()).
    """
    self.identity = require(IDENTITY)
    # Set later when a compliance sorter is chosen.
    self.sorter = None
    # Pools which passed rules server side for this consumer:
    self.compatible_pools = {}
    # Pools which failed a rule check server side:
    self.incompatible_pools = {}
    # Pools for which we already have an entitlement:
    self.subscribed_pool_ids = []
    # All pools:
    self.all_pools = {}