def management_list_channels(self, dict):
    """Return the labels of all config channels in the caller's org.

    :param dict: request dict carrying the session token (name shadows the
        builtin, but is kept for interface compatibility with callers)
    :return: list of channel label strings; empty list when none exist
    """
    log_debug(1)
    # Validates the session token carried in the request dict.
    self._get_and_validate_session(dict)
    # List comprehension instead of map(): on Python 3 map() returns a
    # one-shot iterator, while callers expect a real list. This also
    # matches the sibling variant of this method elsewhere in the file.
    return [x['label'] for x in
            rhnSQL.fetchall_dict(self._query_list_config_channels,
                                 org_id=self.org_id) or []]
def listChannelForOrg(self, orgId, username, password):
    """Return the labels of all channels owned by the given org.

    :param orgId: org to query; falsy values default to org 1 (Red Hat only)
    :param username: credential checked by self._auth
    :param password: credential checked by self._auth
    :return: list of channel label strings; empty list when none exist
    """
    log_debug(3)
    self._auth(username, password)
    # Red Hat only:
    if not orgId:
        orgId = 1
    ret = rhnSQL.fetchall_dict("""select label from rhnChannel where org_id = :orgId""", orgId=int(orgId)) or []
    # List comprehension instead of map() so a real list (not a Py3
    # one-shot iterator) is returned to XML-RPC callers.
    return [x['label'] for x in ret]
def management_list_file_revisions(self, dict):
    """List every revision number recorded for a file in a config channel.

    :param dict: request dict with the session token plus 'config_channel'
        and 'path' entries
    :return: list of revision numbers (non-empty)
    :raises rhnFault: 4011 when the file has no revisions in that channel
    """
    log_debug(1)
    self._get_and_validate_session(dict)
    config_channel = dict.get('config_channel')
    # XXX Validate the namespace
    path = dict.get('path')
    rows = rhnSQL.fetchall_dict(self._query_list_file_revisions,
                                org_id=self.org_id,
                                config_channel=config_channel,
                                path=path) or []
    revisions = [row['revision'] for row in rows]
    if not revisions:
        raise rhnFault(4011, "File %s does not exist in channel %s" %
                       (path, config_channel), explain=0)
    return revisions
def management_list_file_revisions(self, dict):
    """List every revision number recorded for a file in a config channel.

    :param dict: request dict with the session token plus 'config_channel'
        and 'path' entries
    :return: list of revision numbers (non-empty)
    :raises rhnFault: 4011 when the file has no revisions in that channel
    """
    log_debug(1)
    self._get_and_validate_session(dict)
    config_channel = dict.get('config_channel')
    # XXX Validate the namespace
    path = dict.get('path')
    # BUG FIX: the previous map() version left retval as a map object on
    # Python 3; map objects are always truthy, so `if not retval` could
    # never fire and the 4011 fault was unreachable. A list comprehension
    # restores the emptiness check and returns a real list.
    retval = [x['revision'] for x in
              rhnSQL.fetchall_dict(self._query_list_file_revisions,
                                   org_id=self.org_id,
                                   config_channel=config_channel,
                                   path=path) or []]
    if not retval:
        raise rhnFault(4011, "File %s does not exist in channel %s" %
                       (path, config_channel), explain=0)
    return retval
def management_list_file_revisions(self, dict):
    """List every revision number recorded for a file in a config channel.

    :param dict: request dict with the session token plus 'config_channel'
        and 'path' entries
    :return: list of revision numbers (non-empty)
    :raises rhnFault: 4011 when the file has no revisions in that channel
    """
    log_debug(1)
    self._get_and_validate_session(dict)
    config_channel = dict.get("config_channel")
    # XXX Validate the namespace
    path = dict.get("path")
    # BUG FIX: map() yields an always-truthy iterator on Python 3, which
    # made `if not retval` unreachable; build a real list instead so the
    # missing-file fault is raised again.
    retval = [
        x["revision"]
        for x in rhnSQL.fetchall_dict(
            self._query_list_file_revisions,
            org_id=self.org_id,
            config_channel=config_channel,
            path=path
        ) or []
    ]
    if not retval:
        raise rhnFault(4011, "File %s does not exist in channel %s" % (path, config_channel), explain=0)
    return retval
def sync(self, update_repodata=True):
    """Trigger a reposync.

    Iterates every configured repo URL, imports packages, groups, errata
    and (optionally) kickstart trees, then commits and fixes filesystem
    permissions under CFG.MOUNT_POINT/rhn.

    :param update_repodata: when true, clear each plugin's metadata cache
        before syncing so repodata is re-fetched
    :return: tuple (elapsed_time, sync_error) where sync_error is 0 on
        full success, -1 on a global failure (no URLs configured or an
        exception during a repo), or the count of failed packages when
        only individual packages failed
    """
    failed_packages = 0
    sync_error = 0
    # No URLs at all counts as a global failure.
    if not self.urls:
        sync_error = -1
    start_time = datetime.now()
    for (repo_id, url, repo_label) in self.urls:
        log(0, "Repo URL: %s" % url)
        plugin = None
        # If the repository uses a uln:// URL, switch to the ULN plugin, overriding the command-line
        if url.startswith("uln://"):
            self.repo_plugin = self.load_plugin("uln")
        # pylint: disable=W0703
        try:
            if repo_label:
                repo_name = repo_label
            else:
                # use modified relative_url as name of repo plugin, because
                # it used as name of cache directory as well
                relative_url = '_'.join(url.split('://')[1].split('/')[1:])
                repo_name = relative_url.replace("?", "_").replace(
                    "&", "_").replace("=", "_")
            plugin = self.repo_plugin(url, repo_name,
                                      org=str(self.org_id or ''),
                                      channel_label=self.channel_label)
            if update_repodata:
                plugin.clear_cache()
            # repo_id is set only for repositories obtained from the DB;
            # those may carry SSL client/CA certificates.
            if repo_id is not None:
                keys = rhnSQL.fetchall_dict("""
                    select k1.key as ca_cert, k2.key as client_cert, k3.key as client_key
                    from rhncontentsource cs inner join
                         rhncontentsourcessl csssl on cs.id = csssl.content_source_id inner join
                         rhncryptokey k1 on csssl.ssl_ca_cert_id = k1.id left outer join
                         rhncryptokey k2 on csssl.ssl_client_cert_id = k2.id left outer join
                         rhncryptokey k3 on csssl.ssl_client_key_id = k3.id
                    where cs.id = :repo_id
                    """, repo_id=int(repo_id))
                if keys:
                    # Pick one consistent set of certs (optionally
                    # rejecting expired ones).
                    ssl_set = get_single_ssl_set(
                        keys, check_dates=self.check_ssl_dates)
                    if ssl_set:
                        plugin.set_ssl_options(ssl_set['ca_cert'],
                                               ssl_set['client_cert'],
                                               ssl_set['client_key'])
                    else:
                        raise ValueError(
                            "No valid SSL certificates were found for repository."
                        )
            if not self.no_packages:
                ret = self.import_packages(plugin, repo_id, url)
                failed_packages += ret
                self.import_groups(plugin, url)
            if not self.no_errata:
                self.import_updates(plugin, url)
            # only for repos obtained from the DB
            if self.sync_kickstart and repo_label:
                try:
                    self.import_kickstart(plugin, repo_label)
                except:
                    # Bare except is deliberate: roll the transaction back
                    # on ANY failure (incl. KeyboardInterrupt), then
                    # re-raise for the outer handler.
                    rhnSQL.rollback()
                    raise
        except Exception:
            # Log the failure for this repo and keep going with the rest.
            e = sys.exc_info()[1]
            log2(0, 0, "ERROR: %s" % e, stream=sys.stderr)
            log2disk(0, "ERROR: %s" % e)
            # pylint: disable=W0104
            sync_error = -1
        if plugin is not None:
            plugin.clear_ssl_cache()
    # Update cache with package checksums
    rhnCache.set(checksum_cache_filename, self.checksum_cache)
    if self.regen:
        # Queue repodata regeneration and errata-cache refresh for this
        # channel in taskomatic.
        taskomatic.add_to_repodata_queue_for_channel_package_subscription(
            [self.channel_label], [], "server.app.yumreposync")
        taskomatic.add_to_erratacache_queue(self.channel_label)
    self.update_date()
    rhnSQL.commit()
    # update permissions
    fileutils.createPath(os.path.join(
        CFG.MOUNT_POINT, 'rhn'))  # if the directory exists update ownership only
    for root, dirs, files in os.walk(os.path.join(CFG.MOUNT_POINT, 'rhn')):
        for d in dirs:
            fileutils.setPermsPath(os.path.join(root, d), group='apache')
        for f in files:
            fileutils.setPermsPath(os.path.join(root, f), group='apache')
    elapsed_time = datetime.now() - start_time
    log(
        0, "Sync of channel completed in %s." % str(elapsed_time).split('.')[0])
    # if there is no global problems, but some packages weren't synced
    if sync_error == 0 and failed_packages > 0:
        sync_error = failed_packages
    return elapsed_time, sync_error
def management_list_channels(self, dict):
    """Return the labels of the config channels in the caller's org.

    :param dict: request dict carrying the session token
    :return: list of channel label strings; empty list when none exist
    """
    log_debug(1)
    self._get_and_validate_session(dict)
    rows = rhnSQL.fetchall_dict(self._query_list_config_channels,
                                org_id=self.org_id)
    if not rows:
        return []
    return [row['label'] for row in rows]
def fetch_token(token_string):
    """Fetch one or more activation tokens from the database.

    :param token_string: comma-separated list of token strings; must be
        non-empty
    :return: an ActivationTokens instance, or a ReRegistrationToken /
        ReRegistrationActivationToken when a re-activation token (one with
        a server_id) is among them
    :raises rhnFault: 60 for an unknown token; 63 for mismatching orgs,
        mismatching kickstart sessions, or stacked re-activation tokens
    """
    log_debug(3, token_string)
    # A token should always be passed to this function
    assert token_string
    tokens = token_string.split(',')
    h = rhnSQL.prepare(_query_token)
    result = []
    rereg_token_found = 0
    num_of_rereg = 0
    # Global user_id and org_id
    user_id = None
    same_user_id = 1
    org_id = None
    ks_session_id_token = None
    deploy_configs = None
    entitlements_base = {}
    entitlements_extra = {}
    # List of re-registration entitlements labels (if found):
    rereg_ents = []
    # 0 = default, 1 = ssh, 2 = ssh-tunnel
    server_contact_method = 0
    cm_ranking = {}
    ranks = rhnSQL.fetchall_dict(_query_contact_method_ranking)
    for r in ranks:
        cm_ranking[r['id']] = r['rank']
    for token in tokens:
        h.execute(token=token)
        token_entry, token_entitlements = _fetch_token_from_cursor(h)
        if not token_entry:
            # Unable to find the token
            log_error("Invalid token '%s'" % token)
            raise rhnFault(60, _("Could not find token '%s'") % token,
                           explain=0)
        row = token_entry
        # A server_id on the token marks it as a re-activation token.
        if row.get('server_id'):
            rereg_token_found = row
            num_of_rereg += 1
            # Store the re-reg ents:
            for tup in list(token_entitlements.keys()):
                rereg_ents.append(tup[0])
        # Check user_id
        token_user_id = row.get('user_id')
        # 4/27/05 wregglej - Commented this line out 'cause the token_user_id should
        # be allowed to be None. This line was causing problems when registering with
        # an activation key whose creator had been deleted.
        #assert(token_user_id is not None)
        if same_user_id and user_id is not None and user_id != token_user_id:
            log_debug(4, "Different user ids: %s, %s" %
                      (same_user_id, user_id))
            # This token has a different user id than the rest
            same_user_id = 0
        else:
            user_id = token_user_id
        # Check org_id
        token_org_id = row.get('org_id')
        assert (token_org_id is not None)
        if org_id is not None and org_id != token_org_id:
            # Cannot use activation keys from different orgs
            raise rhnFault(63, _("Tokens from mismatching orgs"), explain=0)
        org_id = token_org_id
        # Check kickstart session ids
        token_ks_session_id = row.get('kickstart_session_id')
        if token_ks_session_id is not None:
            if ks_session_id_token is not None:
                ks_session_id = ks_session_id_token['kickstart_session_id']
                if ks_session_id != token_ks_session_id:
                    # Two tokens with different kickstart sessions
                    raise rhnFault(63, _("Kickstart session mismatch"),
                                   explain=0)
            else:
                # This token has kickstart session id info
                ks_session_id_token = row
        # Iterate through the entitlements from this token
        # and intead of picking one entitlement, create a union of
        # all the entitlemts as a list of tuples of (name, label) aka
        # (token_type, token_desc)
        _categorize_token_entitlements(token_entitlements,
                                       entitlements_base,
                                       entitlements_extra)
        # Deploy configs?
        deploy_configs = deploy_configs or (row['deploy_configs'] == 'Y')
        # which contact method?
        # Keep the highest-ranked contact method seen across all tokens.
        if cm_ranking[
                row['contact_method_id']] > cm_ranking[server_contact_method]:
            server_contact_method = row['contact_method_id']
        result.append(row)
    # One should not stack re-activation tokens
    if num_of_rereg > 1:
        raise rhnFault(
            63, _("Stacking of re-registration tokens is not supported"),
            explain=0)
    entitlements_remove = []
    _validate_entitlements(token_string, rereg_ents, entitlements_base,
                           entitlements_extra, entitlements_remove)
    log_debug(5, "entitlements_base = %s" % entitlements_base)
    log_debug(5, "entitlements_extra = %s" % entitlements_extra)
    if ks_session_id_token:
        ks_session_id = ks_session_id_token['kickstart_session_id']
    else:
        ks_session_id = None
    # akl add entitles array constructed above to kwargs
    kwargs = {
        'user_id': user_id,
        'org_id': org_id,
        'kickstart_session_id': ks_session_id,
        'entitlements': list(entitlements_base.keys()) + list(entitlements_extra.keys()),
        'deploy_configs': deploy_configs,
        'contact_method_id': server_contact_method,
    }
    log_debug(4, "Values", kwargs)
    if rereg_token_found and len(result) > 1:
        log_debug(4, "re-activation stacked with activationkeys")
        kwargs['remove_entitlements'] = entitlements_remove
        return ReRegistrationActivationToken(result, **kwargs)
    elif rereg_token_found:
        log_debug(4, "simple re-activation")
        return ReRegistrationToken([rereg_token_found], **kwargs)
    return ActivationTokens(result, **kwargs)
def listChannel(self, username, password):
    """Return the labels of every channel in the database.

    :param username: credential checked by self._auth
    :param password: credential checked by self._auth
    :return: list of channel label strings; empty list when none exist
    """
    log_debug(3)
    self._auth(username, password)
    ret = rhnSQL.fetchall_dict("select label from rhnChannel") or []
    # List comprehension instead of map() so a real list (not a Py3
    # one-shot iterator) is returned to XML-RPC callers.
    return [x['label'] for x in ret]
def management_list_channels(self, dict):
    """Return the labels of all config channels in the caller's org.

    :param dict: request dict carrying the session token
    :return: list of channel label strings; empty list when none exist
    """
    log_debug(1)
    # BUG FIX: the session-validation call was missing its argument
    # (self._get_and_validate_session() with no dict), unlike every
    # sibling variant of this method — the request's session token was
    # never passed in for validation. Pass the request dict as elsewhere.
    self._get_and_validate_session(dict)
    # List comprehension instead of map() so a real list (not a Py3
    # one-shot iterator) is returned.
    return [x['label'] for x in
            rhnSQL.fetchall_dict(self._query_list_config_channels,
                                 org_id=self.org_id) or []]
def fetch_token(token_string):
    """Fetch one or more activation tokens from the database.

    :param token_string: comma-separated list of token strings; must be
        non-empty
    :return: an ActivationTokens instance, or a ReRegistrationToken /
        ReRegistrationActivationToken when a re-activation token (one with
        a server_id) is among them
    :raises rhnFault: 60 for an unknown token; 63 for mismatching orgs,
        mismatching kickstart sessions, or stacked re-activation tokens
    """
    log_debug(3, token_string)
    # A token should always be passed to this function
    assert token_string
    tokens = token_string.split(',')
    h = rhnSQL.prepare(_query_token)
    result = []
    rereg_token_found = 0
    num_of_rereg = 0
    # Global user_id and org_id
    user_id = None
    same_user_id = 1
    org_id = None
    ks_session_id_token = None
    deploy_configs = None
    entitlements_base = {}
    entitlements_extra = {}
    # List of re-registration entitlements labels (if found):
    rereg_ents = []
    # 0 = default, 1 = ssh, 2 = ssh-tunnel
    server_contact_method = 0;
    cm_ranking = {}
    ranks = rhnSQL.fetchall_dict(_query_contact_method_ranking)
    for r in ranks:
        cm_ranking[r['id']] = r['rank']
    for token in tokens:
        h.execute(token=token)
        token_entry, token_entitlements = _fetch_token_from_cursor(h)
        if not token_entry:
            # Unable to find the token
            log_error("Invalid token '%s'" % token)
            raise rhnFault(60, _("Could not find token '%s'") % token,
                           explain=0)
        row = token_entry
        # A server_id on the token marks it as a re-activation token.
        if row.get('server_id'):
            rereg_token_found = row
            num_of_rereg += 1
            # Store the re-reg ents:
            for tup in token_entitlements.keys():
                rereg_ents.append(tup[0])
        # Check user_id
        token_user_id = row.get('user_id')
        # 4/27/05 wregglej - Commented this line out 'cause the token_user_id should
        # be allowed to be None. This line was causing problems when registering with
        # an activation key whose creator had been deleted.
        #assert(token_user_id is not None)
        if same_user_id and user_id is not None and user_id != token_user_id:
            log_debug(4, "Different user ids: %s, %s" %
                      (same_user_id, user_id))
            # This token has a different user id than the rest
            same_user_id = 0
        else:
            user_id = token_user_id
        # Check org_id
        token_org_id = row.get('org_id')
        assert(token_org_id is not None)
        if org_id is not None and org_id != token_org_id:
            # Cannot use activation keys from different orgs
            raise rhnFault(63, _("Tokens from mismatching orgs"), explain=0)
        org_id = token_org_id
        # Check kickstart session ids
        token_ks_session_id = row.get('kickstart_session_id')
        if token_ks_session_id is not None:
            if ks_session_id_token is not None:
                ks_session_id = ks_session_id_token['kickstart_session_id']
                if ks_session_id != token_ks_session_id:
                    # Two tokens with different kickstart sessions
                    raise rhnFault(63, _("Kickstart session mismatch"),
                                   explain=0)
            else:
                # This token has kickstart session id info
                ks_session_id_token = row
        # Iterate through the entitlements from this token
        # and intead of picking one entitlement, create a union of
        # all the entitlemts as a list of tuples of (name, label) aka
        # (token_type, token_desc)
        _categorize_token_entitlements(token_entitlements,
                                       entitlements_base,
                                       entitlements_extra)
        # Deploy configs?
        deploy_configs = deploy_configs or (row['deploy_configs'] == 'Y')
        # which contact method?
        # Keep the highest-ranked contact method seen across all tokens.
        if cm_ranking[row['contact_method_id']] > cm_ranking[server_contact_method]:
            server_contact_method = row['contact_method_id']
        result.append(row)
    # One should not stack re-activation tokens
    if num_of_rereg > 1:
        raise rhnFault(63,
                       _("Stacking of re-registration tokens is not supported"),
                       explain=0)
    entitlements_remove = []
    _validate_entitlements(token_string, rereg_ents, entitlements_base,
                           entitlements_extra, entitlements_remove)
    log_debug(5, "entitlements_base = %s" % entitlements_base)
    log_debug(5, "entitlements_extra = %s" % entitlements_extra)
    if ks_session_id_token:
        ks_session_id = ks_session_id_token['kickstart_session_id']
    else:
        ks_session_id = None
    # akl add entitles array constructed above to kwargs
    kwargs = {
        'user_id': user_id,
        'org_id': org_id,
        'kickstart_session_id': ks_session_id,
        'entitlements': list(entitlements_base.keys()) + list(entitlements_extra.keys()),
        'deploy_configs': deploy_configs,
        'contact_method_id': server_contact_method,
    }
    log_debug(4, "Values", kwargs)
    if rereg_token_found and len(result) > 1:
        log_debug(4, "re-activation stacked with activationkeys")
        kwargs['remove_entitlements'] = entitlements_remove
        return ReRegistrationActivationToken(result, **kwargs)
    elif rereg_token_found:
        log_debug(4, "simple re-activation")
        return ReRegistrationToken([rereg_token_found], **kwargs)
    return ActivationTokens(result, **kwargs)