def enableSatelliteRepo(rhn_cert):
    """Enable the Red Hat Satellite repository via subscription-manager.

    Determines the RHEL version and arch from the installed redhat-release
    package, reads the Satellite version from the given certificate, builds
    the repo id and enables it.

    :param rhn_cert: Satellite certificate content loadable by
                     satellite_cert.SatelliteCert.
    :raises EnableSatelliteRepositoryException: when subscription-manager
        fails to enable the repository.
    """
    # Query the package owning /etc/redhat-release for "<version> <arch>".
    args = ['rpm', '-q', '--qf', '\'%{version} %{arch}\'', '-f', '/etc/redhat-release']
    ret, out, err = fileutils.rhn_popen(args)
    data = out.read().strip("'")
    version, arch = data.split()
    # Read from stdout, strip quotes if any and extract first number
    version = re.search(r'\d+', version).group()
    if version not in SUPPORTED_RHEL_VERSIONS:
        log(0, "WARNING: No Satellite repository available for RHEL version: %s." % version)
        return
    # Repo ids use "system-z" instead of the rpm arch name on s390x.
    arch_str = "server"
    if arch == "s390x":
        arch_str = "system-z"
    sat_cert = satellite_cert.SatelliteCert()
    sat_cert.load(rhn_cert)
    # Attribute name contains a dash, hence getattr.
    sat_version = getattr(sat_cert, 'satellite-version')
    repo = "rhel-%s-%s-satellite-%s-rpms" % (version, arch_str, sat_version)
    args = ['/usr/bin/subscription-manager', 'repos', '--enable', repo]
    ret, out, err = fileutils.rhn_popen(args)
    if ret:
        msg_ = "Enabling of Satellite repository failed."
        msg = ("%s\nReturn value: %s\nStandard-out: %s\n\n"
               "Standard-error: %s\n" % (msg_, ret, out.read(), err.read()))
        writeError(msg)
        raise EnableSatelliteRepositoryException("Enabling of Satellite repository failed. Make sure Satellite "
                                                 "subscription is attached to this system, both versions of RHEL and "
                                                 "Satellite are supported or run activation with --disconnected "
                                                 "option.")
def assign_repositories_to_channel(self, channel_label, delete_repos=None, add_repos=None):
    """Re-link content sources for a channel, optionally adding/removing repos.

    :param channel_label: label of the channel to modify
    :param delete_repos: iterable of repo relative urls to detach
    :param add_repos: iterable of repo relative urls to attach
    :return: number of repositories actually added or removed
    """
    backend = SQLBackend()
    # Drop custom links first; non-custom links are rebuilt by the importer.
    self.unlink_all_repos(channel_label, custom_only=True)
    repos = self.list_associated_repos(channel_label)
    changed = 0
    if delete_repos:
        for to_delete in delete_repos:
            if to_delete in repos:
                repos.remove(to_delete)
                log(0, "Removing repository '%s' from channel." % to_delete)
                changed += 1
            else:
                log2(0, 0, "WARNING: Repository '%s' is not attached to channel." % to_delete, stream=sys.stderr)
    if add_repos:
        for to_add in add_repos:
            if to_add not in repos:
                repos.append(to_add)
                log(0, "Attaching repository '%s' to channel." % to_add)
                changed += 1
            else:
                log2(0, 0, "WARNING: Repository '%s' is already attached to channel." % to_add, stream=sys.stderr)
    # If there are any repositories intended to be attached to channel
    if repos:
        content_sources_batch = self.get_content_sources_import_batch(
            channel_label, backend, repos=sorted(repos))
        for content_source in content_sources_batch:
            content_source['channels'] = [channel_label]
        importer = ContentSourcesImport(content_sources_batch, backend)
        importer.run()
    else:
        # Make sure everything is unlinked
        self.unlink_all_repos(channel_label)
    return changed
def sync(self, channels=None):
    """Sync the given channels from CDN and return a list of error messages.

    :param channels: iterable of channel labels; defaults to previously
                     synced channels.
    :raises ChannelNotFoundError: when any requested channel is unknown or
        its repositories are not accessible.
    :return: list of per-channel error message strings (empty on success)
    """
    # If no channels specified, sync already synced channels
    if not channels:
        channels = self.synced_channels
    # Check channel availability before doing anything
    not_available = []
    for channel in channels:
        # A channel is unavailable if it lacks metadata/family mapping or
        # its CDN repositories can't be reached.
        if any(channel not in d for d in [self.channel_metadata, self.channel_to_family]) or (
                not self.cdn_repository_manager.check_channel_availability(channel, self.no_kickstarts)):
            not_available.append(channel)
    if not_available:
        raise ChannelNotFoundError(" " + "\n ".join(not_available))
    # Need to update channel metadata
    self._update_channels_metadata(channels)
    # Finally, sync channel content
    error_messages = []
    total_time = datetime.timedelta()
    for channel in channels:
        cur_time, ret_code = self._sync_channel(channel)
        if ret_code != 0:
            error_messages.append("Problems occurred during syncing channel %s. Please check "
                                  "/var/log/rhn/cdnsync/%s.log for the details\n" % (channel, channel))
        total_time += cur_time
    # Switch back to cdnsync log
    rhnLog.initLOG(self.log_path, self.log_level)
    log2disk(0, "Sync of channel completed.")
    log(0, "Total time: %s" % str(total_time).split('.')[0])
    return error_messages
def run(self):
    """Spawn download worker threads and block until the queue is drained.

    On Ctrl-C the download is marked failed and we still wait for the
    workers to terminate before re-raising any recorded worker exception.
    """
    pending = self.queue.qsize()
    if pending <= 0:
        return
    log(1, "Downloading %s files." % str(pending))

    workers = []
    for _ in range(self.threads):
        worker = DownloadThread(self)
        worker.setDaemon(True)
        worker.start()
        workers.append(worker)

    def _wait_for_workers():
        # Poll until every worker thread has terminated.
        while any(w.isAlive() for w in workers):
            time.sleep(1)

    try:
        _wait_for_workers()
    except KeyboardInterrupt:
        interrupt = sys.exc_info()[1]
        self.fail_download(interrupt)
        _wait_for_workers()

    # Propagate the first error any worker recorded.
    if self.exception:
        raise self.exception  # pylint: disable=E0702
def count_packages(self):
    """Count packages in every available channel's repositories and cache
    the count under CDN_REPODATA_ROOT/<channel>/packages_num.

    Shows a progress bar while repodata is downloaded; logs total elapsed
    time at the end.
    """
    start_time = int(time.time())

    backend = SQLBackend()
    base_channels = self._list_available_channels()

    # First pass: collect all repositories so the progress bar has a total.
    repo_list = []
    for base_channel in sorted(base_channels):
        for channel in sorted(base_channels[base_channel] + [base_channel]):
            repo_list.extend(self._get_content_sources(channel, backend))

    log(0, "Number of repositories: %d" % len(repo_list))
    already_downloaded = 0
    print_progress_bar(already_downloaded, len(repo_list), prefix='Downloading repodata:',
                       suffix='Complete', bar_length=50)

    for base_channel in sorted(base_channels):
        for channel in sorted(base_channels[base_channel] + [base_channel]):
            family_label = self.channel_to_family[channel]
            keys = self._get_family_keys(family_label)

            sources = self._get_content_sources(channel, backend)
            list_packages = []
            for source in sources:
                list_packages.extend(self._count_packages_in_repo(source['source_url'], keys))
                already_downloaded += 1
                print_progress_bar(already_downloaded, len(repo_list), prefix='Downloading repodata:',
                                   suffix='Complete', bar_length=50)

            cdn_repodata_path = constants.CDN_REPODATA_ROOT + '/' + channel

            # create directory for repo data if it doesn't exist
            try:
                os.makedirs(cdn_repodata_path)
            except OSError:
                exc = sys.exc_info()[1]
                if exc.errno == errno.EEXIST and os.path.isdir(cdn_repodata_path):
                    pass
                else:
                    raise

            # 'with' closes the file even if the write fails (the original
            # used a manual try/finally with a redundant None check).
            with open(cdn_repodata_path + '/' + "packages_num", 'w') as f_out:
                # Packages may repeat across repos -- count unique ones.
                f_out.write(str(len(set(list_packages))))

    elapsed_time = int(time.time())
    log(0, "Elapsed time: %d seconds" % (elapsed_time - start_time))
def _print_unmapped_channels(self):
    """Log previously synced channels no longer present in CDN metadata."""
    orphans = []
    for ch in self.synced_channels:
        # Only null-org (non-custom) channels missing from metadata count.
        if not self.synced_channels[ch] and ch not in self.channel_metadata:
            orphans.append(ch)
    if orphans:
        log(0, "Previously synced channels not available to update from CDN:")
        for channel in sorted(orphans):
            log(0, " p %s" % channel)
def get_crypto_keys(self, check_dates=False):
    """Fetch the CA cert, client cert and client key from rhnCryptoKey.

    :param check_dates: when True, validate certificate date ranges and
        return {} if any certificate is outside its validity period.
    :return: dict mapping 'ca_cert'/'client_cert'/'client_key' to
        (description, key) tuples, or {} when date validation fails.
    """
    ssl_query = rhnSQL.prepare("""
        select description, key from rhnCryptoKey where id = :id
    """)
    keys = {}
    # Same query for all three entries -- only the row id differs
    # (the original repeated this stanza three times).
    for label, key_id in (('ca_cert', self.ca_cert),
                          ('client_cert', self.client_cert),
                          ('client_key', self.client_key)):
        ssl_query.execute(id=key_id)
        row = ssl_query.fetchone_dict()
        keys[label] = (str(row['description']), str(row['key']))

    # Check if SSL certificates are usable
    if check_dates:
        failed = 0
        for key in (keys['ca_cert'], keys['client_cert']):
            if not verify_certificate_dates(key[1]):
                log(1, "WARNING: Problem with dates in certificate '%s'. "
                       "Please check validity of this certificate." % key[0])
                failed += 1
        if failed:
            return {}
    return keys
def sync(self, channels=None):
    """Sync the given channels from CDN.

    :param channels: iterable of channel labels; defaults to previously
                     synced channels.
    :raises ChannelNotFoundError: when any requested channel is missing
        from the channel/family/content-source mappings.
    """
    # If no channels specified, sync already synced channels
    if not channels:
        channels = self.synced_channels
    # Check channel availability before doing anything
    not_available = []
    for channel in channels:
        if any(channel not in d for d in [self.channel_metadata,
                                          self.channel_to_family,
                                          self.content_source_mapping]):
            not_available.append(channel)
    if not_available:
        raise ChannelNotFoundError(" " + "\n ".join(not_available))
    # Need to update channel metadata
    self._update_channels_metadata(channels)
    # Finally, sync channel content
    total_time = datetime.timedelta()
    for channel in channels:
        cur_time = self._sync_channel(channel)
        total_time += cur_time
    # Switch back to cdnsync log
    rhnLog.initLOG(self.log_path, self.log_level)
    log2disk(0, "Sync of channel completed.")
    log(0, "Total time: %s" % str(total_time).split('.')[0])
def deactivate():
    """Function to remove certificates and manifest repositories from DB"""
    rhnSQL.initDB()
    # Run each cleanup step with its announcement message.
    cleanup_steps = (
        ("Removing certificates...", Activation._remove_certificates),
        ("Removing manifest repositories...", Activation._remove_repositories),
    )
    for message, action in cleanup_steps:
        log(0, message)
        action()
def _get_content_sources(self, channel, backend):
    """Build ContentSource import objects for a channel's yum repos,
    including one repo per kickstart tree when available.

    :param channel: channel label
    :param backend: SQL backend used to look up the content source type id
    :return: list of ContentSource objects ready for import
    """
    batch = []
    sources = []
    type_id = backend.lookupContentSourceType('yum')
    if channel in self.content_source_mapping:
        sources.extend(self.content_source_mapping[channel])
    for source in sources:
        # Skip SRPM repositories; only binary content is imported.
        if not source['pulp_content_category'] == "source":
            content_source = ContentSource()
            content_source['label'] = source['pulp_repo_label_v2']
            content_source['source_url'] = CFG.CDN_ROOT + source['relative_url']
            content_source['org_id'] = None
            content_source['type_id'] = type_id
            batch.append(content_source)
    if channel in self.kickstart_metadata:
        for tree in self.kickstart_metadata[channel]:
            tree_label = tree['ks_tree_label']
            if tree_label in self.kickstart_source_mapping:
                sources = self.kickstart_source_mapping[tree_label]
                # One tree comes from one repo, one repo for each tree is in the mapping,
                # in future there may be multiple repos for one tree and we will need to select
                # correct repo
                source = sources[0]
                content_source = ContentSource()
                content_source['label'] = tree_label
                content_source['source_url'] = CFG.CDN_ROOT + source['relative_url']
                content_source['org_id'] = None
                content_source['type_id'] = type_id
                batch.append(content_source)
            else:
                log(1, "WARN: Kickstart tree not available: %s" % tree_label)
    return batch
def _list_available_channels(self):
    """Return a {base_channel: [child_channels]} dict for all channels in
    CDN-activated channel families."""
    # Select from rhnContentSsl to filter cdn-activated channel families
    h = rhnSQL.prepare("""
        select label from rhnChannelFamilyPermissions cfp
        inner join rhnChannelFamily cf on cfp.channel_family_id = cf.id
        inner join rhnContentSsl cs on cf.id = cs.channel_family_id
        where cf.org_id is null
    """)
    h.execute()
    families = h.fetchall_dict() or []
    # collect all channel from available families
    all_channels = []
    base_channels = {}
    for family in families:
        label = family['label']
        family = self.families[label]
        channels = [c for c in family['channels'] if c is not None]
        all_channels.extend(channels)
    # fill base_channel
    for channel in all_channels:
        try:
            # Only base channels as key in dictionary
            if self.channel_metadata[channel]['parent_channel'] is None:
                base_channels[channel] = [k for k in all_channels
                                          if self.channel_metadata[k]['parent_channel'] == channel]
        except KeyError:
            # Also triggered when a child in the comprehension lacks metadata.
            log(1, "Channel %s not found in channel metadata" % channel)
            continue
    return base_channels
def clear_cache():
    """Drop packages outside channels and empty the package stage directory."""
    # Clear packages outside channels from DB and disk
    contentRemove.delete_outside_channels(None)
    stage_dir = constants.PACKAGE_STAGE_DIRECTORY
    if not os.path.isdir(stage_dir):
        return
    log(0, "Cleaning package stage directory.")
    for entry in os.listdir(stage_dir):
        os.unlink(os.path.join(stage_dir, entry))
def run(self):
    """Download files from all queues, one queue at a time, using a fresh
    pool of worker threads per queue. Re-raises the first worker exception
    after all threads finish."""
    size = 0
    for queue in self.queues.values():
        size += queue.qsize()
    if size <= 0:
        return
    log(1, "Downloading total %d files from %d queues." % (size, len(self.queues)))

    for index, queue in enumerate(self.queues.values()):
        log(2, "Downloading %d files from queue #%d." % (queue.qsize(), index))
        # Reset per-queue flag before the workers start.
        self.first_in_queue_done = False
        started_threads = []
        for _ in range(self.threads):
            thread = DownloadThread(self, queue)
            thread.setDaemon(True)
            thread.start()
            started_threads.append(thread)

        # wait to finish
        try:
            while any(t.isAlive() for t in started_threads):
                time.sleep(1)
        except KeyboardInterrupt:
            e = sys.exc_info()[1]
            self.fail_download(e)
            while any(t.isAlive() for t in started_threads):
                time.sleep(1)
            # Abort remaining queues on Ctrl-C.
            break

    # raise first detected exception if any
    if self.exception:
        raise self.exception  # pylint: disable=E0702
def __init__(self, local_mount_point=None, client_cert_id=None):
    """Initialize repository tree and channel mapping data.

    :param local_mount_point: optional local path used instead of CDN
    :param client_cert_id: optional certificate id used to populate the
        repository tree

    Missing mapping files are tolerated: an IOError on any of them resets
    all three mappings to {} (same all-or-nothing behavior as before).
    """
    rhnSQL.initDB()
    self.local_mount_point = local_mount_point
    self.repository_tree = CdnRepositoryTree()
    self._populate_repository_tree(client_cert_id=client_cert_id)

    try:
        # Channel to repositories mapping
        with open(constants.CONTENT_SOURCE_MAPPING_PATH, 'r') as f:
            self.content_source_mapping = json.load(f)
        # Channel to kickstart repositories mapping
        with open(constants.KICKSTART_SOURCE_MAPPING_PATH, 'r') as f:
            self.kickstart_source_mapping = json.load(f)
        # Kickstart metadata
        with open(constants.KICKSTART_DEFINITIONS_PATH, 'r') as f:
            self.kickstart_metadata = json.load(f)
    except IOError:
        e = sys.exc_info()[1]
        log(1, "Ignoring channel mappings: %s" % e)
        self.content_source_mapping = {}
        self.kickstart_source_mapping = {}
        self.kickstart_metadata = {}

    self.__init_repository_to_channels_mapping()
def __init__(self, current_manifest=None, username=None, password=None, http_proxy=None,
             http_proxy_username=None, http_proxy_password=None):
    """Set up connection parameters for the Candlepin API.

    :param current_manifest: manifest object providing the API url and
        consumer identity. NOTE(review): the default is None but
        get_api_url() is called unconditionally -- callers appear to always
        pass a manifest; confirm before relying on the default.
    :param username/password: interactive credentials; prompted for when
        certificate-based auth is not possible.
    :raises ValueError: when the API url has neither http nor https scheme.
    """
    self.base_url = current_manifest.get_api_url()
    # Config may point at a different Candlepin instance.
    if CFG.CANDLEPIN_SERVER_API:
        log(0, "Overriding Candlepin server to: '%s'" % CFG.CANDLEPIN_SERVER_API)
        self.base_url = CFG.CANDLEPIN_SERVER_API
    if self.base_url.startswith('https'):
        self.protocol = 'https'
    elif self.base_url.startswith('http'):
        self.protocol = 'http'
    else:
        raise ValueError("Invalid protocol in URL: '%s'" % self.base_url)
    if not self.base_url.endswith('/'):
        self.base_url += '/'
    self.current_manifest = current_manifest
    # Authentication with upstream consumer certificate or with username and password
    if self.current_manifest and self.protocol == 'https' and not username:
        self.username = self.password = None
    else:
        log(0, "Candlepin login:")
        self.username, self.password = getUsernamePassword(username, password)
    self.http_proxy = http_proxy
    self.http_proxy_username = http_proxy_username
    self.http_proxy_password = http_proxy_password
def _rpc_call(self, function_name, params): get_server_obj = self.login() # Try a couple of times fault_count = 0 expired_token = 0 cfg = config.initUp2dateConfig() while fault_count - expired_token < cfg['networkRetries']: try: ret = getattr(get_server_obj, function_name)(*params) except rpclib.xmlrpclib.ProtocolError: e = sys.exc_info()[1] # We have two codes to check: the HTTP error code, and the # combination (failtCode, faultString) encoded in the headers # of the request. http_error_code = e.errcode fault_code, fault_string = rpclib.reportError(e.headers) fault_count += 1 if http_error_code == 401 and fault_code == -34: # Login token expired get_server_obj = self.login(force=1) # allow exactly one respin for expired token expired_token = 1 continue if http_error_code == 404 and fault_code == -17: # File not found self.extinctErrorYN = 1 return None log( -1, 'ERROR: http error code :%s; fault code: %s; %s' % (http_error_code, fault_code, fault_string)) # XXX raise else: return ret raise Exception("Failed after multiple attempts!")
def refresh_manifest(self, uuid=None):
    """Ask Candlepin to regenerate certificates for the given consumer.

    Falls back to the uuid of the currently loaded manifest. Returns True
    on HTTP 200/204, False on any other response or when the call fails.
    """
    if uuid is None:
        if not self.current_manifest:
            raise ValueError("Uuid is not known.")
        uuid = self.current_manifest.get_uuid()

    url = "%s%s/certificates" % (self.base_url, uuid)
    log(1, "URL: '%s'" % url)

    response = self._call_api(url, method="put")
    if response is None:
        return False
    # pylint: disable=E1101
    if response.status_code in (requests.codes.ok, requests.codes.no_content):
        return True
    log2(0, 0, "Status code: %s" % response.status_code, stream=sys.stderr)
    log2(0, 0, "Message: '%s'" % response.text, stream=sys.stderr)
    return False
def get_crypto_keys(self, check_dates=False):
    """Fetch the CA cert, client cert and client key (with owning org)
    from rhnCryptoKey.

    :param check_dates: when True, validate certificate date ranges and
        return {} if any certificate is outside its validity period.
    :return: dict mapping 'ca_cert'/'client_cert'/'client_key' to
        (description, key, org_id) tuples, or {} when validation fails.
    """
    ssl_query = rhnSQL.prepare("""
        select description, key, org_id from rhnCryptoKey where id = :id
    """)
    keys = {}
    # Same query for all three entries -- only the row id differs
    # (the original repeated this stanza three times).
    for label, key_id in (('ca_cert', self.ca_cert),
                          ('client_cert', self.client_cert),
                          ('client_key', self.client_key)):
        ssl_query.execute(id=key_id)
        row = ssl_query.fetchone_dict()
        keys[label] = (str(row['description']), str(row['key']), row['org_id'])

    # Check if SSL certificates are usable
    if check_dates:
        failed = 0
        for key in (keys['ca_cert'], keys['client_cert']):
            if not verify_certificate_dates(key[1]):
                log(1, "WARNING: Problem with dates in certificate '%s'. "
                       "Please check validity of this certificate." % key[0])
                failed += 1
        if failed:
            return {}
    return keys
def sync(self, channels=None):
    """Sync the given channels from CDN; default to already synced ones.

    Raises ChannelNotFoundError when any channel is missing from the
    channel/family/content-source mappings.
    """
    # Fall back to channels synced in previous runs.
    channels = channels or self.synced_channels

    # Refuse to continue when any channel lacks required mapping data.
    required_mappings = [self.channel_metadata, self.channel_to_family,
                         self.content_source_mapping]
    missing = [ch for ch in channels
               if any(ch not in mapping for mapping in required_mappings)]
    if missing:
        raise ChannelNotFoundError(" " + "\n ".join(missing))

    # Refresh channel metadata before touching content.
    self._update_channels_metadata(channels)

    # Sync channel content and accumulate elapsed time.
    total_time = datetime.timedelta()
    for channel in channels:
        total_time += self._sync_channel(channel)

    # Switch back to the cdnsync log.
    rhnLog.initLOG(self.log_path, self.log_level)
    log2disk(0, "Sync of channel completed.")
    log(0, "Total time: %s" % str(total_time).split('.')[0])
def setup_repos_and_sync(self, channels=None, add_repos=None, delete_repos=None):
    """Attach/detach repositories to a single custom channel and sync it.

    :param channels: collection containing exactly one custom channel label
    :param add_repos: repo urls to attach (normalized in place)
    :param delete_repos: repo urls to detach (normalized in place)
    :raises CustomChannelSyncError: not exactly one channel, or repos
        cannot be attached.
    :return: sync error messages, or None when nothing was synced
    """
    def _normalize_relative_urls(repos):
        # Strip the CDN root and anchor the path at '/'; mutate in place so
        # the caller's list reflects the normalized form (as before).
        for index, repo in enumerate(repos):
            repos[index] = os.path.join('/', repo.replace(CFG.CDN_ROOT, ''))

    # Fix format of relative url
    if add_repos:
        _normalize_relative_urls(add_repos)
    if delete_repos:
        _normalize_relative_urls(delete_repos)

    # We need single custom channel
    if not channels or len(channels) > 1:
        raise CustomChannelSyncError("Single custom channel needed.")
    channel = list(channels)[0]
    db_channel = channel_info(channel)

    if add_repos and not self._can_add_repos(db_channel, add_repos):
        raise CustomChannelSyncError("Unable to attach requested repositories to this channel.")

    # Add custom repositories to custom channel
    new_repos_count = self.cdn_repository_manager.assign_repositories_to_channel(
        channel, delete_repos=delete_repos, add_repos=add_repos)

    if new_repos_count:
        # Add to synced channels if there are any repos
        if channel not in self.synced_channels:
            self.synced_channels[channel] = db_channel['org_id']
        error_messages = self.sync(channels=channels)
    else:
        log(0, "No repositories attached to channel. Skipping sync.")
        error_messages = None
    return error_messages
def _list_available_channels(self):
    """Return {base_channel: [child_channels]} for CDN-activated families."""
    # rhnContentSsl join filters down to cdn-activated channel families.
    h = rhnSQL.prepare("""
        select label from rhnChannelFamilyPermissions cfp
        inner join rhnChannelFamily cf on cfp.channel_family_id = cf.id
        inner join rhnContentSsl cs on cf.id = cs.channel_family_id
        where cf.org_id is null
    """)
    h.execute()
    families = h.fetchall_dict() or []

    # Gather every channel belonging to the activated families.
    all_channels = []
    for family_row in families:
        members = self.families[family_row['label']]['channels']
        all_channels.extend(c for c in members if c is not None)

    # Map each base channel (no parent) to its children.
    base_channels = {}
    for channel in all_channels:
        try:
            if self.channel_metadata[channel]['parent_channel'] is None:
                base_channels[channel] = [k for k in all_channels
                                          if self.channel_metadata[k]['parent_channel'] == channel]
        except KeyError:
            log(1, "Channel %s not found in channel metadata" % channel)
            continue
    return base_channels
def import_channel_families(self):
    """Insert channel family data into DB.

    Builds ChannelFamily import objects for every family listed in the
    manifest certificate that also exists in the cdn-sync mappings, then
    runs the importer. Families unknown to the mappings are logged as
    warnings and skipped.
    """
    log(1, "Channel families in manifest: %d" % len(self.sat5_cert.channel_families))  # pylint: disable=E1101
    batch = []
    for cf in self.sat5_cert.channel_families:  # pylint: disable=E1101
        label = cf.name
        try:
            family = self.families[label]
            family_object = ChannelFamily()
            for k in family.keys():
                family_object[k] = family[k]
            family_object['label'] = label
            batch.append(family_object)
            # Remember labels successfully prepared for import.
            self.families_to_import.append(label)
        except KeyError:
            # While channel mappings are not consistent with certificate generated on RHN...
            msg = ("WARNING: Channel family '%s' is provided by manifest but "
                   "was not found in cdn-sync mappings." % label)
            log2(0, 1, msg, stream=sys.stderr)
    log(1, "Channel families to import: %d" % len(batch))
    # Perform import
    backend = SQLBackend()
    importer = ChannelFamilyImport(batch, backend)
    importer.run()
def log(self, *_):
    """Progress callback: advance the bar; emit a percentage log line at
    most once every ~90 seconds. Thread-safe."""
    self.lock.acquire()
    try:
        self.status += 1
        self._print_progress_bar(self.status, self.total, prefix=self.msg, bar_length=50)
        # Throttle the textual percentage line.
        if time.time() > int(self.last_log + 90):
            self.last_log = time.time()
            log(0, '%s %s' % (round(100.00 * (self.status / float(self.total)), 2), '%'))
    finally:
        # Release even if the progress bar or log write raises -- the
        # original could leave the lock held forever on an exception.
        self.lock.release()
def log(self, success, param):
    """Thread-safe per-item progress logging; failures go to stderr.

    :param success: truthy when the item finished successfully
    :param param: item identifier included in the log line
    """
    self.lock.acquire()
    try:
        self.status += 1
        if success:
            log(0, "%d/%d : %s" % (self.status, self.total, str(param)))
        else:
            log2(0, 0, "%d/%d : %s (failed)" % (self.status, self.total, str(param)),
                 stream=sys.stderr)
    finally:
        # Guarantee release even if logging raises -- the original could
        # leave the lock held forever on an exception.
        self.lock.release()
def sync(self, channels=None):
    """Sync the given channels from CDN, skipping unavailable ones.

    Unlike older variants, unavailable channels are reported in the
    returned error list instead of raising.

    :param channels: iterable of channel labels; defaults to previously
                     synced channels.
    :return: list of error/warning message strings (empty on success)
    """
    # If no channels specified, sync already synced channels
    if not channels:
        channels = list(self.synced_channels)

    # Check channel availability before doing anything
    not_available = []
    available = []
    for channel in channels:
        if not self._is_channel_available(channel):
            not_available.append(channel)
        else:
            available.append(channel)
    channels = available

    error_messages = []

    # if we have not_available channels log the error immediately
    if not_available:
        msg = "ERROR: these channels either do not exist or are not available:\n " + "\n ".join(not_available)
        error_messages.append(msg)

    # BZ 1434913 - let user know satellite may not be activated if all channels are in not_available
    if not available:
        msg = "WARNING: Is your Red Hat Satellite activated for CDN?\n"
        msg += "(to see details about currently used SSL certificates for accessing CDN:"
        msg += " /usr/bin/cdn-sync --cdn-certs)"
        error_messages.append(msg)

    # Need to update channel metadata
    self._update_channels_metadata([ch for ch in channels if ch in self.channel_metadata])

    # Make sure custom channels are properly connected with repos
    for channel in channels:
        if channel in self.synced_channels and self.synced_channels[channel]:
            self.cdn_repository_manager.assign_repositories_to_channel(channel)
    reposync.clear_ssl_cache()

    # Finally, sync channel content
    total_time = timedelta()
    for channel in channels:
        cur_time, failed_packages = self._sync_channel(channel)
        # Negative count signals a whole-channel failure.
        if failed_packages < 0:
            error_messages.append("Problems occurred during syncing channel %s. Please check "
                                  "/var/log/rhn/cdnsync/%s.log for the details\n" % (channel, channel))
        if failed_packages > 0:
            # The source had a hard line break inside this literal; rejoined
            # to match the wording of the parallel implementation.
            error_messages.append("%d packages in channel %s failed to sync. Please check "
                                  "/var/log/rhn/cdnsync/%s.log for the details\n"
                                  % (failed_packages, channel, channel))
        total_time += cur_time

    # Switch back to cdnsync log
    rhnLog.initLOG(self.log_path, self.log_level)
    log2disk(0, "Sync of channel completed.")
    log(0, "Total time: %s" % str(total_time).split('.')[0])

    return error_messages
def print_channel_line(ch):
    """Print one aligned status line for channel *ch*: sync marker,
    padded label and EOL date."""
    # 'p' marks a previously synced channel.
    if ch in self.synced_channels:
        sync_status = 'p'
    else:
        sync_status = '.'
    # Pad the label so EOL dates line up in one column.
    space = " "
    offset = longest_label - len(ch)
    space += " " * offset
    # BUGFIX: the original printed the enclosing scope's loop variable
    # 'channel' instead of the 'ch' argument, so the padding (from 'ch')
    # and the printed label could disagree.
    log(0, " %s %s%s%s" % (sync_status, ch, space, eol_channels[ch].strftime("%Y-%m-%d")))
def get_content_sources_kickstart(self, channel_label):
    """Return kickstart repositories for the channel's first mapped tree.

    NOTE(review): the return is inside the loop -- only the first tree
    with an available mapping is used; any remaining trees are ignored.
    Trees without a mapping are logged and skipped. Returns [] when no
    tree has a mapping.
    """
    if channel_label in self.kickstart_metadata:
        for tree in self.kickstart_metadata[channel_label]:
            tree_label = tree['ks_tree_label']
            if tree_label in self.kickstart_source_mapping:
                return self.kickstart_source_mapping[tree_label]
            else:
                log(1, "WARN: Kickstart tree not available: %s" % tree_label)
    return []
def _sync_channel(self, channel):
    """Run a reposync of one channel from CDN.

    Collects excluded urls (manager-level exclusions plus kickstart repos
    when --no-kickstarts), logs a banner and an EOL note where applicable,
    configures the RepoSync instance and runs it.

    :return: whatever reposync.RepoSync.sync() returns
    """
    excluded_urls = []
    kickstart_trees = []
    if channel in self.kickstart_metadata:
        kickstart_trees = self.kickstart_metadata[channel]
    # Manager-level exclusions always apply.
    excluded_urls.extend(self.cdn_repository_manager.excluded_urls)

    if self.no_kickstarts:
        kickstart_repos = self.cdn_repository_manager.get_content_sources_kickstart(channel)
        excluded_urls.extend([x['relative_url'] for x in kickstart_repos])

    log(0, "======================================")
    log(0, "| Channel: %s" % channel)
    log(0, "======================================")

    # Print note if channel is already EOL
    if self._is_channel_eol(channel):
        log(0, "NOTE: This channel reached end-of-life on %s." %
            datetime.strptime(self.channel_metadata[channel]['eol'],
                              "%Y-%m-%d %H:%M:%S").strftime("%Y-%m-%d"))

    log(0, "Sync of channel started.")
    log2disk(0, "Please check 'cdnsync/%s.log' for sync log of this channel." % channel, notimeYN=True)

    sync = reposync.RepoSync(channel,
                             repo_type="yum",
                             url=None,
                             fail=False,
                             filters=False,
                             no_packages=self.no_packages,
                             no_errata=self.no_errata,
                             sync_kickstart=(not self.no_kickstarts),
                             force_all_errata=self.force_all_errata,
                             force_kickstart=self.force_kickstarts,
                             latest=False,
                             metadata_only=self.no_rpms,
                             excluded_urls=excluded_urls,
                             strict=self.consider_full,
                             log_dir="cdnsync",
                             log_level=self.log_level,
                             check_ssl_dates=True,
                             force_null_org_content=True)
    sync.set_ks_tree_type('rhn-managed')
    if self.import_batch_size:
        sync.set_import_batch_size(self.import_batch_size)
    if kickstart_trees:
        # Assuming all trees have same install type
        sync.set_ks_install_type(kickstart_trees[0]['ks_install_type'])
    sync.set_urls_prefix(self.mount_point)
    return sync.sync()
def _print_unmapped_channels(self):
    """Log previously synced null-org channels missing from CDN metadata."""
    stale = sorted(ch for ch in self.synced_channels
                   if not self.synced_channels[ch] and ch not in self.channel_metadata)
    if stale:
        log(0, "Previously synced channels not available to update from CDN:")
        for channel in stale:
            log(0, " p %s" % channel)
def validateSatCert(cert):
    """Validate the sanity of a product (entitlement) certificate.

    Checks that all mandatory fields are present, then verifies the
    certificate's detached GPG signature against the webapp keyring.

    :param cert: certificate content loadable by satellite_cert.SatelliteCert
    :raises RHNCertGeneralSanityException: on missing fields or a failed
        GPG verification.
    :return: 0 on success
    """
    sat_cert = satellite_cert.SatelliteCert()
    sat_cert.load(cert)
    for key in ['generation', 'product', 'owner', 'issued', 'expires', 'slots']:
        if not getattr(sat_cert, key):
            writeError("Your satellite certificate is not valid. Field %s is not defined.\n"
                       "Please contact your support representative." % key)
            raise RHNCertGeneralSanityException("RHN Entitlement Certificate failed "
                                                "to validate.")
    signature = sat_cert.signature
    # copy cert to temp location (it may be gzipped).
    fd, certTmpFile = tempfile.mkstemp(prefix="/tmp/cert-")
    fo = os.fdopen(fd, 'wb')
    fo.write(getCertChecksumString(sat_cert))
    fo.flush()
    fo.close()
    # Write the detached signature to its own temp file for gpg.
    fd, signatureTmpFile = tempfile.mkstemp(prefix="/tmp/cert-signature-")
    fo = os.fdopen(fd, 'wb')
    fo.write(signature)
    fo.flush()
    fo.close()
    args = ['gpg', '--verify', '-q', '--keyring',
            DEFAULT_WEBAPP_GPG_KEY_RING, signatureTmpFile, certTmpFile]
    log(1, "Checking cert XML sanity and GPG signature: %s" % repr(' '.join(args)))
    ret, out, err = fileutils.rhn_popen(args)
    err = err.read()
    out = out.read()
    # nuke temp cert
    os.unlink(certTmpFile)
    os.unlink(signatureTmpFile)
    # gpg failure is signalled by the exit code or known error strings.
    if err.find('Ohhhh jeeee: ... this is a bug') != -1 or err.find('verify err') != -1 or ret:
        msg = "%s Entitlement Certificate failed to validate.\n" % PRODUCT_NAME
        msg += "MORE INFORMATION:\n"
        msg = msg + " Return value: %s\n" % ret +\
              " Standard-out: %s\n" % out +\
              " Standard-error: %s" % err
        writeError(msg)
        raise RHNCertGeneralSanityException("RHN Entitlement Certificate failed "
                                            "to validate.")
    return 0
def _xmlrpc(function, params):
    """Invoke an XML-RPC function on the shared server object.

    :param function: remote function name to look up on serverObj
    :param params: positional arguments for the remote call
    :raises TypeError: re-raised after logging the failing call
    :raises rpclib.xmlrpclib.ProtocolError: re-raised after logging
    :return: the remote call's return value
    """
    try:
        retval = getattr(BaseWireSource.serverObj, function)(*params)
    except TypeError:
        # Bad call signature -- log the offending invocation and re-raise.
        # (Removed an unused `e = sys.exc_info()[1]` binding here.)
        log(-1, 'ERROR: during "getattr(BaseWireSource.serverObj, %s)(*(%s))"' % (function, params))
        raise
    except rpclib.xmlrpclib.ProtocolError:
        e = sys.exc_info()[1]
        log2(-1, 2, 'ERROR: ProtocolError: %s' % e, stream=sys.stderr)
        raise
    return retval
def print_channel_line(ch):
    """Print one aligned status line for channel *ch*: sync marker,
    padded label and EOL date."""
    # 'p' marks a previously synced channel.
    if ch in self.synced_channels:
        sync_status = 'p'
    else:
        sync_status = '.'
    # Pad the label so EOL dates line up in one column.
    space = " "
    offset = longest_label - len(ch)
    space += " " * offset
    # BUGFIX: the original printed the enclosing scope's loop variable
    # 'channel' instead of the 'ch' argument, so the padding (from 'ch')
    # and the printed label could disagree.
    log(0, " %s %s%s%s" % (sync_status, ch, space, eol_channels[ch].strftime("%Y-%m-%d")))
def log(self, *_):
    """Progress callback: advance the bar; emit a percentage log line at
    most once every ~90 seconds. Thread-safe."""
    self.lock.acquire()
    try:
        self.status += 1
        self._print_progress_bar(self.status, self.total, prefix=self.msg, bar_length=50)
        # Throttle the textual percentage line.
        if time.time() > int(self.last_log + 90):
            self.last_log = time.time()
            log(0, '%s %s' % (round(100.00 * (self.status / float(self.total)), 2), '%'))
    finally:
        # Release even if the progress bar or log write raises -- the
        # original could leave the lock held forever on an exception.
        self.lock.release()
def count_packages(self):
    """Count packages in every available channel's repositories and cache
    the count under CDN_REPODATA_ROOT/<channel>/packages_num."""
    start_time = int(time.time())

    channel_tree, not_available_channels = self._list_available_channels()

    def _channels_for(base_channel):
        # Work on a copy: the original appended base_channel to the list
        # stored in channel_tree, so the second pass saw the base channel
        # twice (and the progress bar overshot its total).
        channel_list = list(channel_tree[base_channel])
        if base_channel not in not_available_channels:
            channel_list.append(base_channel)
        return channel_list

    # First pass: gather all repositories so the progress bar has a total.
    repo_list = []
    for base_channel in sorted(channel_tree):
        for channel in sorted(_channels_for(base_channel)):
            repo_list.extend(self.cdn_repository_manager.get_content_sources(channel))

    log(0, "Number of repositories: %d" % len(repo_list))
    already_downloaded = 0
    print_progress_bar(already_downloaded, len(repo_list), prefix='Downloading repodata:',
                       suffix='Complete', bar_length=50)

    for base_channel in sorted(channel_tree):
        for channel in sorted(_channels_for(base_channel)):
            sources = self.cdn_repository_manager.get_content_sources(channel)
            list_packages = []
            for source in sources:
                list_packages.extend(self._count_packages_in_repo(source))
                already_downloaded += 1
                print_progress_bar(already_downloaded, len(repo_list), prefix='Downloading repodata:',
                                   suffix='Complete', bar_length=50)

            cdn_repodata_path = constants.CDN_REPODATA_ROOT + '/' + channel

            # create directory for repo data if it doesn't exist
            try:
                os.makedirs(cdn_repodata_path)
            except OSError:
                exc = sys.exc_info()[1]
                if exc.errno == errno.EEXIST and os.path.isdir(cdn_repodata_path):
                    pass
                else:
                    raise

            # 'with' closes the file even if the write fails.
            with open(cdn_repodata_path + '/' + "packages_num", 'w') as f_out:
                # Packages may repeat across repos -- count unique ones.
                f_out.write(str(len(set(list_packages))))

    elapsed_time = int(time.time())
    log(0, "Elapsed time: %d seconds" % (elapsed_time - start_time))
def export_manifest(self, uuid=None, ownerid=None, satellite_version=None):
    """Performs export request to Candlepin API and saves exported manifest to
    target file. Can take required parameters from current manifest or
    override them with parameters of this method.

    :param uuid: consumer uuid; defaults to the current manifest's uuid
    :param ownerid: owner id; defaults to the current manifest's ownerid
    :param satellite_version: defaults to the current manifest's version
    :raises ValueError: when a parameter is neither given nor derivable
    :return: path to the downloaded manifest zip, or None on failure
    """
    if uuid is None:
        if self.current_manifest:
            uuid = self.current_manifest.get_uuid()
        else:
            raise ValueError("Uuid is not known.")
    if ownerid is None:
        if self.current_manifest:
            ownerid = self.current_manifest.get_ownerid()
        else:
            raise ValueError("Ownerid is not known.")
    if satellite_version is None:
        if self.current_manifest:
            satellite_version = self.current_manifest.get_satellite_version()
        else:
            raise ValueError("Satellite version is not known.")
    url = "%s%s/export" % (self.base_url, uuid)
    params = {"ext": ["ownerid:%s" % ownerid, "version:%s" % satellite_version]}
    log(1, "URL: '%s'" % url)
    log(1, "Parameters: '%s'" % str(params))
    response = self._call_api(url, params=params, method="get")
    if response is not None:
        # pylint: disable=E1101
        if response.status_code == requests.codes.ok:
            # Stream the zip payload into a temp file and return its path.
            fd, downloaded_manifest = tempfile.mkstemp(prefix="/tmp/manifest-", suffix=".zip")
            fo = os.fdopen(fd, 'wb')
            for chunk in response:
                fo.write(chunk)
            fo.flush()
            fo.close()
            return downloaded_manifest
        else:
            log2(0, 0, "Status code: %s" % response.status_code, stream=sys.stderr)
            log2(0, 0, "Message: '%s'" % response.text, stream=sys.stderr)
    return None
def get_content_sources_kickstart(self, channel_label):
    """Return kickstart repositories for a channel, each annotated with
    the kickstart tree label it belongs to."""
    repositories = []
    for tree in self.kickstart_metadata.get(channel_label, []):
        tree_label = tree['ks_tree_label']
        if tree_label not in self.kickstart_source_mapping:
            log(1, "WARN: Kickstart tree not available: %s" % tree_label)
            continue
        # One tree comes from one repo, one repo for each tree is in the mapping,
        # in future there may be multiple repos for one tree and we will need to select
        # correct repo
        repository = self.kickstart_source_mapping[tree_label][0]
        repository['ks_tree_label'] = tree_label
        repositories.append(repository)
    return repositories
def run(self):
    """Start download worker threads and wait until all of them finish."""
    file_count = self.queue.qsize()
    if file_count <= 0:
        return
    log(1, "Downloading %s files." % str(file_count))

    workers = []
    for _ in range(self.threads):
        worker = DownloadThread(self)
        worker.setDaemon(True)
        worker.start()
        workers.append(worker)

    # Poll until every worker thread terminates.
    while any(w.isAlive() for w in workers):
        time.sleep(1)
def _sync_channel(self, channel):
    """Run a reposync of one channel from CDN.

    Excludes kickstart repositories when --no-kickstarts is set, logs a
    banner and an EOL note where applicable, configures the RepoSync
    instance and runs it.

    :return: whatever reposync.RepoSync.sync() returns
    """
    excluded_urls = []
    kickstart_trees = []
    if channel in self.kickstart_metadata:
        kickstart_trees = self.kickstart_metadata[channel]

    if self.no_kickstarts:
        kickstart_repos = self.cdn_repository_manager.get_content_sources_kickstart(channel)
        excluded_urls.extend([x['relative_url'] for x in kickstart_repos])

    log(0, "======================================")
    log(0, "| Channel: %s" % channel)
    log(0, "======================================")

    # Print note if channel is already EOL
    if self._is_channel_eol(channel):
        log(0, "NOTE: This channel reached end-of-life on %s." %
            datetime.strptime(self.channel_metadata[channel]['eol'],
                              "%Y-%m-%d %H:%M:%S").strftime("%Y-%m-%d"))

    log(0, "Sync of channel started.")
    log2disk(0, "Please check 'cdnsync/%s.log' for sync log of this channel." % channel, notimeYN=True)

    sync = reposync.RepoSync(channel,
                             repo_type="yum",
                             url=None,
                             fail=False,
                             filters=False,
                             no_packages=self.no_packages,
                             no_errata=self.no_errata,
                             sync_kickstart=(not self.no_kickstarts),
                             force_all_errata=self.force_all_errata,
                             force_kickstart=self.force_kickstarts,
                             latest=False,
                             metadata_only=self.no_rpms,
                             excluded_urls=excluded_urls,
                             strict=self.consider_full,
                             log_dir="cdnsync",
                             log_level=self.log_level,
                             check_ssl_dates=True,
                             force_null_org_content=True)
    sync.set_ks_tree_type('rhn-managed')
    if self.import_batch_size:
        sync.set_import_batch_size(self.import_batch_size)
    if kickstart_trees:
        # Assuming all trees have same install type
        sync.set_ks_install_type(kickstart_trees[0]['ks_install_type'])
    sync.set_urls_prefix(self.mount_point)
    return sync.sync()
def fix(self):
    """Resolve DB lookups and cross-references for a batch of errata.

    The statement order is load-bearing: arches, channels and file types
    must be resolved before the per-erratum fixups, and package lookups
    must run only after names/EVRs/checksums have been resolved.
    """
    self.backend.lookupPackageArches(self.package_arches)
    self.backend.lookupChannels(self.channels)
    self.backend.lookupErrataFileTypes(self.file_types)
    for erratum in self.batch:
        for ef in erratum['files']:
            eft = ef['file_type']
            if eft not in self.file_types:
                raise Exception("Unknown file type %s" % eft)
            ef['type'] = self.file_types[eft]
    # First arch that resolves to a truthy type wins.
    # NOTE(review): this assumes one package type across the whole batch —
    # confirm with the callers of this importer.
    for label, aid in self.package_arches.items():
        self.package_type = self.backend.lookupPackageArchType(aid)
        if self.package_type:
            break
    self._fixCVE()
    self.backend.lookupPackageNames(self.names)
    self.backend.lookupEVRs(self.evrs, self.package_type)
    self.backend.lookupChecksums(self.checksums)
    for erratum in self.batch:
        if erratum.ignored:
            # Skip it
            continue
        self._fix_erratum_channels(erratum)
        self._fix_erratum_packages_lookup(erratum)
        self._fix_erratum_file_packages(erratum)
        # fix severity stuff
        self._fix_erratum_severity(erratum)
        # fix oval info to populate the relevant dbtables
        self._fix_erratum_oval_info(erratum)
    self.backend.lookupPackages(list(self.packages.values()), self.checksums,
                                self.ignoreMissing)
    for erratum in self.batch:
        if erratum.ignored:
            # Skip it
            continue
        self._fix_erratum_packages(erratum)
        self._fix_erratum_file_channels(erratum)
    # remove erratas that have been ignored
    ignored_erratas = list([x for x in self.batch if x.ignored])
    if len(ignored_erratas) > 0:
        log(0, "Ignoring %d old, superseded erratas" % len(ignored_erratas))
    self.batch = list([x for x in self.batch if not x.ignored])
def sync(self, channels=None):
    """Synchronize the given channels (default: all previously synced ones).

    Raises ChannelNotFoundError listing every unavailable channel.
    Returns the list of error messages collected during the sync.
    """
    # If no channels specified, sync already synced channels
    if not channels:
        channels = list(self.synced_channels)

    # Check channel availability before doing anything
    missing = [ch for ch in channels if not self._is_channel_available(ch)]
    if missing:
        raise ChannelNotFoundError(" " + "\n ".join(missing))

    # Need to update channel metadata
    known = [ch for ch in channels if ch in self.channel_metadata]
    self._update_channels_metadata(known)

    # Make sure custom channels are properly connected with repos
    for ch in channels:
        if ch in self.synced_channels and self.synced_channels[ch]:
            self.cdn_repository_manager.assign_repositories_to_channel(ch)

    # Finally, sync channel content
    error_messages = []
    total_time = timedelta()
    for ch in channels:
        cur_time, failed_packages = self._sync_channel(ch)
        if failed_packages < 0:
            error_messages.append(
                "Problems occurred during syncing channel %s. Please check "
                "/var/log/rhn/cdnsync/%s.log for the details\n" % (ch, ch))
        if failed_packages > 0:
            error_messages.append(
                "%d packages in channel %s failed to sync. Please check "
                "/var/log/rhn/cdnsync/%s.log for the details\n"
                % (failed_packages, ch, ch))
        total_time += cur_time

    # Switch back to cdnsync log
    rhnLog.initLOG(self.log_path, self.log_level)
    log2disk(0, "Sync of channel completed.")
    log(0, "Total time: %s" % str(total_time).split('.')[0])
    return error_messages
def count_packages(self):
    """Download repodata for every available channel and write the number
    of distinct packages per channel to CDN_REPODATA_ROOT/<channel>/packages_num.
    """
    start_time = int(time.time())
    backend = SQLBackend()
    base_channels = self._list_available_channels()
    # Flat list of all content sources; only its length is used, to size
    # the progress bar below.
    repo_list = []
    for base_channel in sorted(base_channels):
        for channel in sorted(base_channels[base_channel] + [base_channel]):
            repo_list.extend(self._get_content_sources(channel, backend))
    log(0, "Number of repositories: %d" % len(repo_list))
    already_downloaded = 0
    print_progress_bar(already_downloaded, len(repo_list), prefix='Downloading repodata:',
                       suffix='Complete', bar_length=50)
    for base_channel in sorted(base_channels):
        for channel in sorted(base_channels[base_channel] + [base_channel]):
            family_label = self.channel_to_family[channel]
            keys = self._get_family_keys(family_label)
            sources = self._get_content_sources(channel, backend)
            list_packages = []
            for source in sources:
                list_packages.extend(self._count_packages_in_repo(source['source_url'], keys))
                # One progress tick per content source, matching repo_list
                already_downloaded += 1
                print_progress_bar(already_downloaded, len(repo_list),
                                   prefix='Downloading repodata:',
                                   suffix='Complete', bar_length=50)
            cdn_repodata_path = constants.CDN_REPODATA_ROOT + '/' + channel
            # create directory for repo data if it doesn't exist
            try:
                os.makedirs(cdn_repodata_path)
            except OSError as exc:
                if exc.errno == errno.EEXIST and os.path.isdir(cdn_repodata_path):
                    pass
                else:
                    raise
            # set() deduplicates packages listed by multiple sources
            with open(cdn_repodata_path + '/' + "packages_num", 'w') as f_out:
                f_out.write(str(len(set(list_packages))))
    elapsed_time = int(time.time())
    log(0, "Elapsed time: %d seconds" % (elapsed_time - start_time))
def run(self):
    """Spawn download worker threads, wait for them to finish and re-raise
    the first exception any worker recorded.

    Returns immediately when the queue is empty. Workers are daemonic so
    they cannot keep the interpreter alive on shutdown.
    """
    size = self.queue.qsize()
    if size <= 0:
        return
    log(1, "Downloading %s files." % str(size))
    started_threads = []
    for _ in range(self.threads):
        thread = DownloadThread(self)
        # 'daemon' attribute replaces the deprecated setDaemon() call
        thread.daemon = True
        thread.start()
        started_threads.append(thread)
    # wait to finish; is_alive() replaces isAlive(), which was removed
    # in Python 3.9
    while any(t.is_alive() for t in started_threads):
        time.sleep(1)
    # raise first detected exception from child threads if any
    if self.exception:
        raise self.exception  # pylint: disable=E0702
def _list_available_channels(self):
    """Map every available base channel to the list of its available children.

    Channel families are read from the DB; channels unknown to the family
    mapping or unavailable in the CDN are skipped.
    """
    # Select channel families in DB
    h = rhnSQL.prepare("""
        select label from rhnChannelFamilyPermissions cfp inner join
                          rhnChannelFamily cf on cfp.channel_family_id = cf.id
        where cf.org_id is null
    """)
    h.execute()
    rows = h.fetchall_dict() or []

    # collect all channels from the available families
    candidates = []
    for row in rows:
        label = row['label']
        try:
            family_data = self.families[label]
        except KeyError:
            log2stderr(0, "ERROR: Unknown channel family: %s" % label)
            continue
        candidates.extend(c for c in family_data['channels'] if c is not None)

    # keep only channels actually available in the CDN
    available = [ch for ch in candidates
                 if self.cdn_repository_manager.check_channel_availability(ch)]

    # group children under their base channel (parent_channel is None)
    base_channels = {}
    for ch in available:
        try:
            if self.channel_metadata[ch]['parent_channel'] is None:
                base_channels[ch] = [
                    child for child in available
                    if self.channel_metadata[child]['parent_channel'] == ch]
        except KeyError:
            log(1, "Channel %s not found in channel metadata" % ch)
            continue
    return base_channels
def export_manifest(self, uuid=None, ownerid=None, satellite_version=None):
    """Performs export request to Candlepin API and saves exported manifest
    to target file. Can take required parameters from current manifest or
    override them with parameters of this method.

    Returns the path of the downloaded manifest file, or None on failure.
    Raises ValueError when a required parameter is neither given nor
    derivable from the current manifest.
    """
    if uuid is None:
        if self.current_manifest:
            uuid = self.current_manifest.get_uuid()
        else:
            raise ValueError("Uuid is not known.")
    if ownerid is None:
        if self.current_manifest:
            ownerid = self.current_manifest.get_ownerid()
        else:
            raise ValueError("Ownerid is not known.")
    if satellite_version is None:
        if self.current_manifest:
            satellite_version = self.current_manifest.get_satellite_version()
        else:
            raise ValueError("Satellite version is not known.")
    url = "%s%s/export" % (self.base_url, uuid)
    params = {"ext": ["ownerid:%s" % ownerid, "version:%s" % satellite_version]}
    log(1, "URL: '%s'" % url)
    log(1, "Parameters: '%s'" % str(params))
    response = self._call_api(url, params=params, method="get")
    if response is not None:
        # pylint: disable=E1101
        if response.status_code == requests.codes.ok:
            fd, downloaded_manifest = tempfile.mkstemp(prefix="/tmp/manifest-", suffix=".zip")
            # Context manager guarantees the descriptor is closed even if
            # streaming the response body raises mid-download (the original
            # code leaked fd in that case).
            with os.fdopen(fd, 'wb') as fo:
                for chunk in response:
                    fo.write(chunk)
                fo.flush()
            return downloaded_manifest
        else:
            log2(0, 0, "Status code: %s" % response.status_code, stream=sys.stderr)
            log2(0, 0, "Message: '%s'" % response.text, stream=sys.stderr)
    return None
def fix(self):
    """Resolve DB lookups and cross-references for a batch of errata.

    The statement order is load-bearing: channels and file types must be
    resolved before the per-erratum fixups, and package lookups must run
    only after names/EVRs/checksums/arches have been resolved.
    """
    self.backend.lookupChannels(self.channels)
    self.backend.lookupErrataFileTypes(self.file_types)
    for erratum in self.batch:
        for ef in erratum['files']:
            eft = ef['file_type']
            if eft not in self.file_types:
                raise Exception("Unknown file type %s" % eft)
            ef['type'] = self.file_types[eft]
    self._fixCVE()
    self.backend.lookupPackageNames(self.names)
    self.backend.lookupEVRs(self.evrs)
    self.backend.lookupChecksums(self.checksums)
    self.backend.lookupPackageArches(self.package_arches)
    for erratum in self.batch:
        if erratum.ignored:
            # Skip it
            continue
        self._fix_erratum_channels(erratum)
        self._fix_erratum_packages_lookup(erratum)
        self._fix_erratum_file_packages(erratum)
        # fix severity stuff
        self._fix_erratum_severity(erratum)
        # fix oval info to populate the relevant dbtables
        self._fix_erratum_oval_info(erratum)
    self.backend.lookupPackages(list(self.packages.values()), self.checksums,
                                self.ignoreMissing)
    for erratum in self.batch:
        if erratum.ignored:
            # Skip it
            continue
        self._fix_erratum_packages(erratum)
        self._fix_erratum_file_channels(erratum)
    # remove erratas that have been ignored
    ignored_erratas = list(filter(lambda x: x.ignored, self.batch))
    if len(ignored_erratas) > 0:
        log(0, "Ignoring %d old, superseded erratas" % len(ignored_erratas))
    self.batch = list(filter(lambda x: not x.ignored, self.batch))
def sync(self, channels=None):
    """Synchronize the given channels (default: already synced ones).

    Raises ChannelNotFoundError listing every channel that is unknown to
    the metadata mappings or unavailable in the CDN. Returns the list of
    error messages collected during the sync.
    """
    # If no channels specified, sync already synced channels
    if not channels:
        channels = self.synced_channels

    # Check channel availability before doing anything
    not_available = []
    for ch in channels:
        known = ch in self.channel_metadata and ch in self.channel_to_family
        if not known or not self.cdn_repository_manager.check_channel_availability(
                ch, self.no_kickstarts):
            not_available.append(ch)
    if not_available:
        raise ChannelNotFoundError(" " + "\n ".join(not_available))

    # Need to update channel metadata
    self._update_channels_metadata(channels)

    # Finally, sync channel content
    error_messages = []
    total_time = timedelta()
    for ch in channels:
        cur_time, failed_packages = self._sync_channel(ch)
        if failed_packages < 0:
            error_messages.append(
                "Problems occurred during syncing channel %s. Please check "
                "/var/log/rhn/cdnsync/%s.log for the details\n" % (ch, ch))
        if failed_packages > 0:
            error_messages.append(
                "%d packages in channel %s failed to sync. Please check "
                "/var/log/rhn/cdnsync/%s.log for the details\n"
                % (failed_packages, ch, ch))
        total_time += cur_time

    # Switch back to cdnsync log
    rhnLog.initLOG(self.log_path, self.log_level)
    log2disk(0, "Sync of channel completed.")
    log(0, "Total time: %s" % str(total_time).split('.')[0])
    return error_messages
def _set_ssl_trusted_certs(self, serverObj):
    """Register the configured CA chain on *serverObj* for SSL validation.

    Returns the CA chain path when one was registered, otherwise None.
    Raises Exception when the configured CA file is not readable.
    """
    if not self.sslYN:
        return None

    # Check certificate
    ca_chain = CFG.CA_CHAIN
    if not ca_chain:
        warning = '--- Warning: SSL connection made but no CA certificate used'
        log(1, warning, stream=sys.stderr)
        return None

    # require SSL CA file to be able to authenticate the SSL connections
    if not os.access(ca_chain, os.R_OK):
        error = "ERROR: can not find SUSE Manager CA file: %s" % ca_chain
        log(-1, error, stream=sys.stderr)
        raise Exception(error)

    # force the validation of the SSL cert
    serverObj.add_trusted_cert(ca_chain)
    return ca_chain
def __processChannel(self, channel):
    """Collect arches, channel families and checksum types referenced by
    *channel* into the instance lookup dicts, normalize the
    'receiving_updates' flag and warn when a sync would move the channel
    between orgs.

    dict.has_key() was removed in Python 3 — use the 'in' operator.
    """
    # Processes a package
    arch = channel['channel_arch']
    if arch not in self.arches:
        self.arches[arch] = None
    for family in channel['families']:
        self.families[family['label']] = None
    # Dists
    if 'dists' in channel and channel['dists'] is not None:
        for dist in channel['dists']:
            self.arches[dist['channel_arch']] = None
    # Product Names
    if 'release' in channel and channel['release'] is not None:
        for release in channel['release']:
            self.arches[release['channel_arch']] = None
    if 'receiving_updates' not in channel or channel['receiving_updates'] is None:
        channel['receiving_updates'] = 'N'
    # Yum repo checksum type
    if (channel['checksum_type'] and
            channel['checksum_type'] not in self.checksum_types):
        self.checksum_types[channel['checksum_type']] = None
    # bug #528227
    # Print a warning in case the sync would move the channel between orgs
    if 'org_id' in channel and channel['org_id']:
        org_id = self.backend.lookupChannelOrg(channel['label'])
        if org_id and int(channel['org_id']) != org_id['org_id']:
            log(1, "WARNING: Channel %s is already present in orgid %s."
                % (channel['label'], org_id['org_id']))
            log(1, " Running synchronization will move the channel to orgid %s."
                % channel['org_id'])
            log(1, '')
def __processChannel(self, channel):
    """Collect arches, channel families and checksum types referenced by
    *channel* into the instance lookup dicts, normalize the
    'receiving_updates' flag and warn when a sync would move the channel
    between orgs (bug #528227).
    """
    # Processes a package
    self.arches.setdefault(channel['channel_arch'], None)
    for family in channel['families']:
        self.families[family['label']] = None

    # Dists
    for dist in channel.get('dists') or []:
        self.arches[dist['channel_arch']] = None

    # Product Names
    for release in channel.get('release') or []:
        self.arches[release['channel_arch']] = None

    if channel.get('receiving_updates') is None:
        channel['receiving_updates'] = 'N'

    # Yum repo checksum type
    checksum = channel['checksum_type']
    if checksum and checksum not in self.checksum_types:
        self.checksum_types[checksum] = None

    # bug #528227
    # Print a warning in case the sync would move the channel between orgs
    if channel.get('org_id'):
        org = self.backend.lookupChannelOrg(channel['label'])
        if org and int(channel['org_id']) != org['org_id']:
            log(1, "WARNING: Channel %s is already present in orgid %s."
                % (channel['label'], org['org_id']))
            log(1, " Running synchronization will move the channel to orgid %s."
                % channel['org_id'])
            log(1, '')
def __init__(self, manifest_path):
    """Load the manifest at *manifest_path*, its embedded Satellite
    certificate, and the channel family mapping file; also initializes
    the DB connection.
    """
    rhnSQL.initDB()
    self.manifest = Manifest(manifest_path)
    self.sat5_cert = SatelliteCert()
    self.sat5_cert.load(self.manifest.get_satellite_certificate())
    verify_mappings()
    f = None
    # Channel families metadata
    try:
        try:
            f = open(constants.CHANNEL_FAMILY_MAPPING_PATH, 'r')
            self.families = json.load(f)
            f.close()
        except IOError:
            # py2/py3-compatible way to grab the current exception instance;
            # a missing mapping file is tolerated and results in no families
            e = sys.exc_info()[1]
            log(1, "Ignoring channel mappings: %s" % e)
            self.families = {}
    finally:
        # Also closes the file when json.load() raised after a successful open
        if f is not None:
            f.close()
    self.families_to_import = []
def activate(self):
    """Run the manifest activation steps after verifying its signature.

    Raises ManifestValidationError when the signature check fails.
    """
    if not self.manifest.check_signature():
        raise ManifestValidationError("Manifest validation failed! Make sure the specified manifest is correct.")
    steps = (
        ("Populating channel families...", self.import_channel_families),
        ("Updating certificates...", self._update_certificates),
        ("Updating manifest repositories...", self._update_repositories),
    )
    for message, step in steps:
        log(0, message)
        step()