def _openSocketStream(self, method, params):
    """Open a server stream by calling XML-RPC *method* with *params*, retrying.

    Calls the method named *method* on the server object returned by
    self.getServer(), retrying up to cfg['networkRetries'] times with a short
    sleep between attempts.  Returns the stream on success.

    NOTE(review): unlike the fuller variant of this function elsewhere in this
    file, this one does not raise RhnSyncException when every retry fails — it
    returns None; callers must check.  Confirm which variant is intended.
    """
    stream = None
    retryYN = 0
    wait = 0.33  # seconds to sleep between retries
    lastErrorMsg = ''
    cfg = config.initUp2dateConfig()
    for i in range(cfg['networkRetries']):
        server = self.getServer(retryYN)
        if server is None:
            log2(-1, 2, 'ERROR: server unable to initialize, attempt %s' % i,
                 stream=sys.stderr)
            retryYN = 1
            time.sleep(wait)
            continue
        func = getattr(server, method)
        try:
            stream = func(*params)
            return stream
        except rpclib.xmlrpclib.ProtocolError:
            # Python 2/3-compatible exception capture, consistent with the
            # ported variants of this function in this file (was the
            # Python-2-only "except E, e:" form).
            e = sys.exc_info()[1]
            # hide the systemid in the logged parameter tuple
            p = tuple(['<the systemid>'] + list(params[1:]))
            lastErrorMsg = 'ERROR: server.%s%s: %s' % (method, p, e)
            log2(-1, 2, lastErrorMsg, stream=sys.stderr)
            retryYN = 1
            time.sleep(wait)
            # do not reraise this exception!
        except (KeyboardInterrupt, SystemExit):
            raise
    # all retries exhausted: stream is still None here
    return stream
def _xmlrpc(function, params):
    """Invoke XML-RPC *function* with *params* on the shared server object.

    Returns whatever the remote call returns.  A TypeError (bad call
    signature) or ProtocolError is logged and re-raised; any other exception
    propagates untouched.
    """
    try:
        retval = getattr(BaseWireSource.serverObj, function)(*params)
    except TypeError:
        # The exception is re-raised unchanged, so only the failing call is
        # logged here (the original captured sys.exc_info()[1] into an unused
        # local; removed).
        log(-1, 'ERROR: during "getattr(BaseWireSource.serverObj, %s)(*(%s))"' % (function, params))
        raise
    except rpclib.xmlrpclib.ProtocolError:
        e = sys.exc_info()[1]
        log2(-1, 2, 'ERROR: ProtocolError: %s' % e, stream=sys.stderr)
        raise
    return retval
def _login(self):
    """Authenticate against the server using self.systemid.

    Switches the server URL to the regular XML-RPC handler first, then calls
    authentication.login.  Returns the login token obtained from the server.

    Raises Exception when systemid is unset; a ProtocolError from the remote
    call is logged and re-raised.
    """
    if not self.systemid:
        raise Exception("systemid not set!")
    # Set the URL to the one for regular XML-RPC calls
    self.setServer(CFG.RHN_XMLRPC_HANDLER)
    try:
        login_token = self.getServer().authentication.login(self.systemid)
    except rpclib.xmlrpclib.ProtocolError:
        # Python 2/3-compatible capture, consistent with the ported exception
        # handling used elsewhere in this file (was "except E, e:").
        e = sys.exc_info()[1]
        log2(-1, 2, 'ERROR: ProtocolError: %s' % e, stream=sys.stderr)
        raise
    # The original assigned the token and discarded it; returning it is
    # backward-compatible (callers that ignore the return value are unaffected).
    return login_token
def _openSocketStream(self, method, params):
    """Open a server stream by calling XML-RPC *method* with *params*.

    Retries up to cfg['networkRetries'] times, sleeping between attempts.
    On success returns either the raw stream or, when CFG.SYNC_TO_TEMP is
    set, a NamedTemporaryFile holding the whole stream (rewound to 0).
    Raises RhnSyncException (with the original traceback) when every
    attempt failed.

    NOTE(review): the original docstring mentioned gzipstream.GzipStream,
    which this body never touches — presumably stale; confirm.
    """
    stream = None
    retryYN = 0          # becomes 1 after the first failure; passed to getServer()
    wait = 0.33          # seconds to sleep between retries
    lastErrorMsg = ''
    cfg = config.initUp2dateConfig()
    for i in range(cfg['networkRetries']):
        server = self.getServer(retryYN)
        if server is None:
            log2(-1, 2, 'ERROR: server unable to initialize, attempt %s' % i,
                 stream=sys.stderr)
            retryYN = 1
            time.sleep(wait)
            continue
        func = getattr(server, method)
        try:
            stream = func(*params)
            if CFG.SYNC_TO_TEMP:
                # spool the whole stream to a temp file and hand that back
                import tempfile
                cached = tempfile.NamedTemporaryFile()
                stream.read_to_file(cached)
                cached.seek(0)
                return cached
            else:
                return stream
        except rpclib.xmlrpclib.ProtocolError:
            e = sys.exc_info()[1]
            # hide the systemid in the logged parameter tuple
            p = tuple(['<the systemid>'] + list(params[1:]))
            lastErrorMsg = 'ERROR: server.%s%s: %s' % (method, p, e)
            log2(-1, 2, lastErrorMsg, stream=sys.stderr)
            retryYN = 1
            time.sleep(wait)
            # do not reraise this exception!
        except (KeyboardInterrupt, SystemExit):
            raise
        except rpclib.xmlrpclib.Fault:
            # server-side fault: stop retrying, keep only the fault text
            e = sys.exc_info()[1]
            lastErrorMsg = e.faultString
            break
        except Exception:  # pylint: disable=E0012, W0703
            e = sys.exc_info()[1]
            p = tuple(['<the systemid>'] + list(params[1:]))
            lastErrorMsg = 'ERROR: server.%s%s: %s' % (method, p, e)
            log2(-1, 2, lastErrorMsg, stream=sys.stderr)
            break
            # do not reraise this exception!
    if lastErrorMsg:
        # re-raise as RhnSyncException, preserving the last traceback
        raise_with_tb(RhnSyncException(lastErrorMsg), sys.exc_info()[2])
    # Returns a stream
    # Should never be reached
    return stream
class XMLRPCWireSource(BaseWireSource):
    "Base class for all the XMLRPC calls"

    @staticmethod
    def _xmlrpc(function, params):
        """Invoke XML-RPC *function* with *params* on the shared server object.

        Returns the remote call's return value.  TypeError (bad call
        signature) and ProtocolError are logged and re-raised.

        Fixes vs. the original: the success value was computed and then
        discarded (no `return retval`) — now returned, consistent with the
        module-level _xmlrpc variant in this file; exception capture was the
        Python-2-only "except E, e:" form — now the sys.exc_info() pattern.
        """
        try:
            retval = getattr(BaseWireSource.serverObj, function)(*params)
        except TypeError:
            log(-1, 'ERROR: during "getattr(BaseWireSource.serverObj, %s)(*(%s))"' % (function, params))
            raise
        except rpclib.xmlrpclib.ProtocolError:
            e = sys.exc_info()[1]
            log2(-1, 2, 'ERROR: ProtocolError: %s' % e, stream=sys.stderr)
            raise
        return retval
def import_packages(self, plug, source_id, url):
    """Download and import this repo's packages into the channel.

    plug      -- repository plugin (provides list_packages / repo access)
    source_id -- DB id of the content source, or falsy for ad-hoc URLs
    url       -- repository URL (file:/ URLs keep downloaded files on disk)

    Returns the number of packages that failed to import.
    """
    failed_packages = 0
    # Filters: use per-source filters from the DB unless command-line
    # filters were supplied.
    if (not self.filters) and source_id:
        h = rhnSQL.prepare("""
            select flag, filter
              from rhnContentSourceFilter
             where source_id = :source_id
             order by sort_order
            """)
        h.execute(source_id=source_id)
        filter_data = h.fetchall_dict() or []
        # each row: (flag, list of patterns split on commas/whitespace)
        filters = [(row['flag'], re.split(r'[,\s]+', row['filter']))
                   for row in filter_data]
    else:
        filters = self.filters
    packages = plug.list_packages(filters, self.latest)
    self.all_packages.extend(packages)
    to_process = []
    num_passed = len(packages)
    log(0, "Packages in repo: %5d" % plug.num_packages)
    if plug.num_excluded:
        log(0, "Packages passed filter rules: %5d" % num_passed)
    channel_id = int(self.channel['id'])
    # Decide, per package, whether it must be downloaded and/or linked.
    for pack in packages:
        db_pack = rhnPackage.get_info_for_package(
            [pack.name, pack.version, pack.release, pack.epoch, pack.arch],
            channel_id, self.org_id)
        to_download = True
        to_link = True
        # Package exists in DB
        if db_pack:
            # Path in filesystem is defined
            if db_pack['path']:
                pack.path = os.path.join(CFG.MOUNT_POINT, db_pack['path'])
            else:
                pack.path = ""
            if self.metadata_only or self.match_package_checksum(
                    db_pack['path'], pack.path, pack.checksum_type, pack.checksum):
                # package is already on disk or not required
                to_download = False
                if db_pack['channel_id'] == channel_id:
                    # package is already in the channel
                    to_link = False
                # just pass data from DB, they will be used in strict channel
                # linking if there is no new RPM downloaded
                pack.checksum = db_pack['checksum']
                pack.checksum_type = db_pack['checksum_type']
                pack.epoch = db_pack['epoch']
            elif db_pack['channel_id'] == channel_id:
                # different package with SAME NVREA
                self.disassociate_package(db_pack)
        if to_download or to_link:
            to_process.append((pack, to_download, to_link))
    num_to_process = len(to_process)
    if num_to_process == 0:
        log(0, "No new packages to sync.")
        # If we are just appending, we can exit
        if not self.strict:
            return failed_packages
    else:
        log(0, "Packages already synced: %5d" % (num_passed - num_to_process))
        log(0, "Packages to sync: %5d" % num_to_process)
    # file:/ repos are local: do not delete the source files after import
    is_non_local_repo = (url.find("file:/") < 0)
    downloader = ThreadedDownloader()
    to_download_count = 0
    # Queue every package marked for download.
    for what in to_process:
        pack, to_download, to_link = what
        if to_download:
            target_file = os.path.join(
                plug.repo.pkgdir,
                os.path.basename(pack.unique_id.relativepath))
            pack.path = target_file
            params = {}
            if self.metadata_only:
                # header only: fetch bytes up to the end of the RPM header,
                # so no checksum of the full file can be verified
                bytes_range = (0, pack.unique_id.hdrend)
                checksum_type = None
                checksum = None
            else:
                bytes_range = None
                checksum_type = pack.checksum_type
                checksum = pack.checksum
            plug.set_download_parameters(params,
                                         pack.unique_id.relativepath,
                                         target_file,
                                         checksum_type=checksum_type,
                                         checksum_value=checksum,
                                         bytes_range=bytes_range)
            downloader.add(params)
            to_download_count += 1
    if num_to_process != 0:
        log(0, "New packages to download: %5d" % to_download_count)
    logger = TextLogger(None, to_download_count)
    downloader.set_log_obj(logger)
    downloader.run()
    log2disk(0, "Importing packages started.")
    progress_bar = ProgressBarLogger("Importing packages: ", to_download_count)
    # Import each downloaded package; failures are counted and (unless
    # self.fail) tolerated, removing the package from this run.
    for (index, what) in enumerate(to_process):
        pack, to_download, to_link = what
        if not to_download:
            continue
        localpath = pack.path
        # pylint: disable=W0703
        try:
            if os.path.exists(localpath):
                pack.load_checksum_from_header()
                rel_package_path = pack.upload_package(
                    self.org_id, metadata_only=self.metadata_only)
                # Save uploaded package to cache with repository checksum type
                if rel_package_path:
                    self.checksum_cache[rel_package_path] = {
                        pack.checksum_type: pack.checksum
                    }
                # we do not want to keep a whole 'a_pkg' object for every
                # package in memory, because we need only checksum.
                # see BZ 1397417
                pack.checksum = pack.a_pkg.checksum
                pack.checksum_type = pack.a_pkg.checksum_type
                pack.epoch = pack.a_pkg.header['epoch']
                pack.a_pkg = None
            else:
                # download apparently failed: routed to the except below
                raise Exception
            progress_bar.log(True, None)
        except KeyboardInterrupt:
            raise
        except Exception:
            failed_packages += 1
            e = str(sys.exc_info()[1])
            if e:
                log2(0, 1, e, stream=sys.stderr)
            if self.fail:
                raise
            # drop the failed package from linking and from the global list
            to_process[index] = (pack, False, False)
            self.all_packages.remove(pack)
            progress_bar.log(False, None)
        finally:
            # remote repos: always clean up the downloaded file
            if is_non_local_repo and localpath and os.path.exists(localpath):
                os.remove(localpath)
    log2disk(0, "Importing packages finished.")
    if self.strict:
        # Need to make sure all packages from all repositories are associated with channel
        import_batch = [
            self.associate_package(pack) for pack in self.all_packages
        ]
    else:
        # Only packages from current repository are appended to channel
        import_batch = [
            self.associate_package(pack)
            for (pack, to_download, to_link) in to_process if to_link
        ]
    # Do not re-link if nothing was marked to link
    if any([to_link for (pack, to_download, to_link) in to_process]):
        log(0, "Linking packages to channel.")
        backend = SQLBackend()
        caller = "server.app.yumreposync"
        importer = ChannelPackageSubscription(import_batch,
                                              backend,
                                              caller=caller,
                                              repogen=False,
                                              strict=self.strict)
        importer.run()
        backend.commit()
        self.regen = True
    return failed_packages
def sync(self, update_repodata=True):
    """Trigger a reposync over every configured repo URL.

    update_repodata -- when true, clear each plugin's cache before syncing.

    Returns (elapsed_time, sync_error) where sync_error is 0 on full
    success, -1 on a global failure (no URLs / repo-level exception), or
    the count of failed packages when only individual packages failed.
    """
    failed_packages = 0
    sync_error = 0
    if not self.urls:
        # nothing to sync at all counts as a global error
        sync_error = -1
    start_time = datetime.now()
    for (repo_id, url, repo_label) in self.urls:
        log(0, "Repo URL: %s" % url)
        plugin = None
        # If the repository uses a uln:// URL, switch to the ULN plugin, overriding the command-line
        if url.startswith("uln://"):
            self.repo_plugin = self.load_plugin("uln")
        # pylint: disable=W0703
        try:
            if repo_label:
                repo_name = repo_label
            else:
                # use modified relative_url as name of repo plugin, because
                # it used as name of cache directory as well
                relative_url = '_'.join(url.split('://')[1].split('/')[1:])
                repo_name = relative_url.replace("?", "_").replace(
                    "&", "_").replace("=", "_")
            plugin = self.repo_plugin(url, repo_name,
                                      org=str(self.org_id or ''),
                                      channel_label=self.channel_label)
            if update_repodata:
                plugin.clear_cache()
            # Repos from the DB may carry SSL client/CA material.
            if repo_id is not None:
                keys = rhnSQL.fetchall_dict("""
                    select k1.key as ca_cert, k2.key as client_cert, k3.key as client_key
                    from rhncontentsource cs
                    inner join rhncontentsourcessl csssl on cs.id = csssl.content_source_id
                    inner join rhncryptokey k1 on csssl.ssl_ca_cert_id = k1.id
                    left outer join rhncryptokey k2 on csssl.ssl_client_cert_id = k2.id
                    left outer join rhncryptokey k3 on csssl.ssl_client_key_id = k3.id
                    where cs.id = :repo_id
                    """, repo_id=int(repo_id))
                if keys:
                    ssl_set = get_single_ssl_set(
                        keys, check_dates=self.check_ssl_dates)
                    if ssl_set:
                        plugin.set_ssl_options(ssl_set['ca_cert'],
                                               ssl_set['client_cert'],
                                               ssl_set['client_key'])
                    else:
                        raise ValueError(
                            "No valid SSL certificates were found for repository.")
            if not self.no_packages:
                ret = self.import_packages(plugin, repo_id, url)
                failed_packages += ret
                self.import_groups(plugin, url)
            if not self.no_errata:
                self.import_updates(plugin, url)
            # only for repos obtained from the DB
            if self.sync_kickstart and repo_label:
                try:
                    self.import_kickstart(plugin, repo_label)
                except:
                    # roll the DB back before propagating any failure
                    # (bare except is deliberate here: re-raised immediately)
                    rhnSQL.rollback()
                    raise
        except Exception:
            # one repo failing must not stop the others; record and go on
            e = sys.exc_info()[1]
            log2(0, 0, "ERROR: %s" % e, stream=sys.stderr)
            log2disk(0, "ERROR: %s" % e)
            # pylint: disable=W0104
            sync_error = -1
        if plugin is not None:
            plugin.clear_ssl_cache()
    # Update cache with package checksums
    rhnCache.set(checksum_cache_filename, self.checksum_cache)
    if self.regen:
        # queue repodata/errata-cache regeneration for taskomatic
        taskomatic.add_to_repodata_queue_for_channel_package_subscription(
            [self.channel_label], [], "server.app.yumreposync")
        taskomatic.add_to_erratacache_queue(self.channel_label)
    self.update_date()
    rhnSQL.commit()
    # update permissions
    fileutils.createPath(os.path.join(CFG.MOUNT_POINT, 'rhn'))
    # if the directory exists update ownership only
    for root, dirs, files in os.walk(os.path.join(CFG.MOUNT_POINT, 'rhn')):
        for d in dirs:
            fileutils.setPermsPath(os.path.join(root, d), group='apache')
        for f in files:
            fileutils.setPermsPath(os.path.join(root, f), group='apache')
    elapsed_time = datetime.now() - start_time
    log(0, "Sync of channel completed in %s." %
        str(elapsed_time).split('.')[0])
    # if there is no global problems, but some packages weren't synced
    if sync_error == 0 and failed_packages > 0:
        sync_error = failed_packages
    return elapsed_time, sync_error
def __init__(self, channel_label, repo_type, url=None, fail=False,
             filters=None, no_errata=False, sync_kickstart=False,
             latest=False, metadata_only=False, strict=0,
             excluded_urls=None, no_packages=False, log_dir="reposync",
             log_level=None, force_kickstart=False, force_all_errata=False,
             check_ssl_dates=False, force_null_org_content=False):
    """Prepare a reposync run for *channel_label*.

    Side effects: initializes server.satellite config and the DB
    connection, opens a per-channel log file under *log_dir* (and chgrp's
    it for the web server), loads the channel row, and collects the repo
    URLs to sync — either the content sources attached to the channel in
    the DB (minus *excluded_urls*) or, when *url* is given, that iterable
    of URLs directly.
    """
    self.regen = False
    self.fail = fail
    self.filters = filters or []
    self.no_packages = no_packages
    self.no_errata = no_errata
    self.sync_kickstart = sync_kickstart
    self.force_all_errata = force_all_errata
    self.force_kickstart = force_kickstart
    self.latest = latest
    self.metadata_only = metadata_only
    self.ks_tree_type = 'externally-managed'
    self.ks_install_type = None
    initCFG('server.satellite')
    rhnSQL.initDB()
    # setup logging
    log_filename = channel_label + '.log'
    log_path = default_log_location + log_dir + '/' + log_filename
    if log_level is None:
        log_level = 0
    CFG.set('DEBUG', log_level)
    rhnLog.initLOG(log_path, log_level)
    # os.fchown isn't in 2.4 :/
    if isSUSE():
        os.system("chgrp www " + log_path)
    else:
        os.system("chgrp apache " + log_path)
    log2disk(0, "Command: %s" % str(sys.argv))
    log2disk(0, "Sync of channel started.")
    self.channel_label = channel_label
    self.channel = self.load_channel()
    if not self.channel:
        log(0, "Channel %s does not exist." % channel_label)
    # NOTE(review): execution falls through after the missing-channel log —
    # the subscript below would then raise TypeError on None; an explicit
    # exit/raise may have been intended here.  Confirm against callers.
    if not self.channel['org_id'] or force_null_org_content:
        self.org_id = None
    else:
        self.org_id = int(self.channel['org_id'])
    if not url:
        # TODO:need to look at user security across orgs
        h = rhnSQL.prepare("""select s.id, s.source_url, s.label
                              from rhnContentSource s,
                                   rhnChannelContentSource cs
                             where s.id = cs.source_id
                               and cs.channel_id = :channel_id""")
        h.execute(channel_id=int(self.channel['id']))
        source_data = h.fetchall_dict()
        self.urls = []
        if excluded_urls is None:
            excluded_urls = []
        if source_data:
            for row in source_data:
                if row['source_url'] not in excluded_urls:
                    self.urls.append(
                        (row['id'], row['source_url'], row['label']))
    else:
        # explicit URLs: no DB id and no label available
        self.urls = [(None, u, None) for u in url]
    if not self.urls:
        log2(0, 0, "Channel %s has no URL associated" % channel_label,
             stream=sys.stderr)
    self.repo_plugin = self.load_plugin(repo_type)
    self.strict = strict
    self.all_packages = []
    self.check_ssl_dates = check_ssl_dates
    # Init cache for computed checksums to not compute it on each reposync run again
    self.checksum_cache = rhnCache.get(checksum_cache_filename)
    if self.checksum_cache is None:
        self.checksum_cache = {}
# NOTE(review): this chunk begins mid-function.  The except-clauses below are
# the tail of a Python-2-style _openSocketStream variant whose `def`, retry
# loop and `try:` lie outside this view; compare the complete ported variant
# elsewhere in the file.
        except rpclib.xmlrpclib.ProtocolError, e:
            # hide the systemid in the logged parameter tuple
            p = tuple(['<the systemid>'] + list(params[1:]))
            lastErrorMsg = 'ERROR: server.%s%s: %s' % (method, p, e)
            log2(-1, 2, lastErrorMsg, stream=sys.stderr)
            retryYN = 1
            time.sleep(wait)
            # do not reraise this exception!
        except (KeyboardInterrupt, SystemExit):
            raise
        except rpclib.xmlrpclib.Fault, e:
            # server-side fault: stop retrying, keep only the fault text
            lastErrorMsg = e.faultString
            break
        except Exception, e:
            p = tuple(['<the systemid>'] + list(params[1:]))
            lastErrorMsg = 'ERROR: server.%s%s: %s' % (method, p, e)
            log2(-1, 2, lastErrorMsg, stream=sys.stderr)
            break
            # do not reraise this exception!
    if lastErrorMsg:
        # Python-2-only three-expression raise: rethrow as RhnSyncException
        # while preserving the last traceback
        raise RhnSyncException, lastErrorMsg, sys.exc_info()[2]
    # Returns a stream
    # Should never be reached
    return stream

    def setServerHandler(self, isIss=0):
        # Select the metadata handler URL: ISS (inter-satellite sync)
        # when isIss is truthy, otherwise the regular metadata handler.
        if isIss:
            self.server_handler = CFG.RHN_ISS_METADATA_HANDLER
        else:
            self.server_handler = CFG.RHN_METADATA_HANDLER


# NOTE(review): class body continues beyond this chunk — header only here.
class MetadataWireSource(BaseWireSource):
# NOTE(review): this chunk begins mid-function.  The except-clauses below are
# the tail of a Python-2-style _openSocketStream variant whose `def`, retry
# loop and `try:` lie outside this view; compare the complete ported variant
# elsewhere in the file.
        except rpclib.xmlrpclib.ProtocolError, e:
            # hide the systemid in the logged parameter tuple
            p = tuple(['<the systemid>'] + list(params[1:]))
            lastErrorMsg = 'ERROR: server.%s%s: %s' % (method, p, e)
            log2(-1, 2, lastErrorMsg, stream=sys.stderr)
            retryYN = 1
            time.sleep(wait)
            # do not reraise this exception!
        except (KeyboardInterrupt, SystemExit):
            raise
        except rpclib.xmlrpclib.Fault, e:
            # server-side fault: stop retrying, keep only the fault text
            lastErrorMsg = e.faultString
            break
        except Exception, e:
            p = tuple(['<the systemid>'] + list(params[1:]))
            lastErrorMsg = 'ERROR: server.%s%s: %s' % (method, p, e)
            log2(-1, 2, lastErrorMsg, stream=sys.stderr)
            break
            # do not reraise this exception!
    if lastErrorMsg:
        # Python-2-only three-expression raise: rethrow as RhnSyncException
        # while preserving the last traceback
        raise RhnSyncException, lastErrorMsg, sys.exc_info()[2]
    # Returns a stream
    # Should never be reached
    return stream

    def setServerHandler(self, isIss=0):
        # Select the metadata handler URL: ISS (inter-satellite sync)
        # when isIss is truthy, otherwise the regular metadata handler.
        if isIss:
            self.server_handler = CFG.RHN_ISS_METADATA_HANDLER
        else:
            self.server_handler = CFG.RHN_METADATA_HANDLER