def main(self):
    """Entry point: parse command-line options, connect to the database and
    add the missing package header values for all selected packages.

    Runs as a dry-run (rollback) unless --commit is given.
    Returns 1 on usage or lookup errors; None otherwise.
    """
    parser = OptionParser(option_list=options_table)
    (self.options, args) = parser.parse_args()
    if not self.options.db:
        # --db carries the DB connect string; nothing to do without it
        print "--db not specified"
        return 1
    # Initialize the 'server' config component before opening the DB
    initCFG('server')
    rhnSQL.initDB(self.options.db)
    self._channels_hash = self._get_channels()
    package_ids = self._get_packages()
    if package_ids is None:
        # _get_packages signals failure with None
        return 1
    if self.options.backup_file:
        # Optionally save the current package rows before modifying them
        self._backup_packages(package_ids, self.options.backup_file)
    try:
        self._add_package_header_values(package_ids)
    except:
        # Leave the database untouched on any failure, then re-raise
        rhnSQL.rollback()
        raise
    if self.options.commit:
        print "Commiting work"
        rhnSQL.commit()
    else:
        # Default is a dry run: discard all changes
        print "Rolling back"
        rhnSQL.rollback()
def main(): initCFG("server.app") rhnSQL.initDB('rhnuser/rhnuser@webdev') channel = { 'label' : 'mibanescu-test2' } orgid = 1198839 package_template = { 'name' : 'useless', 'version' : '1.0.0', 'arch' : 'noarch', 'org_id' : orgid, } batch = [] p = importLib.IncompletePackage() p.populate(package_template) p['release'] = '2' p['channels'] = [channel] batch.append(p) p = importLib.IncompletePackage() p.populate(package_template) p['release'] = '3' p['channels'] = [channel] batch.append(p) backend = backendOracle.OracleBackend() cps = packageImport.ChannelPackageSubscription(batch, backend, caller="misa.testing", strict=1) cps.run() print cps.affected_channel_packages
def headerParserHandler(self, req): log_setreq(req) # init configuration options with proper component options = req.get_options() # if we are initializing out of a <Location> handler don't # freak out if not options.has_key("RHNComponentType"): # clearly nothing to do return apache.OK initCFG(options["RHNComponentType"]) initLOG(CFG.LOG_FILE, CFG.DEBUG) if req.method == 'GET': # This is the ping method return apache.OK self.servers = rhnImport.load("upload_server/handlers", interface_signature='upload_class') if not options.has_key('SERVER'): log_error("SERVER not set in the apache config files!") return apache.HTTP_INTERNAL_SERVER_ERROR server_name = options['SERVER'] if not self.servers.has_key(server_name): log_error("Unable to load server %s from available servers %s" % (server_name, self.servers)) return apache.HTTP_INTERNAL_SERVER_ERROR server_class = self.servers[server_name] self.server = server_class(req) return self._wrapper(req, "headerParserHandler")
def __init__(self):
    """Set up the dispatcher runner: base class init, component config,
    and empty connection/polling/cache state.
    """
    jabber_lib.Runner.__init__(self)
    # Load the osa-dispatcher component configuration
    initCFG("osa-dispatcher")
    # Nothing connected or scheduled yet
    for attr_name in ('_tcp_server', '_poll_interval',
                      '_next_poll_interval'):
        setattr(self, attr_name, None)
    # Cache states
    self._state_ids = {}
def main(): if len(sys.argv) < 2: print "Usage: %s <db-connect-string>" % sys.argv[0] return 1 db_connect_string = sys.argv[1] initCFG("server.redhat-xmlrpc") rhnSQL.initDB(db_connect_string) test_1()
def headerParserHandler(self, req):
    """mod_python header-parser phase: init config/logging, delegate to the
    inherited session handler, open (or close, in outage mode) the DB
    connection, and record client capability / redirect headers.
    """
    ##We need to init CFG and Logging
    options = req.get_options()
    # if we are initializing out of a <Location> handler don't
    # freak out
    if not options.has_key("RHNComponentType"):
        # clearly nothing to do
        return apache.OK
    initCFG(options["RHNComponentType"])
    initLOG(CFG.LOG_FILE, CFG.DEBUG)
    """ parse the request, init database and figure out what can we call """
    log_debug(2, req.the_request)
    # call method from inherited class
    ret = apacheSession.headerParserHandler(self, req)
    if ret != apache.OK:
        return ret
    # make sure we have DB connection
    if not CFG.SEND_MESSAGE_TO_ALL:
        try:
            rhnSQL.initDB()
        except rhnSQL.SQLConnectError:
            # Mail a traceback and fail the request; the DB is required
            rhnTB.Traceback(mail=1, req=req, severity="schema")
            return apache.HTTP_INTERNAL_SERVER_ERROR
    else:
        # If in outage mode, close the DB connections
        rhnSQL.closeDB()
    # Store client capabilities
    client_cap_header = 'X-RHN-Client-Capability'
    if req.headers_in.has_key(client_cap_header):
        client_caps = req.headers_in[client_cap_header]
        # Comma-separated list; strip whitespace and drop empty entries
        client_caps = filter(None, map(string.strip,
            string.split(client_caps, ",")))
        rhnCapability.set_client_capabilities(client_caps)
    #Enabling the input header flags associated with the redirects/newer clients
    redirect_support_flags = ['X-RHN-Redirect', 'X-RHN-Transport-Capability']
    for flag in redirect_support_flags:
        if req.headers_in.has_key(flag):
            rhnFlags.set(flag, str(req.headers_in[flag]))
    return apache.OK
def __call__(self, req):
    """Dispatch an apache request to the handler method named self.__name
    on the lazily-created shared handler object.

    On the first call (self.__init) the config component is derived from
    the request before anything else is imported/initialized.  Any
    exception from the handler is reported via Traceback and converted to
    a 500 response.
    """
    # NOTE: all imports done here due to required initialization of
    # of the configuration module before all others.
    # Initialization is dependent on RHNComponentType in the
    # req object.
    if self.__init:
        from apacheHandler import getComponentType
        # We cannot trust the config files to tell us if we are in the
        # broker or in the redirect because we try to always pass
        # upstream all requests
        componentType = getComponentType(req)
        initCFG(componentType)
        initLOG(CFG.LOG_FILE, CFG.DEBUG)
        log_debug(1, 'New request, component %s' % (componentType, ))
    # Instantiate the handlers
    if HandlerWrap.svrHandlers is None:
        # Shared (class-level) handler object, built once per process
        HandlerWrap.svrHandlers = self.get_handler_factory(req)()
    if self.__init:
        # Set the component type
        HandlerWrap.svrHandlers.set_component(componentType)
    try:
        log_setreq(req)
        if hasattr(HandlerWrap.svrHandlers, self.__name):
            f = getattr(HandlerWrap.svrHandlers, self.__name)
            ret = f(req)
        else:
            raise Exception("Class has no attribute %s" % self.__name)
    except:
        # Catch-all: report and return a 500 rather than leaking the error
        Traceback(self.__name, req, extra = "Unhandled exception type",
            severity="unhandled")
        return apache.HTTP_INTERNAL_SERVER_ERROR
    else:
        return ret
def headerParserHandler(self, req):
    """mod_python header-parser phase for the satellite exporter.

    Initializes config/logging; if a system-wide outage message is
    configured, short-circuits every request with that message.  Otherwise
    opens the DB and dispatches to the server handler class named by the
    SERVER apache option.
    """
    log_setreq(req)
    self.start_time = time.time()
    # init configuration options with proper component
    options = req.get_options()
    # if we are initializing out of a <Location> handler don't
    # freak out
    if not options.has_key("RHNComponentType"):
        # clearly nothing to do
        return apache.OK
    initCFG(options["RHNComponentType"])
    initLOG(CFG.LOG_FILE, CFG.DEBUG)
    # short-circuit everything if sending a system-wide message.
    if CFG.SEND_MESSAGE_TO_ALL:
        # Drop the database connection
        try:
            rhnSQL.closeDB()
        except:
            pass
        # Fetch global message being sent to clients if applicable.
        msg = open(CFG.MESSAGE_TO_ALL).read()
        log_debug(3, "Sending message to all clients: %s" % msg)
        return self._send_xmlrpc(req, rhnFault(-1,
            _("IMPORTANT MESSAGE FOLLOWS:\n%s") % msg, explain=0))
    rhnSQL.initDB(CFG.DEFAULT_DB)
    self.server = options["SERVER"]
    self.server_classes = rhnImport.load("satellite_exporter/handlers")
    if not self.server_classes.has_key(self.server):
        # XXX do something interesting here
        log_error("Missing server", self.server)
        return apache.HTTP_NOT_FOUND
    return self._wrapper(req, self._headerParserHandler)
def setUp(self):
    # Test fixture: initialize the server.xmlrpc config component, then
    # open a database connection (DB is a module-level connect string).
    # initCFG must run first so rhnSQL sees the right configuration.
    initCFG("server.xmlrpc")
    rhnSQL.initDB(DB)
class ShortPackageCache(BasePackageCache):
    # Cache for abbreviated package metadata; stored uncompressed.
    _subdir = "short-packages"
    _compressed = 0


class PackageCache(BasePackageCache):
    # Cache for full package metadata.
    _subdir = "packages"


class SourcePackageCache(BasePackageCache):
    # Cache for source package metadata.
    _subdir = "source-packages"


class ErratumCache(BaseCache):
    # Cache for errata, sharded into subdirectories by a hash of the id.
    _subdir = "errata"

    def _get_key(self, object_id):
        # hash_object_id is defined elsewhere in this module; the second
        # argument presumably selects the hashing depth -- TODO confirm
        hash_val = hash_object_id(object_id, 1)
        return os.path.join("satsync", self._subdir, hash_val,
            str(object_id))


class KickstartableTreesCache(BaseCache):
    # Cache for kickstartable trees, keyed directly by the tree label.
    _subdir = "kickstartable-trees"

    def _get_key(self, object_id):
        # normpath collapses any relative components in the label
        return os.path.normpath(os.path.join("satsync", self._subdir,
            object_id))


if __name__ == '__main__':
    # Smoke test: store one entry through PackageCache and read it back
    from spacewalk.common import initCFG
    initCFG("server.satellite")
    c = PackageCache()
    pid = 'package-12345'
    c.cache_set(pid, {'a' : 1, 'b' : 2})
    print c.cache_get(pid)
# This software is licensed to you under the GNU General Public License, # version 2 (GPLv2). There is NO WARRANTY for this software, express or # implied, including the implied warranties of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2 # along with this software; if not, see # http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. # # Red Hat trademarks are not licensed under GPLv2. No permission is # granted to use or replicate Red Hat trademarks that are incorporated # in this software or its documentation. # from spacewalk.server import rhnSQL from spacewalk.common import initCFG, initLOG from spacewalk.server.xmlrpc import queue initLOG("stderr", 4) initCFG('server.xmlrpc') rhnSQL.initDB('rhnuser/rhnuser@webdev') q = queue.Queue() if 1: systemid = open("../../test/backend/checks/systemid-farm06").read() print q.get(systemid, version=2) else: q.server_id = 1003485791 q._invalidate_failed_prereq_actions() #rhnSQL.rollback() rhnSQL.commit()
def main():
    """Entry point for the channel dump driver.

    Parses the command line, resolves the requested actions plus their
    dependencies (e.g. dumping packages requires dumping channels first),
    and hands the resulting action set to Dumper.
    """
    optionsTable = [
        Option('-s','--server', action='store',
            help="Server to take the dumps from"),
        Option('-m','--mountpoint', action='store',
            help="Mount point for saving data"),
        Option( '--snapshot-tag', action='store',
            help='Use this snapshot tag (for snapshotting or dumping channels'),
        Option( '--incremental', action='store_true',
            help='Incremental channel dump (relative to the snapshot)'),
        Option('-c','--channel', action='append',
            help='Process packages for this channel only'),
        Option('-f','--force', action='store_true',
            help="Force the overwrite of contents"),
        Option( '--blacklists', action='store_true',
            help="Dump blacklists only"),
        Option( '--arches', action='store_true',
            help="Dump arches only"),
        Option( '--channelfamilies', action='store_true',
            help="Dump channel families only"),
        Option( '--snapshot', action='store_true',
            help="Snapshot only"),
        Option( '--no-snapshot', action='store_true',
            help="Do _not_ re-snapshot"),
        Option( '--channels', action='store_true',
            help="Dump channels only"),
        Option( '--shortpackages', action='store_true',
            help="Dump short package information only"),
        Option( '--packages', action='store_true',
            help="Dump package information only"),
        Option( '--source-packages', action='store_true',
            help="Dump source package information only",
            dest="source_packages"),
        Option( '--rpms', action='store_true',
            help="Dump binary rpms"),
        Option( '--srpms', action='store_true',
            help="Dump source rpms"),
        Option( '--errata', action='store_true',
            help="Dump errata only"),
        Option( '--ksdata', action='store_true',
            help='Dump kickstart data only'),
        Option( '--ksfiles', action='store_true',
            help='Dump kickstart files only'),
    ]
    initCFG('server')
    optionParser = OptionParser(option_list=optionsTable)
    options, args = optionParser.parse_args()
    if not options.server:
        print "Error: --server not specified"
        return
    mountPoint = options.mountpoint
    if not mountPoint:
        print "Error: mount point not specified. Please use -m or --mountpoint"
        return
    # Default to all channels when none are given on the command line
    options.channel = options.channel or all_channels()
    # Max compression
    compression = 9
    # Figure out which actions to execute.  Each action maps to the list
    # of actions it depends on (None = no dependencies).
    all_actions = {
        "arches" : None,
        "blacklists" : None,
        "channelfamilies" : None,
        "snapshot" : None,
        "channels" : None,
        "packages" : ["channels", "shortpackages"],
        "shortpackages" : ["channels"],
        "source_packages" : ["channels"],
        "errata" : ["channels"],
        "rpms" : ["channels", "shortpackages"],
        "srpms" : ["channels"],
        "ksdata" : ["channels"],
        "ksfiles" : ["ksdata"],
    }
    # Get the list of actions they've checked
    cmdline_actions = filter(lambda x, options=options: getattr(options, x),
        all_actions.keys())
    if cmdline_actions:
        # Transitively pull in every dependency of the requested actions
        actions = {}
        while cmdline_actions:
            action = cmdline_actions.pop()
            if actions.has_key(action):
                # We've processed this action already
                continue
            # Add all the actions this one depends upon
            cmdline_actions.extend(all_actions[action] or [])
            actions[action] = all_actions[action]
    else:
        # No action specified, default to all
        actions = all_actions
    if (options.incremental or options.no_snapshot) and 'snapshot' in actions:
        # Explicitly required not to snapshot
        del actions['snapshot']
    if 'channels' in actions or 'snapshot' in actions:
        # Did we get a snapshot?
        if not options.snapshot_tag:
            print "Error: need to specify a snapshot tag"
            return
    dumper = Dumper(options.server, actions, options=options,
        compression=compression)
    dumper.init()
    dumper.run()
def setUp(self):
    # Test fixture: initialize the 'server' config component, then open a
    # database connection (DB is a module-level connect string).
    # initCFG must run first so rhnSQL sees the right configuration.
    initCFG("server")
    rhnSQL.initDB(DB)
        # NOTE(review): tail of a method whose definition starts before
        # this chunk; returns the first element of the (already unpacked)
        # response parameters.
        return params[0]

    def __getattr__(self, name):
        # Any unknown attribute becomes a remote method proxy bound to
        # this shelf's request machinery.
        log_debug(6, name)
        return _Method(self.__request, name)

    def __str__(self):
        return "<Remote-Shelf instance at %s>" % id(self)

#-------------------------------------------------------------------------------
# test code
if __name__ == '__main__':
    # Manual smoke test against a shelf server on localhost:9999
    from spacewalk.common import initCFG
    initCFG("proxy.broker")
    s = Shelf(('localhost', 9999))
    s['1234'] = [1, 2, 3, 4, None, None]
    s['blah'] = 'testing 1 2 3'
    print 'Cached object s["1234"] = %s' % str(s['1234'])
    print 'Cached object s["blah"] = %s' % str(s['blah'])
    print s.has_key("asdfrasdf")
#    print
#    print 'And this will bomb (attempt to get non-existant data:'
#    s["DOESN'T EXIST!!!"]
#-------------------------------------------------------------------------------
# # Copyright (c) 2008--2010 Red Hat, Inc. # # This software is licensed to you under the GNU General Public License, # version 2 (GPLv2). There is NO WARRANTY for this software, express or # implied, including the implied warranties of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2 # along with this software; if not, see # http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. # # Red Hat trademarks are not licensed under GPLv2. No permission is # granted to use or replicate Red Hat trademarks that are incorporated # in this software or its documentation. # from spacewalk.common import initCFG from spacewalk.server import rhnSQL, rhnServer initCFG("server.xmlrpc") rhnSQL.initDB("rhnuser/rhnuser@webdev") print rhnServer.search(1003485567).fetch_registration_message() print rhnServer.search(1003485558).fetch_registration_message() print rhnServer.search(1003485584).fetch_registration_message()
def main(arglist):
    """Copy satellite export data (arches, channels, packages, rpms,
    errata, kickstart data/files) from one disk repository to another,
    re-dumping metadata and hard-linking package files.
    """
    optionsTable = [
        Option('-m', '--mountpoint', action='store',
            help="mount point"),
        Option('-o', '--output', action='store',
            help='output directory'),
        Option('-c', '--channel', action='append',
            help='process data for this channel only'),
        Option('-p', '--printconf', action='store_true',
            help='print the configuration and exit'),
        Option('-f', '--force', action='store_true',
            help="force the overwrite of contents"),
        Option( '--arches', action='store_true',
            help="copy arches only"),
        Option( '--arches-extra', action='store_true',
            help="copy extra arches only"),
        Option( '--blacklists', action='store_true',
            help="copy blacklists only"),
        Option( '--channelfamilies', action='store_true',
            help="copy channel families only"),
        Option( '--channels', action='store_true',
            help="copy channels only"),
        Option( '--packages', action='store_true',
            help="copy package information only"),
        Option( '--shortpackages', action='store_true',
            help="copy short package information only"),
        Option( '--sourcepackages', action='store_true',
            help="copy source package information only"),
        Option( '--errata', action='store_true',
            help="copy errata only"),
        Option( '--rpms', action='store_true',
            help="copy only the rpm packages"),
        Option( '--srpms', action='store_true',
            help="copy only the source rpm packages"),
        Option( '--ksdata', action='store_true',
            help="copy only the kickstart metainformation"),
        Option( '--ksfiles', action='store_true',
            help="copy only the kickstart files"),
    ]
    optionParser = OptionParser(option_list=optionsTable)
    options, args = optionParser.parse_args()
    # Init the config
    initCFG("server.satellite")
    if options.printconf:
        CFG.show()
        return
    # Figure out which actions to execute
    # NOTE(review): 'srpms' and 'sourcepackages' are NOT in this list, so
    # --srpms/--sourcepackages alone never enable their actions below --
    # looks like a bug; confirm against the original file.
    allactions = ["arches", "arches_extra", "blacklists", "channelfamilies",
        "channels", "shortpackages", "packages", "errata", "rpms",
        "ksdata", "ksfiles"]
    actions = filter(lambda x, options=options: getattr(options, x),
        allactions)
    # If nothing specified on the command line, default to all the actions
    if not actions:
        actions = allactions
    mountPoint = options.mountpoint
    if not mountPoint:
        print "Error: mount point not specified. Please use -m or --mountpoint"
        return
    destMountPoint = options.output
    if not destMountPoint:
        print "Error: output directory not specified. Please use -o or --output"
        return
    force = options.force
    # Simple actions: one (disk source, dumper) pair each
    mappings = {
        'arches' : (
            xmlDiskSource.ArchesDiskSource,
            xmlDiskDumper.ArchesDumper,
        ),
        'arches_extra' : (
            xmlDiskSource.ArchesExtraDiskSource,
            xmlDiskDumper.ArchesExtraDumper,
        ),
        'blacklists' : (
            xmlDiskSource.BlacklistsDiskSource,
            xmlDiskDumper.BlacklistsDumper,
        ),
        'channelfamilies' : (
            xmlDiskSource.ChannelFamilyDiskSource,
            xmlDiskDumper.ChannelFamilyDumper,
        ),
    }
    for action in mappings.keys():
        if action in actions:
            print "Copying %s information" % action
            source_class, dumper_class = mappings[action]
            source = source_class(mountPoint)
            stream = source.load()
            dumper = dumper_class(destMountPoint, compression=9,
                inputStream=stream)
            dumper.dump(force=force)
    channels = options.channel
    if not channels:
        # No channel specified on the command line. Poke at the metadata
        # repository to see what we have available
        channel_source = xmlDiskSource.ChannelDiskSource(mountPoint)
        channels = channel_source.list()
    if "channels" in actions:
        print "Copying channels: %s" % (channels, )
        channel_source = xmlDiskSource.ChannelDiskSource(mountPoint)
        dumper = xmlDiskDumper.ChannelDumper(destMountPoint, compression=9)
        for channel in channels:
            channel_source.setChannel(channel)
            stream = channel_source.load()
            dumper.setChannel(channel)
            dumper.setInputStream(stream)
            dumper.dump(force=force)
    handler = getHandler()
    # Per-channel package metadata actions; each is removed from `actions`
    # once processed so the loop terminates
    while 1:
        if "shortpackages" in actions:
            print "Copying short packages for channels: %s" % (channels, )
            ps = xmlDiskSource.ShortPackageDiskSource(mountPoint)
            dumper = xmlDiskDumper.ShortPackageDumper(destMountPoint,
                compression=9)
            actions.remove("shortpackages")
            _dump_channel_objects(dumper, ps, mountPoint, channels, handler,
                sources=0, all=1, force=force)
        elif "packages" in actions:
            print "Copying packages for channels: %s" % (channels, )
            ps = xmlDiskSource.PackageDiskSource(mountPoint)
            dumper = xmlDiskDumper.PackageDumper(destMountPoint,
                compression=9)
            actions.remove("packages")
            _dump_channel_objects(dumper, ps, mountPoint, channels, handler,
                sources=0, all=0, force=force)
        elif "sourcepackages" in actions:
            print "Copying source packages for channels: %s" % (channels, )
            ps = xmlDiskSource.SourcePackageDiskSource(mountPoint)
            dumper = xmlDiskDumper.SourcePackageDumper(destMountPoint,
                compression=9)
            actions.remove("sourcepackages")
            _dump_channel_objects(dumper, ps, mountPoint, channels, handler,
                sources=1, all=0, force=force)
        else:
            # We're done
            break
    # rpm/srpm payloads: hard-link the files instead of copying
    while 1:
        if "rpms" in actions:
            action = "rpms"
            sources = 0
        elif "srpms" in actions:
            action = "srpms"
            sources = 1
        else:
            break
        actions.remove(action)
        print "Copying %s for channels: %s" % (action, channels, )
        # Collect the union of package ids across all channels
        ids = {}
        for channel in channels:
            # List the packages for this channel
            plist = listChannelPackages(mountPoint, channel, handler,
                sources=sources)
            for pkgid in plist:
                ids[pkgid] = None
        pkgIds = ids.keys()
        pkgIds.sort()
        del ids
        for pkg in pkgIds:
            srcfile = rpmsPath(pkg, mountPoint, sources=sources)
            if not os.path.exists(srcfile):
                print "File %s does not exist!" % srcfile
                continue
            destfile = rpmsPath(pkg, destMountPoint, sources=sources)
            dirname = os.path.dirname(destfile)
            if not os.path.isdir(dirname):
                os.makedirs(dirname)
            if force and os.path.exists(destfile):
                os.unlink(destfile)
            if not os.path.exists(destfile):
                # Hard-link the rpm itself, to avoid wasting disk space
                os.link(srcfile, destfile)
    if "errata" in actions:
        print "Copying errata for channels: %s" % (channels, )
        # Union of errata ids across all channels
        errata = {}
        for channel in channels:
            for err in listChannelErrata(mountPoint, channel, handler):
                errata[err] = None
        errata = errata.keys()
        errata.sort()
        errata_source = xmlDiskSource.ErrataDiskSource(mountPoint)
        dumper = xmlDiskDumper.ErrataDumper(destMountPoint, compression=9)
        dumper.prune(errata)
        for err in errata:
            errata_source.setID(err)
            stream = errata_source.load()
            dumper.setID(err)
            dumper.setInputStream(stream)
            dumper.dump(force=force)
    if "ksdata" in actions:
        print "Copying kickstart data: %s" % (channels, )
        ksdata_source = xmlDiskSource.KickstartDataDiskSource(mountPoint)
        dumper = xmlDiskDumper.KickstartDataDumper(destMountPoint,
            compression=9)
        ks_tree_labels = get_kickstart_labels(mountPoint, channels)
        for ks_tree_label in ks_tree_labels:
            ksdata_source.setID(ks_tree_label)
            stream = ksdata_source.load()
            dumper.setID(ks_tree_label)
            dumper.setInputStream(stream)
            dumper.dump(force=force)
    if "ksfiles" in actions:
        print "Copying kickstart files: %s" % (channels, )
        ks_files_src = xmlDiskSource.KickstartFileDiskSource(mountPoint)
        ks_files_dest = xmlDiskSource.KickstartFileDiskSource(destMountPoint)
        # Load data from disk
        ks_tree_labels = get_kickstart_labels(mountPoint, channels)
        handler = xmlSource.getHandler()
        for ks_tree_label in ks_tree_labels:
            ks_tree = getKickstartTree(mountPoint, ks_tree_label, handler)
            if ks_tree is None:
                continue
            ks_label = ks_tree['label']
            ks_files_src.setID(ks_label)
            ks_files_dest.setID(ks_label)
            for ks_file in (ks_tree.get('files') or []):
                relative_path = ks_file['relative_path']
                ks_files_src.set_relative_path(relative_path)
                src_path = ks_files_src._getFile()
                if not os.path.exists(src_path):
                    print "Could not find file %s" % src_path
                    continue
                ks_files_dest.set_relative_path(relative_path)
                dest_path = ks_files_dest._getFile(create=1)
                if force and os.path.exists(dest_path):
                    os.unlink(dest_path)
                if not os.path.exists(dest_path):
                    # Hard-link the file, to avoid wasting disk space
                    os.link(src_path, dest_path)
        # NOTE(review): placement of this close is ambiguous in the
        # mangled original; it closes the handler obtained just above --
        # confirm against the original file's indentation.
        handler.close()
""" import os from spacewalk.server import rhnSQL from spacewalk.common import initCFG, CFG from spacewalk.satellite_tools.satCerts import store_rhnCryptoKey from spacewalk.satellite_tools.satCerts import _querySelectCryptoCertInfo print "NOTE: has to be performed on an RHN Satellite or server" description = 'RHN-ORG-TRUSTED-SSL-CERT' initCFG('server.satellite') rhnSQL.initDB(CFG.DEFAULT_DB) def deleteCertRow(): # get rhn_cryptokey_id (there can only be one, bugzilla: 120297) h = rhnSQL.prepare(_querySelectCryptoCertInfo) h.execute(description=description, org_id=1) row = h.fetchone_dict() if row: rhn_cryptokey_id = int(row['id']) print 'found a cert, nuking it! id:', rhn_cryptokey_id h = rhnSQL.prepare('delete rhnCryptoKey where id=:rhn_cryptokey_id') h.execute(rhn_cryptokey_id=rhn_cryptokey_id) rhnSQL.commit() # bugzilla: 127324 - segfaults if you remove next line (if no delete in
def main(self):
    """Entry point for the repo sync tool: set up config, DB and logging,
    resolve the repo URLs for the requested channel, then import packages
    from each URL via the configured plugin and commit.
    """
    initCFG('server')
    db_string = CFG.DEFAULT_DB #"rhnsat/rhnsat@rhnsat"
    rhnSQL.initDB(db_string)
    (options, args) = self.process_args()
    # Per-channel, timestamped log file; plain reposync.log otherwise
    log_filename = 'reposync.log'
    if options.channel_label:
        date = time.localtime()
        datestr = '%d.%02d.%02d-%02d:%02d:%02d' % (
            date.tm_year, date.tm_mon, date.tm_mday,
            date.tm_hour, date.tm_min, date.tm_sec)
        log_filename = options.channel_label + '-' + datestr + '.log'
    rhnLog.initLOG(default_log_location + log_filename)
    #os.fchown isn't in 2.4 :/
    os.system("chgrp apache " + default_log_location + log_filename)
    # `quit` collects fatal argument errors; we exit after logging them
    quit = False
    if not options.url:
        if options.channel_label:
            # TODO:need to look at user security across orgs
            h = rhnSQL.prepare("""select s.source_url
                  from rhnContentSource s,
                       rhnChannelContentSource cs,
                       rhnChannel c
                 where s.id = cs.source_id
                   and cs.channel_id = c.id
                   and c.label = :label""")
            h.execute(label=options.channel_label)
            source_urls = h.fetchall_dict() or []
            if source_urls:
                self.urls = [row['source_url'] for row in source_urls]
            else:
                quit = True
                self.error_msg("Channel has no URL associated")
    else:
        self.urls = [options.url]
    if not options.channel_label:
        quit = True
        self.error_msg("--channel must be specified")
    self.log_msg("\nSync started: %s" % (time.asctime(time.localtime())))
    self.log_msg(str(sys.argv))
    if quit:
        sys.exit(1)
    self.type = options.type
    self.channel_label = options.channel_label
    self.fail = options.fail
    self.quiet = options.quiet
    self.channel = self.load_channel()
    if not self.channel or not rhnChannel.isCustomChannel(self.channel['id']):
        # Only custom channels may be synced from external repos
        print "Channel does not exist or is not custom"
        sys.exit(1)
    for url in self.urls:
        plugin = self.load_plugin()(url, self.channel_label)
        self.import_packages(plugin, url)
    if self.regen:
        # Queue a repodata regeneration for the synced channel
        taskomatic.add_to_repodata_queue_for_channel_package_subscription(
            [self.channel_label], [], "server.app.yumreposync")
    self.update_date()
    rhnSQL.commit()
    self.print_msg("Sync complete")
# system imports import os import sys import string import shutil from operator import truth from rhnpush.uploadLib import UploadError try: from optparse import Option, OptionParser except ImportError: from optik import Option, OptionParser # RHN imports from spacewalk.common import CFG, initCFG from spacewalk.common.rhnLib import parseUrl initCFG('proxy.package_manager') # local imports import uploadLib # globals PREFIX = 'rhn' def main(): # Initialize a command-line processing object with a table of options optionsTable = [ Option('-v','--verbose', action='count', help='Increase verbosity'), Option('-d','--dir', action='store', help='Process packages from this directory'), Option('-c','--channel', action='append', help='Manage this channel'), Option('-n','--count', action='store', help='Process this number of headers per call', type='int'),